Commit 064a1899 authored by David S. Miller

Merge tag 'mlx5-fixes-2019-11-20' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
Mellanox, mlx5 fixes 2019-11-20

This series introduces some fixes to the mlx5 driver.

Please pull and let me know if there is any problem.

For -stable v4.9:
 ('net/mlx5e: Fix set vf link state error flow')

For -stable v4.14:
 ('net/mlxfw: Verify FSM error code translation doesn't exceed array size')

For -stable v4.19:
 ('net/mlx5: Fix auto group size calculation')

For -stable v5.3:
 ('net/mlx5e: Fix error flow cleanup in mlx5e_tc_tun_create_header_ipv4/6')
 ('net/mlx5e: Do not use non-EXT link modes in EXT mode')
 ('net/mlx5: Update the list of the PCI supported devices')
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 84811412 30e9e055
@@ -239,12 +239,15 @@ int mlx5e_tc_tun_create_header_ipv4(struct mlx5e_priv *priv,
 	if (max_encap_size < ipv4_encap_size) {
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv4_encap_size, max_encap_size);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	encap_header = kzalloc(ipv4_encap_size, GFP_KERNEL);
-	if (!encap_header)
-		return -ENOMEM;
+	if (!encap_header) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule

@@ -355,12 +358,15 @@ int mlx5e_tc_tun_create_header_ipv6(struct mlx5e_priv *priv,
 	if (max_encap_size < ipv6_encap_size) {
 		mlx5_core_warn(priv->mdev, "encap size %d too big, max supported is %d\n",
 			       ipv6_encap_size, max_encap_size);
-		return -EOPNOTSUPP;
+		err = -EOPNOTSUPP;
+		goto out;
 	}
 
 	encap_header = kzalloc(ipv6_encap_size, GFP_KERNEL);
-	if (!encap_header)
-		return -ENOMEM;
+	if (!encap_header) {
+		err = -ENOMEM;
+		goto out;
+	}
 
 	/* used by mlx5e_detach_encap to lookup a neigh hash table
 	 * entry in the neigh hash table when a user deletes a rule
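
Note: both hunks above convert mid-function failure returns into a `goto out` unwind. By this point the function has already taken a reference during the route/neighbour lookup earlier in the flow, so a bare `return` leaks it. A minimal sketch of the pattern with hypothetical names (not the driver's actual cleanup code):

    #include <errno.h>

    struct ctx { int encap_size, max_encap_size; };

    static int acquire_neigh(struct ctx *c) { (void)c; return 0; } /* stand-in */
    static void release_neigh(struct ctx *c) { (void)c; }          /* stand-in */

    int setup_encap(struct ctx *c)
    {
        int err;

        err = acquire_neigh(c);     /* takes a reference */
        if (err)
            return err;             /* nothing held yet, plain return is safe */

        if (c->encap_size > c->max_encap_size) {
            err = -EOPNOTSUPP;      /* was "return -EOPNOTSUPP;", which leaked */
            goto out;
        }
        err = 0;                    /* success also flows through out: */
    out:
        release_neigh(c);           /* single cleanup point */
        return err;
    }
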
@@ -708,9 +708,9 @@ static int get_fec_supported_advertised(struct mlx5_core_dev *dev,
 
 static void ptys2ethtool_supported_advertised_port(struct ethtool_link_ksettings *link_ksettings,
 						   u32 eth_proto_cap,
-						   u8 connector_type)
+						   u8 connector_type, bool ext)
 {
-	if (!connector_type || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
+	if ((!connector_type && !ext) || connector_type >= MLX5E_CONNECTOR_TYPE_NUMBER) {
 		if (eth_proto_cap & (MLX5E_PROT_MASK(MLX5E_10GBASE_CR)
 				   | MLX5E_PROT_MASK(MLX5E_10GBASE_SR)
 				   | MLX5E_PROT_MASK(MLX5E_40GBASE_CR4)

@@ -842,9 +842,9 @@ static int ptys2connector_type[MLX5E_CONNECTOR_TYPE_NUMBER] = {
 	[MLX5E_PORT_OTHER] = PORT_OTHER,
 	};
 
-static u8 get_connector_port(u32 eth_proto, u8 connector_type)
+static u8 get_connector_port(u32 eth_proto, u8 connector_type, bool ext)
 {
-	if (connector_type && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
+	if ((connector_type || ext) && connector_type < MLX5E_CONNECTOR_TYPE_NUMBER)
 		return ptys2connector_type[connector_type];
 
 	if (eth_proto &

@@ -945,9 +945,9 @@ int mlx5e_ethtool_get_link_ksettings(struct mlx5e_priv *priv,
 	eth_proto_oper = eth_proto_oper ? eth_proto_oper : eth_proto_cap;
 
 	link_ksettings->base.port = get_connector_port(eth_proto_oper,
-						       connector_type);
+						       connector_type, ext);
 	ptys2ethtool_supported_advertised_port(link_ksettings, eth_proto_admin,
-					       connector_type);
+					       connector_type, ext);
 	get_lp_advertising(mdev, eth_proto_lp, link_ksettings);
 
 	if (an_status == MLX5_AN_COMPLETE)
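
Note: in extended (EXT) link-mode operation, a connector_type of 0 is a real reported value rather than "not supplied", so both helpers now take `ext` and fall back to deriving the port from the protocol mask only when connector_type is 0 outside EXT mode. A small sketch of the shared predicate (hypothetical helper name and stand-in constant):

    #include <stdbool.h>

    #define CONNECTOR_TYPE_NUMBER 10 /* stand-in for MLX5E_CONNECTOR_TYPE_NUMBER */

    bool connector_type_usable(unsigned char connector_type, bool ext)
    {
        /* legacy mode: 0 means "unknown", so derive the port elsewhere;
         * EXT mode: 0 is a legitimate connector value, so trust it
         */
        return (connector_type || ext) &&
               connector_type < CONNECTOR_TYPE_NUMBER;
    }
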
@@ -4252,9 +4252,12 @@ static netdev_features_t mlx5e_tunnel_features_check(struct mlx5e_priv *priv,
 
 	switch (proto) {
 	case IPPROTO_GRE:
+		return features;
 	case IPPROTO_IPIP:
 	case IPPROTO_IPV6:
-		return features;
+		if (mlx5e_tunnel_proto_supported(priv->mdev, IPPROTO_IPIP))
+			return features;
+		break;
 	case IPPROTO_UDP:
 		udph = udp_hdr(skb);
 		port = be16_to_cpu(udph->dest);
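
Note: only GRE keeps the unconditional `return features;` after this change. For IPIP/IPV6 tunnels the full feature set is advertised only when the device actually reports IP-in-IP offload support; otherwise the `break` drops to the function's trailing fallback, which masks off the checksum/GSO offload bits for the packet.
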
@@ -3268,7 +3268,20 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 
 			action |= MLX5_FLOW_CONTEXT_ACTION_FWD_DEST |
 				  MLX5_FLOW_CONTEXT_ACTION_COUNT;
-			if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
+			if (encap) {
+				parse_attr->mirred_ifindex[attr->out_count] =
+					out_dev->ifindex;
+				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
+				if (!parse_attr->tun_info[attr->out_count])
+					return -ENOMEM;
+				encap = false;
+				attr->dests[attr->out_count].flags |=
+					MLX5_ESW_DEST_ENCAP;
+				attr->out_count++;
+				/* attr->dests[].rep is resolved when we
+				 * handle encap
+				 */
+			} else if (netdev_port_same_parent_id(priv->netdev, out_dev)) {
 				struct mlx5_eswitch *esw = priv->mdev->priv.eswitch;
 				struct net_device *uplink_dev = mlx5_eswitch_uplink_get_proto_dev(esw, REP_ETH);
 				struct net_device *uplink_upper;

@@ -3310,19 +3323,6 @@ static int parse_tc_fdb_actions(struct mlx5e_priv *priv,
 				attr->dests[attr->out_count].rep = rpriv->rep;
 				attr->dests[attr->out_count].mdev = out_priv->mdev;
 				attr->out_count++;
-			} else if (encap) {
-				parse_attr->mirred_ifindex[attr->out_count] =
-					out_dev->ifindex;
-				parse_attr->tun_info[attr->out_count] = dup_tun_info(info);
-				if (!parse_attr->tun_info[attr->out_count])
-					return -ENOMEM;
-				encap = false;
-				attr->dests[attr->out_count].flags |=
-					MLX5_ESW_DEST_ENCAP;
-				attr->out_count++;
-				/* attr->dests[].rep is resolved when we
-				 * handle encap
-				 */
 			} else if (parse_attr->filter_dev != priv->netdev) {
 				/* All mlx5 devices are called to configure
 				 * high level device filters. Therefore, the
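
Note: these two hunks move the encap branch ahead of the `netdev_port_same_parent_id()` test. A tunnel device stacked on top of an mlx5 uplink can itself pass the same-parent-id check, in which case the rule would be offloaded as a plain mirred destination and the pending encapsulation silently dropped; testing `encap` first keeps such destinations on the encap path.
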
@@ -4000,9 +4000,8 @@ int mlx5e_tc_configure_matchall(struct mlx5e_priv *priv,
 				struct tc_cls_matchall_offload *ma)
 {
 	struct netlink_ext_ack *extack = ma->common.extack;
-	int prio = TC_H_MAJ(ma->common.prio) >> 16;
 
-	if (prio != 1) {
+	if (ma->common.prio != 1) {
 		NL_SET_ERR_MSG_MOD(extack, "only priority 1 is supported");
 		return -EINVAL;
 	}
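
Note: `ma->common.prio` now already carries the major priority number, so the deleted line shifted it a second time. Worked through for the only accepted value: `TC_H_MAJ(1) >> 16 == (1 & 0xFFFF0000) >> 16 == 0`, so the old `prio != 1` check rejected every matchall filter, including valid priority-1 ones.
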
@@ -2117,7 +2117,7 @@ int mlx5_eswitch_set_vport_state(struct mlx5_eswitch *esw,
 
 unlock:
 	mutex_unlock(&esw->state_lock);
-	return 0;
+	return err;
 }
 
 int mlx5_eswitch_get_vport_config(struct mlx5_eswitch *esw,
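
Note: `mlx5_eswitch_set_vport_state` already captured the result of the vport-state modify command in `err`, but the hard-coded `return 0` hid any failure from callers such as the VF link-state ndo; hence the -stable tag for 'net/mlx5e: Fix set vf link state error flow'.
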
@@ -579,7 +579,7 @@ static void del_sw_flow_group(struct fs_node *node)
 
 	rhashtable_destroy(&fg->ftes_hash);
 	ida_destroy(&fg->fte_allocator);
-	if (ft->autogroup.active)
+	if (ft->autogroup.active && fg->max_ftes == ft->autogroup.group_size)
 		ft->autogroup.num_groups--;
 	err = rhltable_remove(&ft->fgs_hash,
 			      &fg->hash,

@@ -1126,6 +1126,8 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
 
 	ft->autogroup.active = true;
 	ft->autogroup.required_groups = max_num_groups;
+	/* We save place for flow groups in addition to max types */
+	ft->autogroup.group_size = ft->max_fte / (max_num_groups + 1);
 
 	return ft;
 }

@@ -1328,8 +1330,7 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
 		return ERR_PTR(-ENOENT);
 
 	if (ft->autogroup.num_groups < ft->autogroup.required_groups)
-		/* We save place for flow groups in addition to max types */
-		group_size = ft->max_fte / (ft->autogroup.required_groups + 1);
+		group_size = ft->autogroup.group_size;
 
 	/* ft->max_fte == ft->autogroup.max_types */
 	if (group_size == 0)

@@ -1356,7 +1357,8 @@ static struct mlx5_flow_group *alloc_auto_flow_group(struct mlx5_flow_table *ft
 	if (IS_ERR(fg))
 		goto out;
 
-	ft->autogroup.num_groups++;
+	if (group_size == ft->autogroup.group_size)
+		ft->autogroup.num_groups++;
 
 out:
 	return fg;
@@ -162,6 +162,7 @@ struct mlx5_flow_table {
 	struct {
 		bool			active;
 		unsigned int		required_groups;
+		unsigned int		group_size;
 		unsigned int		num_groups;
 	} autogroup;
 	/* Protect fwd_rules */
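
Note: the autogroup size is now computed once at table creation and cached in `autogroup.group_size` instead of being re-derived on every group allocation. Worked example: with `max_fte = 1024` and `max_num_groups = 3`, `group_size = 1024 / (3 + 1) = 256`, the extra divisor slot reserving room beyond the declared groups. The paired hunks above then adjust `num_groups` only for groups of exactly this cached size, so groups created with a different, caller-driven size (including the size-1 fallback) no longer skew the `num_groups` accounting.
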
@@ -1566,6 +1566,7 @@ static const struct pci_device_id mlx5_core_pci_table[] = {
 	{ PCI_VDEVICE(MELLANOX, 0x101c), MLX5_PCI_DEV_IS_VF},	/* ConnectX-6 VF */
 	{ PCI_VDEVICE(MELLANOX, 0x101d) },			/* ConnectX-6 Dx */
 	{ PCI_VDEVICE(MELLANOX, 0x101e), MLX5_PCI_DEV_IS_VF},	/* ConnectX Family mlx5Gen Virtual Function */
+	{ PCI_VDEVICE(MELLANOX, 0x101f) },			/* ConnectX-6 LX */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d2) },			/* BlueField integrated ConnectX-5 network controller */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d3), MLX5_PCI_DEV_IS_VF},	/* BlueField integrated ConnectX-5 network controller VF */
 	{ PCI_VDEVICE(MELLANOX, 0xa2d6) },			/* BlueField-2 integrated ConnectX-6 Dx network controller */
@@ -595,6 +595,18 @@ static void dr_rule_clean_rule_members(struct mlx5dr_rule *rule,
 	}
 }
 
+static u16 dr_get_bits_per_mask(u16 byte_mask)
+{
+	u16 bits = 0;
+
+	while (byte_mask) {
+		byte_mask = byte_mask & (byte_mask - 1);
+		bits++;
+	}
+
+	return bits;
+}
+
 static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 				      struct mlx5dr_domain *dmn,
 				      struct mlx5dr_domain_rx_tx *nic_dmn)

@@ -607,6 +619,9 @@ static bool dr_rule_need_enlarge_hash(struct mlx5dr_ste_htbl *htbl,
 	if (!ctrl->may_grow)
 		return false;
 
+	if (dr_get_bits_per_mask(htbl->byte_mask) * BITS_PER_BYTE <= htbl->chunk_size)
+		return false;
+
 	if (ctrl->num_of_collisions >= ctrl->increase_threshold &&
 	    (ctrl->num_of_valid_entries - ctrl->num_of_collisions) >= ctrl->increase_threshold)
 		return true;
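
Note: `dr_get_bits_per_mask()` is Kernighan's popcount: each `byte_mask & (byte_mask - 1)` clears the lowest set bit. Trace for `byte_mask = 0x0005`: `0x0005 & 0x0004 = 0x0004` (bits = 1), then `0x0004 & 0x0003 = 0x0000` (bits = 2). Each set bit marks one byte that feeds the hash, so this mask supplies `2 * BITS_PER_BYTE = 16` distinguishable bits; once `chunk_size` (which appears to be the table's log2 size here) reaches that, a larger table cannot spread the entries any further, and the new check stops pointless growth.
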
@@ -700,6 +700,7 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	unsigned int irqn;
 	void *cqc, *in;
 	__be64 *pas;
+	int vector;
 	u32 i;
 
 	cq = kzalloc(sizeof(*cq), GFP_KERNEL);

@@ -728,7 +729,8 @@ static struct mlx5dr_cq *dr_create_cq(struct mlx5_core_dev *mdev,
 	if (!in)
 		goto err_cqwq;
 
-	err = mlx5_vector2eqn(mdev, smp_processor_id(), &eqn, &irqn);
+	vector = smp_processor_id() % mlx5_comp_vectors_count(mdev);
+	err = mlx5_vector2eqn(mdev, vector, &eqn, &irqn);
 	if (err) {
 		kvfree(in);
 		goto err_cqwq;
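
Note: `smp_processor_id()` returns a CPU index, not a completion-vector index. On a host with more CPUs than completion vectors (say 64 CPUs and 24 vectors), creating the CQ from CPU 40 previously passed 40 straight to `mlx5_vector2eqn()`, which fails for out-of-range vectors; `40 % 24 == 16` keeps the lookup within the valid range.
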
@@ -560,18 +560,6 @@ bool mlx5dr_ste_not_used_ste(struct mlx5dr_ste *ste)
 	return !refcount_read(&ste->refcount);
 }
 
-static u16 get_bits_per_mask(u16 byte_mask)
-{
-	u16 bits = 0;
-
-	while (byte_mask) {
-		byte_mask = byte_mask & (byte_mask - 1);
-		bits++;
-	}
-
-	return bits;
-}
-
 /* Init one ste as a pattern for ste data array */
 void mlx5dr_ste_set_formatted_ste(u16 gvmi,
 				  struct mlx5dr_domain_rx_tx *nic_dmn,

@@ -620,20 +608,12 @@ int mlx5dr_ste_create_next_htbl(struct mlx5dr_matcher *matcher,
 	struct mlx5dr_ste_htbl *next_htbl;
 
 	if (!mlx5dr_ste_is_last_in_rule(nic_matcher, ste->ste_chain_location)) {
-		u32 bits_in_mask;
 		u8 next_lu_type;
 		u16 byte_mask;
 
 		next_lu_type = MLX5_GET(ste_general, hw_ste, next_lu_type);
 		byte_mask = MLX5_GET(ste_general, hw_ste, byte_mask);
 
-		/* Don't allocate table more than required,
-		 * the size of the table defined via the byte_mask, so no need
-		 * to allocate more than that.
-		 */
-		bits_in_mask = get_bits_per_mask(byte_mask) * BITS_PER_BYTE;
-		log_table_size = min(log_table_size, bits_in_mask);
-
 		next_htbl = mlx5dr_ste_htbl_alloc(dmn->ste_icm_pool,
 						  log_table_size,
 						  next_lu_type,

@@ -671,7 +651,7 @@ static void dr_ste_set_ctrl(struct mlx5dr_ste_htbl *htbl)
 
 	htbl->ctrl.may_grow = true;
 
-	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1)
+	if (htbl->chunk_size == DR_CHUNK_SIZE_MAX - 1 || !htbl->byte_mask)
 		htbl->ctrl.may_grow = false;
 
 	/* Threshold is 50%, one is added to table of size 1 */
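
Note: these dr_ste.c hunks are the other half of the dr_rule.c change above. The byte_mask-based cap is removed from allocation time, so next tables are created at the size the matcher requested, and the limit is enforced at growth time instead via `dr_get_bits_per_mask()`, which moved to dr_rule.c. A table with `byte_mask == 0` has no bytes to hash on, so it is additionally marked as never allowed to grow.
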
@@ -66,6 +66,8 @@ static int mlxfw_fsm_state_wait(struct mlxfw_dev *mlxfw_dev, u32 fwhandle,
 		return err;
 
 	if (fsm_state_err != MLXFW_FSM_STATE_ERR_OK) {
+		fsm_state_err = min_t(enum mlxfw_fsm_state_err,
+				      fsm_state_err, MLXFW_FSM_STATE_ERR_MAX);
 		pr_err("Firmware flash failed: %s\n",
 		       mlxfw_fsm_state_err_str[fsm_state_err]);
 		NL_SET_ERR_MSG_MOD(extack, "Firmware flash failed");
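
Note: the firmware can report FSM error codes newer than this driver's string table, and indexing `mlxfw_fsm_state_err_str[]` with such a code reads past the array; clamping to `MLXFW_FSM_STATE_ERR_MAX` maps every unknown code onto the table's catch-all last entry. A self-contained sketch of the clamp-before-index pattern (hypothetical table, not the mlxfw one):

    #include <stdio.h>

    enum fw_err { FW_ERR_OK, FW_ERR_REJECTED, FW_ERR_MAX };

    static const char * const fw_err_str[] = {
        [FW_ERR_OK]       = "ok",
        [FW_ERR_REJECTED] = "rejected",
        [FW_ERR_MAX]      = "unknown error", /* catch-all for newer codes */
    };

    static const char *fw_err_to_str(enum fw_err e)
    {
        if (e > FW_ERR_MAX) /* clamp before using it as an array index */
            e = FW_ERR_MAX;
        return fw_err_str[e];
    }

    int main(void)
    {
        printf("%s\n", fw_err_to_str((enum fw_err)7)); /* -> "unknown error" */
        return 0;
    }
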