Commit 220efcf9 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-01-07

* tag 'mlx5-fixes-2021-01-07' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Fix memleak in mlx5e_create_l2_table_groups
  net/mlx5e: Fix two double free cases
  net/mlx5: Release devlink object if adev fails
  net/mlx5e: ethtool, Fix restriction of autoneg with 56G
  net/mlx5e: In skb build skip setting mark in switchdev mode
  net/mlx5: E-Switch, fix changing vf VLANID
  net/mlx5e: Fix SWP offsets when vlan inserted by driver
  net/mlx5e: CT: Use per flow counter when CT flow accounting is enabled
  net/mlx5: Use port_num 1 instead of 0 when delete a RoCE address
  net/mlx5e: Add missing capability check for uplink follow
  net/mlx5: Check if lag is supported before creating one
====================

Link: https://lore.kernel.org/r/20210107202845.470205-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 3545454c 5b0bb12c
@@ -626,6 +626,11 @@ bool mlx5e_rep_tc_update_skb(struct mlx5_cqe64 *cqe,
 	if (!reg_c0)
 		return true;
 
+	/* If reg_c0 is not equal to the default flow tag then skb->mark
+	 * is not supported and must be reset back to 0.
+	 */
+	skb->mark = 0;
+
 	priv = netdev_priv(skb->dev);
 	esw = priv->mdev->priv.eswitch;
 
@@ -118,16 +118,17 @@ struct mlx5_ct_tuple {
 	u16 zone;
 };
 
-struct mlx5_ct_shared_counter {
+struct mlx5_ct_counter {
 	struct mlx5_fc *counter;
 	refcount_t refcount;
+	bool is_shared;
 };
 
 struct mlx5_ct_entry {
 	struct rhash_head node;
 	struct rhash_head tuple_node;
 	struct rhash_head tuple_nat_node;
-	struct mlx5_ct_shared_counter *shared_counter;
+	struct mlx5_ct_counter *counter;
 	unsigned long cookie;
 	unsigned long restore_cookie;
 	struct mlx5_ct_tuple tuple;
@@ -394,13 +395,14 @@ mlx5_tc_ct_set_tuple_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec,
 }
 
 static void
-mlx5_tc_ct_shared_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
+mlx5_tc_ct_counter_put(struct mlx5_tc_ct_priv *ct_priv, struct mlx5_ct_entry *entry)
 {
-	if (!refcount_dec_and_test(&entry->shared_counter->refcount))
+	if (entry->counter->is_shared &&
+	    !refcount_dec_and_test(&entry->counter->refcount))
 		return;
 
-	mlx5_fc_destroy(ct_priv->dev, entry->shared_counter->counter);
-	kfree(entry->shared_counter);
+	mlx5_fc_destroy(ct_priv->dev, entry->counter->counter);
+	kfree(entry->counter);
 }
 
 static void
@@ -699,7 +701,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	attr->dest_ft = ct_priv->post_ct;
 	attr->ft = nat ? ct_priv->ct_nat : ct_priv->ct;
 	attr->outer_match_level = MLX5_MATCH_L4;
-	attr->counter = entry->shared_counter->counter;
+	attr->counter = entry->counter->counter;
 	attr->flags |= MLX5_ESW_ATTR_FLAG_NO_IN_PORT;
 
 	mlx5_tc_ct_set_tuple_match(netdev_priv(ct_priv->netdev), spec, flow_rule);
@@ -732,13 +734,34 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv,
 	return err;
 }
 
-static struct mlx5_ct_shared_counter *
+static struct mlx5_ct_counter *
+mlx5_tc_ct_counter_create(struct mlx5_tc_ct_priv *ct_priv)
+{
+	struct mlx5_ct_counter *counter;
+	int ret;
+
+	counter = kzalloc(sizeof(*counter), GFP_KERNEL);
+	if (!counter)
+		return ERR_PTR(-ENOMEM);
+
+	counter->is_shared = false;
+	counter->counter = mlx5_fc_create(ct_priv->dev, true);
+	if (IS_ERR(counter->counter)) {
+		ct_dbg("Failed to create counter for ct entry");
+		ret = PTR_ERR(counter->counter);
+		kfree(counter);
+		return ERR_PTR(ret);
+	}
+
+	return counter;
+}
+
+static struct mlx5_ct_counter *
 mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 			      struct mlx5_ct_entry *entry)
 {
 	struct mlx5_ct_tuple rev_tuple = entry->tuple;
-	struct mlx5_ct_shared_counter *shared_counter;
-	struct mlx5_core_dev *dev = ct_priv->dev;
+	struct mlx5_ct_counter *shared_counter;
 	struct mlx5_ct_entry *rev_entry;
 	__be16 tmp_port;
 	int ret;
@@ -767,25 +790,20 @@ mlx5_tc_ct_shared_counter_get(struct mlx5_tc_ct_priv *ct_priv,
 	rev_entry = rhashtable_lookup_fast(&ct_priv->ct_tuples_ht, &rev_tuple,
 					   tuples_ht_params);
 	if (rev_entry) {
-		if (refcount_inc_not_zero(&rev_entry->shared_counter->refcount)) {
+		if (refcount_inc_not_zero(&rev_entry->counter->refcount)) {
 			mutex_unlock(&ct_priv->shared_counter_lock);
-			return rev_entry->shared_counter;
+			return rev_entry->counter;
 		}
 	}
 	mutex_unlock(&ct_priv->shared_counter_lock);
 
-	shared_counter = kzalloc(sizeof(*shared_counter), GFP_KERNEL);
-	if (!shared_counter)
-		return ERR_PTR(-ENOMEM);
-
-	shared_counter->counter = mlx5_fc_create(dev, true);
-	if (IS_ERR(shared_counter->counter)) {
-		ct_dbg("Failed to create counter for ct entry");
-		ret = PTR_ERR(shared_counter->counter);
-		kfree(shared_counter);
+	shared_counter = mlx5_tc_ct_counter_create(ct_priv);
+	if (IS_ERR(shared_counter)) {
+		ret = PTR_ERR(shared_counter);
 		return ERR_PTR(ret);
 	}
 
+	shared_counter->is_shared = true;
 	refcount_set(&shared_counter->refcount, 1);
 
 	return shared_counter;
 }
@@ -798,10 +816,13 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 {
 	int err;
 
-	entry->shared_counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
-	if (IS_ERR(entry->shared_counter)) {
-		err = PTR_ERR(entry->shared_counter);
-		ct_dbg("Failed to create counter for ct entry");
+	if (nf_ct_acct_enabled(dev_net(ct_priv->netdev)))
+		entry->counter = mlx5_tc_ct_counter_create(ct_priv);
+	else
+		entry->counter = mlx5_tc_ct_shared_counter_get(ct_priv, entry);
+
+	if (IS_ERR(entry->counter)) {
+		err = PTR_ERR(entry->counter);
 		return err;
 	}
 
@@ -820,7 +841,7 @@ mlx5_tc_ct_entry_add_rules(struct mlx5_tc_ct_priv *ct_priv,
 err_nat:
 	mlx5_tc_ct_entry_del_rule(ct_priv, entry, false);
 err_orig:
-	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+	mlx5_tc_ct_counter_put(ct_priv, entry);
 	return err;
 }
@@ -918,7 +939,7 @@ mlx5_tc_ct_del_ft_entry(struct mlx5_tc_ct_priv *ct_priv,
 	rhashtable_remove_fast(&ct_priv->ct_tuples_ht, &entry->tuple_node,
 			       tuples_ht_params);
 	mutex_unlock(&ct_priv->shared_counter_lock);
-	mlx5_tc_ct_shared_counter_put(ct_priv, entry);
+	mlx5_tc_ct_counter_put(ct_priv, entry);
 }
@@ -956,7 +977,7 @@ mlx5_tc_ct_block_flow_offload_stats(struct mlx5_ct_ft *ft,
 	if (!entry)
 		return -ENOENT;
 
-	mlx5_fc_query_cached(entry->shared_counter->counter, &bytes, &packets, &lastuse);
+	mlx5_fc_query_cached(entry->counter->counter, &bytes, &packets, &lastuse);
 	flow_stats_update(&f->stats, bytes, packets, 0, lastuse,
 			  FLOW_ACTION_HW_STATS_DELAYED);
@@ -371,6 +371,15 @@ struct mlx5e_swp_spec {
 	u8 tun_l4_proto;
 };
 
+static inline void mlx5e_eseg_swp_offsets_add_vlan(struct mlx5_wqe_eth_seg *eseg)
+{
+	/* SWP offsets are in 2-bytes words */
+	eseg->swp_outer_l3_offset += VLAN_HLEN / 2;
+	eseg->swp_outer_l4_offset += VLAN_HLEN / 2;
+	eseg->swp_inner_l3_offset += VLAN_HLEN / 2;
+	eseg->swp_inner_l4_offset += VLAN_HLEN / 2;
+}
+
 static inline void
 mlx5e_set_eseg_swp(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg,
 		   struct mlx5e_swp_spec *swp_spec)
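
As the new helper's comment notes, SWP offsets are counted in 2-byte
words, so the 4-byte 802.1Q tag (VLAN_HLEN) shifts every header start by
4 / 2 = 2 words. A self-contained sketch of that arithmetic, with an
illustrative stand-in for the WQE ethernet segment rather than the real
struct mlx5_wqe_eth_seg:

#include <stdio.h>

#define VLAN_HLEN 4	/* bytes added by an 802.1Q tag */

struct swp_offsets {
	unsigned char outer_l3, outer_l4, inner_l3, inner_l4;
};

static void swp_offsets_add_vlan(struct swp_offsets *o)
{
	/* Offsets are in 2-byte words: a 4-byte VLAN header moves
	 * every header start by VLAN_HLEN / 2 = 2 words. */
	o->outer_l3 += VLAN_HLEN / 2;
	o->outer_l4 += VLAN_HLEN / 2;
	o->inner_l3 += VLAN_HLEN / 2;
	o->inner_l4 += VLAN_HLEN / 2;
}

int main(void)
{
	/* Example offsets only; real values come from the parsed skb. */
	struct swp_offsets o = { .outer_l3 = 7, .outer_l4 = 17,
				 .inner_l3 = 32, .inner_l4 = 42 };

	swp_offsets_add_vlan(&o);
	printf("outer_l3=%u outer_l4=%u inner_l3=%u inner_l4=%u\n",
	       o.outer_l3, o.outer_l4, o.inner_l3, o.inner_l4);
	return 0;
}
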
@@ -51,7 +51,7 @@ static inline bool mlx5_geneve_tx_allowed(struct mlx5_core_dev *mdev)
 }
 
 static inline void
-mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
+mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 	struct mlx5e_swp_spec swp_spec = {};
 	unsigned int offset = 0;
@@ -85,6 +85,8 @@ mlx5e_tx_tunnel_accel(struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg)
 	}
 
 	mlx5e_set_eseg_swp(skb, eseg, &swp_spec);
+	if (skb_vlan_tag_present(skb) && ihs)
+		mlx5e_eseg_swp_offsets_add_vlan(eseg);
 }
 
 #else
@@ -163,7 +165,7 @@ static inline unsigned int mlx5e_accel_tx_ids_len(struct mlx5e_txqsq *sq,
 
 static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 				       struct sk_buff *skb,
-				       struct mlx5_wqe_eth_seg *eseg)
+				       struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
 #ifdef CONFIG_MLX5_EN_IPSEC
 	if (xfrm_offload(skb))
@@ -172,7 +174,7 @@ static inline bool mlx5e_accel_tx_eseg(struct mlx5e_priv *priv,
 
 #if IS_ENABLED(CONFIG_GENEVE)
 	if (skb->encapsulation)
-		mlx5e_tx_tunnel_accel(skb, eseg);
+		mlx5e_tx_tunnel_accel(skb, eseg, ihs);
 #endif
 
 	return true;
@@ -1010,6 +1010,22 @@ static int mlx5e_get_link_ksettings(struct net_device *netdev,
 	return mlx5e_ethtool_get_link_ksettings(priv, link_ksettings);
 }
 
+static int mlx5e_speed_validate(struct net_device *netdev, bool ext,
+				const unsigned long link_modes, u8 autoneg)
+{
+	/* Extended link-mode has no speed limitations. */
+	if (ext)
+		return 0;
+
+	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
+	    autoneg != AUTONEG_ENABLE) {
+		netdev_err(netdev, "%s: 56G link speed requires autoneg enabled\n",
+			   __func__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 static u32 mlx5e_ethtool2ptys_adver_link(const unsigned long *link_modes)
 {
 	u32 i, ptys_modes = 0;
@@ -1103,13 +1119,9 @@ int mlx5e_ethtool_set_link_ksettings(struct mlx5e_priv *priv,
 	link_modes = autoneg == AUTONEG_ENABLE ? ethtool2ptys_adver_func(adver) :
 		mlx5e_port_speed2linkmodes(mdev, speed, !ext);
 
-	if ((link_modes & MLX5E_PROT_MASK(MLX5E_56GBASE_R4)) &&
-	    autoneg != AUTONEG_ENABLE) {
-		netdev_err(priv->netdev, "%s: 56G link speed requires autoneg enabled\n",
-			   __func__);
-		err = -EINVAL;
+	err = mlx5e_speed_validate(priv->netdev, ext, link_modes, autoneg);
+	if (err)
 		goto out;
-	}
 
 	link_modes = link_modes & eproto.cap;
 	if (!link_modes) {
@@ -942,6 +942,7 @@ static int mlx5e_create_ttc_table_groups(struct mlx5e_ttc_table *ttc,
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
 
@@ -1087,6 +1088,7 @@ static int mlx5e_create_inner_ttc_table_groups(struct mlx5e_ttc_table *ttc)
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in) {
 		kfree(ft->g);
+		ft->g = NULL;
 		return -ENOMEM;
 	}
 
@@ -1390,6 +1392,7 @@ static int mlx5e_create_l2_table_groups(struct mlx5e_l2_table *l2_table)
 	ft->g[ft->num_groups] = NULL;
 	mlx5e_destroy_groups(ft);
 	kvfree(in);
+	kfree(ft->g);
 
 	return err;
 }
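
These three hunks are two double-free fixes plus one leak fix. A
standalone sketch of the pattern (illustrative names, userspace malloc
in place of the kernel allocators): free(NULL), like kfree(NULL), is a
no-op, so clearing the pointer right after freeing it keeps a later
generic cleanup path safe.

#include <stdlib.h>

struct flow_table { void **g; };

static int create_groups(struct flow_table *ft)
{
	void *in;

	ft->g = calloc(4, sizeof(*ft->g));
	if (!ft->g)
		return -1;

	in = malloc(64);
	if (!in) {
		free(ft->g);
		ft->g = NULL;	/* the fix: destroy_table() would otherwise double-free */
		return -1;
	}

	free(in);
	return 0;
}

static void destroy_table(struct flow_table *ft)
{
	free(ft->g);		/* free(NULL) is a harmless no-op */
	ft->g = NULL;
}

int main(void)
{
	struct flow_table ft = { 0 };

	create_groups(&ft);	/* may fail; the pointer stays consistent */
	destroy_table(&ft);	/* safe on both success and failure paths */
	return 0;
}
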
@@ -3161,7 +3161,8 @@ static void mlx5e_modify_admin_state(struct mlx5_core_dev *mdev,
 
 	mlx5_set_port_admin_status(mdev, state);
 
-	if (mlx5_eswitch_mode(mdev) != MLX5_ESWITCH_LEGACY)
+	if (mlx5_eswitch_mode(mdev) == MLX5_ESWITCH_OFFLOADS ||
+	    !MLX5_CAP_GEN(mdev, uplink_follow))
 		return;
 
 	if (state == MLX5_PORT_UP)
@@ -682,9 +682,9 @@ void mlx5e_tx_mpwqe_ensure_complete(struct mlx5e_txqsq *sq)
 
 static bool mlx5e_txwqe_build_eseg(struct mlx5e_priv *priv, struct mlx5e_txqsq *sq,
 				   struct sk_buff *skb, struct mlx5e_accel_tx_state *accel,
-				   struct mlx5_wqe_eth_seg *eseg)
+				   struct mlx5_wqe_eth_seg *eseg, u16 ihs)
 {
-	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg)))
+	if (unlikely(!mlx5e_accel_tx_eseg(priv, skb, eseg, ihs)))
 		return false;
 
 	mlx5e_txwqe_build_eseg_csum(sq, skb, accel, eseg);
@@ -714,7 +714,8 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	if (mlx5e_tx_skb_supports_mpwqe(skb, &attr)) {
 		struct mlx5_wqe_eth_seg eseg = {};
 
-		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg)))
+		if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &eseg,
+						     attr.ihs)))
 			return NETDEV_TX_OK;
 
 		mlx5e_sq_xmit_mpwqe(sq, skb, &eseg, netdev_xmit_more());
@@ -731,7 +732,7 @@ netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* May update the WQE, but may not post other WQEs. */
 	mlx5e_accel_tx_finish(sq, wqe, &accel,
 			      (struct mlx5_wqe_inline_seg *)(wqe->data + wqe_attr.ds_cnt_inl));
-	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth)))
+	if (unlikely(!mlx5e_txwqe_build_eseg(priv, sq, skb, &accel, &wqe->eth, attr.ihs)))
 		return NETDEV_TX_OK;
 
 	mlx5e_sq_xmit_wqe(sq, skb, &attr, &wqe_attr, wqe, pi, netdev_xmit_more());
@@ -95,22 +95,21 @@ int esw_acl_egress_lgcy_setup(struct mlx5_eswitch *esw,
 		return 0;
 	}
 
-	if (!IS_ERR_OR_NULL(vport->egress.acl))
-		return 0;
-
-	vport->egress.acl = esw_acl_table_create(esw, vport->vport,
-						 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
-						 table_size);
-	if (IS_ERR(vport->egress.acl)) {
-		err = PTR_ERR(vport->egress.acl);
-		vport->egress.acl = NULL;
-		goto out;
+	if (!vport->egress.acl) {
+		vport->egress.acl = esw_acl_table_create(esw, vport->vport,
+							 MLX5_FLOW_NAMESPACE_ESW_EGRESS,
+							 table_size);
+		if (IS_ERR(vport->egress.acl)) {
+			err = PTR_ERR(vport->egress.acl);
+			vport->egress.acl = NULL;
+			goto out;
+		}
+
+		err = esw_acl_egress_lgcy_groups_create(esw, vport);
+		if (err)
+			goto out;
 	}
 
-	err = esw_acl_egress_lgcy_groups_create(esw, vport);
-	if (err)
-		goto out;
-
 	esw_debug(esw->dev,
 		  "vport[%d] configure egress rules, vlan(%d) qos(%d)\n",
 		  vport->vport, vport->info.vlan, vport->info.qos);
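
A minimal sketch of the create-once guard introduced above (hypothetical
names): the table and its groups are now built together, and only when
the table does not yet exist, so re-running setup (for example after
changing the VF VLAN ID) reuses them instead of failing on a second
create.

#include <stdlib.h>

struct acl { int groups_ready; };

static struct acl *acl_table;	/* stands in for vport->egress.acl */

static int egress_setup(void)
{
	if (!acl_table) {
		acl_table = calloc(1, sizeof(*acl_table));
		if (!acl_table)
			return -1;
		acl_table->groups_ready = 1;	/* groups created once, with the table */
	}

	/* ...re-apply VLAN/qos rules against the existing table... */
	return 0;
}

int main(void)
{
	int err = egress_setup();	/* first call creates table + groups */

	if (!err)
		err = egress_setup();	/* second call reuses the table */
	free(acl_table);
	return err ? 1 : 0;
}
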
@@ -564,7 +564,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	struct mlx5_core_dev *tmp_dev;
 	int i, err;
 
-	if (!MLX5_CAP_GEN(dev, vport_group_manager))
+	if (!MLX5_CAP_GEN(dev, vport_group_manager) ||
+	    !MLX5_CAP_GEN(dev, lag_master) ||
+	    MLX5_CAP_GEN(dev, num_lag_ports) != MLX5_MAX_PORTS)
 		return;
 
 	tmp_dev = mlx5_get_next_phys_dev(dev);
@@ -582,12 +584,9 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
 	if (mlx5_lag_dev_add_pf(ldev, dev, netdev) < 0)
 		return;
 
-	for (i = 0; i < MLX5_MAX_PORTS; i++) {
-		tmp_dev = ldev->pf[i].dev;
-		if (!tmp_dev || !MLX5_CAP_GEN(tmp_dev, lag_master) ||
-		    MLX5_CAP_GEN(tmp_dev, num_lag_ports) != MLX5_MAX_PORTS)
+	for (i = 0; i < MLX5_MAX_PORTS; i++)
+		if (!ldev->pf[i].dev)
 			break;
-	}
 
 	if (i >= MLX5_MAX_PORTS)
 		ldev->flags |= MLX5_LAG_FLAG_READY;
@@ -1368,8 +1368,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 		MLX5_COREDEV_VF : MLX5_COREDEV_PF;
 
 	dev->priv.adev_idx = mlx5_adev_idx_alloc();
-	if (dev->priv.adev_idx < 0)
-		return dev->priv.adev_idx;
+	if (dev->priv.adev_idx < 0) {
+		err = dev->priv.adev_idx;
+		goto adev_init_err;
+	}
 
 	err = mlx5_mdev_init(dev, prof_sel);
 	if (err)
@@ -1403,6 +1405,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *id)
 	mlx5_mdev_uninit(dev);
 mdev_init_err:
 	mlx5_adev_idx_free(dev->priv.adev_idx);
+adev_init_err:
 	mlx5_devlink_free(devlink);
 	return err;
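
A userspace sketch of the unwind ladder this hunk extends (labels and
helpers are stand-ins, not the mlx5 functions): each failure jumps to
the label that releases everything acquired so far, in reverse order,
so the new adev_init_err label lets even the earliest failure free the
devlink object.

#include <errno.h>
#include <stdlib.h>

static int init_one(void)
{
	void *devlink, *idx, *mdev;
	int err;

	devlink = malloc(16);		/* ~ mlx5_devlink_alloc() earlier in init */
	if (!devlink)
		return -ENOMEM;

	idx = malloc(16);		/* ~ mlx5_adev_idx_alloc() */
	if (!idx) {
		err = -ENOMEM;
		goto adev_init_err;
	}

	mdev = malloc(16);		/* ~ mlx5_mdev_init() */
	if (!mdev) {
		err = -ENOMEM;
		goto mdev_init_err;
	}

	free(mdev);			/* success path of the sketch */
	free(idx);
	free(devlink);
	return 0;

mdev_init_err:
	free(idx);			/* ~ mlx5_adev_idx_free() */
adev_init_err:
	free(devlink);			/* ~ mlx5_devlink_free() */
	return err;
}

int main(void)
{
	return init_one() ? 1 : 0;
}
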
@@ -116,7 +116,7 @@ static int mlx5_rdma_enable_roce_steering(struct mlx5_core_dev *dev)
 
 static void mlx5_rdma_del_roce_addr(struct mlx5_core_dev *dev)
 {
 	mlx5_core_roce_gid_set(dev, 0, 0, 0,
-			       NULL, NULL, false, 0, 0);
+			       NULL, NULL, false, 0, 1);
 }
 
 static void mlx5_rdma_make_default_gid(struct mlx5_core_dev *dev, union ib_gid *gid)
@@ -1280,7 +1280,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
 	u8 ece_support[0x1];
 	u8 reserved_at_a4[0x7];
 	u8 log_max_srq[0x5];
-	u8 reserved_at_b0[0x2];
+	u8 reserved_at_b0[0x1];
+	u8 uplink_follow[0x1];
 	u8 ts_cqe_to_dest_cqn[0x1];
 	u8 reserved_at_b3[0xd];
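
Assuming mlx5_ifc's big-endian bit numbering (bit 0x0 is the most
significant bit of the first byte of the capability layout), splitting
reserved_at_b0[0x2] into a 1-bit reserved field plus uplink_follow[0x1]
keeps every later field at its old offset: 0xb0 stays reserved, 0xb1
becomes uplink_follow, 0xb2 is still ts_cqe_to_dest_cqn. A small
illustrative extractor over a raw capability dump:

#include <stdint.h>
#include <stdio.h>

enum {
	BIT_OFF_RESERVED_B0    = 0xb0,	/* still reserved */
	BIT_OFF_UPLINK_FOLLOW  = 0xb1,	/* the new capability bit */
	BIT_OFF_TS_CQE_TO_DEST = 0xb2,	/* unchanged by the split */
};

/* Read one bit from a big-endian capability dump. */
static unsigned int get_cap_bit(const uint8_t *caps, unsigned int bit_off)
{
	return (caps[bit_off / 8] >> (7 - bit_off % 8)) & 1;
}

int main(void)
{
	uint8_t caps[0x100 / 8] = { 0 };

	caps[0xb1 / 8] |= 1 << (7 - 0xb1 % 8);	/* pretend FW set uplink_follow */
	printf("uplink_follow=%u ts_cqe_to_dest_cqn=%u\n",
	       get_cap_bit(caps, BIT_OFF_UPLINK_FOLLOW),
	       get_cap_bit(caps, BIT_OFF_TS_CQE_TO_DEST));
	return 0;
}
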