Commit 5a4cb546 authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2021-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2021-02-01

Please note that the first patch in this series
("Fix function calculation for page trees") fixes a regression
introduced by a previous fix in net that was not included in your
previous rc PR. I hope this series makes it into your next rc PR,
so mlx5 won't be broken in the next rc.

* tag 'mlx5-fixes-2021-02-01' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: Release skb in case of failure in tc update skb
  net/mlx5e: Update max_opened_tc also when channels are closed
  net/mlx5: Fix leak upon failure of rule creation
  net/mlx5: Fix function calculation for page trees
====================

Link: https://lore.kernel.org/r/20210202070703.617251-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f2539e14 a34ffec8
@@ -3627,12 +3627,10 @@ static int mlx5e_setup_tc_mqprio(struct mlx5e_priv *priv,
 
 	err = mlx5e_safe_switch_channels(priv, &new_channels,
 					 mlx5e_num_channels_changed_ctx, NULL);
-	if (err)
-		goto out;
 
-	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
-				    new_channels.params.num_tc);
 out:
+	priv->max_opened_tc = max_t(u8, priv->max_opened_tc,
+				    priv->channels.params.num_tc);
 	mutex_unlock(&priv->state_lock);
 	return err;
 }
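The hunk above moves the max_opened_tc update onto the common exit path, so the field now tracks the number of TCs actually configured even when the channel switch fails or the channels are closed. Below is a minimal user-space sketch of that behaviour; the demo_* names are illustrative stand-ins, not mlx5e symbols.

/*
 * Illustrative sketch only: models the control flow the hunk above creates.
 * demo_priv stands in for struct mlx5e_priv; the real logic lives in
 * mlx5e_setup_tc_mqprio().
 */
#include <stdio.h>

struct demo_priv {
	unsigned char max_opened_tc;	/* like priv->max_opened_tc */
	unsigned char num_tc;		/* like priv->channels.params.num_tc */
};

static int demo_setup_tc_mqprio(struct demo_priv *priv, unsigned char new_num_tc,
				int switch_err)
{
	int err = switch_err;			/* result of the simulated channel switch */

	if (!err)
		priv->num_tc = new_num_tc;	/* switch succeeded, channels reconfigured */

	/* "out:" path: runs on success and failure alike, as in the new code */
	if (priv->num_tc > priv->max_opened_tc)
		priv->max_opened_tc = priv->num_tc;
	return err;
}

int main(void)
{
	struct demo_priv priv = { .max_opened_tc = 1, .num_tc = 1 };

	demo_setup_tc_mqprio(&priv, 4, 0);	/* successful switch to 4 TCs */
	demo_setup_tc_mqprio(&priv, 8, -1);	/* failed switch: num_tc stays at 4 */
	printf("max_opened_tc = %u\n", (unsigned int)priv.max_opened_tc);	/* prints 4 */
	return 0;
}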
@@ -1262,8 +1262,10 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
 	if (mlx5e_cqe_regb_chain(cqe))
-		if (!mlx5e_tc_update_skb(cqe, skb))
+		if (!mlx5e_tc_update_skb(cqe, skb)) {
+			dev_kfree_skb_any(skb);
 			goto free_wqe;
+		}
 
 	napi_gro_receive(rq->cq.napi, skb);

@@ -1316,8 +1318,10 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 	if (rep->vlan && skb_vlan_tag_present(skb))
 		skb_vlan_pop(skb);
 
-	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+		dev_kfree_skb_any(skb);
 		goto free_wqe;
+	}
 
 	napi_gro_receive(rq->cq.napi, skb);

@@ -1371,8 +1375,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
-	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv))
+	if (!mlx5e_rep_tc_update_skb(cqe, skb, &tc_priv)) {
+		dev_kfree_skb_any(skb);
 		goto mpwrq_cqe_out;
+	}
 
 	napi_gro_receive(rq->cq.napi, skb);

@@ -1528,8 +1534,10 @@ static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cq
 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
 
 	if (mlx5e_cqe_regb_chain(cqe))
-		if (!mlx5e_tc_update_skb(cqe, skb))
+		if (!mlx5e_tc_update_skb(cqe, skb)) {
+			dev_kfree_skb_any(skb);
 			goto mpwrq_cqe_out;
+		}
 
 	napi_gro_receive(rq->cq.napi, skb);
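All four hunks above apply the same fix: if the TC metadata update on a received skb fails, the skb is now freed with dev_kfree_skb_any() before jumping to the cleanup label, which only releases the receive descriptor and previously left the skb leaked. A small user-space model of that error-handling pattern follows; the demo_* names are placeholders, not the driver's API.

/*
 * Illustrative model of the leak fix: the cleanup label recycles only the
 * descriptor, so the packet buffer must be freed explicitly on the error
 * path.  None of these names exist in mlx5e.
 */
#include <stdbool.h>
#include <stdlib.h>

struct demo_pkt { char *data; };

/* Stand-in for mlx5e_tc_update_skb(); returns false to simulate a failure. */
static bool demo_tc_update(struct demo_pkt *pkt) { (void)pkt; return false; }

/* Stand-in for the free_wqe / mpwrq_cqe_out labels: descriptor only. */
static void demo_release_wqe(void) { }

static void demo_free_pkt(struct demo_pkt *pkt)
{
	free(pkt->data);
	free(pkt);
}

static void demo_handle_rx(struct demo_pkt *pkt)
{
	if (!demo_tc_update(pkt)) {
		/* The fix: free the packet before bailing out; nothing on
		 * the cleanup path would release it otherwise. */
		demo_free_pkt(pkt);
		goto out;
	}
	demo_free_pkt(pkt);	/* models napi_gro_receive() consuming the skb */
out:
	demo_release_wqe();
}

int main(void)
{
	struct demo_pkt *pkt = malloc(sizeof(*pkt));

	if (!pkt)
		return 1;
	pkt->data = malloc(64);
	demo_handle_rx(pkt);
	return 0;
}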
@@ -1760,6 +1760,7 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		if (!fte_tmp)
 			continue;
 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte_tmp);
+		/* No error check needed here, because insert_fte() is not called */
 		up_write_ref_node(&fte_tmp->node, false);
 		tree_put_node(&fte_tmp->node, false);
 		kmem_cache_free(steering->ftes_cache, fte);

@@ -1812,6 +1813,8 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		up_write_ref_node(&g->node, false);
 		rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
 		up_write_ref_node(&fte->node, false);
+		if (IS_ERR(rule))
+			tree_put_node(&fte->node, false);
 		return rule;
 	}
 	rule = ERR_PTR(-ENOENT);

@@ -1910,6 +1913,8 @@ _mlx5_add_flow_rules(struct mlx5_flow_table *ft,
 	up_write_ref_node(&g->node, false);
 	rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte);
 	up_write_ref_node(&fte->node, false);
+	if (IS_ERR(rule))
+		tree_put_node(&fte->node, false);
 	tree_put_node(&g->node, false);
 	return rule;
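The second and third hunks above close the same reference leak: the caller holds a reference on the FTE node while add_rule_fg() runs, and when rule creation fails that reference was never dropped (the first hunk only adds a comment explaining why the earlier call site needs no such check). Below is a simple refcount model of the fix; the demo_* names are illustrative, not the fs_core API.

/*
 * Illustrative refcount model: on rule-creation failure the caller's
 * reference on the FTE must be put back, mirroring the added
 * "if (IS_ERR(rule)) tree_put_node(&fte->node, false);" lines.
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct demo_node { int refcount; };

static struct demo_node *demo_get_fte(void)
{
	struct demo_node *fte = calloc(1, sizeof(*fte));

	if (fte)
		fte->refcount = 1;	/* reference held by the caller */
	return fte;
}

static void demo_put(struct demo_node *node)
{
	if (--node->refcount == 0) {
		printf("fte released\n");
		free(node);
	}
}

/* Stand-in for add_rule_fg() followed by the new IS_ERR() check. */
static int demo_attach_rule(struct demo_node *fte, int simulated_err)
{
	if (simulated_err) {
		demo_put(fte);		/* the fix: drop the reference on failure */
		return simulated_err;
	}
	fte->refcount++;		/* the new rule also holds the fte */
	return 0;
}

int main(void)
{
	struct demo_node *fte = demo_get_fte();

	if (!fte)
		return 1;
	return demo_attach_rule(fte, -ENOENT) ? 1 : 0;	/* prints "fte released" */
}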
@@ -76,7 +76,7 @@ enum {
 static u32 get_function(u16 func_id, bool ec_function)
 {
-	return func_id & (ec_function << 16);
+	return (u32)func_id | (ec_function << 16);
 }
 
 static struct rb_root *page_root_per_function(struct mlx5_core_dev *dev, u32 function)
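The one-line fix above replaces a bitwise AND with the intended OR: the per-function page-tree key packs func_id into bits 0-15 and the ec_function flag into bit 16, but since those two operands never share a set bit, the old expression always evaluated to 0 and collapsed every function onto the same tree. A standalone check of both expressions:

/*
 * Demonstrates the broken key calculation versus the fixed one.
 * Compile and run as ordinary user-space C; no driver code involved.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t key_old(uint16_t func_id, bool ec_function)
{
	return func_id & (ec_function << 16);		/* operands never overlap: always 0 */
}

static uint32_t key_new(uint16_t func_id, bool ec_function)
{
	return (uint32_t)func_id | (ec_function << 16);	/* unique 17-bit key */
}

int main(void)
{
	printf("old: 0x%x 0x%x\n", (unsigned)key_old(5, false), (unsigned)key_old(5, true));	/* 0x0 0x0 */
	printf("new: 0x%x 0x%x\n", (unsigned)key_new(5, false), (unsigned)key_new(5, true));	/* 0x5 0x10005 */
	return 0;
}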