Commit 4863b57b authored by Jakub Kicinski

Merge tag 'mlx5-fixes-2023-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-07-05

This series provides bug fixes to the mlx5 driver.

* tag 'mlx5-fixes-2023-07-05' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux:
  net/mlx5e: RX, Fix page_pool page fragment tracking for XDP
  net/mlx5: Query hca_cap_2 only when supported
  net/mlx5e: TC, CT: Offload ct clear only once
  net/mlx5e: Check for NOT_READY flag state after locking
  net/mlx5: Register a unique thermal zone per device
  net/mlx5e: RX, Fix flush and close release flow of regular rq for legacy rq
  net/mlx5e: fix memory leak in mlx5e_ptp_open
  net/mlx5e: fix memory leak in mlx5e_fs_tt_redirect_any_create
  net/mlx5e: fix double free in mlx5e_destroy_flow_table
====================

Link: https://lore.kernel.org/r/20230705175757.284614-1-saeed@kernel.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 0323bce5 7abd955a
@@ -594,7 +594,7 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
 
 	err = fs_any_create_table(fs);
 	if (err)
-		return err;
+		goto err_free_any;
 
 	err = fs_any_enable(fs);
 	if (err)
@@ -606,8 +606,8 @@ int mlx5e_fs_tt_redirect_any_create(struct mlx5e_flow_steering *fs)
 
 err_destroy_table:
 	fs_any_destroy_table(fs_any);
-
-	kfree(fs_any);
+err_free_any:
 	mlx5e_fs_set_any(fs, NULL);
+	kfree(fs_any);
 	return err;
 }
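The fs_tt_redirect fix above is a classic unwind-ordering repair: the old code leaked fs_any when fs_any_create_table() failed, and on later failures freed it while the steering context still pointed at it. A minimal userspace sketch of the corrected pattern (hypothetical names, not the driver's API):

#include <errno.h>
#include <stdlib.h>

struct any_table { int enabled; };
struct ctx { struct any_table *any; };

/* Stubs standing in for the driver's table setup steps. */
static int create_table(struct any_table *t) { (void)t; return 0; }
static int enable_table(struct any_table *t) { t->enabled = 1; return 0; }
static void destroy_table(struct any_table *t) { (void)t; }

static int any_create(struct ctx *c)
{
	struct any_table *t = calloc(1, sizeof(*t));
	int err;

	if (!t)
		return -ENOMEM;
	c->any = t;

	err = create_table(t);
	if (err)
		goto err_free_any;      /* was "return err": leaked t */

	err = enable_table(t);
	if (err)
		goto err_destroy_table;

	return 0;

err_destroy_table:
	destroy_table(t);
err_free_any:
	c->any = NULL;  /* drop the back-pointer first ... */
	free(t);        /* ... then free, so no later path frees it again */
	return err;
}

int main(void)
{
	struct ctx c = { 0 };
	return any_create(&c) ? 1 : 0;
}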
@@ -729,8 +729,10 @@ int mlx5e_ptp_open(struct mlx5e_priv *priv, struct mlx5e_params *params,
 
 	c = kvzalloc_node(sizeof(*c), GFP_KERNEL, dev_to_node(mlx5_core_dma_dev(mdev)));
 	cparams = kvzalloc(sizeof(*cparams), GFP_KERNEL);
-	if (!c || !cparams)
-		return -ENOMEM;
+	if (!c || !cparams) {
+		err = -ENOMEM;
+		goto err_free;
+	}
 
 	c->priv = priv;
 	c->mdev = priv->mdev;
...
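A note on why the single err_free label in mlx5e_ptp_open() is safe when only one of the two allocations failed: kvfree(), like free(), is defined to be a no-op on a NULL pointer. A small sketch under that assumption, with placeholder names and sizes:

#include <errno.h>
#include <stdlib.h>

/* Both allocations happen before the check; one combined label frees
 * both on failure. free(NULL) is a defined no-op (kvfree() likewise),
 * so whichever allocation failed is released harmlessly while the one
 * that succeeded no longer leaks.
 */
int open_channel(void **out)
{
	void *c = calloc(1, 64);        /* placeholder channel object */
	void *cparams = calloc(1, 32);  /* placeholder params scratch */
	int err;

	if (!c || !cparams) {
		err = -ENOMEM;          /* was "return -ENOMEM": leaked the survivor */
		goto err_free;
	}

	*out = c;                       /* success: caller owns c */
	free(cparams);                  /* scratch is always released */
	return 0;

err_free:
	free(cparams);
	free(c);
	return err;
}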
@@ -1545,7 +1545,8 @@ mlx5_tc_ct_parse_action(struct mlx5_tc_ct_priv *priv,
 
 	attr->ct_attr.ct_action |= act->ct.action; /* So we can have clear + ct */
 	attr->ct_attr.zone = act->ct.zone;
-	attr->ct_attr.nf_ft = act->ct.flow_table;
+	if (!(act->ct.action & TCA_CT_ACT_CLEAR))
+		attr->ct_attr.nf_ft = act->ct.flow_table;
 	attr->ct_attr.act_miss_cookie = act->miss_cookie;
 
 	return 0;
@@ -1990,6 +1991,9 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *att
 	if (!priv)
 		return -EOPNOTSUPP;
 
+	if (attr->ct_attr.offloaded)
+		return 0;
+
 	if (attr->ct_attr.ct_action & TCA_CT_ACT_CLEAR) {
 		err = mlx5_tc_ct_entry_set_registers(priv, &attr->parse_attr->mod_hdr_acts,
 						     0, 0, 0, 0);
@@ -1999,11 +2003,15 @@ mlx5_tc_ct_flow_offload(struct mlx5_tc_ct_priv *priv, struct mlx5_flow_attr *att
 		attr->action |= MLX5_FLOW_CONTEXT_ACTION_MOD_HDR;
 	}
 
-	if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
+	if (!attr->ct_attr.nf_ft) { /* means only ct clear action, and not ct_clear,ct() */
+		attr->ct_attr.offloaded = true;
 		return 0;
+	}
 
 	mutex_lock(&priv->control_lock);
 	err = __mlx5_tc_ct_flow_offload(priv, attr);
+	if (!err)
+		attr->ct_attr.offloaded = true;
 	mutex_unlock(&priv->control_lock);
 
 	return err;
@@ -2021,7 +2029,7 @@ void
 mlx5_tc_ct_delete_flow(struct mlx5_tc_ct_priv *priv,
 		       struct mlx5_flow_attr *attr)
 {
-	if (!attr->ct_attr.ft) /* no ct action, return */
+	if (!attr->ct_attr.offloaded) /* no ct action, return */
 		return;
 
 	if (!attr->ct_attr.nf_ft) /* means only ct clear action, and not ct_clear,ct() */
 		return;
...
@@ -29,6 +29,7 @@ struct mlx5_ct_attr {
 	u32 ct_labels_id;
 	u32 act_miss_mapping;
 	u64 act_miss_cookie;
+	bool offloaded;
 	struct mlx5_ct_ft *ft;
 };
...
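The new offloaded flag makes the ct offload/delete pair idempotent: a repeated offload call returns early, and delete only tears down state that was actually installed. A stripped-down sketch of the guard, with stand-in names rather than the driver's real API:

#include <stdbool.h>
#include <pthread.h>

struct ct_attr {
	bool offloaded;   /* set only once setup really happened */
	void *nf_ft;      /* NULL means "ct clear only" */
};

static pthread_mutex_t control_lock = PTHREAD_MUTEX_INITIALIZER;

static int do_offload(struct ct_attr *attr) { (void)attr; return 0; }
static void do_teardown(struct ct_attr *attr) { (void)attr; }

int ct_flow_offload(struct ct_attr *attr)
{
	int err;

	if (attr->offloaded)      /* repeated call: already done */
		return 0;

	if (!attr->nf_ft) {       /* ct clear only: no table to install */
		attr->offloaded = true;
		return 0;
	}

	pthread_mutex_lock(&control_lock);
	err = do_offload(attr);
	if (!err)
		attr->offloaded = true;   /* mark only on success */
	pthread_mutex_unlock(&control_lock);
	return err;
}

void ct_flow_delete(struct ct_attr *attr)
{
	if (!attr->offloaded)     /* never set up, or already torn down */
		return;
	do_teardown(attr);
	attr->offloaded = false;
}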
@@ -662,8 +662,7 @@ static void mlx5e_free_xdpsq_desc(struct mlx5e_xdpsq *sq,
 			/* No need to check ((page->pp_magic & ~0x3UL) == PP_SIGNATURE)
 			 * as we know this is a page_pool page.
 			 */
-			page_pool_put_defragged_page(page->pp,
-						     page, -1, true);
+			page_pool_recycle_direct(page->pp, page);
 		} while (++n < num);
 
 		break;
...
@@ -190,6 +190,7 @@ static int accel_fs_tcp_create_groups(struct mlx5e_flow_table *ft,
 	in = kvzalloc(inlen, GFP_KERNEL);
 	if (!in || !ft->g) {
 		kfree(ft->g);
+		ft->g = NULL;
 		kvfree(in);
 		return -ENOMEM;
 	}
...
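The one-line ft->g = NULL matters because an outer cleanup path frees ft->g again after this function returns an error; kfree(NULL) is a no-op, while a stale pointer is a double free. The idiom in a self-contained form (illustrative names):

#include <stdlib.h>

struct table { void **groups; };

/* Free-and-NULL on the failure path: if an outer cleanup later calls
 * free(t->groups) again, it hits free(NULL), a no-op, instead of a
 * double free on a stale pointer.
 */
int create_groups(struct table *t)
{
	void *in = calloc(1, 128);          /* scratch buffer */

	t->groups = calloc(4, sizeof(*t->groups));
	if (!in || !t->groups) {
		free(t->groups);
		t->groups = NULL;           /* the actual fix */
		free(in);
		return -1;                  /* stands in for -ENOMEM */
	}

	/* ... build groups using "in" ... */
	free(in);
	return 0;
}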
@@ -390,10 +390,18 @@ static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
 {
 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
 
-	if (rq->xsk_pool)
+	if (rq->xsk_pool) {
 		mlx5e_xsk_free_rx_wqe(wi);
-	else
+	} else {
 		mlx5e_free_rx_wqe(rq, wi);
+
+		/* Avoid a second release of the wqe pages: dealloc is called
+		 * for the same missing wqes on regular RQ flush and on regular
+		 * RQ close. This happens when XSK RQs come into play.
+		 */
+		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
+			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+	}
 }
 
 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
@@ -1743,11 +1751,11 @@ mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi
 
 	prog = rcu_dereference(rq->xdp_prog);
 	if (prog && mlx5e_xdp_handle(rq, prog, &mxbuf)) {
-		if (test_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
 			struct mlx5e_wqe_frag_info *pwi;
 
 			for (pwi = head_wi; pwi < wi; pwi++)
-				pwi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
+				pwi->frag_page->frags++;
 		}
 		return NULL; /* page/packet was consumed by XDP */
 	}
@@ -1817,12 +1825,8 @@ static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}
@@ -1868,12 +1872,8 @@ static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
 			      rq, wi, cqe, cqe_bcnt);
 	if (!skb) {
 		/* probably for XDP */
-		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-			/* do not return page to cache,
-			 * it will be returned on XDP_TX completion.
-			 */
-			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
-		}
+		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
+			wi->frag_page->frags++;
 		goto wq_cyc_pop;
 	}
@@ -2052,12 +2052,12 @@ mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *w
 	if (prog) {
 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
-				int i;
-
-				for (i = 0; i < sinfo->nr_frags; i++)
-					/* non-atomic */
-					__set_bit(page_idx + i, wi->skip_release_bitmap);
-				return NULL;
+				struct mlx5e_frag_page *pfp;
+
+				for (pfp = head_page; pfp < frag_page; pfp++)
+					pfp->frags++;
+
+				wi->linear_page.frags++;
 			}
 			mlx5e_page_release_fragmented(rq, &wi->linear_page);
 			return NULL; /* page/packet was consumed by XDP */
@@ -2155,7 +2155,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
 			     cqe_bcnt, &mxbuf);
 		if (mlx5e_xdp_handle(rq, prog, &mxbuf)) {
 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
-				__set_bit(page_idx, wi->skip_release_bitmap); /* non-atomic */
+				frag_page->frags++;
 			return NULL; /* page/packet was consumed by XDP */
 		}
...
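The RX changes above replace "skip release" flagging with page_pool-style fragment counting: when XDP keeps a page, the driver takes an extra fragment reference (frags++), and the normal release path simply drops one reference, so the accounting balances no matter who consumed the packet. A toy model of that scheme (not the page_pool API):

#include <assert.h>

/* Toy fragment counter: each consumer of a page holds one fragment.
 * "XDP kept the page" is expressed by taking an extra fragment
 * (frags++) rather than by flagging the release path to skip the page,
 * so a single, unconditional release function stays correct.
 */
struct frag_page {
	int frags;      /* outstanding users of this page */
	int freed;
};

static void page_get_frag(struct frag_page *p) { p->frags++; }

static void page_release_frag(struct frag_page *p)
{
	assert(p->frags > 0);
	if (--p->frags == 0)
		p->freed = 1;   /* last user: page really goes back */
}

int main(void)
{
	struct frag_page p = { .frags = 1 };  /* RQ owns one fragment */

	page_get_frag(&p);      /* XDP_TX keeps the page: extra fragment */
	page_release_frag(&p);  /* RQ release path runs unconditionally */
	assert(!p.freed);       /* page still alive for XDP completion */
	page_release_frag(&p);  /* XDP completion drops the last fragment */
	assert(p.freed);
	return 0;
}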
@@ -1639,7 +1639,8 @@ static void remove_unready_flow(struct mlx5e_tc_flow *flow)
 	uplink_priv = &rpriv->uplink_priv;
 
 	mutex_lock(&uplink_priv->unready_flows_lock);
-	unready_flow_del(flow);
+	if (flow_flag_test(flow, NOT_READY))
+		unready_flow_del(flow);
 	mutex_unlock(&uplink_priv->unready_flows_lock);
 }
@@ -1932,8 +1933,7 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv,
 	esw_attr = attr->esw_attr;
 	mlx5e_put_flow_tunnel_id(flow);
 
-	if (flow_flag_test(flow, NOT_READY))
-		remove_unready_flow(flow);
+	remove_unready_flow(flow);
 
 	if (mlx5e_is_offloaded_flow(flow)) {
 		if (flow_flag_test(flow, SLOW))
...
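The NOT_READY change is a test-then-act race fix: testing the flag outside unready_flows_lock allowed two paths to both decide to remove the flow from the unready list. Moving the test under the lock that protects the list makes the check and the removal atomic. A sketch of the shape with pthreads and hypothetical names:

#include <pthread.h>
#include <stdbool.h>

struct flow {
	bool not_ready;          /* protected by unready_lock */
	struct flow *next;       /* membership in the unready list */
};

static pthread_mutex_t unready_lock = PTHREAD_MUTEX_INITIALIZER;

static void unready_flow_del(struct flow *f) { f->next = NULL; } /* stub */

/* Test and act under one lock: a concurrent caller that already
 * removed the flow (and cleared not_ready) is observed correctly,
 * instead of both callers deleting the same list entry.
 */
void remove_unready_flow(struct flow *f)
{
	pthread_mutex_lock(&unready_lock);
	if (f->not_ready) {
		unready_flow_del(f);
		f->not_ready = false;
	}
	pthread_mutex_unlock(&unready_lock);
}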
@@ -807,6 +807,9 @@ static int mlx5_esw_vport_caps_get(struct mlx5_eswitch *esw, struct mlx5_vport *
 	hca_caps = MLX5_ADDR_OF(query_hca_cap_out, query_ctx, capability);
 	vport->info.roce_enabled = MLX5_GET(cmd_hca_cap, hca_caps, roce);
 
+	if (!MLX5_CAP_GEN_MAX(esw->dev, hca_cap_2))
+		goto out_free;
+
 	memset(query_ctx, 0, query_out_sz);
 	err = mlx5_vport_get_other_func_cap(esw->dev, vport->vport, query_ctx,
 					    MLX5_CAP_GENERAL_2);
...
@@ -68,14 +68,19 @@ static struct thermal_zone_device_ops mlx5_thermal_ops = {
 
 int mlx5_thermal_init(struct mlx5_core_dev *mdev)
 {
+	char data[THERMAL_NAME_LENGTH];
 	struct mlx5_thermal *thermal;
-	struct thermal_zone_device *tzd;
-	const char *data = "mlx5";
+	int err;
 
-	tzd = thermal_zone_get_zone_by_name(data);
-	if (!IS_ERR(tzd))
+	if (!mlx5_core_is_pf(mdev) && !mlx5_core_is_ecpf(mdev))
 		return 0;
 
+	err = snprintf(data, sizeof(data), "mlx5_%s", dev_name(mdev->device));
+	if (err < 0 || err >= sizeof(data)) {
+		mlx5_core_err(mdev, "Failed to setup thermal zone name, %d\n", err);
+		return -EINVAL;
+	}
+
 	thermal = kzalloc(sizeof(*thermal), GFP_KERNEL);
 	if (!thermal)
 		return -ENOMEM;
@@ -89,10 +94,10 @@ int mlx5_thermal_init(struct mlx5_core_dev *mdev)
 					&mlx5_thermal_ops,
 					NULL, 0, MLX5_THERMAL_POLL_INT_MSEC);
 	if (IS_ERR(thermal->tzdev)) {
-		dev_err(mdev->device, "Failed to register thermal zone device (%s) %ld\n",
-			data, PTR_ERR(thermal->tzdev));
+		err = PTR_ERR(thermal->tzdev);
+		mlx5_core_err(mdev, "Failed to register thermal zone device (%s) %d\n", data, err);
 		kfree(thermal);
-		return -EINVAL;
+		return err;
 	}
 
 	mdev->thermal = thermal;
...
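The thermal fix derives a unique zone name per device and checks snprintf() for both error and truncation, since a silently truncated name could collide with another device's zone. The return-value check in standalone form:

#include <stdio.h>

#define THERMAL_NAME_LENGTH 20   /* matches the kernel's limit */

/* Build "mlx5_<devname>" and reject failure or truncation:
 * snprintf() returns the length it *wanted* to write, so a result
 * >= the buffer size means the name was cut short and may not be
 * unique after all.
 */
int build_zone_name(char *buf, size_t len, const char *devname)
{
	int n = snprintf(buf, len, "mlx5_%s", devname);

	if (n < 0 || (size_t)n >= len)
		return -1;   /* stands in for -EINVAL */
	return 0;
}

int main(void)
{
	char name[THERMAL_NAME_LENGTH];

	return build_zone_name(name, sizeof(name), "0000:08:00.0") ? 1 : 0;
}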