Commit 50645610 authored by David S. Miller's avatar David S. Miller

Merge tag 'mlx5-fixes-2023-02-24' of git://git.kernel.org/pub/scm/linux/kernel/git/saeed/linux

Saeed Mahameed says:

====================
mlx5 fixes 2023-02-24

V1->V2:
 - Tossed away arguably non-fix patches

This series provides bug fixes for mlx5 driver.
Please pull and let me know if there is any problem.
====================
parents 25ff6f8a d28a06d7
...@@ -98,4 +98,8 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev) ...@@ -98,4 +98,8 @@ void mlx5_ec_cleanup(struct mlx5_core_dev *dev)
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]); err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_HOST_PF]);
if (err) if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err); mlx5_core_warn(dev, "Timeout reclaiming external host PF pages err(%d)\n", err);
err = mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]);
if (err)
mlx5_core_warn(dev, "Timeout reclaiming external host VFs pages err(%d)\n", err);
} }
...@@ -86,7 +86,19 @@ static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb ...@@ -86,7 +86,19 @@ static bool mlx5e_ptp_ts_cqe_drop(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb
return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id)); return (ptpsq->ts_cqe_ctr_mask && (skb_cc != skb_id));
} }
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc, u16 skb_id) static bool mlx5e_ptp_ts_cqe_ooo(struct mlx5e_ptpsq *ptpsq, u16 skb_id)
{
u16 skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
u16 skb_pc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_pc);
if (PTP_WQE_CTR2IDX(skb_id - skb_cc) >= PTP_WQE_CTR2IDX(skb_pc - skb_cc))
return true;
return false;
}
static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_cc,
u16 skb_id, int budget)
{ {
struct skb_shared_hwtstamps hwts = {}; struct skb_shared_hwtstamps hwts = {};
struct sk_buff *skb; struct sk_buff *skb;
...@@ -98,6 +110,7 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_ ...@@ -98,6 +110,7 @@ static void mlx5e_ptp_skb_fifo_ts_cqe_resync(struct mlx5e_ptpsq *ptpsq, u16 skb_
hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp; hwts.hwtstamp = mlx5e_skb_cb_get_hwts(skb)->cqe_hwtstamp;
skb_tstamp_tx(skb, &hwts); skb_tstamp_tx(skb, &hwts);
ptpsq->cq_stats->resync_cqe++; ptpsq->cq_stats->resync_cqe++;
napi_consume_skb(skb, budget);
skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc); skb_cc = PTP_WQE_CTR2IDX(ptpsq->skb_fifo_cc);
} }
} }
...@@ -118,8 +131,14 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq, ...@@ -118,8 +131,14 @@ static void mlx5e_ptp_handle_ts_cqe(struct mlx5e_ptpsq *ptpsq,
goto out; goto out;
} }
if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) if (mlx5e_ptp_ts_cqe_drop(ptpsq, skb_cc, skb_id)) {
mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id); if (mlx5e_ptp_ts_cqe_ooo(ptpsq, skb_id)) {
/* already handled by a previous resync */
ptpsq->cq_stats->ooo_cqe_drop++;
return;
}
mlx5e_ptp_skb_fifo_ts_cqe_resync(ptpsq, skb_cc, skb_id, budget);
}
skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo); skb = mlx5e_skb_fifo_pop(&ptpsq->skb_fifo);
hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe)); hwtstamp = mlx5e_cqe_ts_to_ns(sq->ptp_cyc2time, sq->clock, get_cqe_ts(cqe));
......
...@@ -710,8 +710,7 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq, ...@@ -710,8 +710,7 @@ void mlx5e_rep_tc_receive(struct mlx5_cqe64 *cqe, struct mlx5e_rq *rq,
else else
napi_gro_receive(rq->cq.napi, skb); napi_gro_receive(rq->cq.napi, skb);
if (tc_priv.fwd_dev) dev_put(tc_priv.fwd_dev);
dev_put(tc_priv.fwd_dev);
return; return;
......
...@@ -37,7 +37,7 @@ mlx5e_tc_act_stats_create(void) ...@@ -37,7 +37,7 @@ mlx5e_tc_act_stats_create(void)
int err; int err;
handle = kvzalloc(sizeof(*handle), GFP_KERNEL); handle = kvzalloc(sizeof(*handle), GFP_KERNEL);
if (IS_ERR(handle)) if (!handle)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
err = rhashtable_init(&handle->ht, &act_counters_ht_params); err = rhashtable_init(&handle->ht, &act_counters_ht_params);
......
...@@ -86,7 +86,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq); ...@@ -86,7 +86,7 @@ void mlx5e_free_txqsq_descs(struct mlx5e_txqsq *sq);
static inline bool static inline bool
mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo) mlx5e_skb_fifo_has_room(struct mlx5e_skb_fifo *fifo)
{ {
return (*fifo->pc - *fifo->cc) < fifo->mask; return (u16)(*fifo->pc - *fifo->cc) < fifo->mask;
} }
static inline bool static inline bool
...@@ -302,6 +302,8 @@ void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb) ...@@ -302,6 +302,8 @@ void mlx5e_skb_fifo_push(struct mlx5e_skb_fifo *fifo, struct sk_buff *skb)
static inline static inline
struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo) struct sk_buff *mlx5e_skb_fifo_pop(struct mlx5e_skb_fifo *fifo)
{ {
WARN_ON_ONCE(*fifo->pc == *fifo->cc);
return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++); return *mlx5e_skb_fifo_get(fifo, (*fifo->cc)++);
} }
......
...@@ -2138,6 +2138,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = { ...@@ -2138,6 +2138,7 @@ static const struct counter_desc ptp_cq_stats_desc[] = {
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, abort_abs_diff_ns) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_cqe) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) }, { MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, resync_event) },
{ MLX5E_DECLARE_PTP_CQ_STAT(struct mlx5e_ptp_cq_stats, ooo_cqe_drop) },
}; };
static const struct counter_desc ptp_rq_stats_desc[] = { static const struct counter_desc ptp_rq_stats_desc[] = {
......
...@@ -461,6 +461,7 @@ struct mlx5e_ptp_cq_stats { ...@@ -461,6 +461,7 @@ struct mlx5e_ptp_cq_stats {
u64 abort_abs_diff_ns; u64 abort_abs_diff_ns;
u64 resync_cqe; u64 resync_cqe;
u64 resync_event; u64 resync_event;
u64 ooo_cqe_drop;
}; };
struct mlx5e_rep_stats { struct mlx5e_rep_stats {
......
...@@ -869,7 +869,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw, ...@@ -869,7 +869,8 @@ mlx5_eswitch_add_send_to_vport_rule(struct mlx5_eswitch *on_esw,
dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID; dest.vport.flags |= MLX5_FLOW_DEST_VPORT_VHCA_ID;
flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST; flow_act.action = MLX5_FLOW_CONTEXT_ACTION_FWD_DEST;
if (rep->vport == MLX5_VPORT_UPLINK) if (MLX5_CAP_ESW_FLOWTABLE(on_esw->dev, flow_source) &&
rep->vport == MLX5_VPORT_UPLINK)
spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT; spec->flow_context.flow_source = MLX5_FLOW_CONTEXT_FLOW_SOURCE_LOCAL_VPORT;
flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw), flow_rule = mlx5_add_flow_rules(mlx5_eswitch_get_slow_fdb(on_esw),
......
...@@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op ...@@ -105,6 +105,7 @@ int mlx5_geneve_tlv_option_add(struct mlx5_geneve *geneve, struct geneve_opt *op
geneve->opt_type = opt->type; geneve->opt_type = opt->type;
geneve->obj_id = res; geneve->obj_id = res;
geneve->refcount++; geneve->refcount++;
res = 0;
} }
unlock: unlock:
......
...@@ -147,6 +147,10 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf) ...@@ -147,6 +147,10 @@ mlx5_device_disable_sriov(struct mlx5_core_dev *dev, int num_vfs, bool clear_vf)
mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf); mlx5_eswitch_disable_sriov(dev->priv.eswitch, clear_vf);
/* For ECPFs, skip waiting for host VF pages until ECPF is destroyed */
if (mlx5_core_is_ecpf(dev))
return;
if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF])) if (mlx5_wait_for_pages(dev, &dev->priv.page_counters[MLX5_VF]))
mlx5_core_warn(dev, "timeout reclaiming VFs pages\n"); mlx5_core_warn(dev, "timeout reclaiming VFs pages\n");
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment