Commit d5a84e96 authored by Yevgeny Kliteynik's avatar Yevgeny Kliteynik Committed by Saeed Mahameed

net/mlx5: DR, Warn and ignore SW steering rule insertion on QP err

In the event of SW steering QP entering error state, SW steering
cannot insert more rules, and will silently ignore the insertion
after issuing a warning.
Signed-off-by: Yuval Avnery <yuvalav@mellanox.com>
Signed-off-by: Yevgeny Kliteynik <kliteyn@nvidia.com>
Reviewed-by: Alex Vesker <valex@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
parent f35715a6
@@ -325,10 +325,14 @@ static int dr_handle_pending_wc(struct mlx5dr_domain *dmn,
 
 	do {
 		ne = dr_poll_cq(send_ring->cq, 1);
-		if (ne < 0)
+		if (unlikely(ne < 0)) {
+			mlx5_core_warn_once(dmn->mdev, "SMFS QPN 0x%x is disabled/limited",
+					    send_ring->qp->qpn);
+			send_ring->err_state = true;
 			return ne;
-		else if (ne == 1)
+		} else if (ne == 1) {
 			send_ring->pending_wqe -= send_ring->signal_th;
+		}
 	} while (is_drain && send_ring->pending_wqe);
 
 	return 0;
@@ -361,6 +365,14 @@ static int dr_postsend_icm_data(struct mlx5dr_domain *dmn,
 	u32 buff_offset;
 	int ret;
 
+	if (unlikely(dmn->mdev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR ||
+		     send_ring->err_state)) {
+		mlx5_core_dbg_once(dmn->mdev,
+				   "Skipping post send: QP err state: %d, device state: %d\n",
+				   send_ring->err_state, dmn->mdev->state);
+		return 0;
+	}
+
 	spin_lock(&send_ring->lock);
 
 	ret = dr_handle_pending_wc(dmn, send_ring);
@@ -1285,6 +1285,7 @@ struct mlx5dr_send_ring {
 	u8 sync_buff[MIN_READ_SYNC];
 	struct mlx5dr_mr *sync_mr;
 	spinlock_t lock; /* Protect the data path of the send ring */
+	bool err_state; /* send_ring is not usable in err state */
 };
 
 int mlx5dr_send_ring_alloc(struct mlx5dr_domain *dmn);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment