Commit 350d0e4c authored by Yishai Hadas's avatar Yishai Hadas Committed by Doug Ledford

IB/mlx5: Track asynchronous events on a receive work queue

Track asynchronous events on a receive work queue by using the
mlx5_core_create_rq_tracked API.

In case a fatal error occurs, the IB layer is notified by using the
ib_wq event handler.
Signed-off-by: Yishai Hadas <yishaih@mellanox.com>
Signed-off-by: Leon Romanovsky <leon@kernel.org>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 466fa6d2
...@@ -227,7 +227,7 @@ struct mlx5_ib_wq { ...@@ -227,7 +227,7 @@ struct mlx5_ib_wq {
struct mlx5_ib_rwq { struct mlx5_ib_rwq {
struct ib_wq ibwq; struct ib_wq ibwq;
u32 rqn; struct mlx5_core_qp core_qp;
u32 rq_num_pas; u32 rq_num_pas;
u32 log_rq_stride; u32 log_rq_stride;
u32 log_rq_size; u32 log_rq_size;
...@@ -664,6 +664,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp) ...@@ -664,6 +664,11 @@ static inline struct mlx5_ib_qp *to_mibqp(struct mlx5_core_qp *mqp)
return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp; return container_of(mqp, struct mlx5_ib_qp_base, mqp)->container_mibqp;
} }
/* Map an embedded mlx5_core_qp back to its containing mlx5_ib_rwq. */
static inline struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
{
return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
}
static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey) static inline struct mlx5_ib_mr *to_mibmr(struct mlx5_core_mkey *mmkey)
{ {
return container_of(mmkey, struct mlx5_ib_mr, mmkey); return container_of(mmkey, struct mlx5_ib_mr, mmkey);
......
...@@ -4536,6 +4536,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd) ...@@ -4536,6 +4536,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
return 0; return 0;
} }
/*
 * Asynchronous event callback for a tracked receive WQ.
 *
 * Translates the firmware event @type into an ib_event and forwards it
 * to the consumer's ib_wq event handler, if one was registered at WQ
 * creation time. Unrecognized event types are logged and dropped.
 */
static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
{
	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
	struct ib_event event;

	/* Nothing to deliver if the consumer did not register a handler. */
	if (!rwq->ibwq.event_handler)
		return;

	event.device = rwq->ibwq.device;
	event.element.wq = &rwq->ibwq;

	switch (type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
		event.event = IB_EVENT_WQ_FATAL;
		break;
	default:
		mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n",
			     type, core_qp->qpn);
		return;
	}

	rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
}
static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
struct ib_wq_init_attr *init_attr) struct ib_wq_init_attr *init_attr)
{ {
...@@ -4573,7 +4595,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd, ...@@ -4573,7 +4595,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma); MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas); rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0); mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn); err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
kvfree(in); kvfree(in);
return err; return err;
} }
...@@ -4689,7 +4711,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, ...@@ -4689,7 +4711,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
return ERR_PTR(-EINVAL); return ERR_PTR(-EINVAL);
} }
rwq->ibwq.wq_num = rwq->rqn; rwq->ibwq.wq_num = rwq->core_qp.qpn;
rwq->ibwq.state = IB_WQS_RESET; rwq->ibwq.state = IB_WQS_RESET;
if (udata->outlen) { if (udata->outlen) {
resp.response_length = offsetof(typeof(resp), response_length) + resp.response_length = offsetof(typeof(resp), response_length) +
...@@ -4699,10 +4721,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd, ...@@ -4699,10 +4721,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
goto err_copy; goto err_copy;
} }
rwq->core_qp.event = mlx5_ib_wq_event;
rwq->ibwq.event_handler = init_attr->event_handler;
return &rwq->ibwq; return &rwq->ibwq;
err_copy: err_copy:
mlx5_core_destroy_rq(dev->mdev, rwq->rqn); mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
err_user_rq: err_user_rq:
destroy_user_rq(pd, rwq); destroy_user_rq(pd, rwq);
err: err:
...@@ -4715,7 +4739,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq) ...@@ -4715,7 +4739,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
struct mlx5_ib_dev *dev = to_mdev(wq->device); struct mlx5_ib_dev *dev = to_mdev(wq->device);
struct mlx5_ib_rwq *rwq = to_mrwq(wq); struct mlx5_ib_rwq *rwq = to_mrwq(wq);
mlx5_core_destroy_rq(dev->mdev, rwq->rqn); mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
destroy_user_rq(wq->pd, rwq); destroy_user_rq(wq->pd, rwq);
kfree(rwq); kfree(rwq);
...@@ -4847,7 +4871,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr, ...@@ -4847,7 +4871,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state); MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
MLX5_SET(rqc, rqc, state, wq_state); MLX5_SET(rqc, rqc, state, wq_state);
err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen); err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
kvfree(in); kvfree(in);
if (!err) if (!err)
rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state; rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment