Commit 4703b4f0 authored by Bob Pearson, committed by Jason Gunthorpe

RDMA/rxe: Enforce IBA C11-17

Add a counter to keep track of the number of WQs connected to a CQ and
return an error if destroy_cq() is called while the counter is non-zero.

Link: https://lore.kernel.org/r/20220421014042.26985-8-rpearsonhpe@gmail.com
Signed-off-by: Bob Pearson <rpearsonhpe@gmail.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent cde3f5d6
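
The effect of the new check is visible from user space: as long as any QP still uses a CQ as its send or receive CQ, destroying that CQ now fails instead of quietly succeeding. The sketch below is not part of the patch; it is a minimal libibverbs illustration that assumes the first device returned by ibv_get_device_list() is an rxe (or other RDMA) device and omits most error handling.

/* Minimal sketch, not part of this patch: exercises IBA C11-17 from user
 * space through libibverbs. Assumes the first listed device is usable and
 * omits most error handling for brevity.
 */
#include <stdio.h>
#include <string.h>
#include <infiniband/verbs.h>

int main(void)
{
	struct ibv_device **devs = ibv_get_device_list(NULL);
	if (!devs || !devs[0])
		return 1;

	struct ibv_context *ctx = ibv_open_device(devs[0]);
	struct ibv_pd *pd = ibv_alloc_pd(ctx);
	struct ibv_cq *cq = ibv_create_cq(ctx, 16, NULL, NULL, 0);

	struct ibv_qp_init_attr attr = {
		.send_cq = cq,		/* both WQs of this QP use the same CQ */
		.recv_cq = cq,
		.cap = {
			.max_send_wr = 1, .max_recv_wr = 1,
			.max_send_sge = 1, .max_recv_sge = 1,
		},
		.qp_type = IBV_QPT_RC,
	};
	struct ibv_qp *qp = ibv_create_qp(pd, &attr);

	/* The CQ is still associated with work queues, so this must fail. */
	int err = ibv_destroy_cq(cq);
	printf("destroy_cq with QP still attached: %s\n",
	       err ? strerror(err) : "unexpectedly succeeded");

	/* Destroying the QP drops the work-queue references ... */
	ibv_destroy_qp(qp);
	/* ... after which the CQ can be destroyed. */
	err = ibv_destroy_cq(cq);
	printf("destroy_cq after destroying the QP: %s\n",
	       err ? strerror(err) : "ok");

	ibv_dealloc_pd(pd);
	ibv_close_device(ctx);
	ibv_free_device_list(devs);
	return 0;
}

In the patched driver the first ibv_destroy_cq() call is refused with -EINVAL because the QP incremented the CQ's num_wq counter at creation time; once rxe_qp_do_cleanup() has decremented the counter for both the send and receive CQ, the second call succeeds.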
--- a/drivers/infiniband/sw/rxe/rxe_qp.c
+++ b/drivers/infiniband/sw/rxe/rxe_qp.c
@@ -322,6 +322,9 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	qp->scq = scq;
 	qp->srq = srq;
 
+	atomic_inc(&rcq->num_wq);
+	atomic_inc(&scq->num_wq);
+
 	rxe_qp_init_misc(rxe, qp, init);
 
 	err = rxe_qp_init_req(rxe, qp, init, udata, uresp);
@@ -341,6 +344,9 @@ int rxe_qp_from_init(struct rxe_dev *rxe, struct rxe_qp *qp, struct rxe_pd *pd,
 	rxe_queue_cleanup(qp->sq.queue);
 	qp->sq.queue = NULL;
 err1:
+	atomic_dec(&rcq->num_wq);
+	atomic_dec(&scq->num_wq);
+
 	qp->pd = NULL;
 	qp->rcq = NULL;
 	qp->scq = NULL;
@@ -798,10 +804,14 @@ static void rxe_qp_do_cleanup(struct work_struct *work)
 	if (qp->rq.queue)
 		rxe_queue_cleanup(qp->rq.queue);
 
+	atomic_dec(&qp->scq->num_wq);
 	if (qp->scq)
 		rxe_put(qp->scq);
+
+	atomic_dec(&qp->rcq->num_wq);
 	if (qp->rcq)
 		rxe_put(qp->rcq);
+
 	if (qp->pd)
 		rxe_put(qp->pd);
--- a/drivers/infiniband/sw/rxe/rxe_verbs.c
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.c
@@ -795,6 +795,12 @@ static int rxe_destroy_cq(struct ib_cq *ibcq, struct ib_udata *udata)
 {
 	struct rxe_cq *cq = to_rcq(ibcq);
 
+	/* See IBA C11-17: The CI shall return an error if this Verb is
+	 * invoked while a Work Queue is still associated with the CQ.
+	 */
+	if (atomic_read(&cq->num_wq))
+		return -EINVAL;
+
 	rxe_cq_disable(cq);
 
 	rxe_put(cq);
--- a/drivers/infiniband/sw/rxe/rxe_verbs.h
+++ b/drivers/infiniband/sw/rxe/rxe_verbs.h
@@ -67,6 +67,7 @@ struct rxe_cq {
 	bool is_dying;
 	bool is_user;
 	struct tasklet_struct comp_task;
+	atomic_t num_wq;
 };
 
 enum wqe_state {