Commit 7183451f authored by Weihang Li, committed by Jason Gunthorpe

RDMA/cxgb4: Use refcount_t instead of atomic_t for reference counting

The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-12-git-send-email-liweihang@huawei.com
Cc: Potnuri Bharat Teja <bharat@chelsio.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 8f9513d8
...@@ -976,8 +976,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata) ...@@ -976,8 +976,8 @@ int c4iw_destroy_cq(struct ib_cq *ib_cq, struct ib_udata *udata)
chp = to_c4iw_cq(ib_cq); chp = to_c4iw_cq(ib_cq);
xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid); xa_erase_irq(&chp->rhp->cqs, chp->cq.cqid);
atomic_dec(&chp->refcnt); refcount_dec(&chp->refcnt);
wait_event(chp->wait, !atomic_read(&chp->refcnt)); wait_event(chp->wait, !refcount_read(&chp->refcnt));
ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext, ucontext = rdma_udata_to_drv_context(udata, struct c4iw_ucontext,
ibucontext); ibucontext);
...@@ -1080,7 +1080,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr, ...@@ -1080,7 +1080,7 @@ int c4iw_create_cq(struct ib_cq *ibcq, const struct ib_cq_init_attr *attr,
chp->ibcq.cqe = entries - 2; chp->ibcq.cqe = entries - 2;
spin_lock_init(&chp->lock); spin_lock_init(&chp->lock);
spin_lock_init(&chp->comp_handler_lock); spin_lock_init(&chp->comp_handler_lock);
atomic_set(&chp->refcnt, 1); refcount_set(&chp->refcnt, 1);
init_waitqueue_head(&chp->wait); init_waitqueue_head(&chp->wait);
ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL); ret = xa_insert_irq(&rhp->cqs, chp->cq.cqid, chp, GFP_KERNEL);
if (ret) if (ret)
......
...@@ -151,7 +151,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -151,7 +151,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
} }
c4iw_qp_add_ref(&qhp->ibqp); c4iw_qp_add_ref(&qhp->ibqp);
atomic_inc(&chp->refcnt); refcount_inc(&chp->refcnt);
xa_unlock_irq(&dev->qps); xa_unlock_irq(&dev->qps);
/* Bad incoming write */ /* Bad incoming write */
...@@ -213,7 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe) ...@@ -213,7 +213,7 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
break; break;
} }
done: done:
if (atomic_dec_and_test(&chp->refcnt)) if (refcount_dec_and_test(&chp->refcnt))
wake_up(&chp->wait); wake_up(&chp->wait);
c4iw_qp_rem_ref(&qhp->ibqp); c4iw_qp_rem_ref(&qhp->ibqp);
out: out:
...@@ -228,13 +228,13 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid) ...@@ -228,13 +228,13 @@ int c4iw_ev_handler(struct c4iw_dev *dev, u32 qid)
xa_lock_irqsave(&dev->cqs, flag); xa_lock_irqsave(&dev->cqs, flag);
chp = xa_load(&dev->cqs, qid); chp = xa_load(&dev->cqs, qid);
if (chp) { if (chp) {
atomic_inc(&chp->refcnt); refcount_inc(&chp->refcnt);
xa_unlock_irqrestore(&dev->cqs, flag); xa_unlock_irqrestore(&dev->cqs, flag);
t4_clear_cq_armed(&chp->cq); t4_clear_cq_armed(&chp->cq);
spin_lock_irqsave(&chp->comp_handler_lock, flag); spin_lock_irqsave(&chp->comp_handler_lock, flag);
(*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context); (*chp->ibcq.comp_handler)(&chp->ibcq, chp->ibcq.cq_context);
spin_unlock_irqrestore(&chp->comp_handler_lock, flag); spin_unlock_irqrestore(&chp->comp_handler_lock, flag);
if (atomic_dec_and_test(&chp->refcnt)) if (refcount_dec_and_test(&chp->refcnt))
wake_up(&chp->wait); wake_up(&chp->wait);
} else { } else {
pr_debug("unknown cqid 0x%x\n", qid); pr_debug("unknown cqid 0x%x\n", qid);
......
...@@ -427,7 +427,7 @@ struct c4iw_cq { ...@@ -427,7 +427,7 @@ struct c4iw_cq {
struct t4_cq cq; struct t4_cq cq;
spinlock_t lock; spinlock_t lock;
spinlock_t comp_handler_lock; spinlock_t comp_handler_lock;
atomic_t refcnt; refcount_t refcnt;
wait_queue_head_t wait; wait_queue_head_t wait;
struct c4iw_wr_wait *wr_waitp; struct c4iw_wr_wait *wr_waitp;
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment