Commit 8f9513d8 authored by Weihang Li, committed by Jason Gunthorpe

RDMA/hns: Use refcount_t instead of atomic_t for QP reference counting

The refcount_t API will WARN on underflow and overflow of a reference
counter, and avoid use-after-free risks.

Link: https://lore.kernel.org/r/1622194663-2383-11-git-send-email-liweihang@huawei.com
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 33649cd3
...@@ -639,7 +639,7 @@ struct hns_roce_qp { ...@@ -639,7 +639,7 @@ struct hns_roce_qp {
u32 xrcdn; u32 xrcdn;
atomic_t refcount; refcount_t refcount;
struct completion free; struct completion free;
struct hns_roce_sge sge; struct hns_roce_sge sge;
......
...@@ -65,7 +65,7 @@ static void flush_work_handle(struct work_struct *work) ...@@ -65,7 +65,7 @@ static void flush_work_handle(struct work_struct *work)
* make sure we signal QP destroy leg that flush QP was completed * make sure we signal QP destroy leg that flush QP was completed
* so that it can safely proceed ahead now and destroy QP * so that it can safely proceed ahead now and destroy QP
*/ */
if (atomic_dec_and_test(&hr_qp->refcount)) if (refcount_dec_and_test(&hr_qp->refcount))
complete(&hr_qp->free); complete(&hr_qp->free);
} }
...@@ -75,7 +75,7 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp) ...@@ -75,7 +75,7 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
flush_work->hr_dev = hr_dev; flush_work->hr_dev = hr_dev;
INIT_WORK(&flush_work->work, flush_work_handle); INIT_WORK(&flush_work->work, flush_work_handle);
atomic_inc(&hr_qp->refcount); refcount_inc(&hr_qp->refcount);
queue_work(hr_dev->irq_workq, &flush_work->work); queue_work(hr_dev->irq_workq, &flush_work->work);
} }
...@@ -87,7 +87,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) ...@@ -87,7 +87,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
xa_lock(&hr_dev->qp_table_xa); xa_lock(&hr_dev->qp_table_xa);
qp = __hns_roce_qp_lookup(hr_dev, qpn); qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (qp) if (qp)
atomic_inc(&qp->refcount); refcount_inc(&qp->refcount);
xa_unlock(&hr_dev->qp_table_xa); xa_unlock(&hr_dev->qp_table_xa);
if (!qp) { if (!qp) {
...@@ -108,7 +108,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type) ...@@ -108,7 +108,7 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
qp->event(qp, (enum hns_roce_event)event_type); qp->event(qp, (enum hns_roce_event)event_type);
if (atomic_dec_and_test(&qp->refcount)) if (refcount_dec_and_test(&qp->refcount))
complete(&qp->free); complete(&qp->free);
} }
...@@ -1076,7 +1076,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -1076,7 +1076,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->ibqp.qp_num = hr_qp->qpn; hr_qp->ibqp.qp_num = hr_qp->qpn;
hr_qp->event = hns_roce_ib_qp_event; hr_qp->event = hns_roce_ib_qp_event;
atomic_set(&hr_qp->refcount, 1); refcount_set(&hr_qp->refcount, 1);
init_completion(&hr_qp->free); init_completion(&hr_qp->free);
return 0; return 0;
...@@ -1099,7 +1099,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev, ...@@ -1099,7 +1099,7 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp, void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
struct ib_udata *udata) struct ib_udata *udata)
{ {
if (atomic_dec_and_test(&hr_qp->refcount)) if (refcount_dec_and_test(&hr_qp->refcount))
complete(&hr_qp->free); complete(&hr_qp->free);
wait_for_completion(&hr_qp->free); wait_for_completion(&hr_qp->free);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment