Commit b65afbd2 authored by Chengchang Tang, committed by Jason Gunthorpe

RDMA/hns: Refactor the alloc_srqc()

Split alloc_srqc() into several parts and move the SRQ number allocation
out into a separate alloc_srqn().

Link: https://lore.kernel.org/r/20220302064830.61706-9-liangwenpeng@huawei.com
Signed-off-by: Chengchang Tang <tangchengchang@huawei.com>
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 904de76c
drivers/infiniband/hw/hns/hns_roce_srq.c
@@ -59,40 +59,39 @@ static void hns_roce_ib_srq_event(struct hns_roce_srq *srq,
 	}
 }
 
-static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+static int alloc_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 {
-	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
 	struct hns_roce_ida *srq_ida = &hr_dev->srq_table.srq_ida;
-	struct ib_device *ibdev = &hr_dev->ib_dev;
-	struct hns_roce_cmd_mailbox *mailbox;
-	int ret;
 	int id;
 
 	id = ida_alloc_range(&srq_ida->ida, srq_ida->min, srq_ida->max,
 			     GFP_KERNEL);
 	if (id < 0) {
-		ibdev_err(ibdev, "failed to alloc srq(%d).\n", id);
+		ibdev_err(&hr_dev->ib_dev, "failed to alloc srq(%d).\n", id);
 		return -ENOMEM;
 	}
-	srq->srqn = (unsigned long)id;
 
-	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
-	if (ret) {
-		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
-		goto err_out;
-	}
+	srq->srqn = id;
 
-	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
-	if (ret) {
-		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
-		goto err_put;
-	}
+	return 0;
+}
+
+static void free_srqn(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+	ida_free(&hr_dev->srq_table.srq_ida.ida, (int)srq->srqn);
+}
+
+static int hns_roce_create_srqc(struct hns_roce_dev *hr_dev,
+				struct hns_roce_srq *srq)
+{
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	struct hns_roce_cmd_mailbox *mailbox;
+	int ret;
 
 	mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
 	if (IS_ERR(mailbox)) {
 		ibdev_err(ibdev, "failed to alloc mailbox for SRQC.\n");
-		ret = PTR_ERR(mailbox);
-		goto err_xa;
+		return PTR_ERR(mailbox);
 	}
 
 	ret = hr_dev->hw->write_srqc(srq, mailbox->buf);
@@ -103,23 +102,42 @@ static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 
 	ret = hns_roce_create_hw_ctx(hr_dev, mailbox, HNS_ROCE_CMD_CREATE_SRQ,
 				     srq->srqn);
-	if (ret) {
+	if (ret)
 		ibdev_err(ibdev, "failed to config SRQC, ret = %d.\n", ret);
-		goto err_mbox;
-	}
 
+err_mbox:
 	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
+
+	return ret;
+}
+
+static int alloc_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
+{
+	struct hns_roce_srq_table *srq_table = &hr_dev->srq_table;
+	struct ib_device *ibdev = &hr_dev->ib_dev;
+	int ret;
+
+	ret = hns_roce_table_get(hr_dev, &srq_table->table, srq->srqn);
+	if (ret) {
+		ibdev_err(ibdev, "failed to get SRQC table, ret = %d.\n", ret);
+		return ret;
+	}
+
+	ret = xa_err(xa_store(&srq_table->xa, srq->srqn, srq, GFP_KERNEL));
+	if (ret) {
+		ibdev_err(ibdev, "failed to store SRQC, ret = %d.\n", ret);
+		goto err_put;
+	}
+
+	ret = hns_roce_create_srqc(hr_dev, srq);
+	if (ret)
+		goto err_xa;
 
 	return 0;
 
-err_mbox:
-	hns_roce_free_cmd_mailbox(hr_dev, mailbox);
 err_xa:
 	xa_erase(&srq_table->xa, srq->srqn);
 err_put:
 	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-err_out:
-	ida_free(&srq_ida->ida, id);
 
 	return ret;
 }
@@ -142,7 +160,6 @@ static void free_srqc(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq)
 	wait_for_completion(&srq->free);
 
 	hns_roce_table_put(hr_dev, &srq_table->table, srq->srqn);
-	ida_free(&srq_table->srq_ida.ida, (int)srq->srqn);
 }
 
 static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
@@ -390,10 +407,14 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 	if (ret)
 		return ret;
 
-	ret = alloc_srqc(hr_dev, srq);
+	ret = alloc_srqn(hr_dev, srq);
 	if (ret)
 		goto err_srq_buf;
 
+	ret = alloc_srqc(hr_dev, srq);
+	if (ret)
+		goto err_srqn;
+
 	if (udata) {
 		resp.srqn = srq->srqn;
 		if (ib_copy_to_udata(udata, &resp,
@@ -412,6 +433,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
 
 err_srqc:
 	free_srqc(hr_dev, srq);
+err_srqn:
+	free_srqn(hr_dev, srq);
 err_srq_buf:
 	free_srq_buf(hr_dev, srq);
 
@@ -424,6 +447,7 @@ int hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
 	struct hns_roce_srq *srq = to_hr_srq(ibsrq);
 
 	free_srqc(hr_dev, srq);
+	free_srqn(hr_dev, srq);
 	free_srq_buf(hr_dev, srq);
 
 	return 0;
 }
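The net effect in hns_roce_create_srq() is a symmetric alloc/free pair per resource, unwound in reverse order on failure. Below is a minimal user-space C sketch of that goto-unwind pattern, for illustration only; create_srq(), alloc_step() and free_step() are hypothetical stand-ins, not driver code:

#include <stdio.h>

/* Hypothetical stand-ins for the driver's paired alloc/free helpers. */
static int alloc_step(const char *name)
{
	printf("alloc %s\n", name);
	return 0;	/* a real helper would return -errno on failure */
}

static void free_step(const char *name)
{
	printf("free  %s\n", name);
}

/* Mirrors the ordering hns_roce_create_srq() has after this patch. */
static int create_srq(void)
{
	int ret;

	ret = alloc_step("srq_buf");
	if (ret)
		return ret;

	ret = alloc_step("srqn");	/* split out of alloc_srqc() */
	if (ret)
		goto err_srq_buf;

	ret = alloc_step("srqc");	/* no longer owns the SRQ number */
	if (ret)
		goto err_srqn;

	return 0;

err_srqn:
	free_step("srqn");	/* counterpart of the new free_srqn() */
err_srq_buf:
	free_step("srq_buf");

	return ret;
}

int main(void)
{
	return create_srq() ? 1 : 0;
}

It is this one-alloc-one-free pairing that lets free_srqn() slot cleanly between free_srqc() and free_srq_buf() in both the error path of hns_roce_create_srq() and in hns_roce_destroy_srq().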