Commit d938d785 authored by Yixian Liu, committed by Jason Gunthorpe

RDMA/hns: Modify fields of struct hns_roce_srq

Rename the field max to wqe_cnt, which more accurately describes the queue
size of the SRQ, and remove the field wqe_ctr, which is never used.

Link: https://lore.kernel.org/r/1572952082-6681-5-git-send-email-liweihang@hisilicon.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 03ccba5c
...@@ -517,9 +517,8 @@ struct hns_roce_idx_que { ...@@ -517,9 +517,8 @@ struct hns_roce_idx_que {
struct hns_roce_srq { struct hns_roce_srq {
struct ib_srq ibsrq; struct ib_srq ibsrq;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
unsigned long srqn; unsigned long srqn;
int max; u32 wqe_cnt;
int max_gs; int max_gs;
int wqe_shift; int wqe_shift;
void __iomem *db_reg_l; void __iomem *db_reg_l;
...@@ -535,8 +534,8 @@ struct hns_roce_srq { ...@@ -535,8 +534,8 @@ struct hns_roce_srq {
spinlock_t lock; spinlock_t lock;
int head; int head;
int tail; int tail;
u16 wqe_ctr;
struct mutex mutex; struct mutex mutex;
void (*event)(struct hns_roce_srq *srq, enum hns_roce_event event);
}; };
struct hns_roce_uar_table { struct hns_roce_uar_table {
......
...@@ -6040,7 +6040,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev, ...@@ -6040,7 +6040,7 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
hr_dev->caps.srqwqe_hop_num)); hr_dev->caps.srqwqe_hop_num));
roce_set_field(srq_context->byte_4_srqn_srqst, roce_set_field(srq_context->byte_4_srqn_srqst,
SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S, SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
ilog2(srq->max)); ilog2(srq->wqe_cnt));
roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M, roce_set_field(srq_context->byte_4_srqn_srqst, SRQC_BYTE_4_SRQN_M,
SRQC_BYTE_4_SRQN_S, srq->srqn); SRQC_BYTE_4_SRQN_S, srq->srqn);
...@@ -6126,7 +6126,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, ...@@ -6126,7 +6126,7 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
int ret; int ret;
if (srq_attr_mask & IB_SRQ_LIMIT) { if (srq_attr_mask & IB_SRQ_LIMIT) {
if (srq_attr->srq_limit >= srq->max) if (srq_attr->srq_limit >= srq->wqe_cnt)
return -EINVAL; return -EINVAL;
mailbox = hns_roce_alloc_cmd_mailbox(hr_dev); mailbox = hns_roce_alloc_cmd_mailbox(hr_dev);
...@@ -6186,7 +6186,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) ...@@ -6186,7 +6186,7 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
SRQC_BYTE_8_SRQ_LIMIT_WL_S); SRQC_BYTE_8_SRQ_LIMIT_WL_S);
attr->srq_limit = limit_wl; attr->srq_limit = limit_wl;
attr->max_wr = srq->max - 1; attr->max_wr = srq->wqe_cnt - 1;
attr->max_sge = srq->max_gs; attr->max_sge = srq->max_gs;
memcpy(srq_context, mailbox->buf, sizeof(*srq_context)); memcpy(srq_context, mailbox->buf, sizeof(*srq_context));
...@@ -6239,7 +6239,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6239,7 +6239,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
spin_lock_irqsave(&srq->lock, flags); spin_lock_irqsave(&srq->lock, flags);
ind = srq->head & (srq->max - 1); ind = srq->head & (srq->wqe_cnt - 1);
for (nreq = 0; wr; ++nreq, wr = wr->next) { for (nreq = 0; wr; ++nreq, wr = wr->next) {
if (unlikely(wr->num_sge > srq->max_gs)) { if (unlikely(wr->num_sge > srq->max_gs)) {
...@@ -6254,7 +6254,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6254,7 +6254,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
break; break;
} }
wqe_idx = find_empty_entry(&srq->idx_que, srq->max); wqe_idx = find_empty_entry(&srq->idx_que, srq->wqe_cnt);
if (wqe_idx < 0) { if (wqe_idx < 0) {
ret = -ENOMEM; ret = -ENOMEM;
*bad_wr = wr; *bad_wr = wr;
...@@ -6278,7 +6278,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq, ...@@ -6278,7 +6278,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
} }
srq->wrid[wqe_idx] = wr->wr_id; srq->wrid[wqe_idx] = wr->wr_id;
ind = (ind + 1) & (srq->max - 1); ind = (ind + 1) & (srq->wqe_cnt - 1);
} }
if (likely(nreq)) { if (likely(nreq)) {
......
...@@ -255,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq, ...@@ -255,7 +255,7 @@ static int hns_roce_create_idx_que(struct ib_pd *pd, struct hns_roce_srq *srq,
struct hns_roce_dev *hr_dev = to_hr_dev(pd->device); struct hns_roce_dev *hr_dev = to_hr_dev(pd->device);
struct hns_roce_idx_que *idx_que = &srq->idx_que; struct hns_roce_idx_que *idx_que = &srq->idx_que;
idx_que->bitmap = bitmap_zalloc(srq->max, GFP_KERNEL); idx_que->bitmap = bitmap_zalloc(srq->wqe_cnt, GFP_KERNEL);
if (!idx_que->bitmap) if (!idx_que->bitmap)
return -ENOMEM; return -ENOMEM;
...@@ -281,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size) ...@@ -281,7 +281,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
return -ENOMEM; return -ENOMEM;
srq->head = 0; srq->head = 0;
srq->tail = srq->max - 1; srq->tail = srq->wqe_cnt - 1;
ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift, ret = hns_roce_mtt_init(hr_dev, srq->buf.npages, srq->buf.page_shift,
&srq->mtt); &srq->mtt);
...@@ -312,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size) ...@@ -312,7 +312,7 @@ static int create_kernel_srq(struct hns_roce_srq *srq, int srq_buf_size)
if (ret) if (ret)
goto err_kernel_idx_buf; goto err_kernel_idx_buf;
srq->wrid = kvmalloc_array(srq->max, sizeof(u64), GFP_KERNEL); srq->wrid = kvmalloc_array(srq->wqe_cnt, sizeof(u64), GFP_KERNEL);
if (!srq->wrid) { if (!srq->wrid) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_kernel_idx_buf; goto err_kernel_idx_buf;
...@@ -358,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev, ...@@ -358,7 +358,7 @@ static void destroy_kernel_srq(struct hns_roce_dev *hr_dev,
} }
int hns_roce_create_srq(struct ib_srq *ib_srq, int hns_roce_create_srq(struct ib_srq *ib_srq,
struct ib_srq_init_attr *srq_init_attr, struct ib_srq_init_attr *init_attr,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device); struct hns_roce_dev *hr_dev = to_hr_dev(ib_srq->device);
...@@ -370,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, ...@@ -370,24 +370,24 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
u32 cqn; u32 cqn;
/* Check the actual SRQ wqe and SRQ sge num */ /* Check the actual SRQ wqe and SRQ sge num */
if (srq_init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs || if (init_attr->attr.max_wr >= hr_dev->caps.max_srq_wrs ||
srq_init_attr->attr.max_sge > hr_dev->caps.max_srq_sges) init_attr->attr.max_sge > hr_dev->caps.max_srq_sges)
return -EINVAL; return -EINVAL;
mutex_init(&srq->mutex); mutex_init(&srq->mutex);
spin_lock_init(&srq->lock); spin_lock_init(&srq->lock);
srq->max = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1); srq->wqe_cnt = roundup_pow_of_two(init_attr->attr.max_wr + 1);
srq->max_gs = srq_init_attr->attr.max_sge; srq->max_gs = init_attr->attr.max_sge;
srq_desc_size = max(16, 16 * srq->max_gs); srq_desc_size = max(16, 16 * srq->max_gs);
srq->wqe_shift = ilog2(srq_desc_size); srq->wqe_shift = ilog2(srq_desc_size);
srq_buf_size = srq->max * srq_desc_size; srq_buf_size = srq->wqe_cnt * srq_desc_size;
srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ; srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
srq->idx_que.buf_size = srq->max * srq->idx_que.entry_sz; srq->idx_que.buf_size = srq->wqe_cnt * srq->idx_que.entry_sz;
srq->mtt.mtt_type = MTT_TYPE_SRQWQE; srq->mtt.mtt_type = MTT_TYPE_SRQWQE;
srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX; srq->idx_que.mtt.mtt_type = MTT_TYPE_IDX;
...@@ -405,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq, ...@@ -405,8 +405,8 @@ int hns_roce_create_srq(struct ib_srq *ib_srq,
} }
} }
cqn = ib_srq_has_cq(srq_init_attr->srq_type) ? cqn = ib_srq_has_cq(init_attr->srq_type) ?
to_hr_cq(srq_init_attr->ext.cq)->cqn : 0; to_hr_cq(init_attr->ext.cq)->cqn : 0;
srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG; srq->db_reg_l = hr_dev->reg_base + SRQ_DB_REG;
...@@ -453,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata) ...@@ -453,7 +453,7 @@ void hns_roce_destroy_srq(struct ib_srq *ibsrq, struct ib_udata *udata)
hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt); hns_roce_mtt_cleanup(hr_dev, &srq->idx_que.mtt);
} else { } else {
kvfree(srq->wrid); kvfree(srq->wrid);
hns_roce_buf_free(hr_dev, srq->max << srq->wqe_shift, hns_roce_buf_free(hr_dev, srq->wqe_cnt << srq->wqe_shift,
&srq->buf); &srq->buf);
} }
ib_umem_release(srq->idx_que.umem); ib_umem_release(srq->idx_que.umem);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment