Commit 67954a6e authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Optimize SRQ buffer size calculating process

Optimize the calculation of the SRQ's WQE buffer parameters to make the
code more readable, by using the new multi-hop addressing functions to
calculate the SRQ's capabilities.

Link: https://lore.kernel.org/r/1588071823-40200-6-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ffb1308b
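
The patch below relies on two small multi-hop addressing helpers whose
bodies are not part of this diff. As a reading aid, here is a minimal
sketch of what they are expected to do, inferred from the call sites in
this patch; the names match the calls below, but the exact definitions
in the driver headers may differ:

static inline u8 to_hr_hem_hopnum(u8 hopnum, int count)
{
	/* An empty queue needs no hop levels at all. */
	if (!count)
		return 0;

	/* HNS_ROCE_HOP_NUM_0 encodes direct addressing, i.e. 0 hops. */
	return hopnum == HNS_ROCE_HOP_NUM_0 ? 0 : hopnum;
}

static inline u32 to_hr_hem_entries_size(u32 count, u32 buf_shift)
{
	/* Total size of 'count' entries of 2^buf_shift bytes each,
	 * rounded up to the hardware page size (sketch; assumes a
	 * hr_hw_page_align() helper exists).
	 */
	return hr_hw_page_align(count << buf_shift);
}

These fold the open-coded HNS_ROCE_HOP_NUM_0 checks and size
multiplications that the hunks below remove.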
@@ -472,7 +472,7 @@ struct hns_roce_cq {
 struct hns_roce_idx_que {
 	struct hns_roce_mtr mtr;
-	int entry_sz;
+	int entry_shift;
 	unsigned long *bitmap;
 };
@@ -699,6 +699,12 @@ static void *get_srq_wqe(struct hns_roce_srq *srq, int n)
 	return hns_roce_buf_offset(srq->buf_mtr.kmem, n << srq->wqe_shift);
 }
 
+static void *get_idx_buf(struct hns_roce_idx_que *idx_que, int n)
+{
+	return hns_roce_buf_offset(idx_que->mtr.kmem,
+				   n << idx_que->entry_shift);
+}
+
 static void hns_roce_free_srq_wqe(struct hns_roce_srq *srq, int wqe_index)
 {
 	/* always called with interrupts disabled. */
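
Replacing entry_sz with entry_shift lets get_idx_buf() compute offsets
with a shift, mirroring get_srq_wqe(). The equivalence only holds
because the entry size is a power of two; a self-contained illustration
(standalone user-space C, not driver code):

#include <assert.h>

int main(void)
{
	/* Assumes a 4-byte index entry, i.e. a power-of-two
	 * HNS_ROCE_IDX_QUE_ENTRY_SZ; entry_shift = ilog2(entry_sz).
	 */
	unsigned int entry_sz = 4;
	unsigned int entry_shift = 2;
	unsigned int n;

	for (n = 0; n < 1024; n++)
		assert(n * entry_sz == (n << entry_shift));

	return 0;
}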
@@ -725,16 +731,6 @@ static int find_empty_entry(struct hns_roce_idx_que *idx_que,
 	return wqe_idx;
 }
 
-static void fill_idx_queue(struct hns_roce_idx_que *idx_que,
-			   int cur_idx, int wqe_idx)
-{
-	unsigned int *addr;
-
-	addr = (unsigned int *)hns_roce_buf_offset(idx_que->mtr.kmem,
-						   cur_idx * idx_que->entry_sz);
-	*addr = wqe_idx;
-}
-
 static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 				     const struct ib_recv_wr *wr,
 				     const struct ib_recv_wr **bad_wr)
@@ -744,6 +740,7 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 	struct hns_roce_v2_wqe_data_seg *dseg;
 	struct hns_roce_v2_db srq_db;
 	unsigned long flags;
+	__le32 *srq_idx;
 	int ret = 0;
 	int wqe_idx;
 	void *wqe;
@@ -775,7 +772,6 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 			break;
 		}
 
-		fill_idx_queue(&srq->idx_que, ind, wqe_idx);
 		wqe = get_srq_wqe(srq, wqe_idx);
 		dseg = (struct hns_roce_v2_wqe_data_seg *)wqe;
@@ -791,6 +787,9 @@ static int hns_roce_v2_post_srq_recv(struct ib_srq *ibsrq,
 			dseg[i].addr = 0;
 		}
 
+		srq_idx = get_idx_buf(&srq->idx_que, ind);
+		*srq_idx = cpu_to_le32(wqe_idx);
+
 		srq->wrid[wqe_idx] = wr->wr_id;
 		ind = (ind + 1) & (srq->wqe_cnt - 1);
 	}
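
Note that the removed fill_idx_queue() stored the index through a plain
unsigned int pointer, while the new code writes a __le32 via
cpu_to_le32(), so the entry stays little-endian for the hardware even
on big-endian kernels. A minimal sketch of the pattern (hypothetical
helper, not part of the driver):

#include <linux/types.h>
#include <asm/byteorder.h>

/* Write one little-endian index entry into a device-visible buffer.
 * cpu_to_le32() is a no-op on little-endian CPUs and a byte swap on
 * big-endian ones, so the device always sees the same byte layout.
 */
static void write_idx_entry(void *buf, u32 wqe_idx)
{
	__le32 *entry = buf;

	*entry = cpu_to_le32(wqe_idx);
}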
@@ -4901,8 +4900,8 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(srq_context->byte_4_srqn_srqst,
 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_M,
 		       SRQC_BYTE_4_SRQ_WQE_HOP_NUM_S,
-		       (hr_dev->caps.srqwqe_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
-			hr_dev->caps.srqwqe_hop_num));
+		       to_hr_hem_hopnum(hr_dev->caps.srqwqe_hop_num,
+					srq->wqe_cnt));
 	roce_set_field(srq_context->byte_4_srqn_srqst,
 		       SRQC_BYTE_4_SRQ_SHIFT_M, SRQC_BYTE_4_SRQ_SHIFT_S,
 		       ilog2(srq->wqe_cnt));
@@ -4944,8 +4943,8 @@ static void hns_roce_v2_write_srqc(struct hns_roce_dev *hr_dev,
 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_M,
 		       SRQC_BYTE_44_SRQ_IDX_HOP_NUM_S,
-		       hr_dev->caps.idx_hop_num == HNS_ROCE_HOP_NUM_0 ? 0 :
-		       hr_dev->caps.idx_hop_num);
+		       to_hr_hem_hopnum(hr_dev->caps.idx_hop_num,
+					srq->wqe_cnt));
 	roce_set_field(srq_context->byte_44_idxbufpgsz_addr,
 		       SRQC_BYTE_44_SRQ_IDX_BA_PG_SZ_M,
@@ -181,16 +181,15 @@ static int alloc_srq_buf(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	struct hns_roce_buf_attr buf_attr = {};
-	int sge_size;
 	int err;
 
-	sge_size = roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
-					  HNS_ROCE_SGE_SIZE * srq->max_gs));
-
-	srq->wqe_shift = ilog2(sge_size);
+	srq->wqe_shift = ilog2(roundup_pow_of_two(max(HNS_ROCE_SGE_SIZE,
+						      HNS_ROCE_SGE_SIZE *
+						      srq->max_gs)));
 
 	buf_attr.page_shift = hr_dev->caps.srqwqe_buf_pg_sz + PAGE_ADDR_SHIFT;
-	buf_attr.region[0].size = srq->wqe_cnt * sge_size;
+	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+							 srq->wqe_shift);
 	buf_attr.region[0].hopnum = hr_dev->caps.srqwqe_hop_num;
 	buf_attr.region_count = 1;
 	buf_attr.fixed_page = true;
@@ -217,10 +216,11 @@ static int alloc_srq_idx(struct hns_roce_dev *hr_dev, struct hns_roce_srq *srq,
 	struct hns_roce_buf_attr buf_attr = {};
 	int err;
 
-	srq->idx_que.entry_sz = HNS_ROCE_IDX_QUE_ENTRY_SZ;
+	srq->idx_que.entry_shift = ilog2(HNS_ROCE_IDX_QUE_ENTRY_SZ);
 
 	buf_attr.page_shift = hr_dev->caps.idx_buf_pg_sz + PAGE_ADDR_SHIFT;
-	buf_attr.region[0].size = srq->wqe_cnt * HNS_ROCE_IDX_QUE_ENTRY_SZ;
+	buf_attr.region[0].size = to_hr_hem_entries_size(srq->wqe_cnt,
+							 srq->idx_que.entry_shift);
 	buf_attr.region[0].hopnum = hr_dev->caps.idx_hop_num;
 	buf_attr.region_count = 1;
 	buf_attr.fixed_page = true;