Commit cc33b23e authored by Xi Wang, committed by Jason Gunthorpe

RDMA/hns: Optimize MTR level-0 addressing to access huge page

If the hns ROCEE is set to level-0 addressing, the length of the entire
buffer can be used as the page size. The driver needn't split the buffer
into small units because all pages are contiguous.

Link: https://lore.kernel.org/r/1593525696-12570-1-git-send-email-liweihang@huawei.com
Signed-off-by: Xi Wang <wangxi11@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 5c99274b
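
The gist of the change: with level-0 (hopnum = 0) addressing, every page of the buffer is physically contiguous, so the whole buffer can be described to the hardware as one huge page instead of a list of 4K entries. A minimal sketch of the page-shift computation the patch introduces (it mirrors mtr_init_buf_cfg() in the diff below; HNS_HW_PAGE_SHIFT/HNS_HW_PAGE_SIZE are the driver's fixed 4K hardware-page constants, and direct_buf_pg_shift() is a hypothetical helper named here only for illustration):

	/*
	 * Sketch: derive a page shift that covers the whole direct buffer.
	 * Assumes buf_size is 4K * 2^N, as the patch requires for a
	 * single-region direct buffer.
	 */
	static unsigned int direct_buf_pg_shift(size_t buf_size)
	{
		int page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);

		/* e.g. a 64K buffer: page_cnt = 16, shift = 12 + 4 = 16 */
		return HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
	}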
@@ -348,20 +348,22 @@ struct hns_roce_buf_attr {
 	bool mtt_only; /* only alloc buffer-required MTT memory */
 };
 
+struct hns_roce_hem_cfg {
+	dma_addr_t root_ba; /* root BA table's address */
+	bool is_direct; /* addressing without BA table */
+	unsigned int ba_pg_shift; /* BA table page shift */
+	unsigned int buf_pg_shift; /* buffer page shift */
+	unsigned int buf_pg_count; /* buffer page count */
+	struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
+	int region_count;
+};
+
 /* memory translate region */
 struct hns_roce_mtr {
 	struct hns_roce_hem_list hem_list; /* multi-hop addressing resource */
 	struct ib_umem *umem; /* user space buffer */
 	struct hns_roce_buf *kmem; /* kernel space buffer */
-	struct {
-		dma_addr_t root_ba; /* root BA table's address */
-		bool is_direct; /* addressing without BA table */
-		unsigned int ba_pg_shift; /* BA table page shift */
-		unsigned int buf_pg_shift; /* buffer page shift */
-		int buf_pg_count; /* buffer page count */
-		struct hns_roce_buf_region region[HNS_ROCE_MAX_BT_REGION];
-		unsigned int region_count;
-	} hem_cfg; /* config for hardware addressing */
+	struct hns_roce_hem_cfg hem_cfg; /* config for hardware addressing */
 };
 
 struct hns_roce_mw {
...
@@ -2483,7 +2483,6 @@ static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 			u64 *sq_ba, u64 *rq_ba, dma_addr_t *bt_ba)
 {
 	struct ib_device *ibdev = &hr_dev->ib_dev;
-	int rq_pa_start;
 	int count;
 
 	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, sq_ba, 1, bt_ba);
@@ -2491,9 +2490,9 @@ static int find_wqe_mtt(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
 		ibdev_err(ibdev, "Failed to find SQ ba\n");
 		return -ENOBUFS;
 	}
-	rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
-	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start, rq_ba, 1,
-				  NULL);
+	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, rq_ba,
+				  1, NULL);
 	if (!count) {
 		ibdev_err(ibdev, "Failed to find RQ ba\n");
 		return -ENOBUFS;
...
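
Note the calling-convention change in this hunk: hns_roce_mtr_find() now takes a byte offset into the buffer and converts it to a page index internally, so callers no longer pre-divide by the page size. Side by side, as read from the diff (the same simplification repeats in the QP-context hunks below):

	/* before: the caller computed a page index itself */
	rq_pa_start = hr_qp->rq.offset >> hr_qp->mtr.hem_cfg.buf_pg_shift;
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, rq_pa_start,
				  rq_ba, 1, NULL);

	/* after: the caller passes the byte offset; the MTR layer shifts */
	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset,
				  rq_ba, 1, NULL);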
@@ -3744,51 +3744,23 @@ static void modify_qp_init_to_init(struct ib_qp *ibqp,
 	}
 }
 
-static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
-				   struct hns_roce_qp *hr_qp, int mtt_cnt,
-				   u32 page_size)
-{
-	struct ib_device *ibdev = &hr_dev->ib_dev;
-
-	if (hr_qp->rq.wqe_cnt < 1)
-		return true;
-
-	if (mtt_cnt < 1) {
-		ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n",
-			  hr_qp->qpn);
-		return false;
-	}
-
-	if (mtt_cnt < MTT_MIN_COUNT &&
-		(hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
-		ibdev_err(ibdev,
-			  "failed to find next RQWQE buf ba of QP(0x%lx)\n",
-			  hr_qp->qpn);
-		return false;
-	}
-
-	return true;
-}
-
 static int config_qp_rq_buf(struct hns_roce_dev *hr_dev,
 			    struct hns_roce_qp *hr_qp,
 			    struct hns_roce_v2_qp_context *context,
 			    struct hns_roce_v2_qp_context *qpc_mask)
 {
-	struct ib_qp *ibqp = &hr_qp->ibqp;
 	u64 mtts[MTT_MIN_COUNT] = { 0 };
 	u64 wqe_sge_ba;
-	u32 page_size;
 	int count;
 
 	/* Search qp buf's mtts */
-	page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
-	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
-				  hr_qp->rq.offset / page_size, mtts,
+	count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, hr_qp->rq.offset, mtts,
 				  MTT_MIN_COUNT, &wqe_sge_ba);
-	if (!ibqp->srq)
-		if (!check_wqe_rq_mtt_count(hr_dev, hr_qp, count, page_size))
-			return -EINVAL;
+	if (hr_qp->rq.wqe_cnt && count < 1) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "failed to find RQ WQE, QPN = 0x%lx.\n", hr_qp->qpn);
+		return -EINVAL;
+	}
 
 	context->wqe_sge_ba = cpu_to_le32(wqe_sge_ba >> 3);
 	qpc_mask->wqe_sge_ba = 0;
@@ -3890,7 +3862,6 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
 	struct ib_device *ibdev = &hr_dev->ib_dev;
 	u64 sge_cur_blk = 0;
 	u64 sq_cur_blk = 0;
-	u32 page_size;
 	int count;
 
 	/* search qp buf's mtts */
@@ -3901,9 +3872,8 @@ static int config_qp_sq_buf(struct hns_roce_dev *hr_dev,
 		return -EINVAL;
 	}
 	if (hr_qp->sge.sge_cnt > 0) {
-		page_size = 1 << hr_qp->mtr.hem_cfg.buf_pg_shift;
 		count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr,
-					  hr_qp->sge.offset / page_size,
+					  hr_qp->sge.offset,
 					  &sge_cur_blk, 1, NULL);
 		if (count < 1) {
 			ibdev_err(ibdev, "failed to find QP(0x%lx) SGE buf.\n",
...
@@ -870,6 +870,15 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	int err;
 	int i;
 
+	/*
+	 * Only use the first page address as root ba when hopnum is 0, this
+	 * is because the addresses of all pages are consecutive in this case.
+	 */
+	if (mtr->hem_cfg.is_direct) {
+		mtr->hem_cfg.root_ba = pages[0];
+		return 0;
+	}
+
 	for (i = 0; i < mtr->hem_cfg.region_count; i++) {
 		r = &mtr->hem_cfg.region[i];
 		if (r->offset + r->count > page_cnt) {
@@ -895,6 +904,8 @@ int hns_roce_mtr_map(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		      int offset, u64 *mtt_buf, int mtt_max, u64 *base_addr)
 {
+	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
+	int start_index;
 	int mtt_count;
 	int total = 0;
 	__le64 *mtts;
@@ -906,26 +917,32 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 		goto done;
 
 	/* no mtt memory in direct mode, so just return the buffer address */
-	if (mtr->hem_cfg.is_direct) {
-		npage = offset;
-		for (total = 0; total < mtt_max; total++, npage++) {
-			addr = mtr->hem_cfg.root_ba +
-			       (npage << mtr->hem_cfg.buf_pg_shift);
+	if (cfg->is_direct) {
+		start_index = offset >> HNS_HW_PAGE_SHIFT;
+		for (mtt_count = 0; mtt_count < cfg->region_count &&
+		     total < mtt_max; mtt_count++) {
+			npage = cfg->region[mtt_count].offset;
+			if (npage < start_index)
+				continue;
+			addr = cfg->root_ba + (npage << HNS_HW_PAGE_SHIFT);
 			if (hr_dev->hw_rev == HNS_ROCE_HW_VER1)
 				mtt_buf[total] = to_hr_hw_page_addr(addr);
 			else
 				mtt_buf[total] = addr;
+
+			total++;
 		}
 
 		goto done;
 	}
 
+	start_index = offset >> cfg->buf_pg_shift;
 	left = mtt_max;
 	while (left > 0) {
 		mtt_count = 0;
 		mtts = hns_roce_hem_list_find_mtt(hr_dev, &mtr->hem_list,
-						  offset + total,
+						  start_index + total,
 						  &mtt_count, NULL);
 		if (!mtts || !mtt_count)
 			goto done;
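
In direct mode there is no MTT memory at all: as the hunk above shows, an address is computed straight from root_ba plus a region's 4K-page offset. A compact sketch of that address arithmetic (direct_mode_addr() is a hypothetical helper for illustration, not part of the patch):

	/*
	 * Direct (level-0) lookup: no BA/MTT tables, just base + offset.
	 * npage is a 4K-unit index within the buffer.
	 */
	static dma_addr_t direct_mode_addr(const struct hns_roce_hem_cfg *cfg,
					   int npage)
	{
		return cfg->root_ba + ((dma_addr_t)npage << HNS_HW_PAGE_SHIFT);
	}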
@@ -938,104 +955,136 @@ int hns_roce_mtr_find(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 done:
 	if (base_addr)
-		*base_addr = mtr->hem_cfg.root_ba;
+		*base_addr = cfg->root_ba;
 
 	return total;
 }
 
-/* convert buffer size to page index and page count */
-static unsigned int mtr_init_region(struct hns_roce_buf_attr *attr,
-				    int page_cnt,
-				    struct hns_roce_buf_region *regions,
-				    int region_cnt, unsigned int page_shift)
+static int mtr_init_buf_cfg(struct hns_roce_dev *hr_dev,
+			    struct hns_roce_buf_attr *attr,
+			    struct hns_roce_hem_cfg *cfg,
+			    unsigned int *buf_page_shift)
 {
-	unsigned int page_size = 1 << page_shift;
-	int max_region = attr->region_count;
 	struct hns_roce_buf_region *r;
-	unsigned int i = 0;
-	int page_idx = 0;
+	unsigned int page_shift = 0;
+	int page_cnt = 0;
+	size_t buf_size;
+	int region_cnt;
 
-	for (; i < region_cnt && i < max_region && page_idx < page_cnt; i++) {
-		r = &regions[i];
-		r->hopnum = attr->region[i].hopnum == HNS_ROCE_HOP_NUM_0 ?
-			    0 : attr->region[i].hopnum;
-		r->offset = page_idx;
-		r->count = DIV_ROUND_UP(attr->region[i].size, page_size);
-		page_idx += r->count;
+	if (cfg->is_direct) {
+		buf_size = cfg->buf_pg_count << cfg->buf_pg_shift;
+		page_cnt = DIV_ROUND_UP(buf_size, HNS_HW_PAGE_SIZE);
+		/*
+		 * When HEM buffer uses level-0 addressing, the page size
+		 * equals the buffer size, and the page size = 4K * 2^N.
+		 */
+		cfg->buf_pg_shift = HNS_HW_PAGE_SHIFT + order_base_2(page_cnt);
+		if (attr->region_count > 1) {
+			cfg->buf_pg_count = page_cnt;
+			page_shift = HNS_HW_PAGE_SHIFT;
+		} else {
+			cfg->buf_pg_count = 1;
+			page_shift = cfg->buf_pg_shift;
+			if (buf_size != 1 << page_shift) {
+				ibdev_err(&hr_dev->ib_dev,
+					  "failed to check direct size %zu shift %d.\n",
+					  buf_size, page_shift);
+				return -EINVAL;
+			}
+		}
+	} else {
+		page_shift = cfg->buf_pg_shift;
+	}
+
+	/* convert buffer size to page index and page count */
+	for (page_cnt = 0, region_cnt = 0; page_cnt < cfg->buf_pg_count &&
+	     region_cnt < attr->region_count &&
+	     region_cnt < ARRAY_SIZE(cfg->region); region_cnt++) {
+		r = &cfg->region[region_cnt];
+		r->offset = page_cnt;
+		buf_size = hr_hw_page_align(attr->region[region_cnt].size);
+		r->count = DIV_ROUND_UP(buf_size, 1 << page_shift);
+		page_cnt += r->count;
+		r->hopnum = to_hr_hem_hopnum(attr->region[region_cnt].hopnum,
+					     r->count);
+	}
+
+	if (region_cnt < 1) {
+		ibdev_err(&hr_dev->ib_dev,
+			  "failed to check mtr region count, pages = %d.\n",
+			  cfg->buf_pg_count);
+		return -ENOBUFS;
 	}
 
-	return i;
+	cfg->region_count = region_cnt;
+	*buf_page_shift = page_shift;
+
+	return page_cnt;
 }
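
A worked example of the direct branch above, assuming a single-region 32K buffer: buf_size = 32768, so page_cnt = DIV_ROUND_UP(32768, 4096) = 8 and buf_pg_shift = 12 + order_base_2(8) = 15. The buffer is then described as one 32K page (buf_pg_count = 1) and the size check passes, while a 24K buffer would be rejected because 24K is not 4K * 2^N:

	/* hypothetical numbers for a single-region direct buffer */
	size_t buf_size = 32 * 1024;			  /* 32K */
	int page_cnt = DIV_ROUND_UP(buf_size, 4096);	  /* = 8 */
	unsigned int shift = 12 + order_base_2(page_cnt); /* = 15, i.e. 32K */
	/* buf_size == 1 << shift, so the direct-size check succeeds */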
 /**
  * hns_roce_mtr_create - Create hns memory translate region.
  *
  * @mtr: memory translate region
- * @init_attr: init attribute for creating mtr
- * @page_shift: page shift for multi-hop base address table
+ * @buf_attr: buffer attribute for creating mtr
+ * @ba_page_shift: page shift for multi-hop base address table
  * @udata: user space context, if it's NULL, means kernel space
  * @user_addr: userspace virtual address to start at
- * @buf_alloced: mtr has private buffer, true means need to alloc
  */
 int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 			struct hns_roce_buf_attr *buf_attr,
-			unsigned int page_shift, struct ib_udata *udata,
+			unsigned int ba_page_shift, struct ib_udata *udata,
 			unsigned long user_addr)
 {
+	struct hns_roce_hem_cfg *cfg = &mtr->hem_cfg;
 	struct ib_device *ibdev = &hr_dev->ib_dev;
+	unsigned int buf_page_shift = 0;
 	dma_addr_t *pages = NULL;
-	int region_cnt = 0;
 	int all_pg_cnt;
 	int get_pg_cnt;
-	bool has_mtt;
-	int err = 0;
+	int ret = 0;
 
-	has_mtt = mtr_has_mtt(buf_attr);
+	/* if disable mtt, all pages must in a continuous address range */
+	cfg->is_direct = !mtr_has_mtt(buf_attr);
 
 	/* if buffer only need mtt, just init the hem cfg */
 	if (buf_attr->mtt_only) {
-		mtr->hem_cfg.buf_pg_shift = buf_attr->page_shift;
-		mtr->hem_cfg.buf_pg_count = mtr_bufs_size(buf_attr) >>
-					    buf_attr->page_shift;
+		cfg->buf_pg_shift = buf_attr->page_shift;
+		cfg->buf_pg_count = mtr_bufs_size(buf_attr) >>
+				    buf_attr->page_shift;
 		mtr->umem = NULL;
 		mtr->kmem = NULL;
 	} else {
-		err = mtr_alloc_bufs(hr_dev, mtr, buf_attr, !has_mtt, udata,
-				     user_addr);
-		if (err) {
-			ibdev_err(ibdev, "Failed to alloc mtr bufs, err %d\n",
-				  err);
-			return err;
+		ret = mtr_alloc_bufs(hr_dev, mtr, buf_attr, cfg->is_direct,
+				     udata, user_addr);
+		if (ret) {
+			ibdev_err(ibdev,
+				  "failed to alloc mtr bufs, ret = %d.\n", ret);
+			return ret;
 		}
 	}
 
-	/* alloc mtt memory */
-	all_pg_cnt = mtr->hem_cfg.buf_pg_count;
-	hns_roce_hem_list_init(&mtr->hem_list);
-	mtr->hem_cfg.is_direct = !has_mtt;
-	mtr->hem_cfg.ba_pg_shift = page_shift;
-	mtr->hem_cfg.region_count = 0;
-	region_cnt = mtr_init_region(buf_attr, all_pg_cnt,
-				     mtr->hem_cfg.region,
-				     ARRAY_SIZE(mtr->hem_cfg.region),
-				     mtr->hem_cfg.buf_pg_shift);
-	if (region_cnt < 1) {
-		err = -ENOBUFS;
-		ibdev_err(ibdev, "failed to init mtr region %d\n", region_cnt);
+	all_pg_cnt = mtr_init_buf_cfg(hr_dev, buf_attr, cfg, &buf_page_shift);
+	if (all_pg_cnt < 1) {
+		ret = -ENOBUFS;
+		ibdev_err(ibdev, "failed to init mtr buf cfg.\n");
 		goto err_alloc_bufs;
 	}
 
-	mtr->hem_cfg.region_count = region_cnt;
-
-	if (has_mtt) {
-		err = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
-						mtr->hem_cfg.region, region_cnt,
-						page_shift);
-		if (err) {
-			ibdev_err(ibdev, "Failed to request mtr hem, err %d\n",
-				  err);
+	hns_roce_hem_list_init(&mtr->hem_list);
+	if (!cfg->is_direct) {
+		ret = hns_roce_hem_list_request(hr_dev, &mtr->hem_list,
+						cfg->region, cfg->region_count,
+						ba_page_shift);
+		if (ret) {
+			ibdev_err(ibdev, "failed to request mtr hem, ret = %d.\n",
+				  ret);
 			goto err_alloc_bufs;
 		}
-		mtr->hem_cfg.root_ba = mtr->hem_list.root_ba;
+		cfg->root_ba = mtr->hem_list.root_ba;
+		cfg->ba_pg_shift = ba_page_shift;
+	} else {
+		cfg->ba_pg_shift = cfg->buf_pg_shift;
 	}
 
 	/* no buffer to map */
@@ -1045,31 +1094,26 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	/* alloc a tmp array to store buffer's dma address */
 	pages = kvcalloc(all_pg_cnt, sizeof(dma_addr_t), GFP_KERNEL);
 	if (!pages) {
-		err = -ENOMEM;
-		ibdev_err(ibdev, "Failed to alloc mtr page list %d\n",
+		ret = -ENOMEM;
+		ibdev_err(ibdev, "failed to alloc mtr page list %d.\n",
 			  all_pg_cnt);
 		goto err_alloc_hem_list;
 	}
 
 	get_pg_cnt = mtr_get_pages(hr_dev, mtr, pages, all_pg_cnt,
-				   mtr->hem_cfg.buf_pg_shift);
+				   buf_page_shift);
 	if (get_pg_cnt != all_pg_cnt) {
-		ibdev_err(ibdev, "Failed to get mtr page %d != %d\n",
+		ibdev_err(ibdev, "failed to get mtr page %d != %d.\n",
 			  get_pg_cnt, all_pg_cnt);
-		err = -ENOBUFS;
+		ret = -ENOBUFS;
 		goto err_alloc_page_list;
 	}
 
-	if (!has_mtt) {
-		mtr->hem_cfg.root_ba = pages[0];
-	} else {
-		/* write buffer's dma address to BA table */
-		err = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
-		if (err) {
-			ibdev_err(ibdev, "Failed to map mtr pages, err %d\n",
-				  err);
-			goto err_alloc_page_list;
-		}
+	/* write buffer's dma address to BA table */
+	ret = hns_roce_mtr_map(hr_dev, mtr, pages, all_pg_cnt);
+	if (ret) {
+		ibdev_err(ibdev, "failed to map mtr pages, ret = %d.\n", ret);
+		goto err_alloc_page_list;
 	}
 
 	/* drop tmp array */
@@ -1081,7 +1125,7 @@ int hns_roce_mtr_create(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr,
 	hns_roce_hem_list_release(hr_dev, &mtr->hem_list);
 err_alloc_bufs:
 	mtr_free_bufs(hr_dev, mtr);
-	return err;
+	return ret;
 }
 
 void hns_roce_mtr_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_mtr *mtr)
...