Commit 22123a0e authored by Jason Gunthorpe

RDMA/i40iw: Use ib_umem_num_dma_pages()

If ib_umem_find_best_pgsz() returns > PAGE_SIZE then the equation here is
not correct. 'start' should be 'virt'. Change it to use the core code for
page_num and the canonical calculation of page_shift.

Fixes: eb52c033 ("RDMA/i40iw: Use core helpers to get aligned DMA address within a supported page size")
Link: https://lore.kernel.org/r/8-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent 1f9b6827
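
For context, the core helper counts page_size-sized blocks over the pgsz-aligned IOVA range, so the result is keyed to 'virt' (the MR's IOVA) rather than 'start' (the userspace address). Below is a minimal, self-contained sketch of that arithmetic; it is illustrative only, and the standalone names (num_dma_blocks, align_down, align_up) are not the kernel's API:

#include <stddef.h>
#include <stdint.h>

/* Round down/up to a power-of-two block size 'a'. */
static inline uint64_t align_down(uint64_t x, uint64_t a)
{
	return x & ~(a - 1);
}

static inline uint64_t align_up(uint64_t x, uint64_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/*
 * Sketch of the canonical calculation performed by
 * ib_umem_num_dma_blocks(): count the pgsz-sized DMA blocks spanned by
 * [iova, iova + length) after aligning the range to pgsz. Note the
 * count depends only on the IOVA, never on the userspace address.
 */
static inline size_t num_dma_blocks(uint64_t iova, uint64_t length,
				    uint64_t pgsz)
{
	return (size_t)((align_up(iova + length, pgsz) -
			 align_down(iova, pgsz)) / pgsz);
}

With a 2M block size, for example, an MR whose IOVA offset within the block differs from the userspace address offset would be miscounted by the old 'start'-based rounding, while the aligned-IOVA form above counts it correctly. That mismatch can only arise when the chosen page size exceeds PAGE_SIZE, which is exactly the case the commit message calls out.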
--- a/drivers/infiniband/hw/i40iw/i40iw_verbs.c
+++ b/drivers/infiniband/hw/i40iw/i40iw_verbs.c
@@ -1745,15 +1745,12 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	struct i40iw_mr *iwmr;
 	struct ib_umem *region;
 	struct i40iw_mem_reg_req req;
-	u64 pbl_depth = 0;
 	u32 stag = 0;
 	u16 access;
-	u64 region_length;
 	bool use_pbles = false;
 	unsigned long flags;
 	int err = -ENOSYS;
 	int ret;
-	int pg_shift;
 
 	if (!udata)
 		return ERR_PTR(-EOPNOTSUPP);
@@ -1788,18 +1785,13 @@ static struct ib_mr *i40iw_reg_user_mr(struct ib_pd *pd,
 	if (req.reg_type == IW_MEMREG_TYPE_MEM)
 		iwmr->page_size = ib_umem_find_best_pgsz(region, SZ_4K | SZ_2M,
 							 virt);
-
-	region_length = region->length + (start & (iwmr->page_size - 1));
-	pg_shift = ffs(iwmr->page_size) - 1;
-	pbl_depth = region_length >> pg_shift;
-	pbl_depth += (region_length & (iwmr->page_size - 1)) ? 1 : 0;
 	iwmr->length = region->length;
 
 	iwpbl->user_base = virt;
 	palloc = &iwpbl->pble_alloc;
 
 	iwmr->type = req.reg_type;
-	iwmr->page_cnt = (u32)pbl_depth;
+	iwmr->page_cnt = ib_umem_num_dma_blocks(region, iwmr->page_size);
 
 	switch (req.reg_type) {
 	case IW_MEMREG_TYPE_QP: