Commit 161ebe24 authored by Shiraz, Saleem; committed by Jason Gunthorpe

RDMA/bnxt_re: Use for_each_sg_dma_page iterator on umem SGL

Use the for_each_sg_dma_page iterator variant to walk the umem DMA-mapped
SGL and get the page DMA address. This avoids the extra loop to iterate
pages in the SGE when for_each_sg iterator is used.

Additionally, purge umem->page_shift usage in the driver, as it is only
relevant for ODP MRs. Use the system page size and shift instead.
Signed-off-by: Shiraz, Saleem <shiraz.saleem@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent d901b276
...@@ -3553,19 +3553,14 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig, ...@@ -3553,19 +3553,14 @@ static int fill_umem_pbl_tbl(struct ib_umem *umem, u64 *pbl_tbl_orig,
u64 *pbl_tbl = pbl_tbl_orig; u64 *pbl_tbl = pbl_tbl_orig;
u64 paddr; u64 paddr;
u64 page_mask = (1ULL << page_shift) - 1; u64 page_mask = (1ULL << page_shift) - 1;
int i, pages; struct sg_dma_page_iter sg_iter;
struct scatterlist *sg;
int entry; for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
paddr = sg_page_iter_dma_address(&sg_iter);
for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) { if (pbl_tbl == pbl_tbl_orig)
pages = sg_dma_len(sg) >> PAGE_SHIFT; *pbl_tbl++ = paddr & ~page_mask;
for (i = 0; i < pages; i++) { else if ((paddr & page_mask) == 0)
paddr = sg_dma_address(sg) + (i << PAGE_SHIFT); *pbl_tbl++ = paddr;
if (pbl_tbl == pbl_tbl_orig)
*pbl_tbl++ = paddr & ~page_mask;
else if ((paddr & page_mask) == 0)
*pbl_tbl++ = paddr;
}
} }
return pbl_tbl - pbl_tbl_orig; return pbl_tbl - pbl_tbl_orig;
} }
...@@ -3628,7 +3623,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length, ...@@ -3628,7 +3623,7 @@ struct ib_mr *bnxt_re_reg_user_mr(struct ib_pd *ib_pd, u64 start, u64 length,
goto free_umem; goto free_umem;
} }
page_shift = umem->page_shift; page_shift = PAGE_SHIFT;
if (!bnxt_re_page_size_ok(page_shift)) { if (!bnxt_re_page_size_ok(page_shift)) {
dev_err(rdev_to_dev(rdev), "umem page size unsupported!"); dev_err(rdev_to_dev(rdev), "umem page size unsupported!");
......
...@@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, ...@@ -85,7 +85,7 @@ static void __free_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
struct scatterlist *sghead, u32 pages, u32 pg_size) struct scatterlist *sghead, u32 pages, u32 pg_size)
{ {
struct scatterlist *sg; struct sg_dma_page_iter sg_iter;
bool is_umem = false; bool is_umem = false;
int i; int i;
...@@ -116,12 +116,13 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl, ...@@ -116,12 +116,13 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
} else { } else {
i = 0; i = 0;
is_umem = true; is_umem = true;
for_each_sg(sghead, sg, pages, i) { for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
pbl->pg_map_arr[i] = sg_dma_address(sg); pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
pbl->pg_arr[i] = sg_virt(sg); pbl->pg_arr[i] = NULL;
if (!pbl->pg_arr[i]) if (!pbl->pg_arr[i])
goto fail; goto fail;
i++;
pbl->pg_count++; pbl->pg_count++;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment