Commit 89603f7e authored by Jason Gunthorpe

RDMA/umem: Replace for_each_sg_dma_page with rdma_umem_for_each_dma_block

Generally drivers should be using this core helper to split up the umem
into DMA pages.

These drivers are all probably wrong in some way to pass PAGE_SIZE in as
the HW page size. Either the driver doesn't support other page sizes and
should use 4096, or the driver does support other page sizes and should
use ib_umem_find_best_pgsz() to select the best HW page size from the
HW-supported set.

The only case where passing PAGE_SIZE could be correct is if the HW has a
global setting for PAGE_SIZE that is configured at driver initialization time.

Link: https://lore.kernel.org/r/5-v2-270386b7e60b+28f4-umem_1_jgg@nvidia.com
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent ebc24096
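
For reference, a minimal sketch (not part of this commit) of the pattern the message recommends for a driver that supports multiple HW page sizes: pick the best block size with ib_umem_find_best_pgsz() and then walk the umem with rdma_umem_for_each_dma_block(). MY_HW_SUPPORTED_PGSZ_BITMAP, my_hw_map_umem() and my_hw_write_pte() are hypothetical driver-specific placeholders.

#include <linux/sizes.h>
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>

/* Hypothetical bitmap of page sizes this HW can handle, e.g. 4K/2M/1G. */
#define MY_HW_SUPPORTED_PGSZ_BITMAP	(SZ_4K | SZ_2M | SZ_1G)

/* Hypothetical stand-in for the driver's real PTE/PBL writer. */
static void my_hw_write_pte(dma_addr_t addr, unsigned long pgsz)
{
}

static int my_hw_map_umem(struct ib_umem *umem, u64 iova)
{
	struct ib_block_iter biter;
	unsigned long pgsz;

	/* Pick the largest HW-supported block size the umem layout allows. */
	pgsz = ib_umem_find_best_pgsz(umem, MY_HW_SUPPORTED_PGSZ_BITMAP, iova);
	if (!pgsz)
		return -EINVAL;

	/* Walk the umem in pgsz-sized DMA blocks, not CPU PAGE_SIZE pages. */
	rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
		dma_addr_t addr = rdma_block_iter_dma_address(&biter);

		/* Program one HW translation entry per aligned block. */
		my_hw_write_pte(addr, pgsz);
	}
	return 0;
}

The loop yields one aligned dma_addr_t per pgsz-sized block, so the driver programs one entry per HW block instead of one per CPU page.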
@@ -510,7 +510,7 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	__be64 *pages;
 	int shift, n, i;
 	int err = -ENOMEM;
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	struct c4iw_dev *rhp;
 	struct c4iw_pd *php;
 	struct c4iw_mr *mhp;
@@ -561,8 +561,8 @@ struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	i = n = 0;
-	for_each_sg_dma_page(mhp->umem->sg_head.sgl, &sg_iter, mhp->umem->nmap, 0) {
-		pages[i++] = cpu_to_be64(sg_page_iter_dma_address(&sg_iter));
+	rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
+		pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
 		if (i == PAGE_SIZE / sizeof(*pages)) {
 			err = write_pbl(&mhp->rhp->rdev, pages,
 					mhp->attr.pbl_addr + (n << 3), i,
...
@@ -850,7 +850,7 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 				       u64 virt, int acc, struct ib_udata *udata)
 {
 	struct mthca_dev *dev = to_mdev(pd->device);
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	struct mthca_ucontext *context = rdma_udata_to_drv_context(
 		udata, struct mthca_ucontext, ibucontext);
 	struct mthca_mr *mr;
@@ -899,8 +899,8 @@ static struct ib_mr *mthca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 	write_mtt_size = min(mthca_write_mtt_size(dev), (int) (PAGE_SIZE / sizeof *pages));
-	for_each_sg_dma_page(mr->umem->sg_head.sgl, &sg_iter, mr->umem->nmap, 0) {
-		pages[i++] = sg_page_iter_dma_address(&sg_iter);
+	rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
+		pages[i++] = rdma_block_iter_dma_address(&biter);
 		/*
 		 * Be friendly to write_mtt and pass it chunks
...
@@ -815,9 +815,8 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 			    u32 num_pbes)
 {
 	struct ocrdma_pbe *pbe;
-	struct sg_dma_page_iter sg_iter;
+	struct ib_block_iter biter;
 	struct ocrdma_pbl *pbl_tbl = mr->hwmr.pbl_table;
-	struct ib_umem *umem = mr->umem;
 	int pbe_cnt, total_num_pbes = 0;
 	u64 pg_addr;
@@ -827,9 +826,9 @@ static void build_user_pbes(struct ocrdma_dev *dev, struct ocrdma_mr *mr,
 	pbe = (struct ocrdma_pbe *)pbl_tbl->va;
 	pbe_cnt = 0;
-	for_each_sg_dma_page (umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
+	rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
 		/* store the page address in pbe */
-		pg_addr = sg_page_iter_dma_address(&sg_iter);
+		pg_addr = rdma_block_iter_dma_address(&biter);
 		pbe->pa_lo = cpu_to_le32(pg_addr);
 		pbe->pa_hi = cpu_to_le32(upper_32_bits(pg_addr));
 		pbe_cnt += 1;
...
@@ -182,17 +182,16 @@ int pvrdma_page_dir_insert_dma(struct pvrdma_page_dir *pdir, u64 idx,
 int pvrdma_page_dir_insert_umem(struct pvrdma_page_dir *pdir,
 				struct ib_umem *umem, u64 offset)
 {
+	struct ib_block_iter biter;
 	u64 i = offset;
 	int ret = 0;
-	struct sg_dma_page_iter sg_iter;

 	if (offset >= pdir->npages)
 		return -EINVAL;

-	for_each_sg_dma_page(umem->sg_head.sgl, &sg_iter, umem->nmap, 0) {
-		dma_addr_t addr = sg_page_iter_dma_address(&sg_iter);
-
-		ret = pvrdma_page_dir_insert_dma(pdir, i, addr);
+	rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
+		ret = pvrdma_page_dir_insert_dma(
+			pdir, i, rdma_block_iter_dma_address(&biter));
 		if (ret)
 			goto exit;
...