Commit dfa0a4ff authored by John Hubbard, committed by Linus Torvalds

IB/{core,hw,umem}: set FOLL_PIN via pin_user_pages*(), fix up ODP

Convert infiniband to use the new pin_user_pages*() calls.

Also, revert earlier changes to Infiniband ODP that had it using
put_user_page().  ODP is "Case 3" in
Documentation/core-api/pin_user_pages.rst, which is to say, normal
get_user_pages() and put_page() is the API to use there.

The new pin_user_pages*() calls replace corresponding get_user_pages*()
calls, and set the FOLL_PIN flag.  The FOLL_PIN flag requires that the
caller must return the pages via put_user_page*() calls, but infiniband
was already doing that as part of an earlier commit.
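
For readers coming to this from the changelog alone, here is a minimal sketch of the two pairings described above. It is not code from this patch, the helper names are invented, and it uses the put_user_page*() naming that was current at the time of this commit:

#include <linux/mm.h>

/*
 * "Case 2"-style DMA pinning: pin_user_pages*() sets FOLL_PIN and must
 * be balanced by put_user_page*().
 */
static int example_pin_for_dma(unsigned long uaddr, struct page **page)
{
	int ret;

	ret = pin_user_pages_fast(uaddr, 1, FOLL_WRITE | FOLL_LONGTERM, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... DMA to/from the page; once the device is done with it: */
	put_user_page(*page);		/* balances the FOLL_PIN reference */
	return 0;
}

/*
 * ODP ("Case 3"): plain get_user_pages*() balanced by an ordinary
 * put_page(), which is what this patch reverts ODP back to.
 */
static int example_get_for_odp(unsigned long uaddr, struct page **page)
{
	int ret;

	ret = get_user_pages_fast(uaddr, 1, FOLL_WRITE, page);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... record the page in the umem ... */
	put_page(*page);		/* ordinary reference drop, no FOLL_PIN */
	return 0;
}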

Link: http://lkml.kernel.org/r/20200107224558.2362728-14-jhubbard@nvidia.com
Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
Cc: Alex Williamson <alex.williamson@redhat.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Hans Verkuil <hverkuil-cisco@xs4all.nl>
Cc: Ira Weiny <ira.weiny@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Leon Romanovsky <leonro@mellanox.com>
Cc: Mauro Carvalho Chehab <mchehab@kernel.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 57459435
@@ -257,7 +257,7 @@ struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
 	sg = umem->sg_head.sgl;
 	while (npages) {
-		ret = get_user_pages_fast(cur_base,
+		ret = pin_user_pages_fast(cur_base,
 					  min_t(unsigned long, npages,
 						PAGE_SIZE /
 						sizeof(struct page *)),
...
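As context for the hunk above, a standalone sketch (hypothetical helper name, not code from this patch) of the chunked pinning loop used by ib_umem_get(): the page-pointer array occupies a single page, so each pin_user_pages_fast() call is capped at PAGE_SIZE / sizeof(struct page *) entries and the loop advances by however many pages were actually pinned.

#include <linux/kernel.h>
#include <linux/mm.h>

static int example_pin_range(unsigned long cur_base, unsigned long npages,
			     unsigned int gup_flags, struct page **page_list)
{
	int ret;

	while (npages) {
		ret = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE / sizeof(struct page *)),
					  gup_flags | FOLL_LONGTERM, page_list);
		if (ret < 0)
			return ret;
		if (ret == 0)
			return -EFAULT;	/* no progress; avoid looping forever */

		/* ... append the ret pinned pages to the scatterlist ... */
		cur_base += ret * PAGE_SIZE;
		npages -= ret;
	}
	return 0;
}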
@@ -293,9 +293,8 @@ EXPORT_SYMBOL(ib_umem_odp_release);
  * The function returns -EFAULT if the DMA mapping operation fails. It returns
  * -EAGAIN if a concurrent invalidation prevents us from updating the page.
  *
- * The page is released via put_user_page even if the operation failed. For
- * on-demand pinning, the page is released whenever it isn't stored in the
- * umem.
+ * The page is released via put_page even if the operation failed. For on-demand
+ * pinning, the page is released whenever it isn't stored in the umem.
  */
 static int ib_umem_odp_map_dma_single_page(
 		struct ib_umem_odp *umem_odp,
@@ -348,7 +347,7 @@ static int ib_umem_odp_map_dma_single_page(
 	}
 out:
-	put_user_page(page);
+	put_page(page);
 	return ret;
 }
@@ -458,7 +457,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 				ret = -EFAULT;
 				break;
 			}
-			put_user_page(local_page_list[j]);
+			put_page(local_page_list[j]);
 			continue;
 		}
@@ -485,7 +484,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 			 * ib_umem_odp_map_dma_single_page().
 			 */
 			if (npages - (j + 1) > 0)
-				put_user_pages(&local_page_list[j+1],
+				release_pages(&local_page_list[j+1],
 					       npages - (j + 1));
 			break;
 		}
...
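A sketch of the error-path pattern in the last hunk above (hypothetical helper, not code from this patch): when mapping fails at index j, the per-page helper has already dropped its own reference via put_page(), so only the not-yet-processed tail of the array still holds references, and those can be dropped in one call with release_pages().

#include <linux/mm.h>
#include <linux/pagemap.h>

static void example_drop_unprocessed_tail(struct page **local_page_list,
					  int j, int npages)
{
	if (npages - (j + 1) > 0)
		release_pages(&local_page_list[j + 1], npages - (j + 1));
}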
@@ -106,7 +106,7 @@ int hfi1_acquire_user_pages(struct mm_struct *mm, unsigned long vaddr, size_t np
 	int ret;
 	unsigned int gup_flags = FOLL_LONGTERM | (writable ? FOLL_WRITE : 0);
-	ret = get_user_pages_fast(vaddr, npages, gup_flags, pages);
+	ret = pin_user_pages_fast(vaddr, npages, gup_flags, pages);
 	if (ret < 0)
 		return ret;
...
@@ -472,7 +472,7 @@ int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
 		goto out;
 	}
-	ret = get_user_pages_fast(uaddr & PAGE_MASK, 1,
+	ret = pin_user_pages_fast(uaddr & PAGE_MASK, 1,
 				  FOLL_WRITE | FOLL_LONGTERM, pages);
 	if (ret < 0)
 		goto out;
...
@@ -108,7 +108,7 @@ int qib_get_user_pages(unsigned long start_page, size_t num_pages,
 	down_read(&current->mm->mmap_sem);
 	for (got = 0; got < num_pages; got += ret) {
-		ret = get_user_pages(start_page + got * PAGE_SIZE,
+		ret = pin_user_pages(start_page + got * PAGE_SIZE,
 				     num_pages - got,
 				     FOLL_LONGTERM | FOLL_WRITE | FOLL_FORCE,
 				     p + got, NULL);
...
@@ -670,7 +670,7 @@ static int qib_user_sdma_pin_pages(const struct qib_devdata *dd,
 		else
 			j = npages;
-		ret = get_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
+		ret = pin_user_pages_fast(addr, j, FOLL_LONGTERM, pages);
 		if (ret != j) {
 			i = 0;
 			j = ret;
...
@@ -141,7 +141,7 @@ static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
 	ret = 0;
 	while (npages) {
-		ret = get_user_pages(cur_base,
+		ret = pin_user_pages(cur_base,
 				     min_t(unsigned long, npages,
					   PAGE_SIZE / sizeof(struct page *)),
 				     gup_flags | FOLL_LONGTERM,
...
@@ -426,7 +426,7 @@ struct siw_umem *siw_umem_get(u64 start, u64 len, bool writable)
 	while (nents) {
 		struct page **plist = &umem->page_chunk[i].plist[got];
-		rv = get_user_pages(first_page_va, nents,
+		rv = pin_user_pages(first_page_va, nents,
 				    foll_flags | FOLL_LONGTERM,
 				    plist, NULL);
 		if (rv < 0)
...