Commit 2170ecfa authored by John Hubbard, committed by Linus Torvalds

drm/i915: convert get_user_pages() --> pin_user_pages()

This code was using get_user_pages*(), in a "Case 2" scenario (DMA/RDMA),
using the categorization from [1].  That means that it's time to convert
the get_user_pages*() + put_page() calls to pin_user_pages*() +
unpin_user_pages() calls.

There is some helpful background in [2]: basically, this is a small part
of fixing a long-standing disconnect between pinning pages, and file
systems' use of those pages.
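
For reference, a minimal sketch of the "Case 2" pin/unpin pattern that this
series converts drivers to (not the i915 code itself; pin_for_dma() and the
DMA step are hypothetical placeholders):

	#include <linux/mm.h>
	#include <linux/slab.h>

	/*
	 * Sketch only: pin user pages for DMA, then release them.
	 * pin_user_pages_fast() takes a FOLL_PIN reference on each page;
	 * unpin_user_pages() drops those references, replacing the old
	 * get_user_pages_fast() + put_page()/release_pages() pairing.
	 */
	static int pin_for_dma(unsigned long uaddr, int npages)
	{
		struct page **pages;
		int pinned;

		pages = kvmalloc_array(npages, sizeof(*pages), GFP_KERNEL);
		if (!pages)
			return -ENOMEM;

		pinned = pin_user_pages_fast(uaddr, npages, FOLL_WRITE, pages);
		if (pinned != npages) {
			/* Partial pins still hold references; drop them. */
			if (pinned > 0)
				unpin_user_pages(pages, pinned);
			kvfree(pages);
			return pinned < 0 ? pinned : -EFAULT;
		}

		/* ... program the device to DMA to/from the pinned pages ... */

		unpin_user_pages(pages, pinned);
		kvfree(pages);
		return 0;
	}

If the device has written to the pages, unpin_user_pages_dirty_lock() can be
used instead of unpin_user_pages() to mark them dirty before unpinning.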

[1] Documentation/core-api/pin_user_pages.rst

[2] "Explicit pinning of user-space pages":
    https://lwn.net/Articles/807108/

Signed-off-by: John Hubbard <jhubbard@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Souptick Joarder <jrdr.linux@gmail.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: "Joonas Lahtinen" <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
Cc: Matthew Auld <matthew.auld@intel.com>
Link: http://lkml.kernel.org/r/20200519002124.2025955-5-jhubbard@nvidia.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 104acc32
@@ -471,7 +471,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 					down_read(&mm->mmap_sem);
 					locked = 1;
 				}
-				ret = get_user_pages_remote
+				ret = pin_user_pages_remote
 					(work->task, mm,
 					 obj->userptr.ptr + pinned * PAGE_SIZE,
 					 npages - pinned,
@@ -507,7 +507,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 	}
 	mutex_unlock(&obj->mm.lock);
 
-	release_pages(pvec, pinned);
+	unpin_user_pages(pvec, pinned);
 	kvfree(pvec);
 
 	i915_gem_object_put(obj);
@@ -564,6 +564,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 	struct sg_table *pages;
 	bool active;
 	int pinned;
+	unsigned int gup_flags = 0;
 
 	/* If userspace should engineer that these pages are replaced in
 	 * the vma between us binding this page into the GTT and completion
@@ -606,11 +607,14 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 		 *
 		 * We may or may not care.
 		 */
-		if (pvec) /* defer to worker if malloc fails */
-			pinned = __get_user_pages_fast(obj->userptr.ptr,
-						       num_pages,
-						       !i915_gem_object_is_readonly(obj),
-						       pvec);
+		if (pvec) {
+			/* defer to worker if malloc fails */
+			if (!i915_gem_object_is_readonly(obj))
+				gup_flags |= FOLL_WRITE;
+			pinned = pin_user_pages_fast_only(obj->userptr.ptr,
+							  num_pages, gup_flags,
+							  pvec);
+		}
 	}
 
 	active = false;
@@ -628,7 +632,7 @@ static int i915_gem_userptr_get_pages(struct drm_i915_gem_object *obj)
 		__i915_gem_userptr_set_active(obj, true);
 
 	if (IS_ERR(pages))
-		release_pages(pvec, pinned);
+		unpin_user_pages(pvec, pinned);
 	kvfree(pvec);
 
 	return PTR_ERR_OR_ZERO(pages);
@@ -683,7 +687,7 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
 		}
 
 		mark_page_accessed(page);
-		put_page(page);
+		unpin_user_page(page);
 	}
 	obj->mm.dirty = false;