Commit 5b56d49f authored by Lorenzo Stoakes, committed by Linus Torvalds

mm: add locked parameter to get_user_pages_remote()

Patch series "mm: unexport __get_user_pages_unlocked()".

This patch series continues the cleanup of the get_user_pages*() functions,
taking advantage of the fact that we can now pass gup_flags as we please.

It firstly adds an additional 'locked' parameter to
get_user_pages_remote() to allow its callers to utilise
VM_FAULT_RETRY functionality.  This is necessary because the invocation
of __get_user_pages_unlocked() in process_vm_rw_single_vec() makes use
of this functionality and no other existing higher-level function would
allow it to do so.

Secondly, existing callers of __get_user_pages_unlocked() are replaced
with the appropriate higher-level function - get_user_pages_unlocked()
if the current task and memory descriptor are referenced, or
get_user_pages_remote() if other task/memory descriptors are referenced
(having first acquired mmap_sem).
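
For illustration only (this is not code from the series), the two
replacement patterns might look roughly like the following; the helper
names pin_current_page() and pin_remote_page() are hypothetical:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical caller operating on current->mm: the unlocked variant
 * takes and drops mmap_sem internally. */
static long pin_current_page(unsigned long addr, struct page **page,
			     unsigned int gup_flags)
{
	return get_user_pages_unlocked(addr, 1, page, gup_flags);
}

/* Hypothetical caller operating on another task's mm: mmap_sem must be
 * acquired around the remote variant (the new 'locked' argument is
 * passed as NULL here, i.e. no VM_FAULT_RETRY handling). */
static long pin_remote_page(struct task_struct *tsk, struct mm_struct *mm,
			    unsigned long addr, struct page **page,
			    unsigned int gup_flags)
{
	long ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages_remote(tsk, mm, addr, 1, gup_flags,
				    page, NULL, NULL);
	up_read(&mm->mmap_sem);
	return ret;
}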

This patch (of 2):

Add an int *locked parameter to get_user_pages_remote() to allow
VM_FAULT_RETRY faulting behaviour similar to get_user_pages_[un]locked().

Taking into account the previous adjustments to the get_user_pages*()
functions allowing for the passing of gup_flags, we are now in a
position where __get_user_pages_unlocked() need only be exported for its
ability to allow VM_FAULT_RETRY behaviour.  This adjustment allows us to
subsequently unexport __get_user_pages_unlocked() and also allows for
future flexibility in the use of get_user_pages_remote().
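
As an illustration of the new calling convention, here is a minimal
sketch (not taken from this patch) of a caller opting in to
VM_FAULT_RETRY via the locked parameter; the helper name and the use of
FOLL_WRITE are assumptions:

#include <linux/mm.h>
#include <linux/sched.h>

/* Hypothetical helper: pin one page in a remote mm, allowing the fault
 * handler to drop mmap_sem and retry (VM_FAULT_RETRY). */
static long pin_one_remote_page(struct task_struct *tsk, struct mm_struct *mm,
				unsigned long addr, struct page **page)
{
	int locked = 1;
	long ret;

	down_read(&mm->mmap_sem);	/* lock must be held on entry */
	ret = get_user_pages_remote(tsk, mm, addr, 1, FOLL_WRITE,
				    page, NULL, &locked);
	/* 'locked' is cleared if mmap_sem was dropped during a retry,
	 * in which case the caller must not release it again. */
	if (locked)
		up_read(&mm->mmap_sem);
	return ret;
}

With a NULL locked argument the behaviour is unchanged from the previous
signature: the caller's mmap_sem is never dropped.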

[sfr@canb.auug.org.au: merge fix for get_user_pages_remote API change]
  Link: http://lkml.kernel.org/r/20161122210511.024ec341@canb.auug.org.au
Link: http://lkml.kernel.org/r/20161027095141.2569-2-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 370b262c
@@ -759,7 +759,7 @@ static struct page **etnaviv_gem_userptr_do_get_pages(
 	down_read(&mm->mmap_sem);
 	while (pinned < npages) {
 		ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
-					    flags, pvec + pinned, NULL);
+					    flags, pvec + pinned, NULL, NULL);
 		if (ret < 0)
 			break;
...
@@ -515,7 +515,7 @@ __i915_gem_userptr_get_pages_worker(struct work_struct *_work)
 					obj->userptr.ptr + pinned * PAGE_SIZE,
 					npages - pinned,
 					flags,
-					pvec + pinned, NULL);
+					pvec + pinned, NULL, NULL);
 			if (ret < 0)
 				break;
...
@@ -578,7 +578,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 		 */
 		npages = get_user_pages_remote(owning_process, owning_mm,
 				user_virt, gup_num_pages,
-				flags, local_page_list, NULL);
+				flags, local_page_list, NULL, NULL);
 		up_read(&owning_mm->mmap_sem);
 		if (npages < 0)
...
@@ -362,7 +362,7 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
 		down_read(&mm->mmap_sem);
 		ret = get_user_pages_remote(NULL, mm, vaddr, 1, flags, page,
-					    NULL);
+					    NULL, NULL);
 		up_read(&mm->mmap_sem);
 	}
...
@@ -209,7 +209,7 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
 	 * doing the exec and bprm->mm is the new process's mm.
 	 */
 	ret = get_user_pages_remote(current, bprm->mm, pos, 1, gup_flags,
-			&page, NULL);
+			&page, NULL, NULL);
 	if (ret <= 0)
 		return NULL;
...
@@ -1274,7 +1274,7 @@ extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			    unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
-			    struct vm_area_struct **vmas);
+			    struct vm_area_struct **vmas, int *locked);
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 			    unsigned int gup_flags, struct page **pages,
 			    struct vm_area_struct **vmas);
...
@@ -301,7 +301,7 @@ int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr,
 retry:
 	/* Read the page with vaddr into memory */
 	ret = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &old_page,
-				    &vma);
+				    &vma, NULL);
 	if (ret <= 0)
 		return ret;
@@ -1712,7 +1712,7 @@ static int is_trap_at_addr(struct mm_struct *mm, unsigned long vaddr)
 	 * essentially a kernel access to the memory.
 	 */
 	result = get_user_pages_remote(NULL, mm, vaddr, 1, FOLL_FORCE, &page,
-				       NULL);
+				       NULL, NULL);
 	if (result < 0)
 		return result;
...
@@ -917,6 +917,9 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  *		only intends to ensure the pages are faulted in.
  * @vmas:	array of pointers to vmas corresponding to each page.
  *		Or NULL if the caller does not require them.
+ * @locked:	pointer to lock flag indicating whether lock is held and
+ *		subsequently whether VM_FAULT_RETRY functionality can be
+ *		utilised. Lock must initially be held.
  *
  * Returns number of pages pinned. This may be fewer than the number
  * requested. If nr_pages is 0 or negative, returns 0. If no pages
@@ -960,10 +963,10 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
 long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 		unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
-		struct vm_area_struct **vmas)
+		struct vm_area_struct **vmas, int *locked)
 {
 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
-				       NULL, false,
+				       locked, true,
 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
@@ -971,8 +974,9 @@ EXPORT_SYMBOL(get_user_pages_remote);
 /*
  * This is the same as get_user_pages_remote(), just with a
  * less-flexible calling convention where we assume that the task
- * and mm being operated on are the current task's. We also
- * obviously don't pass FOLL_REMOTE in here.
+ * and mm being operated on are the current task's and don't allow
+ * passing of a locked parameter. We also obviously don't pass
+ * FOLL_REMOTE in here.
  */
 long get_user_pages(unsigned long start, unsigned long nr_pages,
 		unsigned int gup_flags, struct page **pages,
...
@@ -3919,7 +3919,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
 		struct page *page = NULL;
 		ret = get_user_pages_remote(tsk, mm, addr, 1,
-				gup_flags, &page, &vma);
+				gup_flags, &page, &vma, NULL);
 		if (ret <= 0) {
 #ifndef CONFIG_HAVE_IOREMAP_PROT
 			break;
...
@@ -881,7 +881,7 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
 	 * the execve().
 	 */
 	if (get_user_pages_remote(current, bprm->mm, pos, 1,
-				  FOLL_FORCE, &page, NULL) <= 0)
+				  FOLL_FORCE, &page, NULL, NULL) <= 0)
 		return false;
 #else
 	page = bprm->page[pos / PAGE_SIZE];
...