Commit 8b7457ef authored by Lorenzo Stoakes, committed by Linus Torvalds

mm: unexport __get_user_pages_unlocked()

Unexport the low-level __get_user_pages_unlocked() function and replace
invocations with calls to more appropriate higher-level functions.

In hva_to_pfn_slow() we are able to replace __get_user_pages_unlocked()
with get_user_pages_unlocked() since we can now pass gup_flags.

In async_pf_execute() and process_vm_rw_single_vec() we need to pass
different tsk, mm arguments so get_user_pages_remote() is the sane
replacement in these cases (having added manual acquisition and release
of mmap_sem).

Additionally get_user_pages_remote() reintroduces use of the FOLL_TOUCH
flag.  However, this flag was originally silently dropped by commit
1e987790 ("mm/gup: Introduce get_user_pages_remote()"), so this
appears to have been unintentional and reintroducing it is therefore not
an issue.

[akpm@linux-foundation.org: coding-style fixes]
Link: http://lkml.kernel.org/r/20161027095141.2569-3-lstoakes@gmail.com
Signed-off-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Radim Krcmar <rkrcmar@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5b56d49f
...@@ -1280,9 +1280,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages, ...@@ -1280,9 +1280,6 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
struct vm_area_struct **vmas); struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages, long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked); unsigned int gup_flags, struct page **pages, int *locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags); struct page **pages, unsigned int gup_flags);
int get_user_pages_fast(unsigned long start, int nr_pages, int write, int get_user_pages_fast(unsigned long start, int nr_pages, int write,
......
...@@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked); ...@@ -865,9 +865,10 @@ EXPORT_SYMBOL(get_user_pages_locked);
* caller if required (just like with __get_user_pages). "FOLL_GET" * caller if required (just like with __get_user_pages). "FOLL_GET"
* is set implicitly if "pages" is non-NULL. * is set implicitly if "pages" is non-NULL.
*/ */
__always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
unsigned long start, unsigned long nr_pages, struct mm_struct *mm, unsigned long start,
struct page **pages, unsigned int gup_flags) unsigned long nr_pages, struct page **pages,
unsigned int gup_flags)
{ {
long ret; long ret;
int locked = 1; int locked = 1;
...@@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m ...@@ -879,7 +880,6 @@ __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct m
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return ret; return ret;
} }
EXPORT_SYMBOL(__get_user_pages_unlocked);
/* /*
* get_user_pages_unlocked() is suitable to replace the form: * get_user_pages_unlocked() is suitable to replace the form:
......
...@@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages, ...@@ -176,9 +176,10 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
} }
EXPORT_SYMBOL(get_user_pages_locked); EXPORT_SYMBOL(get_user_pages_locked);
long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, static long __get_user_pages_unlocked(struct task_struct *tsk,
unsigned long start, unsigned long nr_pages, struct mm_struct *mm, unsigned long start,
struct page **pages, unsigned int gup_flags) unsigned long nr_pages, struct page **pages,
unsigned int gup_flags)
{ {
long ret; long ret;
down_read(&mm->mmap_sem); down_read(&mm->mmap_sem);
...@@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm, ...@@ -187,7 +188,6 @@ long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
up_read(&mm->mmap_sem); up_read(&mm->mmap_sem);
return ret; return ret;
} }
EXPORT_SYMBOL(__get_user_pages_unlocked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages, long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
struct page **pages, unsigned int gup_flags) struct page **pages, unsigned int gup_flags)
......
...@@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr, ...@@ -88,7 +88,7 @@ static int process_vm_rw_single_vec(unsigned long addr,
ssize_t rc = 0; ssize_t rc = 0;
unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES unsigned long max_pages_per_loop = PVM_MAX_KMALLOC_PAGES
/ sizeof(struct pages *); / sizeof(struct pages *);
unsigned int flags = FOLL_REMOTE; unsigned int flags = 0;
/* Work out address and page range required */ /* Work out address and page range required */
if (len == 0) if (len == 0)
...@@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr, ...@@ -100,15 +100,19 @@ static int process_vm_rw_single_vec(unsigned long addr,
while (!rc && nr_pages && iov_iter_count(iter)) { while (!rc && nr_pages && iov_iter_count(iter)) {
int pages = min(nr_pages, max_pages_per_loop); int pages = min(nr_pages, max_pages_per_loop);
int locked = 1;
size_t bytes; size_t bytes;
/* /*
* Get the pages we're interested in. We must * Get the pages we're interested in. We must
* add FOLL_REMOTE because task/mm might not * access remotely because task/mm might not
* current/current->mm * current/current->mm
*/ */
pages = __get_user_pages_unlocked(task, mm, pa, pages, down_read(&mm->mmap_sem);
process_pages, flags); pages = get_user_pages_remote(task, mm, pa, pages, flags,
process_pages, NULL, &locked);
if (locked)
up_read(&mm->mmap_sem);
if (pages <= 0) if (pages <= 0)
return -EFAULT; return -EFAULT;
......
...@@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work) ...@@ -76,16 +76,20 @@ static void async_pf_execute(struct work_struct *work)
struct kvm_vcpu *vcpu = apf->vcpu; struct kvm_vcpu *vcpu = apf->vcpu;
unsigned long addr = apf->addr; unsigned long addr = apf->addr;
gva_t gva = apf->gva; gva_t gva = apf->gva;
int locked = 1;
might_sleep(); might_sleep();
/* /*
* This work is run asynchromously to the task which owns * This work is run asynchromously to the task which owns
* mm and might be done in another context, so we must * mm and might be done in another context, so we must
* use FOLL_REMOTE. * access remotely.
*/ */
__get_user_pages_unlocked(NULL, mm, addr, 1, NULL, down_read(&mm->mmap_sem);
FOLL_WRITE | FOLL_REMOTE); get_user_pages_remote(NULL, mm, addr, 1, FOLL_WRITE, NULL, NULL,
&locked);
if (locked)
up_read(&mm->mmap_sem);
kvm_async_page_present_sync(vcpu, apf); kvm_async_page_present_sync(vcpu, apf);
......
...@@ -1418,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault, ...@@ -1418,13 +1418,12 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
npages = get_user_page_nowait(addr, write_fault, page); npages = get_user_page_nowait(addr, write_fault, page);
up_read(&current->mm->mmap_sem); up_read(&current->mm->mmap_sem);
} else { } else {
unsigned int flags = FOLL_TOUCH | FOLL_HWPOISON; unsigned int flags = FOLL_HWPOISON;
if (write_fault) if (write_fault)
flags |= FOLL_WRITE; flags |= FOLL_WRITE;
npages = __get_user_pages_unlocked(current, current->mm, addr, 1, npages = get_user_pages_unlocked(addr, 1, page, flags);
page, flags);
} }
if (npages != 1) if (npages != 1)
return npages; return npages;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment