Commit 5a87e37e authored by Linus Torvalds

Merge branch 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs

Pull get_user_pages_fast updates from Al Viro:
 "A bit more get_user_pages work"

* 'work.get_user_pages_fast' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  kvm: switch get_user_page_nowait() to get_user_pages_unlocked()
  __get_user_pages_locked(): get rid of notify_drop argument
  get_user_pages_unlocked(): pass true to __get_user_pages_locked() notify_drop
  cris: switch to get_user_pages_fast()
  fold __get_user_pages_unlocked() into its sole remaining caller
parents 19e7b5f9 ce53053c
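
The common thread of the series is removing open-coded mmap_sem handling around get_user_pages(): callers move to helpers that take and drop the lock themselves (get_user_pages_fast(), get_user_pages_unlocked()), after which the internal notify_drop argument has no remaining users. As a rough sketch of the caller-side shape this replaces (hypothetical function names; signatures as they stand after this merge, ~v4.16; not code from the commit):

#include <linux/mm.h>
#include <linux/sched.h>

/* Before: the caller brackets get_user_pages() with mmap_sem itself. */
static long pin_user_buf_old(unsigned long start, unsigned long nr,
                             struct page **pages)
{
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(start, nr, FOLL_WRITE, pages, NULL);
        up_read(&current->mm->mmap_sem);
        return ret;
}

/* After: get_user_pages_unlocked() takes and drops mmap_sem internally. */
static long pin_user_buf_new(unsigned long start, unsigned long nr,
                             struct page **pages)
{
        return get_user_pages_unlocked(start, nr, pages, FOLL_WRITE);
}
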
@@ -2717,37 +2717,28 @@ static int cryptocop_ioctl_process(struct inode *inode, struct file *filp, unsig
 		}
 	}
-	/* Acquire the mm page semaphore. */
-	down_read(&current->mm->mmap_sem);
-	err = get_user_pages((unsigned long int)(oper.indata + prev_ix),
-			     noinpages,
-			     0,  /* read access only for in data */
-			     inpages,
-			     NULL);
+	err = get_user_pages_fast((unsigned long)(oper.indata + prev_ix),
+				  noinpages,
+				  false,  /* read access only for in data */
+				  inpages);
 	if (err < 0) {
-		up_read(&current->mm->mmap_sem);
 		nooutpages = noinpages = 0;
 		DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages indata\n"));
 		goto error_cleanup;
 	}
 	noinpages = err;
-	if (oper.do_cipher){
-		err = get_user_pages((unsigned long int)oper.cipher_outdata,
-				     nooutpages,
-				     FOLL_WRITE, /* write access for out data */
-				     outpages,
-				     NULL);
-		up_read(&current->mm->mmap_sem);
+	if (oper.do_cipher) {
+		err = get_user_pages_fast((unsigned long)oper.cipher_outdata,
+					  nooutpages,
+					  true, /* write access for out data */
+					  outpages);
 		if (err < 0) {
 			nooutpages = 0;
 			DEBUG_API(printk("cryptocop_ioctl_process: get_user_pages outdata\n"));
 			goto error_cleanup;
 		}
 		nooutpages = err;
-	} else {
-		up_read(&current->mm->mmap_sem);
 	}
 	/* Add 6 to nooutpages to make room for possibly inserted buffers for storing digest and
...
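
get_user_pages_fast() needs no mmap_sem from the caller at all, which is what lets the driver above drop both up_read()/down_read() pairs and the else branch. A hedged usage sketch against the era's signature (as suggested by the false/true arguments above, roughly int get_user_pages_fast(unsigned long start, int nr_pages, bool write, struct page **pages)); the helper itself is invented for illustration:

#include <linux/mm.h>
#include <linux/errno.h>

/* Hypothetical helper: pin nr read-only user pages, use them, release. */
static int with_pinned_user_pages(unsigned long uaddr, int nr,
                                  struct page **pages)
{
        int i, got;

        got = get_user_pages_fast(uaddr, nr, false, pages); /* read access */
        if (got < 0)
                return got;             /* -errno, nothing pinned */

        /* ... access the pages, e.g. via kmap_atomic() ... */

        for (i = 0; i < got; i++)
                put_page(pages[i]);     /* drop the references gup took */
        return got == nr ? 0 : -EFAULT; /* short pin counts as failure here */
}
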
@@ -848,7 +848,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 						unsigned long nr_pages,
 						struct page **pages,
 						struct vm_area_struct **vmas,
-						int *locked, bool notify_drop,
+						int *locked,
 						unsigned int flags)
 {
 	long ret, pages_done;
@@ -922,7 +922,7 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 		pages++;
 		start += PAGE_SIZE;
 	}
-	if (notify_drop && lock_dropped && *locked) {
+	if (lock_dropped && *locked) {
 		/*
 		 * We must let the caller know we temporarily dropped the lock
 		 * and so the critical section protected by it was lost.
@@ -959,35 +959,11 @@ long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
 			   int *locked)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       pages, NULL, locked, true,
+				       pages, NULL, locked,
 				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
-/*
- * Same as get_user_pages_unlocked(...., FOLL_TOUCH) but it allows for
- * tsk, mm to be specified.
- *
- * NOTE: here FOLL_TOUCH is not set implicitly and must be set by the
- * caller if required (just like with __get_user_pages). "FOLL_GET"
- * is set implicitly if "pages" is non-NULL.
- */
-static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
-		struct mm_struct *mm, unsigned long start,
-		unsigned long nr_pages, struct page **pages,
-		unsigned int gup_flags)
-{
-	long ret;
-	int locked = 1;
-
-	down_read(&mm->mmap_sem);
-	ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
-				      &locked, false, gup_flags);
-	if (locked)
-		up_read(&mm->mmap_sem);
-	return ret;
-}
-
 /*
  * get_user_pages_unlocked() is suitable to replace the form:
  *
@@ -1006,8 +982,16 @@ static __always_inline long __get_user_pages_unlocked(struct task_struct *tsk,
 long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
 			     struct page **pages, unsigned int gup_flags)
 {
-	return __get_user_pages_unlocked(current, current->mm, start, nr_pages,
-					 pages, gup_flags | FOLL_TOUCH);
+	struct mm_struct *mm = current->mm;
+	int locked = 1;
+	long ret;
+
+	down_read(&mm->mmap_sem);
+	ret = __get_user_pages_locked(current, mm, start, nr_pages, pages, NULL,
+				      &locked, gup_flags | FOLL_TOUCH);
+	if (locked)
+		up_read(&mm->mmap_sem);
+	return ret;
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -1073,7 +1057,7 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
 			   struct vm_area_struct **vmas, int *locked)
 {
 	return __get_user_pages_locked(tsk, mm, start, nr_pages, pages, vmas,
-				       locked, true,
+				       locked,
 				       gup_flags | FOLL_TOUCH | FOLL_REMOTE);
 }
 EXPORT_SYMBOL(get_user_pages_remote);
@@ -1090,7 +1074,7 @@ long get_user_pages(unsigned long start, unsigned long nr_pages,
 		    struct vm_area_struct **vmas)
 {
 	return __get_user_pages_locked(current, current->mm, start, nr_pages,
-				       pages, vmas, NULL, false,
+				       pages, vmas, NULL,
 				       gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
...
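
With notify_drop gone, __get_user_pages_locked() unconditionally reports a dropped mmap_sem through *locked, and the new get_user_pages_unlocked() body above is exactly that protocol with the down_read()/up_read() folded in. For contrast, a hedged sketch of the same protocol from a get_user_pages_locked() caller's side (hypothetical function; signatures as in this diff):

#include <linux/mm.h>
#include <linux/sched.h>

static long pin_with_locked(unsigned long start, unsigned long nr,
                            struct page **pages)
{
        int locked = 1;
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages_locked(start, nr, FOLL_WRITE, pages, &locked);
        /* GUP may drop mmap_sem to fault pages in; *locked tells us. */
        if (locked)
                up_read(&current->mm->mmap_sem);
        return ret;
}
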
@@ -1322,17 +1322,6 @@ unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *w
 	return gfn_to_hva_memslot_prot(slot, gfn, writable);
 }
 
-static int get_user_page_nowait(unsigned long start, int write,
-		struct page **page)
-{
-	int flags = FOLL_NOWAIT | FOLL_HWPOISON;
-
-	if (write)
-		flags |= FOLL_WRITE;
-
-	return get_user_pages(start, 1, flags, page, NULL);
-}
-
 static inline int check_user_page_hwpoison(unsigned long addr)
 {
 	int rc, flags = FOLL_HWPOISON | FOLL_WRITE;
@@ -1381,7 +1370,8 @@ static bool hva_to_pfn_fast(unsigned long addr, bool atomic, bool *async,
 static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 			   bool *writable, kvm_pfn_t *pfn)
 {
-	struct page *page[1];
+	unsigned int flags = FOLL_HWPOISON;
+	struct page *page;
 	int npages = 0;
 
 	might_sleep();
@@ -1389,35 +1379,26 @@ static int hva_to_pfn_slow(unsigned long addr, bool *async, bool write_fault,
 	if (writable)
 		*writable = write_fault;
 
-	if (async) {
-		down_read(&current->mm->mmap_sem);
-		npages = get_user_page_nowait(addr, write_fault, page);
-		up_read(&current->mm->mmap_sem);
-	} else {
-		unsigned int flags = FOLL_HWPOISON;
-
-		if (write_fault)
-			flags |= FOLL_WRITE;
-
-		npages = get_user_pages_unlocked(addr, 1, page, flags);
-	}
+	if (write_fault)
+		flags |= FOLL_WRITE;
+	if (async)
+		flags |= FOLL_NOWAIT;
+
+	npages = get_user_pages_unlocked(addr, 1, &page, flags);
 	if (npages != 1)
 		return npages;
 
 	/* map read fault as writable if possible */
 	if (unlikely(!write_fault) && writable) {
-		struct page *wpage[1];
+		struct page *wpage;
 
-		npages = __get_user_pages_fast(addr, 1, 1, wpage);
-		if (npages == 1) {
+		if (__get_user_pages_fast(addr, 1, 1, &wpage) == 1) {
 			*writable = true;
-			put_page(page[0]);
-			page[0] = wpage[0];
+			put_page(page);
+			page = wpage;
 		}
-		npages = 1;
 	}
-	*pfn = page_to_pfn(page[0]);
+	*pfn = page_to_pfn(page);
 	return npages;
 }
...
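
Two things make the KVM fold work: FOLL_NOWAIT is an ordinary gup flag, so the async case no longer needs its own mmap_sem-wrapped helper; and in the read-fault-as-writable path, __get_user_pages_fast() (the walk-only variant that pins straight from the page tables without faulting or sleeping) is now tested directly instead of clobbering npages. A hedged sketch of that upgrade idiom in isolation (hypothetical wrapper, not KVM code):

#include <linux/types.h>
#include <linux/mm.h>

/* Try to upgrade an already-pinned read-only page to a writable pin. */
static bool upgrade_pin_to_writable(unsigned long addr, struct page **page)
{
        struct page *wpage;

        /* Returns the number of pages pinned (0 or 1 here); never sleeps. */
        if (__get_user_pages_fast(addr, 1, 1, &wpage) != 1)
                return false;

        put_page(*page);        /* drop the old read-only reference */
        *page = wpage;
        return true;
}
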