Commit fa5bb209 authored by Kirill A. Shutemov, committed by Linus Torvalds

mm: cleanup __get_user_pages()

Get rid of the two nested loops over nr_pages, extract the vma flags checking
into a separate function, and do a few other random cleanups.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 16744483
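
The shape of the cleanup: the old __get_user_pages() used an outer loop to look up the VMA covering the current address and an inner loop over the pages inside that VMA; the new code uses a single loop that re-resolves the VMA only on the first iteration or once start crosses vma->vm_end. The following is a minimal userspace C sketch of that control-flow pattern only, not kernel code; the range struct, the lookup_range() helper, and the numbers are invented for illustration.

#include <stdio.h>
#include <stddef.h>

/* Invented stand-in for a VMA: a half-open [start, end) address range. */
struct range { unsigned long start, end; };

static const struct range ranges[] = { { 0, 4 }, { 4, 10 }, { 10, 16 } };

/* Invented stand-in for find_extend_vma(): find the range covering pos. */
static const struct range *lookup_range(unsigned long pos)
{
        for (size_t n = 0; n < sizeof(ranges) / sizeof(ranges[0]); n++)
                if (pos >= ranges[n].start && pos < ranges[n].end)
                        return &ranges[n];
        return NULL;
}

int main(void)
{
        unsigned long start = 1, nr_pages = 12;
        const struct range *r = NULL;
        long i = 0;

        /*
         * Single loop: re-resolve 'r' only on the first iteration or when
         * 'start' has crossed the end of the current range, mirroring the
         * "first iteration or cross vma bound" check in the new kernel code.
         */
        do {
                if (!r || start >= r->end) {
                        r = lookup_range(start);
                        if (!r)
                                return 1;       /* analogous to -EFAULT */
                        printf("entered range [%lu, %lu)\n", r->start, r->end);
                }
                printf("  pin unit %lu\n", start);
                i++;
                start++;
                nr_pages--;
        } while (nr_pages);

        printf("pinned %ld units\n", i);
        return 0;
}

The new __get_user_pages() has the same shape: vma starts out NULL, and find_extend_vma() is called again only when the cursor has left the current VMA.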
@@ -315,6 +315,44 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
         return 0;
 }
 
+static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
+{
+        vm_flags_t vm_flags = vma->vm_flags;
+
+        if (vm_flags & (VM_IO | VM_PFNMAP))
+                return -EFAULT;
+
+        if (gup_flags & FOLL_WRITE) {
+                if (!(vm_flags & VM_WRITE)) {
+                        if (!(gup_flags & FOLL_FORCE))
+                                return -EFAULT;
+                        /*
+                         * We used to let the write,force case do COW in a
+                         * VM_MAYWRITE VM_SHARED !VM_WRITE vma, so ptrace could
+                         * set a breakpoint in a read-only mapping of an
+                         * executable, without corrupting the file (yet only
+                         * when that file had been opened for writing!).
+                         * Anon pages in shared mappings are surprising: now
+                         * just reject it.
+                         */
+                        if (!is_cow_mapping(vm_flags)) {
+                                WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
+                                return -EFAULT;
+                        }
+                }
+        } else if (!(vm_flags & VM_READ)) {
+                if (!(gup_flags & FOLL_FORCE))
+                        return -EFAULT;
+                /*
+                 * Is there actually any vma we can reach here which does not
+                 * have VM_MAYREAD set?
+                 */
+                if (!(vm_flags & VM_MAYREAD))
+                        return -EFAULT;
+        }
+        return 0;
+}
+
 /**
  * __get_user_pages() - pin user pages in memory
  * @tsk:        task_struct of target task
@@ -369,9 +407,9 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 unsigned int gup_flags, struct page **pages,
                 struct vm_area_struct **vmas, int *nonblocking)
 {
-        long i;
-        unsigned long vm_flags;
+        long i = 0;
         unsigned int page_mask;
+        struct vm_area_struct *vma = NULL;
 
         if (!nr_pages)
                 return 0;
@@ -386,86 +424,50 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
         if (!(gup_flags & FOLL_FORCE))
                 gup_flags |= FOLL_NUMA;
 
-        i = 0;
-
         do {
-                struct vm_area_struct *vma;
+                struct page *page;
+                unsigned int foll_flags = gup_flags;
+                unsigned int page_increm;
 
+                /* first iteration or cross vma bound */
+                if (!vma || start >= vma->vm_end) {
                         vma = find_extend_vma(mm, start);
                         if (!vma && in_gate_area(mm, start)) {
                                 int ret;
-                                ret = get_gate_page(mm, start & PAGE_MASK, gup_flags,
-                                                &vma, pages ? &pages[i] : NULL);
+                                ret = get_gate_page(mm, start & PAGE_MASK,
+                                                gup_flags, &vma,
+                                                pages ? &pages[i] : NULL);
                                 if (ret)
-                                        goto efault;
+                                        return i ? : ret;
                                 page_mask = 0;
                                 goto next_page;
                         }
 
-                if (!vma)
-                        goto efault;
-                vm_flags = vma->vm_flags;
-                if (vm_flags & (VM_IO | VM_PFNMAP))
-                        goto efault;
-
-                if (gup_flags & FOLL_WRITE) {
-                        if (!(vm_flags & VM_WRITE)) {
-                                if (!(gup_flags & FOLL_FORCE))
-                                        goto efault;
-                                /*
-                                 * We used to let the write,force case do COW
-                                 * in a VM_MAYWRITE VM_SHARED !VM_WRITE vma, so
-                                 * ptrace could set a breakpoint in a read-only
-                                 * mapping of an executable, without corrupting
-                                 * the file (yet only when that file had been
-                                 * opened for writing!). Anon pages in shared
-                                 * mappings are surprising: now just reject it.
-                                 */
-                                if (!is_cow_mapping(vm_flags)) {
-                                        WARN_ON_ONCE(vm_flags & VM_MAYWRITE);
-                                        goto efault;
-                                }
-                        }
-                } else {
-                        if (!(vm_flags & VM_READ)) {
-                                if (!(gup_flags & FOLL_FORCE))
-                                        goto efault;
-                                /*
-                                 * Is there actually any vma we can reach here
-                                 * which does not have VM_MAYREAD set?
-                                 */
-                                if (!(vm_flags & VM_MAYREAD))
-                                        goto efault;
-                        }
-                }
-
+                        if (!vma || check_vma_flags(vma, gup_flags))
+                                return i ? : -EFAULT;
                         if (is_vm_hugetlb_page(vma)) {
                                 i = follow_hugetlb_page(mm, vma, pages, vmas,
-                                                &start, &nr_pages, i, gup_flags);
+                                                &start, &nr_pages, i,
+                                                gup_flags);
                                 continue;
                         }
-
-                do {
-                        struct page *page;
-                        unsigned int foll_flags = gup_flags;
-                        unsigned int page_increm;
-
+                }
+retry:
                 /*
-                 * If we have a pending SIGKILL, don't keep faulting
-                 * pages and potentially allocating memory.
+                 * If we have a pending SIGKILL, don't keep faulting pages and
+                 * potentially allocating memory.
                  */
                 if (unlikely(fatal_signal_pending(current)))
                         return i ? i : -ERESTARTSYS;
                 cond_resched();
-                        while (!(page = follow_page_mask(vma, start,
-                                                foll_flags, &page_mask))) {
+                page = follow_page_mask(vma, start, foll_flags, &page_mask);
+                if (!page) {
                         int ret;
                         ret = faultin_page(tsk, vma, start, &foll_flags,
                                         nonblocking);
                         switch (ret) {
                         case 0:
-                                break;
+                                goto retry;
                         case -EFAULT:
                         case -ENOMEM:
                         case -EHWPOISON:
@@ -474,16 +476,13 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                                 return i;
                         case -ENOENT:
                                 goto next_page;
-                        default:
-                                BUG();
                         }
-                        cond_resched();
+                        BUG();
                 }
                 if (IS_ERR(page))
                         return i ? i : PTR_ERR(page);
                 if (pages) {
                         pages[i] = page;
                         flush_anon_page(vma, page, start);
                         flush_dcache_page(page);
                         page_mask = 0;
@@ -499,11 +498,8 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
                 i += page_increm;
                 start += page_increm * PAGE_SIZE;
                 nr_pages -= page_increm;
-                } while (nr_pages && start < vma->vm_end);
         } while (nr_pages);
         return i;
-
-efault:
-        return i ? : -EFAULT;
 }
 EXPORT_SYMBOL(__get_user_pages);
...
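
As the kernel-doc fragment above says, __get_user_pages() pins user pages in memory: it walks user addresses from start, stores up to nr_pages page pointers in pages[], and returns the number of pages pinned or a negative errno (the error paths now return i ? : ret directly instead of jumping to the removed efault label). The fragment below is a hypothetical caller sketch against this 3.x-era signature, assuming the caller takes the target mm's mmap_sem for read around the call; pin_one_page() and its particular flag choice are invented for illustration and are not part of this commit.

#include <linux/mm.h>
#include <linux/sched.h>

/*
 * Hypothetical example only: pin one user page of the current task for
 * writing through the __get_user_pages() interface shown above. On
 * success the caller owns a reference and must drop it with put_page().
 */
static long pin_one_page(unsigned long uaddr, struct page **page)
{
        struct mm_struct *mm = current->mm;
        long ret;

        down_read(&mm->mmap_sem);       /* gup walks the VMA tree under mmap_sem */
        ret = __get_user_pages(current, mm, uaddr, 1,
                               FOLL_GET | FOLL_TOUCH | FOLL_WRITE,
                               page, NULL, NULL);
        up_read(&mm->mmap_sem);

        return ret;     /* 1 on success, negative errno on failure */
}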