Commit 810a56b9 authored by Mike Kravetz, committed by Linus Torvalds

userfaultfd: hugetlbfs: fix __mcopy_atomic_hugetlb retry/error processing

The new routine copy_huge_page_from_user() uses kmap_atomic() to map
PAGE_SIZE pages.  However, this prevents page faults in the subsequent
call to copy_from_user().  This is OK in the case where the routine is
called with mmap_sem held.  However, in the retry path of
__mcopy_atomic_hugetlb(), which runs after mmap_sem has been dropped, we
want to allow page faults.  So, add a new argument allow_pagefault to
indicate whether the routine should allow page faults.
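
For reference, a sketch of the patched copy loop, reassembled from the
hunks below.  The hunks are truncated, so the tail of the loop (the early
exit on a partial copy) is reconstructed here rather than quoted; treat
this as a sketch of the final function, not a verbatim copy.
kmap_atomic() runs with page faults disabled, so copy_from_user() can
only copy from user pages that are already resident; kmap() may sleep, so
the copy may fault pages in, which is only safe once mmap_sem has been
dropped.  Accordingly, hugetlb_mcopy_atomic_pte() (called with mmap_sem
held) passes false, and the retry path in __mcopy_atomic_hugetlb() (which
runs after dropping mmap_sem) passes true.

	long copy_huge_page_from_user(struct page *dst_page,
					const void __user *usr_src,
					unsigned int pages_per_huge_page,
					bool allow_pagefault)
	{
		void *src = (void *)usr_src;
		void *page_kaddr;
		unsigned long i, rc = 0;
		unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;

		for (i = 0; i < pages_per_huge_page; i++) {
			if (allow_pagefault)
				/* sleepable mapping: copy_from_user() may fault */
				page_kaddr = kmap(dst_page + i);
			else
				/* atomic mapping: page faults are disabled */
				page_kaddr = kmap_atomic(dst_page + i);

			rc = copy_from_user(page_kaddr,
					(const void __user *)(src + i * PAGE_SIZE),
					PAGE_SIZE);

			/* kunmap() takes the page*; kunmap_atomic() the kaddr */
			if (allow_pagefault)
				kunmap(dst_page + i);
			else
				kunmap_atomic(page_kaddr);

			/* rc is the number of bytes that were NOT copied */
			ret_val -= (PAGE_SIZE - rc);
			if (rc)
				break;
		}
		/* 0 on success, otherwise the number of bytes left uncopied */
		return ret_val;
	}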

[dan.carpenter@oracle.com: unmap the correct pointer]
  Link: http://lkml.kernel.org/r/20170113082608.GA3548@mwanda
[akpm@linux-foundation.org: kunmap() takes a page*, per Hugh]
Link: http://lkml.kernel.org/r/20161216144821.5183-20-aarcange@redhat.com
Signed-off-by: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 60d4d2d2
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2426,7 +2426,8 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
 				unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page);
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault);
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
 
 extern struct page_ext_operations debug_guardpage_ops;
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3973,7 +3973,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 		ret = copy_huge_page_from_user(page,
 				(const void __user *) src_addr,
-				pages_per_huge_page(h));
+				pages_per_huge_page(h), false);
 
 		/* fallback to copy_from_user outside mmap_sem */
 		if (unlikely(ret)) {
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4155,7 +4155,8 @@ void copy_user_huge_page(struct page *dst, struct page *src,
 long copy_huge_page_from_user(struct page *dst_page,
 				const void __user *usr_src,
-				unsigned int pages_per_huge_page)
+				unsigned int pages_per_huge_page,
+				bool allow_pagefault)
 {
 	void *src = (void *)usr_src;
 	void *page_kaddr;
@@ -4163,10 +4164,16 @@ long copy_huge_page_from_user(struct page *dst_page,
 	unsigned long ret_val = pages_per_huge_page * PAGE_SIZE;
 
 	for (i = 0; i < pages_per_huge_page; i++) {
-		page_kaddr = kmap_atomic(dst_page + i);
+		if (allow_pagefault)
+			page_kaddr = kmap(dst_page + i);
+		else
+			page_kaddr = kmap_atomic(dst_page + i);
 		rc = copy_from_user(page_kaddr,
 				(const void __user *)(src + i * PAGE_SIZE),
 				PAGE_SIZE);
-		kunmap_atomic(page_kaddr);
+		if (allow_pagefault)
+			kunmap(dst_page + i);
+		else
+			kunmap_atomic(page_kaddr);
 		ret_val -= (PAGE_SIZE - rc);
--- a/mm/userfaultfd.c
+++ b/mm/userfaultfd.c
@@ -274,7 +274,7 @@ static __always_inline ssize_t __mcopy_atomic_hugetlb(struct mm_struct *dst_mm,
 			err = copy_huge_page_from_user(page,
 					(const void __user *)src_addr,
-					pages_per_huge_page(h));
+					pages_per_huge_page(h), true);
 			if (unlikely(err)) {
 				err = -EFAULT;
 				goto out;