Commit 87ffc118 authored by Andrea Arcangeli, committed by Linus Torvalds

userfaultfd: hugetlbfs: gup: support VM_FAULT_RETRY

Add support for VM_FAULT_RETRY to follow_hugetlb_page() so that
get_user_pages_unlocked/locked and "nonblocking/FOLL_NOWAIT" features
will work on hugetlbfs.

This is required for fully functional userfaultfd non-present support on
hugetlbfs.

Link: http://lkml.kernel.org/r/20161216144821.5183-25-aarcange@redhat.com
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Cc: "Dr. David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Michael Rapoport <RAPOPORT@il.ibm.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@parallels.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 369cd212
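
The convention being wired up here: gup callers that can tolerate mmap_sem being dropped pass a pointer to an int "locked"/"nonblocking" flag set to 1; if a fault handler has to sleep and releases mmap_sem, gup clears the flag and returns early. A minimal caller-side sketch against the v4.10-era API (the helper pin_one_page() and its trimmed error handling are illustrative, not part of this commit):

#include <linux/mm.h>
#include <linux/sched.h>

/* Illustrative helper: pin one writable page at addr in the current mm. */
static long pin_one_page(unsigned long addr, struct page **page)
{
	int locked = 1;
	long ret;

	down_read(&current->mm->mmap_sem);
	ret = get_user_pages_locked(addr, 1, FOLL_WRITE, page, &locked);
	/*
	 * A fault that had to sleep may return VM_FAULT_RETRY after
	 * dropping mmap_sem; gup reports that by clearing "locked".
	 */
	if (locked)
		up_read(&current->mm->mmap_sem);
	return ret;
}

Before this patch, follow_hugetlb_page() ignored that protocol and always blocked inside hugetlb_fault(), so the flag never tripped on hugetlbfs VMAs.
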
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -65,7 +65,8 @@ int hugetlb_mempolicy_sysctl_handler(struct ctl_table *, int,
 int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, struct vm_area_struct *);
 long follow_hugetlb_page(struct mm_struct *, struct vm_area_struct *,
			 struct page **, struct vm_area_struct **,
-			 unsigned long *, unsigned long *, long, unsigned int);
+			 unsigned long *, unsigned long *, long, unsigned int,
+			 int *);
 void unmap_hugepage_range(struct vm_area_struct *,
			  unsigned long, unsigned long, struct page *);
 void __unmap_hugepage_range_final(struct mmu_gather *tlb,
@@ -136,7 +137,7 @@ static inline unsigned long hugetlb_total_pages(void)
	return 0;
 }

-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w)	({ BUG(); 0; })
+#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n)	({ BUG(); 0; })
 #define follow_huge_addr(mm, addr, write)	ERR_PTR(-EINVAL)
 #define copy_hugetlb_page_range(src, dst, vma)	({ BUG(); 0; })
 static inline void hugetlb_report_meminfo(struct seq_file *m)
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -572,7 +572,7 @@ static long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
			if (is_vm_hugetlb_page(vma)) {
				i = follow_hugetlb_page(mm, vma, pages, vmas,
						&start, &nr_pages, i,
-						gup_flags);
+						gup_flags, nonblocking);
				continue;
			}
		}
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4065,7 +4065,7 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
 long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
			 struct page **pages, struct vm_area_struct **vmas,
			 unsigned long *position, unsigned long *nr_pages,
-			 long i, unsigned int flags)
+			 long i, unsigned int flags, int *nonblocking)
 {
	unsigned long pfn_offset;
	unsigned long vaddr = *position;
@@ -4128,17 +4128,44 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		    ((flags & FOLL_WRITE) &&
		      !huge_pte_write(huge_ptep_get(pte)))) {
			int ret;
+			unsigned int fault_flags = 0;

			if (pte)
				spin_unlock(ptl);
-			ret = hugetlb_fault(mm, vma, vaddr,
-				(flags & FOLL_WRITE) ? FAULT_FLAG_WRITE : 0);
-			if (!(ret & VM_FAULT_ERROR))
-				continue;
-
-			remainder = 0;
-			break;
+			if (flags & FOLL_WRITE)
+				fault_flags |= FAULT_FLAG_WRITE;
+			if (nonblocking)
+				fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+			if (flags & FOLL_NOWAIT)
+				fault_flags |= FAULT_FLAG_ALLOW_RETRY |
+					FAULT_FLAG_RETRY_NOWAIT;
+			if (flags & FOLL_TRIED) {
+				VM_WARN_ON_ONCE(fault_flags &
+						FAULT_FLAG_ALLOW_RETRY);
+				fault_flags |= FAULT_FLAG_TRIED;
+			}
+			ret = hugetlb_fault(mm, vma, vaddr, fault_flags);
+			if (ret & VM_FAULT_ERROR) {
+				remainder = 0;
+				break;
+			}
+			if (ret & VM_FAULT_RETRY) {
+				if (nonblocking)
+					*nonblocking = 0;
+				*nr_pages = 0;
+				/*
+				 * VM_FAULT_RETRY must not return an
+				 * error, it will return zero
+				 * instead.
+				 *
+				 * No need to update "position" as the
+				 * caller will not check it after
+				 * *nr_pages is set to 0.
+				 */
+				return i;
+			}
+			continue;
		}

		pfn_offset = (vaddr & ~huge_page_mask(h)) >> PAGE_SHIFT;
		page = pte_page(huge_ptep_get(pte));
@@ -4166,6 +4193,11 @@ long follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
		spin_unlock(ptl);
	}
	*nr_pages = remainder;
+	/*
+	 * setting position is actually required only if remainder is
+	 * not zero but it's faster not to add a "if (remainder)"
+	 * branch.
+	 */
	*position = vaddr;

	return i ? i : -EFAULT;
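
The core of the change is the flag translation now open-coded in follow_hugetlb_page(), mirroring what faultin_page() already does for normal pages in mm/gup.c. As a standalone sketch for readability (gup_to_fault_flags() is an illustrative name, not a kernel function):

/*
 * Sketch of the gup-flag to fault-flag translation the hunk above
 * open-codes inside follow_hugetlb_page().
 */
static unsigned int gup_to_fault_flags(unsigned int flags, int *nonblocking)
{
	unsigned int fault_flags = 0;

	if (flags & FOLL_WRITE)
		fault_flags |= FAULT_FLAG_WRITE;
	if (nonblocking)
		/* caller can cope with mmap_sem being dropped */
		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
	if (flags & FOLL_NOWAIT)
		/* start the fault but return instead of sleeping on it */
		fault_flags |= FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_RETRY_NOWAIT;
	if (flags & FOLL_TRIED) {
		/* second attempt after a retry: only one retry is allowed */
		VM_WARN_ON_ONCE(fault_flags & FAULT_FLAG_ALLOW_RETRY);
		fault_flags |= FAULT_FLAG_TRIED;
	}
	return fault_flags;
}

On VM_FAULT_RETRY, the callee clears *nonblocking to report that mmap_sem was dropped, zeroes *nr_pages, and returns the count of pages already processed rather than an error; callers such as __get_user_pages_locked() then re-take the lock and retry with FOLL_TRIED.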