Commit 4a9e1cda authored by Dominik Dingel, committed by Linus Torvalds

mm: bring in additional flag for fixup_user_fault to signal unlock

During Jason's work on postcopy migration support for s390, a problem
regarding gmap faults was discovered.

The gmap code calls fixup_user_fault, which always ends up in
handle_mm_fault.  Until now we never cared about retries, but since the
userfaultfd code relies on them, this needs a fix.

This patchset does not take care of the futex code.  I will look
closer at that next.

This patch (of 2):

With the introduction of userfaultfd, kvm on s390 needs fixup_user_fault
to pass in FAULT_FLAG_ALLOW_RETRY and to report whether mmap_sem was
ever unlocked while the fault was being resolved.

This patch brings in the logic to handle retries and cleans up the
current documentation.  fixup_user_fault did not have the same semantics
as filemap_fault: it never indicated whether a retry happened, so a
caller was unable to handle that case.  We therefore change the
behaviour: when the caller allows retries, the fault is retried after
re-taking mmap_sem, and the function always returns with mmap_sem locked.
Signed-off-by: Dominik Dingel <dingel@linux.vnet.ibm.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: "Jason J. Herne" <jjherne@linux.vnet.ibm.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Eric B Munson <emunson@akamai.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Dominik Dingel <dingel@linux.vnet.ibm.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c046c321
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -585,7 +585,7 @@ int gmap_fault(struct gmap *gmap, unsigned long gaddr,
 		rc = vmaddr;
 		goto out_up;
 	}
-	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
+	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags, NULL)) {
 		rc = -EFAULT;
 		goto out_up;
 	}
@@ -727,7 +727,8 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
 			break;
 		}
 		/* Get the page mapped */
-		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
+		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE,
+				     NULL)) {
 			rc = -EFAULT;
 			break;
 		}
@@ -802,7 +803,8 @@ int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
 	    (pte_val(*ptep) & _PAGE_PROTECT)) {
 		pte_unmap_unlock(ptep, ptl);
-		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
+		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE,
+				     NULL)) {
 			up_read(&mm->mmap_sem);
 			return -EFAULT;
 		}
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1194,7 +1194,8 @@ int invalidate_inode_page(struct page *page);
 extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long address, unsigned int flags);
 extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
-			unsigned long address, unsigned int fault_flags);
+			unsigned long address, unsigned int fault_flags,
+			bool *unlocked);
 #else
 static inline int handle_mm_fault(struct mm_struct *mm,
 		struct vm_area_struct *vma, unsigned long address,
@@ -1206,7 +1207,7 @@ static inline int handle_mm_fault(struct mm_struct *mm,
 }
 static inline int fixup_user_fault(struct task_struct *tsk,
 		struct mm_struct *mm, unsigned long address,
-		unsigned int fault_flags)
+		unsigned int fault_flags, bool *unlocked)
 {
 	/* should never happen if there's no MMU */
 	BUG();
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -604,7 +604,7 @@ static int fault_in_user_writeable(u32 __user *uaddr)
 	down_read(&mm->mmap_sem);
 	ret = fixup_user_fault(current, mm, (unsigned long)uaddr,
-			       FAULT_FLAG_WRITE);
+			       FAULT_FLAG_WRITE, NULL);
 	up_read(&mm->mmap_sem);
 	return ret < 0 ? ret : 0;
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -618,6 +618,8 @@ EXPORT_SYMBOL(__get_user_pages);
  * @mm: mm_struct of target mm
  * @address: user address
  * @fault_flags:flags to pass down to handle_mm_fault()
+ * @unlocked: did we unlock the mmap_sem while retrying, maybe NULL if caller
+ *		does not allow retry
  *
  * This is meant to be called in the specific scenario where for locking reasons
  * we try to access user memory in atomic context (within a pagefault_disable()
@@ -629,22 +631,28 @@ EXPORT_SYMBOL(__get_user_pages);
  * The main difference with get_user_pages() is that this function will
  * unconditionally call handle_mm_fault() which will in turn perform all the
  * necessary SW fixup of the dirty and young bits in the PTE, while
- * handle_mm_fault() only guarantees to update these in the struct page.
+ * get_user_pages() only guarantees to update these in the struct page.
  *
  * This is important for some architectures where those bits also gate the
  * access permission to the page because they are maintained in software.  On
  * such architectures, gup() will not be enough to make a subsequent access
  * succeed.
  *
- * This has the same semantics wrt the @mm->mmap_sem as does filemap_fault().
+ * This function will not return with an unlocked mmap_sem. So it has not the
+ * same semantics wrt the @mm->mmap_sem as does filemap_fault().
  */
 int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
-		     unsigned long address, unsigned int fault_flags)
+		     unsigned long address, unsigned int fault_flags,
+		     bool *unlocked)
 {
 	struct vm_area_struct *vma;
 	vm_flags_t vm_flags;
-	int ret;
+	int ret, major = 0;
 
+	if (unlocked)
+		fault_flags |= FAULT_FLAG_ALLOW_RETRY;
+
+retry:
 	vma = find_extend_vma(mm, address);
 	if (!vma || address < vma->vm_start)
 		return -EFAULT;
@@ -654,6 +662,7 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 		return -EFAULT;
 
 	ret = handle_mm_fault(mm, vma, address, fault_flags);
+	major |= ret & VM_FAULT_MAJOR;
 	if (ret & VM_FAULT_ERROR) {
 		if (ret & VM_FAULT_OOM)
 			return -ENOMEM;
@@ -663,8 +672,19 @@ int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
 		return -EFAULT;
 		BUG();
 	}
+
+	if (ret & VM_FAULT_RETRY) {
+		down_read(&mm->mmap_sem);
+		if (!(fault_flags & FAULT_FLAG_TRIED)) {
+			*unlocked = true;
+			fault_flags &= ~FAULT_FLAG_ALLOW_RETRY;
+			fault_flags |= FAULT_FLAG_TRIED;
+			goto retry;
+		}
+	}
+
 	if (tsk) {
-		if (ret & VM_FAULT_MAJOR)
+		if (major)
 			tsk->maj_flt++;
 		else
 			tsk->min_flt++;