Commit 4ec31152 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: move FAULT_FLAG_VMA_LOCK check from handle_mm_fault()

Handle a little more of the page fault path outside the mmap sem.  The
hugetlb path doesn't need to check whether the VMA is anonymous; the
VM_HUGETLB flag is only set on hugetlbfs VMAs.  There should be no
performance change from the previous commit; this is simply a step to ease
bisection of any problems.

Link: https://lkml.kernel.org/r/20230724185410.1124082-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Suren Baghdasaryan <surenb@google.com>
Cc: Arjun Roy <arjunroy@google.com>
Cc: Eric Dumazet <edumazet@google.com>
Cc: Punit Agrawal <punit.agrawal@bytedance.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 350f6bbc
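
For context, handle_mm_fault() is reached from the architecture page-fault handlers, which since the per-VMA-lock work first attempt the fault without taking the mmap_lock at all. Below is a minimal sketch of that calling pattern, not any architecture's literal code: the wrapper name do_page_fault_locked is invented here, and the retry loop, FAULT_FLAG_TRIED, and VM_FAULT_COMPLETED handling are elided. lock_vma_under_rcu(), handle_mm_fault(), FAULT_FLAG_VMA_LOCK, and vma_end_read() are the real kernel interfaces this commit is about.

	#include <linux/mm.h>	/* handle_mm_fault(), lock_vma_under_rcu(), mmap_read_lock() */

	/* Hypothetical helper illustrating the caller's side of the protocol. */
	static vm_fault_t do_page_fault_locked(struct mm_struct *mm,
					       unsigned long address,
					       unsigned int flags,
					       struct pt_regs *regs)
	{
		struct vm_area_struct *vma;
		vm_fault_t fault;

		/* Fast path: look up and read-lock the VMA without the mmap_lock. */
		vma = lock_vma_under_rcu(mm, address);
		if (vma) {
			fault = handle_mm_fault(vma, address,
						flags | FAULT_FLAG_VMA_LOCK, regs);
			/*
			 * When the fault path bails out with VM_FAULT_RETRY it has
			 * already dropped the VMA lock via vma_end_read() -- exactly
			 * what the hunks below do -- so we just fall through.
			 */
			if (!(fault & VM_FAULT_RETRY))
				return fault;
		}

		/* Slow path: retry the fault under the mmap_lock. */
		mmap_read_lock(mm);
		vma = find_vma(mm, address);	/* address/permission checks elided */
		if (!vma) {
			mmap_read_unlock(mm);
			return VM_FAULT_SIGSEGV;
		}
		fault = handle_mm_fault(vma, address, flags, regs);
		/* On VM_FAULT_RETRY the fault path released the mmap_lock for us. */
		if (!(fault & VM_FAULT_RETRY))
			mmap_read_unlock(mm);
		return fault;
	}
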
@@ -6062,6 +6062,12 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 	int need_wait_lock = 0;
 	unsigned long haddr = address & huge_page_mask(h);
 
+	/* TODO: Handle faults under the VMA lock */
+	if (flags & FAULT_FLAG_VMA_LOCK) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	/*
 	 * Serialize hugepage allocation and instantiation, so that we don't
 	 * get spurious allocation failures if two CPUs race to instantiate
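
The commit message's claim that the hugetlb path needn't test vma_is_anonymous() follows directly from the predicates involved; here are simplified versions, adapted from include/linux/mm.h and include/linux/hugetlb_inline.h and trimmed for illustration:

	/* Anonymous VMAs are exactly those with no vm_operations attached. */
	static inline bool vma_is_anonymous(struct vm_area_struct *vma)
	{
		return !vma->vm_ops;
	}

	/*
	 * VM_HUGETLB is only ever set on hugetlbfs-backed VMAs, so any VMA
	 * that reaches hugetlb_fault() is already known not to be anonymous.
	 */
	static inline bool is_vm_hugetlb_page(struct vm_area_struct *vma)
	{
		return !!(vma->vm_flags & VM_HUGETLB);
	}

Hence hugetlb_fault() only needs the unconditional FAULT_FLAG_VMA_LOCK bail-out above, while __handle_mm_fault() (next hunks) keeps the vma_is_anonymous() test so that anonymous faults can still proceed under the VMA lock.
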
@@ -4984,10 +4984,10 @@ static vm_fault_t handle_pte_fault(struct vm_fault *vmf)
 }
 
 /*
- * By the time we get here, we already hold the mm semaphore
- *
- * The mmap_lock may have been released depending on flags and our
- * return value. See filemap_fault() and __folio_lock_or_retry().
+ * On entry, we hold either the VMA lock or the mmap_lock
+ * (FAULT_FLAG_VMA_LOCK tells you which). If VM_FAULT_RETRY is set in
+ * the result, the mmap_lock is not held on exit. See filemap_fault()
+ * and __folio_lock_or_retry().
  */
 static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 		unsigned long address, unsigned int flags)
@@ -5006,6 +5006,11 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
 	p4d_t *p4d;
 	vm_fault_t ret;
 
+	if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
+		vma_end_read(vma);
+		return VM_FAULT_RETRY;
+	}
+
 	pgd = pgd_offset(mm, address);
 	p4d = p4d_alloc(mm, pgd, address);
 	if (!p4d)
@@ -5222,11 +5227,6 @@ vm_fault_t handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		goto out;
 	}
 
-	if ((flags & FAULT_FLAG_VMA_LOCK) && !vma_is_anonymous(vma)) {
-		vma_end_read(vma);
-		return VM_FAULT_RETRY;
-	}
-
 	/*
 	 * Enable the memcg OOM handling for faults triggered in user
 	 * space. Kernel faults are handled more gracefully.