Commit 0bff0aae authored by Suren Baghdasaryan, committed by Andrew Morton

x86/mm: try VMA lock-based page fault handling first

Attempt VMA lock-based page fault handling first, and fall back to the
existing mmap_lock-based handling if that fails.

Link: https://lkml.kernel.org/r/20230227173632.3292573-30-surenb@google.com
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 52f23865
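
For orientation before the diff: the fault.c hunk below adds a fast path to do_user_addr_fault() that tries to service a user fault while holding only a per-VMA read lock, falling back to the mmap_lock path whenever no stable VMA is found, the permission check fails, or the core handler returns VM_FAULT_RETRY. The annotated sketch below distills that control flow into a helper; try_vma_locked_fault() is a hypothetical name invented here for illustration (the patch open-codes the logic inline), while lock_vma_under_rcu(), vma_end_read(), handle_mm_fault() and the x86-local access_error() are the functions the diff actually uses.

/*
 * Illustrative sketch only -- not part of the commit.  Returns
 * VM_FAULT_RETRY when the caller should fall back to the
 * mmap_lock-based slow path.
 */
static vm_fault_t try_vma_locked_fault(struct mm_struct *mm,
				       unsigned long address,
				       unsigned long error_code,
				       unsigned int flags,
				       struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	/* Only user-initiated faults are eligible for the lockless path. */
	if (!(flags & FAULT_FLAG_USER))
		return VM_FAULT_RETRY;

	/* Find and read-lock the VMA under RCU; mmap_lock is never taken. */
	vma = lock_vma_under_rcu(mm, address);
	if (!vma)
		return VM_FAULT_RETRY;

	/* Permissions must be re-checked against the VMA we locked. */
	if (unlikely(access_error(error_code, vma))) {
		vma_end_read(vma);
		return VM_FAULT_RETRY;
	}

	/*
	 * FAULT_FLAG_VMA_LOCK tells the core handler that only the
	 * per-VMA lock is held, so it must return VM_FAULT_RETRY for
	 * anything it cannot complete without mmap_lock.
	 */
	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
	vma_end_read(vma);
	return fault;
}

On the retry path the real code additionally bumps the VMA_LOCK_RETRY event counter (VMA_LOCK_SUCCESS otherwise) and answers any pending signal via fault_signal_pending() before retrying under mmap_lock, so signals never wait on the heavier lock.
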
arch/x86/Kconfig
@@ -27,6 +27,7 @@ config X86_64
 	# Options that are inherently 64-bit kernel only:
 	select ARCH_HAS_GIGANTIC_PAGE
 	select ARCH_SUPPORTS_INT128 if CC_HAS_INT128
+	select ARCH_SUPPORTS_PER_VMA_LOCK
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_SOFT_DIRTY
 	select MODULES_USE_ELF_RELA
arch/x86/mm/fault.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
 #include <linux/efi.h>			/* efi_crash_gracefully_on_page_fault()*/
 #include <linux/mm_types.h>
+#include <linux/mm.h>			/* find_and_lock_vma() */
 
 #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
@@ -1333,6 +1334,38 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 #endif
 
+#ifdef CONFIG_PER_VMA_LOCK
+	if (!(flags & FAULT_FLAG_USER))
+		goto lock_mmap;
+
+	vma = lock_vma_under_rcu(mm, address);
+	if (!vma)
+		goto lock_mmap;
+
+	if (unlikely(access_error(error_code, vma))) {
+		vma_end_read(vma);
+		goto lock_mmap;
+	}
+	fault = handle_mm_fault(vma, address, flags | FAULT_FLAG_VMA_LOCK, regs);
+	vma_end_read(vma);
+
+	if (!(fault & VM_FAULT_RETRY)) {
+		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
+		goto done;
+	}
+	count_vm_vma_lock_event(VMA_LOCK_RETRY);
+
+	/* Quick path to respond to signals */
+	if (fault_signal_pending(fault, regs)) {
+		if (!user_mode(regs))
+			kernelmode_fixup_or_oops(regs, error_code, address,
+						 SIGBUS, BUS_ADRERR,
+						 ARCH_DEFAULT_PKEY);
+		return;
+	}
+lock_mmap:
+#endif /* CONFIG_PER_VMA_LOCK */
+
 	/*
 	 * Kernel-mode access to the user address space should only occur
 	 * on well-defined single instructions listed in the exception
@@ -1433,6 +1466,9 @@ void do_user_addr_fault(struct pt_regs *regs,
 	}
 
 	mmap_read_unlock(mm);
+#ifdef CONFIG_PER_VMA_LOCK
+done:
+#endif
 	if (likely(!(fault & VM_FAULT_ERROR)))
 		return;