Commit a050ba1e authored by Linus Torvalds

mm/fault: convert remaining simple cases to lock_mm_and_find_vma()

This does the simple pattern conversion of alpha, arc, csky, hexagon,
loongarch, nios2, sh, sparc32, and xtensa to the lock_mm_and_find_vma()
helper.  They all have the regular fault handling pattern without odd
special cases.
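
Condensed, the pattern each of these conversions replaces looks like this
(variable and label names vary slightly per architecture; see the hunks
below for the exact forms):

	retry:
		mmap_read_lock(mm);
		vma = find_vma(mm, address);
		if (!vma)
			goto bad_area;
		if (vma->vm_start <= address)
			goto good_area;
		if (!(vma->vm_flags & VM_GROWSDOWN))
			goto bad_area;
		if (expand_stack(vma, address))
			goto bad_area;
	good_area:
		/* ... access checks and handle_mm_fault() ... */

and becomes:

	retry:
		vma = lock_mm_and_find_vma(mm, address, regs);
		if (!vma)
			goto bad_area_nosemaphore;
		/* ... access checks and handle_mm_fault() ... */

The error label changes because the helper drops the mmap lock itself when
it fails, so the failure path has to skip the mmap_read_unlock() that the
old bad_area path performed.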

The remaining architectures all have something that keeps us from a
straightforward conversion: ia64 and parisc have stacks that can grow
both up as well as down (and ia64 has special address region checks).

And m68k, microblaze, openrisc, sparc64, and um end up having extra
rules about only expanding the stack down a limited amount below the
user space stack pointer.  That is something that x86 used to do too
(long long ago), and it probably could just be skipped, but it still
makes the conversion less than trivial.
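
For reference, the extra rule in question is a sanity check of roughly this
shape (modeled on the check x86 carried long ago; the exact cushion below
the stack pointer differs per architecture, so treat this as illustrative
only):

	/*
	 * Accessing the stack below the stack pointer is almost always
	 * a bug.  The large cushion allows instructions like 'enter'
	 * and 'pusha' that store below %sp before updating it:
	 * 'enter $65535, $31' pushes 32 pointers and then decrements
	 * %sp by 65535.
	 */
	if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
		goto bad_area;

Folding that per-architecture wrinkle into the common helper is what makes
those conversions more than a mechanical search-and-replace.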

Note that this conversion was done manually and, with the exception of
alpha, without any build testing, because I have a fairly limited cross-
building environment.  The cases are all simple, and I went through the
changes several times, but...
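
For context, the contract these conversions rely on: on success,
lock_mm_and_find_vma() returns the vma with the mmap read lock held; on
failure it returns NULL with the lock already dropped, which is why the
error paths jump past the unlock.  A simplified sketch of the helper, not
the exact mm/memory.c code (the real one is more careful about taking the
lock on kernel-mode faults and about the stack-expansion locking):

	struct vm_area_struct *lock_mm_and_find_vma(struct mm_struct *mm,
						    unsigned long addr,
						    struct pt_regs *regs)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);		/* simplified */
		vma = find_vma(mm, addr);
		if (vma && vma->vm_start <= addr)
			return vma;		/* success: lock held */
		if (vma && (vma->vm_flags & VM_GROWSDOWN) &&
		    !expand_stack(vma, addr))
			return vma;		/* stack grew to cover addr */
		mmap_read_unlock(mm);		/* failure: lock dropped */
		return NULL;
	}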
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8b35ca3e
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -30,6 +30,7 @@ config ALPHA
 	select HAS_IOPORT
 	select HAVE_ARCH_AUDITSYSCALL
 	select HAVE_MOD_ARCH_SPECIFIC
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select ODD_RT_SIGACTION
 	select OLD_SIGSUSPEND
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -119,20 +119,12 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 		flags |= FAULT_FLAG_USER;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	   we can handle it.  */
- good_area:
+		goto bad_area_nosemaphore;
 	si_code = SEGV_ACCERR;
 	if (cause < 0) {
 		if (!(vma->vm_flags & VM_EXEC))
@@ -192,6 +184,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 
  bad_area:
 	mmap_read_unlock(mm);
 
+ bad_area_nosemaphore:
 	if (user_mode(regs))
 		goto do_sigsegv;
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -41,6 +41,7 @@ config ARC
 	select HAVE_PERF_EVENTS
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE
--- a/arch/arc/mm/fault.c
+++ b/arch/arc/mm/fault.c
@@ -113,15 +113,9 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (unlikely(address < vma->vm_start)) {
-		if (!(vma->vm_flags & VM_GROWSDOWN) || expand_stack(vma, address))
-			goto bad_area;
-	}
+		goto bad_area_nosemaphore;
 
 	/*
 	 * vm_area is good, now check permissions for this memory access
@@ -161,6 +155,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	/*
 	 * Major/minor page fault accounting
 	 * (in case of retry we only land here once)
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -96,6 +96,7 @@ config CSKY
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
+	select LOCK_MM_AND_FIND_VMA
 	select MAY_HAVE_SPARSE_IRQ
 	select MODULES_USE_ELF_RELA if MODULES
 	select OF
--- a/arch/csky/mm/fault.c
+++ b/arch/csky/mm/fault.c
@@ -97,13 +97,12 @@ static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_f
 	BUG();
 }
 
-static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+static inline void bad_area_nosemaphore(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
 {
 	/*
 	 * Something tried to access memory that isn't in our memory map.
 	 * Fix it, but check if it's kernel or user first.
 	 */
-	mmap_read_unlock(mm);
 	/* User mode accesses just cause a SIGSEGV */
 	if (user_mode(regs)) {
 		do_trap(regs, SIGSEGV, code, addr);
@@ -238,20 +237,9 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	if (is_write(regs))
 		flags |= FAULT_FLAG_WRITE;
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, addr);
+	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (likely(vma->vm_start <= addr))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, mm, code, addr);
-		return;
-	}
-	if (unlikely(expand_stack(vma, addr))) {
-		bad_area(regs, mm, code, addr);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
@@ -259,11 +247,11 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it.
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (unlikely(access_error(regs, vma))) {
-		bad_area(regs, mm, code, addr);
+		mmap_read_unlock(mm);
+		bad_area_nosemaphore(regs, mm, code, addr);
 		return;
 	}
 
--- a/arch/hexagon/Kconfig
+++ b/arch/hexagon/Kconfig
@@ -28,6 +28,7 @@ config HEXAGON
 	select GENERIC_SMP_IDLE_THREAD
 	select STACKTRACE_SUPPORT
 	select GENERIC_CLOCKEVENTS_BROADCAST
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select GENERIC_CPU_DEVICES
 	select ARCH_WANT_LD_ORPHAN_WARN
--- a/arch/hexagon/mm/vm_fault.c
+++ b/arch/hexagon/mm/vm_fault.c
@@ -57,21 +57,10 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
 
-	if (vma->vm_start <= address)
-		goto good_area;
-
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-
-	if (expand_stack(vma, address))
-		goto bad_area;
-
-good_area:
 	/* Address space is OK.  Now check access rights. */
 	si_code = SEGV_ACCERR;
 
@@ -143,6 +132,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 bad_area:
 	mmap_read_unlock(mm);
 
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 		return;
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -130,6 +130,7 @@ config LOONGARCH
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN if !SMP
 	select IRQ_FORCED_THREADING
 	select IRQ_LOONGARCH_CPU
+	select LOCK_MM_AND_FIND_VMA
 	select MMU_GATHER_MERGE_VMAS if MMU
 	select MODULES_USE_ELF_RELA if MODULES
 	select NEED_PER_CPU_EMBED_FIRST_CHUNK
--- a/arch/loongarch/mm/fault.c
+++ b/arch/loongarch/mm/fault.c
@@ -169,22 +169,18 @@ static void __kprobes __do_page_fault(struct pt_regs *regs,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (!expand_stack(vma, address))
-		goto good_area;
+	vma = lock_mm_and_find_vma(mm, address, regs);
+	if (unlikely(!vma))
+		goto bad_area_nosemaphore;
+	goto good_area;
+
 /*
  * Something tried to access memory that isn't in our memory map..
  * Fix it, but check if it's kernel or user first..
  */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	do_sigsegv(regs, write, address, si_code);
 	return;
--- a/arch/nios2/Kconfig
+++ b/arch/nios2/Kconfig
@@ -16,6 +16,7 @@ config NIOS2
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_ARCH_KGDB
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select OF
 	select OF_EARLY_FLATTREE
--- a/arch/nios2/mm/fault.c
+++ b/arch/nios2/mm/fault.c
@@ -86,27 +86,14 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
-	if (!mmap_read_trylock(mm)) {
-		if (!user_mode(regs) && !search_exception_tables(regs->ea))
-			goto bad_area_nosemaphore;
 retry:
-		mmap_read_lock(mm);
-	}
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 /*
  * Ok, we have a good vm_area for this memory access, so
  * we can handle it..
  */
-good_area:
 	code = SEGV_ACCERR;
 
 	switch (cause) {
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -59,6 +59,7 @@ config SUPERH
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select IRQ_FORCED_THREADING
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select NEED_SG_DMA_LENGTH
 	select NO_DMA if !MMU && !DMA_COHERENT
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -439,21 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	}
 
 retry:
-	mmap_read_lock(mm);
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (unlikely(!vma)) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (likely(vma->vm_start <= address))
-		goto good_area;
-	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
-		bad_area(regs, error_code, address);
-		return;
-	}
-	if (unlikely(expand_stack(vma, address))) {
-		bad_area(regs, error_code, address);
+		bad_area_nosemaphore(regs, error_code, address);
 		return;
 	}
 
@@ -461,7 +449,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	if (unlikely(access_error(error_code, vma))) {
 		bad_area_access_error(regs, error_code, address);
 		return;
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -57,6 +57,7 @@ config SPARC32
 	select DMA_DIRECT_REMAP
 	select GENERIC_ATOMIC64
 	select HAVE_UID16
+	select LOCK_MM_AND_FIND_VMA
 	select OLD_SIGACTION
 	select ZONE_DMA
--- a/arch/sparc/mm/fault_32.c
+++ b/arch/sparc/mm/fault_32.c
@@ -143,28 +143,19 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	if (pagefault_disabled() || !mm)
 		goto no_context;
 
+	if (!from_user && address >= PAGE_OFFSET)
+		goto no_context;
+
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-
-	if (!from_user && address >= PAGE_OFFSET)
-		goto bad_area;
-
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 	/*
 	 * Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
@@ -321,17 +312,9 @@ static void force_user_fault(unsigned long address, int write)
 
 	code = SEGV_MAPERR;
 
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
+	vma = lock_mm_and_find_vma(mm, address, NULL);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
-good_area:
+		goto bad_area_nosemaphore;
 	code = SEGV_ACCERR;
 	if (write) {
 		if (!(vma->vm_flags & VM_WRITE))
@@ -350,6 +333,7 @@ static void force_user_fault(unsigned long address, int write)
 	return;
 
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	__do_fault_siginfo(code, SIGSEGV, tsk->thread.kregs, address);
 	return;
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -49,6 +49,7 @@ config XTENSA
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_VIRT_CPU_ACCOUNTING_GEN
 	select IRQ_DOMAIN
+	select LOCK_MM_AND_FIND_VMA
 	select MODULES_USE_ELF_RELA
 	select PERF_USE_VMALLOC
 	select TRACE_IRQFLAGS_SUPPORT
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -130,23 +130,14 @@ void do_page_fault(struct pt_regs *regs)
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
 retry:
-	mmap_read_lock(mm);
-	vma = find_vma(mm, address);
-
+	vma = lock_mm_and_find_vma(mm, address, regs);
 	if (!vma)
-		goto bad_area;
-	if (vma->vm_start <= address)
-		goto good_area;
-	if (!(vma->vm_flags & VM_GROWSDOWN))
-		goto bad_area;
-	if (expand_stack(vma, address))
-		goto bad_area;
+		goto bad_area_nosemaphore;
 
 	/* Ok, we have a good vm_area for this memory access, so
 	 * we can handle it..
 	 */
-good_area:
 	code = SEGV_ACCERR;
 
 	if (is_write) {
@@ -205,6 +196,7 @@ void do_page_fault(struct pt_regs *regs)
 	 */
 bad_area:
 	mmap_read_unlock(mm);
+bad_area_nosemaphore:
 	if (user_mode(regs)) {
 		force_sig_fault(SIGSEGV, code, (void *) address);
 		return;