Commit 4ef87322 authored by Peter Xu, committed by Linus Torvalds

mm: introduce fault_signal_pending()

For most architectures, we've got a quick path to detect a fatal signal
after handle_mm_fault().  Introduce a helper for that quick path.

It cleans up the current code a bit so we don't need to duplicate the same
check across archs.  More importantly, it gives us a unified place where we
handle the signal immediately after an interrupted page fault, so it'll be
much easier to change the signal-handling behavior later on for all the
archs.

Note that currently only some of the archs use this new helper, because
some archs have their own way of handling signals.  In the follow-up
patches, we'll apply this helper to the rest of the archs.

Another note is that the "regs" parameter in the new helper is not used
yet; it'll be used very soon.  For now it is kept in this patch only to
avoid touching all the archs again in the follow-up patches.
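
For illustration, here is a minimal sketch (not part of the patch) of how a
typical arch fault handler ends up using the helper.  The function name
example_do_page_fault is made up, and the vma/address/flags setup and the
error paths are elided:

  /* Hypothetical example handler, for illustration only. */
  static void example_do_page_fault(struct pt_regs *regs,
                                    struct vm_area_struct *vma,
                                    unsigned long address,
                                    unsigned int flags)
  {
          vm_fault_t fault = handle_mm_fault(vma, address, flags);

          /*
           * Previously each arch open-coded:
           *   if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
           *           return;
           * The helper replaces that check; "regs" is accepted but not
           * used by the helper yet.
           */
          if (fault_signal_pending(fault, regs))
                  return;

          /* ... handle VM_FAULT_ERROR, retries, etc. ... */
  }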

[peterx@redhat.com: fix sparse warnings]
  Link: http://lkml.kernel.org/r/20200311145921.GD479302@xz-x1
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Tested-by: Brian Geffon <bgeffon@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Bobby Powers <bobbypowers@gmail.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Denis Plotnikov <dplotnikov@virtuozzo.com>
Cc: "Dr . David Alan Gilbert" <dgilbert@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Jerome Glisse <jglisse@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: "Kirill A . Shutemov" <kirill@shutemov.name>
Cc: Martin Cracauer <cracauer@cons.org>
Cc: Marty McFadden <mcfadden8@llnl.gov>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Maya Gokhale <gokhale2@llnl.gov>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Mike Rapoport <rppt@linux.vnet.ibm.com>
Cc: Pavel Emelyanov <xemul@openvz.org>
Link: http://lkml.kernel.org/r/20200220155353.8676-4-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent ad415db8
@@ -150,7 +150,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 	   the fault. */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -295,7 +295,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * signal first. We do not need to release the mmap_sem because
 	 * it would already be released in __lock_page_or_retry in
 	 * mm/filemap.c. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
 		return 0;
...
@@ -91,7 +91,7 @@ void do_page_fault(unsigned long address, long cause, struct pt_regs *regs)
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	/* The most common case -- we are done. */
...
@@ -141,7 +141,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -138,7 +138,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 	fault = handle_mm_fault(vma, address, flags);
 	pr_debug("handle_mm_fault returns %x\n", fault);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return 0;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -217,7 +217,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -154,7 +154,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
...
@@ -214,7 +214,7 @@ void do_page_fault(unsigned long entry, unsigned long addr,
 	 * signal first. We do not need to release the mmap_sem because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
 		return;
...
@@ -133,7 +133,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -161,7 +161,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long address,
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -304,7 +304,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -117,7 +117,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs)
 	 * signal first. We do not need to release the mmap_sem because it
 	 * would already be released in __lock_page_or_retry in mm/filemap.c.
 	 */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(tsk))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -480,8 +480,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
 	 * the fault.
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	/* No reason to continue if interrupted by SIGKILL. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current)) {
+	if (fault_signal_pending(fault, regs)) {
 		fault = VM_FAULT_SIGNAL;
 		if (flags & FAULT_FLAG_RETRY_NOWAIT)
 			goto out_up;
...
@@ -237,7 +237,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -425,7 +425,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		goto exit_exception;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -250,7 +250,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * signal first. We do not need to release the mmap_sem because
 	 * it would already be released in __lock_page_or_retry in
 	 * mm/filemap.c. */
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return 0;
 	if (!(fault & VM_FAULT_ERROR) && (flags & FAULT_FLAG_ALLOW_RETRY)) {
...
@@ -110,7 +110,7 @@ void do_page_fault(struct pt_regs *regs)
 	 */
 	fault = handle_mm_fault(vma, address, flags);
-	if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
+	if (fault_signal_pending(fault, regs))
 		return;
 	if (unlikely(fault & VM_FAULT_ERROR)) {
...
@@ -10,6 +10,8 @@
 #include <linux/cred.h>
 #include <linux/refcount.h>
 #include <linux/posix-timers.h>
+#include <linux/mm_types.h>
+#include <asm/ptrace.h>
 
 /*
  * Types defining task->signal and task->sighand and APIs using them:
@@ -369,6 +371,19 @@ static inline int signal_pending_state(long state, struct task_struct *p)
 	return (state & TASK_INTERRUPTIBLE) || __fatal_signal_pending(p);
 }
 
+/*
+ * This should only be used in fault handlers to decide whether we
+ * should stop the current fault routine to handle the signals
+ * instead, especially with the case where we've got interrupted with
+ * a VM_FAULT_RETRY.
+ */
+static inline bool fault_signal_pending(vm_fault_t fault_flags,
+					struct pt_regs *regs)
+{
+	return unlikely((fault_flags & VM_FAULT_RETRY) &&
+			fatal_signal_pending(current));
+}
+
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
...