Commit 6a1bb025 authored by Peter Xu, committed by Linus Torvalds

mm/arm64: use general page fault accounting

Use the general page fault accounting by passing regs into
handle_mm_fault().  It naturally solves the issue of page faults being
accounted multiple times when a fault is retried.  To do this, we pass
the pt_regs pointer into __do_page_fault().
Signed-off-by: Peter Xu <peterx@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Will Deacon <will@kernel.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Link: http://lkml.kernel.org/r/20200707225021.200906-6-peterx@redhat.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 79fea6c6
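For context, the accounting that handle_mm_fault() performs whenever it is passed a non-NULL regs lives in mm_account_fault() in mm/memory.c, added earlier in this series. A condensed sketch of that logic (simplified for illustration, not the verbatim kernel source):

/*
 * Condensed from mm/memory.c:mm_account_fault() (v5.9-era); comments
 * paraphrased, details trimmed.
 */
static inline void mm_account_fault(struct pt_regs *regs, unsigned long address,
				    unsigned int flags, vm_fault_t ret)
{
	bool major;

	/*
	 * Skip failed faults and incomplete (VM_FAULT_RETRY) faults: a
	 * retried fault is accounted only once, when it completes.  This
	 * is what removes the duplicate counting the per-arch code had.
	 */
	if (ret & (VM_FAULT_ERROR | VM_FAULT_RETRY))
		return;

	/*
	 * The fault counts as major if the final attempt was major, or
	 * if any earlier attempt had to retry (FAULT_FLAG_TRIED).
	 */
	major = (ret & VM_FAULT_MAJOR) || (flags & FAULT_FLAG_TRIED);

	if (major)
		current->maj_flt++;
	else
		current->min_flt++;

	/*
	 * gup callers pass regs == NULL: bump the task counters only and
	 * skip the perf software events.
	 */
	if (!regs)
		return;

	if (major)
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
	else
		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
}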
@@ -404,7 +404,8 @@ static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *re
 #define VM_FAULT_BADACCESS	0x020000
 
 static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
-				  unsigned int mm_flags, unsigned long vm_flags)
+				  unsigned int mm_flags, unsigned long vm_flags,
+				  struct pt_regs *regs)
 {
 	struct vm_area_struct *vma = find_vma(mm, addr);
 
@@ -428,7 +429,7 @@ static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr,
 	 */
 	if (!(vma->vm_flags & vm_flags))
 		return VM_FAULT_BADACCESS;
-	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, NULL);
+	return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs);
 }
 
 static bool is_el0_instruction_abort(unsigned int esr)
@@ -450,7 +451,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 {
 	const struct fault_info *inf;
 	struct mm_struct *mm = current->mm;
-	vm_fault_t fault, major = 0;
+	vm_fault_t fault;
 	unsigned long vm_flags = VM_ACCESS_FLAGS;
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 
@@ -516,8 +517,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 #endif
 	}
 
-	fault = __do_page_fault(mm, addr, mm_flags, vm_flags);
-	major |= fault & VM_FAULT_MAJOR;
+	fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs);
 
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
@@ -538,25 +538,8 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
 	 * Handle the "normal" (no error) case first.
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
-			      VM_FAULT_BADACCESS)))) {
-		/*
-		 * Major/minor page fault accounting is only done
-		 * once. If we go through a retry, it is extremely
-		 * likely that the page will be found in page cache at
-		 * that point.
-		 */
-		if (major) {
-			current->maj_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs,
-				      addr);
-		} else {
-			current->min_flt++;
-			perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs,
-				      addr);
-		}
+			      VM_FAULT_BADACCESS))))
 		return 0;
-	}
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to
...