Commit d438fabc authored by Marco Elver, committed by Linus Torvalds

kfence: use pt_regs to generate stack trace on faults

Instead of removing the fault handling portion of the stack trace based on
the fault handler's name, just use struct pt_regs directly.

Change kfence_handle_page_fault() to take a struct pt_regs, and plumb it
through to kfence_report_error() for out-of-bounds, use-after-free, or
invalid access errors, where pt_regs is used to generate the stack trace.

If the kernel is built with CONFIG_DEBUG_KERNEL, also show registers for more information.

Link: https://lkml.kernel.org/r/20201105092133.2075331-1-elver@google.com
Signed-off-by: Marco Elver <elver@google.com>
Suggested-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Jann Horn <jannh@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 840b2398
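
The hunks below touch the arm64 and x86 fault handlers, the kfence_handle_page_fault() interface, and the KFENCE report code. As a condensed view of the resulting flow, the sketch below paraphrases the changed code from several files at once (locking, metadata lookup, and report formatting are omitted; it is not a drop-in replacement for any single function):

    /*
     * Arch fault handlers (__do_kernel_fault() on arm64, page_fault_oops()
     * on x86) now hand the register state to KFENCE:
     */
    if (kfence_handle_page_fault(addr, regs))
            return; /* KFENCE printed a report and let the access proceed. */

    /* kfence_handle_page_fault() forwards regs to the reporter: */
    kfence_report_error(addr, regs, to_report, error_type);

    /*
     * kfence_report_error() prefers regs for the stack trace; without regs
     * it falls back to saving the current stack and trimming KFENCE-internal
     * frames by name. On CONFIG_DEBUG_KERNEL builds it also dumps registers:
     */
    if (regs) {
            num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
    } else {
            num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
            skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
    }
    ...
    if (IS_ENABLED(CONFIG_DEBUG_KERNEL) && regs)
            show_regs(regs);
    else
            dump_stack_print_info(KERN_ERR);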
@@ -10,8 +10,6 @@
 #include <asm/cacheflush.h>
-#define KFENCE_SKIP_ARCH_FAULT_HANDLER "el1_sync"
 static inline bool arch_kfence_init_pool(void) { return true; }
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
...
@@ -390,7 +390,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
     } else if (addr < PAGE_SIZE) {
         msg = "NULL pointer dereference";
     } else {
-        if (kfence_handle_page_fault(addr))
+        if (kfence_handle_page_fault(addr, regs))
             return;
         msg = "paging request";
...
@@ -16,12 +16,6 @@
 #include <asm/set_memory.h>
 #include <asm/tlbflush.h>
-/*
- * The page fault handler entry function, up to which the stack trace is
- * truncated in reports.
- */
-#define KFENCE_SKIP_ARCH_FAULT_HANDLER "asm_exc_page_fault"
 /* Force 4K pages for __kfence_pool. */
 static inline bool arch_kfence_init_pool(void)
 {
...
@@ -682,7 +682,7 @@ page_fault_oops(struct pt_regs *regs, unsigned long error_code,
     efi_crash_gracefully_on_page_fault(address);
     /* Only not-present faults should be handled by KFENCE. */
-    if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address))
+    if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs))
         return;
 oops:
...
@@ -186,6 +186,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
 /**
  * kfence_handle_page_fault() - perform page fault handling for KFENCE pages
  * @addr: faulting address
+ * @regs: current struct pt_regs (can be NULL, but shows full stack trace)
  *
  * Return:
  * * false - address outside KFENCE pool,
@@ -196,7 +197,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
  * cases KFENCE prints an error message and marks the offending page as
  * present, so that the kernel can proceed.
  */
-bool __must_check kfence_handle_page_fault(unsigned long addr);
+bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs);
 #else /* CONFIG_KFENCE */
@@ -209,7 +210,7 @@ static inline size_t kfence_ksize(const void *addr) { return 0; }
 static inline void *kfence_object_start(const void *addr) { return NULL; }
 static inline void __kfence_free(void *addr) { }
 static inline bool __must_check kfence_free(void *addr) { return false; }
-static inline bool __must_check kfence_handle_page_fault(unsigned long addr) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; }
 #endif
...
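
The kernel-doc above defines the contract a caller relies on: a false return means the address is not a KFENCE page and the architecture's normal fault handling must continue. For an architecture wiring this up, the call site looks roughly like the following sketch (do_kernel_page_fault() and its surroundings are hypothetical placeholders; the real call sites are the arm64 and x86 hunks above):

    #include <linux/kfence.h>

    /* Hypothetical arch fault path; only the KFENCE hook matters here. */
    static void do_kernel_page_fault(unsigned long addr, struct pt_regs *regs)
    {
            /*
             * Give KFENCE first shot at the fault: for addresses in its pool
             * it prints a report (using regs for the stack trace) and lets
             * the access proceed, so nothing more needs to be done.
             */
            if (kfence_handle_page_fault(addr, regs))
                    return;

            /* ... regular fault handling: fixups, oops, etc. ... */
    }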
@@ -216,7 +216,7 @@ static inline bool check_canary_byte(u8 *addr)
         return true;
     atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-    kfence_report_error((unsigned long)addr, addr_to_metadata((unsigned long)addr),
+    kfence_report_error((unsigned long)addr, NULL, addr_to_metadata((unsigned long)addr),
                         KFENCE_ERROR_CORRUPTION);
     return false;
 }
@@ -351,7 +351,7 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
     if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
         /* Invalid or double-free, bail out. */
         atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
-        kfence_report_error((unsigned long)addr, meta, KFENCE_ERROR_INVALID_FREE);
+        kfence_report_error((unsigned long)addr, NULL, meta, KFENCE_ERROR_INVALID_FREE);
         raw_spin_unlock_irqrestore(&meta->lock, flags);
         return;
     }
@@ -766,7 +766,7 @@ void __kfence_free(void *addr)
         kfence_guarded_free(addr, meta, false);
 }
-bool kfence_handle_page_fault(unsigned long addr)
+bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs)
 {
     const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
     struct kfence_metadata *to_report = NULL;
@@ -829,11 +829,11 @@ bool kfence_handle_page_fault(unsigned long addr)
 out:
     if (to_report) {
-        kfence_report_error(addr, to_report, error_type);
+        kfence_report_error(addr, regs, to_report, error_type);
         raw_spin_unlock_irqrestore(&to_report->lock, flags);
     } else {
         /* This may be a UAF or OOB access, but we can't be sure. */
-        kfence_report_error(addr, NULL, KFENCE_ERROR_INVALID);
+        kfence_report_error(addr, regs, NULL, KFENCE_ERROR_INVALID);
     }
     return kfence_unprotect(addr); /* Unprotect and let access proceed. */
...
@@ -105,8 +105,8 @@ enum kfence_error_type {
     KFENCE_ERROR_INVALID_FREE, /* Invalid free. */
 };
-void kfence_report_error(unsigned long address, const struct kfence_metadata *meta,
-                         enum kfence_error_type type);
+void kfence_report_error(unsigned long address, struct pt_regs *regs,
+                         const struct kfence_metadata *meta, enum kfence_error_type type);
 void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
...
@@ -10,6 +10,7 @@
 #include <linux/kernel.h>
 #include <linux/lockdep.h>
 #include <linux/printk.h>
+#include <linux/sched/debug.h>
 #include <linux/seq_file.h>
 #include <linux/stacktrace.h>
 #include <linux/string.h>
@@ -41,7 +42,6 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
 {
     char buf[64];
     int skipnr, fallback = 0;
-    bool is_access_fault = false;
     if (type) {
         /* Depending on error type, find different stack entries. */
@@ -49,8 +49,12 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
         case KFENCE_ERROR_UAF:
         case KFENCE_ERROR_OOB:
         case KFENCE_ERROR_INVALID:
-            is_access_fault = true;
-            break;
+            /*
+             * kfence_handle_page_fault() may be called with pt_regs
+             * set to NULL; in that case we'll simply show the full
+             * stack trace.
+             */
+            return 0;
         case KFENCE_ERROR_CORRUPTION:
         case KFENCE_ERROR_INVALID_FREE:
             break;
@@ -60,26 +64,21 @@ static int get_stack_skipnr(const unsigned long stack_entries[], int num_entries
     for (skipnr = 0; skipnr < num_entries; skipnr++) {
         int len = scnprintf(buf, sizeof(buf), "%ps", (void *)stack_entries[skipnr]);
-        if (is_access_fault) {
-            if (!strncmp(buf, KFENCE_SKIP_ARCH_FAULT_HANDLER, len))
-                goto found;
-        } else {
-            if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") ||
-                !strncmp(buf, "__slab_free", len)) {
-                /*
-                 * In case of tail calls from any of the below
-                 * to any of the above.
-                 */
-                fallback = skipnr + 1;
-            }
-            /* Also the *_bulk() variants by only checking prefixes. */
-            if (str_has_prefix(buf, "kfree") ||
-                str_has_prefix(buf, "kmem_cache_free") ||
-                str_has_prefix(buf, "__kmalloc") ||
-                str_has_prefix(buf, "kmem_cache_alloc"))
-                goto found;
-        }
+        if (str_has_prefix(buf, "kfence_") || str_has_prefix(buf, "__kfence_") ||
+            !strncmp(buf, "__slab_free", len)) {
+            /*
+             * In case of tail calls from any of the below
+             * to any of the above.
+             */
+            fallback = skipnr + 1;
+        }
+        /* Also the *_bulk() variants by only checking prefixes. */
+        if (str_has_prefix(buf, "kfree") ||
+            str_has_prefix(buf, "kmem_cache_free") ||
+            str_has_prefix(buf, "__kmalloc") ||
+            str_has_prefix(buf, "kmem_cache_alloc"))
+            goto found;
     }
     if (fallback < num_entries)
         return fallback;
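
The name-based skipping that remains above now only applies to reports without register state (memory corruption detected on free, invalid frees). Its effect can be reproduced in a stand-alone program; the sketch below is an illustration, not kernel code: the frame names are invented, and the kernel's str_has_prefix()/scnprintf() handling is approximated with strncmp().

    #include <stdio.h>
    #include <string.h>

    static int has_prefix(const char *s, const char *prefix)
    {
            return strncmp(s, prefix, strlen(prefix)) == 0;
    }

    int main(void)
    {
            const char *trace[] = {
                    "kfence_report_error", /* KFENCE-internal: only advances the fallback */
                    "kfence_guarded_free", /* likewise */
                    "__kfence_free",       /* likewise */
                    "kfree",               /* allocator entry point: matched */
                    "my_driver_release",   /* invented caller that owns the object */
            };
            int num = sizeof(trace) / sizeof(trace[0]);
            int skipnr, fallback = 0;

            for (skipnr = 0; skipnr < num; skipnr++) {
                    const char *buf = trace[skipnr];

                    if (has_prefix(buf, "kfence_") || has_prefix(buf, "__kfence_") ||
                        !strcmp(buf, "__slab_free"))
                            fallback = skipnr + 1; /* safety net for tail calls */

                    if (has_prefix(buf, "kfree") || has_prefix(buf, "kmem_cache_free") ||
                        has_prefix(buf, "__kmalloc") || has_prefix(buf, "kmem_cache_alloc"))
                            break; /* report starts at or just past this frame */
            }

            if (skipnr == num)
                    skipnr = fallback; /* no allocator frame found: use the fallback */

            printf("report would start around frame %d (%s)\n", skipnr, trace[skipnr]);
            return 0;
    }

Run on this invented trace, the loop settles on the "kfree" frame, so the printed trace begins at the allocator call site rather than inside KFENCE itself.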
@@ -157,13 +156,20 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show,
     pr_cont(" ]");
 }
-void kfence_report_error(unsigned long address, const struct kfence_metadata *meta,
-                         enum kfence_error_type type)
+void kfence_report_error(unsigned long address, struct pt_regs *regs,
+                         const struct kfence_metadata *meta, enum kfence_error_type type)
 {
     unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
-    int num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
-    int skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
     const ptrdiff_t object_index = meta ? meta - kfence_metadata : -1;
+    int num_stack_entries;
+    int skipnr = 0;
+    if (regs) {
+        num_stack_entries = stack_trace_save_regs(regs, stack_entries, KFENCE_STACK_DEPTH, 0);
+    } else {
+        num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 1);
+        skipnr = get_stack_skipnr(stack_entries, num_stack_entries, &type);
+    }
     /* Require non-NULL meta, except if KFENCE_ERROR_INVALID. */
     if (WARN_ON(type != KFENCE_ERROR_INVALID && !meta))
@@ -227,7 +233,10 @@ void kfence_report_error(unsigned long address, const struct kfence_metadata *me
     /* Print report footer. */
     pr_err("\n");
-    dump_stack_print_info(KERN_ERR);
+    if (IS_ENABLED(CONFIG_DEBUG_KERNEL) && regs)
+        show_regs(regs);
+    else
+        dump_stack_print_info(KERN_ERR);
     pr_err("==================================================================\n");
     lockdep_on();
...