Commit 47ad7b9b authored by Chris Metcalf

tile: improve stack backtrace

This commit fixes a number of issues with the tile backtrace code.

- Don't try to identify userspace shared object or executable paths
  if we are doing a backtrace from an interrupt; it's not legal,
  and also unlikely to be interesting.  Likewise, don't try to do
  it for other address spaces, since d_path() assumes it is being
  called in "current" context.  (A minimal sketch of this guard
  appears just after the commit metadata below.)

- Move "in_backtrace" from thread_struct to thread_info.
  This way we can access it even if our stack thread_info has been
  clobbered, which makes backtracing more robust.

- Avoid using "current" directly when testing for is_sigreturn().
  Since "current" may be corrupt, we're better off using kbt->task
  explicitly to look up the vdso_base for the current task.
  Conveniently, this simplifies the internal APIs (we only need
  one is_sigreturn() function now).

- Avoid bogus "Odd fault" warning when pc/sp/ex1 are all zero,
  as is true for kernel threads above the last frame.

- Hook into Tejun Heo's dump_stack() framework in lib/dump_stack.c.

- Write last entry in save_stack_trace() as ULONG_MAX, not zero,
  since ftrace (at least) relies on finding that marker.

- Implement save_stack_trace_regs() and save_stack_trace_user(),
  and select USER_STACKTRACE_SUPPORT.

Signed-off-by: Chris Metcalf <cmetcalf@ezchip.com>
parent e8200baa
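
To make the first bullet concrete, here is a minimal sketch of the guard it implies (illustrative only; can_describe_userspace() is a hypothetical helper, not part of the patch; the real test is open-coded in tile_show_stack() below):

    /*
     * Only attempt mmap_sem / d_path() work when we are in "current"
     * context (d_path() assumes it) and not in an interrupt, since
     * d_path() takes spinlocks without disabling interrupts.
     */
    #include <linux/hardirq.h>	/* in_interrupt() */
    #include <linux/sched.h>

    static inline bool can_describe_userspace(struct task_struct *task)
    {
    	return task == current && task->mm && !in_interrupt();
    }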
@@ -24,6 +24,7 @@ config TILE
 	select MODULES_USE_ELF_RELA
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_SYSCALL_TRACEPOINTS
+	select USER_STACKTRACE_SUPPORT
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select HAVE_DEBUG_STACKOVERFLOW
 	select ARCH_WANT_FRAME_POINTERS
......
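Selecting USER_STACKTRACE_SUPPORT matters because the generic header otherwise stubs out the user-stack hook entirely; the gating looks roughly like this (from include/linux/stacktrace.h of this era, quoted approximately):

    #ifdef CONFIG_USER_STACKTRACE_SUPPORT
    extern void save_stack_trace_user(struct stack_trace *trace);
    #else
    # define save_stack_trace_user(trace)	do { } while (0)
    #endif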
...@@ -111,8 +111,6 @@ struct thread_struct { ...@@ -111,8 +111,6 @@ struct thread_struct {
unsigned long long interrupt_mask; unsigned long long interrupt_mask;
/* User interrupt-control 0 state */ /* User interrupt-control 0 state */
unsigned long intctrl_0; unsigned long intctrl_0;
/* Is this task currently doing a backtrace? */
bool in_backtrace;
/* Any other miscellaneous processor state bits */ /* Any other miscellaneous processor state bits */
unsigned long proc_status; unsigned long proc_status;
#if !CHIP_HAS_FIXED_INTVEC_BASE() #if !CHIP_HAS_FIXED_INTVEC_BASE()
......
@@ -58,17 +58,14 @@ extern int KBacktraceIterator_end(struct KBacktraceIterator *kbt);
 /* Advance to the next frame. */
 extern void KBacktraceIterator_next(struct KBacktraceIterator *kbt);
 
+/* Dump just the contents of the pt_regs structure. */
+extern void tile_show_regs(struct pt_regs *);
+
 /*
  * Dump stack given complete register info.  Use only from the
  * architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-extern void tile_show_stack(struct KBacktraceIterator *, int headers);
-
-/* Dump stack of current process, with registers to seed the backtrace. */
-extern void dump_stack_regs(struct pt_regs *);
-
-/* Helper method for assembly dump_stack(). */
-extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+extern void tile_show_stack(struct KBacktraceIterator *);
 
 #endif /* _ASM_TILE_STACK_H */
@@ -42,6 +42,7 @@ struct thread_info {
 	unsigned long unalign_jit_tmp[4];	/* temp r0..r3 storage */
 	void __user *unalign_jit_base;		/* unalign fixup JIT base */
 #endif
+	bool in_backtrace;			/* currently doing backtrace? */
 };
 
 /*
......
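Why thread_info is the more robust home for the flag: it is located from the stack pointer alone, so reading it never chases a possibly-corrupt task pointer the way current->thread.in_backtrace does. A rough sketch (not tile's exact implementation):

    /*
     * Rough sketch of current_thread_info() on stack-based thread_info
     * architectures of this era: mask the stack pointer down to the
     * base of the kernel stack.  No task_struct pointer is involved.
     */
    static inline struct thread_info *sketch_current_thread_info(void)
    {
    	unsigned long sp;

    	/* any way of reading sp works; this asm is illustrative */
    	asm ("move %0, sp" : "=r" (sp));
    	return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }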
...@@ -27,13 +27,6 @@ STD_ENTRY(current_text_addr) ...@@ -27,13 +27,6 @@ STD_ENTRY(current_text_addr)
{ move r0, lr; jrp lr } { move r0, lr; jrp lr }
STD_ENDPROC(current_text_addr) STD_ENDPROC(current_text_addr)
STD_ENTRY(dump_stack)
{ move r2, lr; lnk r1 }
{ move r4, r52; addli r1, r1, dump_stack - . }
{ move r3, sp; j _dump_stack }
jrp lr /* keep backtracer happy */
STD_ENDPROC(dump_stack)
STD_ENTRY(KBacktraceIterator_init_current) STD_ENTRY(KBacktraceIterator_init_current)
{ move r2, lr; lnk r1 } { move r2, lr; lnk r1 }
{ move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . } { move r4, r52; addli r1, r1, KBacktraceIterator_init_current - . }
......
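For reference, the generic framework that replaces this assembly entry point prints the banner and then calls back into the arch, so deleting the tile-specific dump_stack() is enough to pick it up (abridged from lib/dump_stack.c of this era):

    /* Abridged sketch of the framework this patch hooks into. */
    static void __dump_stack(void)
    {
    	dump_stack_print_info(KERN_DEFAULT);
    	show_stack(NULL, NULL);	/* arch hook; see stack.c changes below */
    }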
@@ -546,33 +546,43 @@ void exit_thread(void)
 #endif
 }
 
-void show_regs(struct pt_regs *regs)
+void tile_show_regs(struct pt_regs *regs)
 {
-	struct task_struct *tsk = validate_current();
 	int i;
 
-	if (tsk != &corrupt_current)
-		show_regs_print_info(KERN_ERR);
 #ifdef __tilegx__
 	for (i = 0; i < 17; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+18, regs->regs[i+18],
 		       i+36, regs->regs[i+36]);
-	pr_err(" r17: " REGFMT " r35: " REGFMT " tp : " REGFMT "\n",
+	pr_err(" r17: "REGFMT" r35: "REGFMT" tp : "REGFMT"\n",
 	       regs->regs[17], regs->regs[35], regs->tp);
-	pr_err(" sp : " REGFMT " lr : " REGFMT "\n", regs->sp, regs->lr);
+	pr_err(" sp : "REGFMT" lr : "REGFMT"\n", regs->sp, regs->lr);
 #else
 	for (i = 0; i < 13; i++)
-		pr_err(" r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT " r%-2d: " REGFMT "\n",
+		pr_err(" r%-2d: "REGFMT" r%-2d: "REGFMT
+		       " r%-2d: "REGFMT" r%-2d: "REGFMT"\n",
 		       i, regs->regs[i], i+14, regs->regs[i+14],
 		       i+27, regs->regs[i+27], i+40, regs->regs[i+40]);
-	pr_err(" r13: " REGFMT " tp : " REGFMT " sp : " REGFMT " lr : " REGFMT "\n",
+	pr_err(" r13: "REGFMT" tp : "REGFMT" sp : "REGFMT" lr : "REGFMT"\n",
 	       regs->regs[13], regs->tp, regs->sp, regs->lr);
 #endif
-	pr_err(" pc : " REGFMT " ex1: %ld faultnum: %ld\n",
-	       regs->pc, regs->ex1, regs->faultnum);
-
-	dump_stack_regs(regs);
+	pr_err(" pc : "REGFMT" ex1: %ld faultnum: %ld flags:%s%s%s%s\n",
+	       regs->pc, regs->ex1, regs->faultnum,
+	       is_compat_task() ? " compat" : "",
+	       (regs->flags & PT_FLAGS_DISABLE_IRQ) ? " noirq" : "",
+	       !(regs->flags & PT_FLAGS_CALLER_SAVES) ? " nocallersave" : "",
+	       (regs->flags & PT_FLAGS_RESTORE_REGS) ? " restoreregs" : "");
+}
+
+void show_regs(struct pt_regs *regs)
+{
+	struct KBacktraceIterator kbt;
+
+	show_regs_print_info(KERN_DEFAULT);
+	tile_show_regs(regs);
+	KBacktraceIterator_init(&kbt, NULL, regs);
+	tile_show_stack(&kbt);
 }
 
 /* To ensure stack dump on tiles occurs one by one. */
......
@@ -23,6 +23,7 @@
 #include <linux/mmzone.h>
 #include <linux/dcache.h>
 #include <linux/fs.h>
+#include <linux/hardirq.h>
 #include <linux/string.h>
 #include <asm/backtrace.h>
 #include <asm/page.h>
@@ -109,7 +110,7 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 		if (kbt->verbose)
 			pr_err("  <%s while in user mode>\n", fault);
 	} else {
-		if (kbt->verbose)
+		if (kbt->verbose && (p->pc != 0 || p->sp != 0 || p->ex1 != 0))
 			pr_err("  (odd fault: pc %#lx, sp %#lx, ex1 %#lx?)\n",
 			       p->pc, p->sp, p->ex1);
 		return NULL;
@@ -119,10 +120,12 @@ static struct pt_regs *valid_fault_handler(struct KBacktraceIterator* kbt)
 	return p;
 }
 
-/* Is the pc pointing to a sigreturn trampoline? */
-static int is_sigreturn(unsigned long pc)
+/* Is the iterator pointing to a sigreturn trampoline? */
+static int is_sigreturn(struct KBacktraceIterator *kbt)
 {
-	return current->mm && (pc == VDSO_SYM(&__vdso_rt_sigreturn));
+	return kbt->task->mm &&
+	       (kbt->it.pc == ((ulong)kbt->task->mm->context.vdso_base +
+			       (ulong)&__vdso_rt_sigreturn));
 }
 
 /* Return a pt_regs pointer for a valid signal handler frame */
@@ -131,7 +134,7 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 {
 	BacktraceIterator *b = &kbt->it;
 
-	if (is_sigreturn(b->pc) && b->sp < PAGE_OFFSET &&
+	if (is_sigreturn(kbt) && b->sp < PAGE_OFFSET &&
 	    b->sp % sizeof(long) == 0) {
 		int retval;
 
 		pagefault_disable();
@@ -151,11 +154,6 @@ static struct pt_regs *valid_sigframe(struct KBacktraceIterator* kbt,
 	return NULL;
 }
 
-static int KBacktraceIterator_is_sigreturn(struct KBacktraceIterator *kbt)
-{
-	return is_sigreturn(kbt->it.pc);
-}
-
 static int KBacktraceIterator_restart(struct KBacktraceIterator *kbt)
 {
 	struct pt_regs *p;
@@ -178,7 +176,7 @@ static int KBacktraceIterator_next_item_inclusive(
 {
 	for (;;) {
 		do {
-			if (!KBacktraceIterator_is_sigreturn(kbt))
+			if (!is_sigreturn(kbt))
 				return KBT_ONGOING;
 		} while (backtrace_next(&kbt->it));
@@ -357,51 +355,50 @@ static void describe_addr(struct KBacktraceIterator *kbt,
  */
 static bool start_backtrace(void)
 {
-	if (current->thread.in_backtrace) {
+	if (current_thread_info()->in_backtrace) {
 		pr_err("Backtrace requested while in backtrace!\n");
 		return false;
 	}
-	current->thread.in_backtrace = true;
+	current_thread_info()->in_backtrace = true;
 	return true;
 }
 
 static void end_backtrace(void)
 {
-	current->thread.in_backtrace = false;
+	current_thread_info()->in_backtrace = false;
 }
 
 /*
  * This method wraps the backtracer's more generic support.
  * It is only invoked from the architecture-specific code; show_stack()
- * and dump_stack() (in entry.S) are architecture-independent entry points.
+ * and dump_stack() are architecture-independent entry points.
  */
-void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
+void tile_show_stack(struct KBacktraceIterator *kbt)
 {
 	int i;
 	int have_mmap_sem = 0;
 
 	if (!start_backtrace())
 		return;
-	if (headers) {
-		/*
-		 * Add a blank line since if we are called from panic(),
-		 * then bust_spinlocks() spit out a space in front of us
-		 * and it will mess up our KERN_ERR.
-		 */
-		pr_err("Starting stack dump of tid %d, pid %d (%s) on cpu %d at cycle %lld\n",
-		       kbt->task->pid, kbt->task->tgid, kbt->task->comm,
-		       raw_smp_processor_id(), get_cycles());
-	}
 	kbt->verbose = 1;
 	i = 0;
 	for (; !KBacktraceIterator_end(kbt); KBacktraceIterator_next(kbt)) {
 		char namebuf[KSYM_NAME_LEN+100];
 		unsigned long address = kbt->it.pc;
 
-		/* Try to acquire the mmap_sem as we pass into userspace. */
-		if (address < PAGE_OFFSET && !have_mmap_sem && kbt->task->mm)
+		/*
+		 * Try to acquire the mmap_sem as we pass into userspace.
+		 * If we're in an interrupt context, don't even try, since
+		 * it's not safe to call e.g. d_path() from an interrupt,
+		 * since it uses spin locks without disabling interrupts.
+		 * Note we test "kbt->task == current", not "kbt->is_current",
+		 * since we're checking that "current" will work in d_path().
+		 */
+		if (kbt->task == current && address < PAGE_OFFSET &&
+		    !have_mmap_sem && kbt->task->mm && !in_interrupt()) {
 			have_mmap_sem =
 				down_read_trylock(&kbt->task->mm->mmap_sem);
+		}
 
 		describe_addr(kbt, address, have_mmap_sem,
 			      namebuf, sizeof(namebuf));
@@ -416,24 +413,12 @@ void tile_show_stack(struct KBacktraceIterator *kbt, int headers)
 	}
 	if (kbt->end == KBT_LOOP)
 		pr_err("Stack dump stopped; next frame identical to this one\n");
-	if (headers)
-		pr_err("Stack dump complete\n");
 	if (have_mmap_sem)
 		up_read(&kbt->task->mm->mmap_sem);
 	end_backtrace();
 }
 EXPORT_SYMBOL(tile_show_stack);
-/* This is called from show_regs() and _dump_stack() */
-void dump_stack_regs(struct pt_regs *regs)
-{
-	struct KBacktraceIterator kbt;
-	KBacktraceIterator_init(&kbt, NULL, regs);
-	tile_show_stack(&kbt, 1);
-}
-EXPORT_SYMBOL(dump_stack_regs);
-
 static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 				       ulong pc, ulong lr, ulong sp, ulong r52)
 {
@@ -445,11 +430,15 @@ static struct pt_regs *regs_to_pt_regs(struct pt_regs *regs,
 	return regs;
 }
 
-/* This is called from dump_stack() and just converts to pt_regs */
+/* Deprecated function currently only used by kernel_double_fault(). */
 void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
+	struct KBacktraceIterator kbt;
 	struct pt_regs regs;
-	dump_stack_regs(regs_to_pt_regs(&regs, pc, lr, sp, r52));
+
+	regs_to_pt_regs(&regs, pc, lr, sp, r52);
+	KBacktraceIterator_init(&kbt, NULL, &regs);
+	tile_show_stack(&kbt);
 }
 
 /* This is called from KBacktraceIterator_init_current() */
@@ -461,22 +450,30 @@ void _KBacktraceIterator_init_current(struct KBacktraceIterator *kbt, ulong pc,
 			regs_to_pt_regs(&regs, pc, lr, sp, r52));
 }
 
-/* This is called only from kernel/sched/core.c, with esp == NULL */
+/*
+ * Called from sched_show_task() with task != NULL, or dump_stack()
+ * with task == NULL.  The esp argument is always NULL.
+ */
 void show_stack(struct task_struct *task, unsigned long *esp)
 {
 	struct KBacktraceIterator kbt;
 
-	if (task == NULL || task == current)
+	if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		KBacktraceIterator_next(&kbt); /* don't show first frame */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
-	tile_show_stack(&kbt, 0);
+	}
+	tile_show_stack(&kbt);
 }
 #ifdef CONFIG_STACKTRACE
 
 /* Support generic Linux stack API too */
-void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+static void save_stack_trace_common(struct task_struct *task,
+				    struct pt_regs *regs,
+				    bool user,
+				    struct stack_trace *trace)
 {
 	struct KBacktraceIterator kbt;
 	int skip = trace->skip;
@@ -484,31 +481,57 @@ void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
 	if (!start_backtrace())
 		goto done;
-	if (task == NULL || task == current)
+	if (regs != NULL) {
+		KBacktraceIterator_init(&kbt, NULL, regs);
+	} else if (task == NULL || task == current) {
 		KBacktraceIterator_init_current(&kbt);
-	else
+		skip++; /* don't show KBacktraceIterator_init_current */
+	} else {
 		KBacktraceIterator_init(&kbt, task, NULL);
+	}
 	for (; !KBacktraceIterator_end(&kbt); KBacktraceIterator_next(&kbt)) {
 		if (skip) {
 			--skip;
 			continue;
 		}
-		if (i >= trace->max_entries || kbt.it.pc < PAGE_OFFSET)
+		if (i >= trace->max_entries ||
+		    (!user && kbt.it.pc < PAGE_OFFSET))
 			break;
 		trace->entries[i++] = kbt.it.pc;
 	}
 	end_backtrace();
 done:
+	if (i < trace->max_entries)
+		trace->entries[i++] = ULONG_MAX;
 	trace->nr_entries = i;
 }
+
+void save_stack_trace_tsk(struct task_struct *task, struct stack_trace *trace)
+{
+	save_stack_trace_common(task, NULL, false, trace);
+}
 EXPORT_SYMBOL(save_stack_trace_tsk);
 
 void save_stack_trace(struct stack_trace *trace)
 {
-	save_stack_trace_tsk(NULL, trace);
+	save_stack_trace_common(NULL, NULL, false, trace);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace);
 
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+	save_stack_trace_common(NULL, regs, false, trace);
+}
+
+void save_stack_trace_user(struct stack_trace *trace)
+{
+	/* Trace user stack if we are not a kernel thread. */
+	if (current->mm)
+		save_stack_trace_common(NULL, task_pt_regs(current),
+					true, trace);
+	else if (trace->nr_entries < trace->max_entries)
+		trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+
 #endif
 
 /* In entry.S */
......
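As a usage sketch for the generic API implemented above (a hypothetical caller, not part of the patch), note how a consumer stops at the ULONG_MAX terminator that save_stack_trace() now writes; ftrace relies on exactly this marker:

    static void example_print_trace(void)
    {
    	unsigned long entries[16];
    	struct stack_trace trace = {
    		.entries	= entries,
    		.max_entries	= ARRAY_SIZE(entries),
    		.skip		= 0,
    	};
    	unsigned int i;

    	save_stack_trace(&trace);
    	for (i = 0; i < trace.nr_entries; i++) {
    		if (trace.entries[i] == ULONG_MAX)	/* end marker */
    			break;
    		pr_info(" %pS\n", (void *)trace.entries[i]);
    	}
    }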
@@ -407,6 +407,9 @@ void do_nmi(struct pt_regs *regs, int fault_num, unsigned long reason)
 	}
 }
 
+/* Deprecated function currently only used here. */
+extern void _dump_stack(int dummy, ulong pc, ulong lr, ulong sp, ulong r52);
+
 void kernel_double_fault(int dummy, ulong pc, ulong lr, ulong sp, ulong r52)
 {
 	_dump_stack(dummy, pc, lr, sp, r52);
......
...@@ -26,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm); ...@@ -26,7 +26,6 @@ EXPORT_SYMBOL(finv_user_asm);
#include <linux/kernel.h> #include <linux/kernel.h>
#include <asm/processor.h> #include <asm/processor.h>
EXPORT_SYMBOL(current_text_addr); EXPORT_SYMBOL(current_text_addr);
EXPORT_SYMBOL(dump_stack);
/* arch/tile/kernel/head.S */ /* arch/tile/kernel/head.S */
EXPORT_SYMBOL(empty_zero_page); EXPORT_SYMBOL(empty_zero_page);
......