Commit 758d39eb authored by Heiko Carstens, committed by Martin Schwidefsky

s390/dumpstack: merge all four stack tracers

We have four different stack tracers of which three had bugs. So it's
time to merge them into a single stack tracer that takes a callback
function, which will be called for each step.

This patch changes behavior a bit:

- the "nosched" and "in_sched_functions" check within
  save_stack_trace_tsk did work only for the last stack frame within a
  context. Now it considers the check for each stack frame like it
  should.

- both the oprofile variant and the perf_events variant saved a
  return address twice if a zero back chain was detected, which
  indicates an interrupt frame. The new dump_trace function instead
  calls the oprofile and perf_events backends with the psw address
  that is contained within the corresponding pt_regs structure.

- the original show_trace and save_context_stack functions already
  used the psw address of the pt_regs structure if a zero back chain
  was detected. However, the psw address is now ignored if it is a user
  space address. After all, we trace the kernel stack and not the user
  space stack. This way we also get rid of the garbage user space
  address in warning and/or panic call traces.

So this should make life easier since now there is only one stack
tracer left which we can break.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 3c2c126a
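
For illustration only, a minimal consumer of the new interface might look like
the sketch below. Only dump_trace(), the dump_trace_func_t signature, and the
stop-on-non-zero-return convention are taken from this patch; the
frame-counting callback itself is a made-up example and not part of the patch:

  /* Hypothetical example: count the kernel stack frames of the current
   * context using the new dump_trace() callback interface. */
  static int count_frame(void *data, unsigned long address)
  {
  	unsigned int *count = data;

  	(*count)++;
  	return 0;	/* a non-zero return value stops the stack walk */
  }

  static unsigned int count_stack_frames(void)
  {
  	unsigned int count = 0;

  	/* task == NULL walks the stacks of the current context */
  	dump_trace(count_frame, &count, NULL, current_stack_pointer());
  	return count;
  }

The callbacks added by this patch (show_address, __perf_callchain_kernel,
__save_address and __s390_backtrace) all follow exactly this pattern.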
@@ -184,6 +184,10 @@ struct task_struct;
 struct mm_struct;
 struct seq_file;
 
+typedef int (*dump_trace_func_t)(void *data, unsigned long address);
+void dump_trace(dump_trace_func_t func, void *data,
+		struct task_struct *task, unsigned long sp);
+
 void show_cacheinfo(struct seq_file *m);
 
 /* Free all resources held by a thread. */
......
@@ -19,28 +19,28 @@
 #include <asm/ipl.h>
 
 /*
- * For show_trace we have tree different stack to consider:
+ * For dump_trace we have tree different stack to consider:
  *  - the panic stack which is used if the kernel stack has overflown
  *  - the asynchronous interrupt stack (cpu related)
  *  - the synchronous kernel stack (process related)
- * The stack trace can start at any of the three stack and can potentially
+ * The stack trace can start at any of the three stacks and can potentially
  * touch all of them. The order is: panic stack, async stack, sync stack.
  */
 static unsigned long
-__show_trace(unsigned long sp, unsigned long low, unsigned long high)
+__dump_trace(dump_trace_func_t func, void *data, unsigned long sp,
+	     unsigned long low, unsigned long high)
 {
 	struct stack_frame *sf;
 	struct pt_regs *regs;
-	unsigned long addr;
 
 	while (1) {
 		if (sp < low || sp > high - sizeof(*sf))
 			return sp;
 		sf = (struct stack_frame *) sp;
-		addr = sf->gprs[8];
-		printk("([<%016lx>] %pSR)\n", addr, (void *)addr);
 		/* Follow the backchain. */
 		while (1) {
+			if (func(data, sf->gprs[8]))
+				return sp;
 			low = sp;
 			sp = sf->back_chain;
 			if (!sp)
@@ -48,45 +48,58 @@ __show_trace(unsigned long sp, unsigned long low, unsigned long high)
 			if (sp <= low || sp > high - sizeof(*sf))
 				return sp;
 			sf = (struct stack_frame *) sp;
-			addr = sf->gprs[8];
-			printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
 		}
 		/* Zero backchain detected, check for interrupt frame. */
 		sp = (unsigned long) (sf + 1);
 		if (sp <= low || sp > high - sizeof(*regs))
 			return sp;
 		regs = (struct pt_regs *) sp;
-		addr = regs->psw.addr;
-		printk(" [<%016lx>] %pSR\n", addr, (void *)addr);
+		if (!user_mode(regs)) {
+			if (func(data, regs->psw.addr))
+				return sp;
+		}
 		low = sp;
 		sp = regs->gprs[15];
 	}
 }
 
-static void show_trace(struct task_struct *task, unsigned long *stack)
+void dump_trace(dump_trace_func_t func, void *data, struct task_struct *task,
+		unsigned long sp)
 {
-	const unsigned long frame_size =
-		STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	unsigned long sp;
+	unsigned long frame_size;
 
-	sp = (unsigned long) stack;
-	if (!sp)
-		sp = task ? task->thread.ksp : current_stack_pointer();
-	printk("Call Trace:\n");
+	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
 #ifdef CONFIG_CHECK_STACK
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
 			  S390_lowcore.panic_stack + frame_size - 4096,
 			  S390_lowcore.panic_stack + frame_size);
 #endif
-	sp = __show_trace(sp,
+	sp = __dump_trace(func, data, sp,
 			  S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
 			  S390_lowcore.async_stack + frame_size);
 	if (task)
-		__show_trace(sp, (unsigned long) task_stack_page(task),
-			     (unsigned long) task_stack_page(task) + THREAD_SIZE);
+		__dump_trace(func, data, sp,
+			     (unsigned long)task_stack_page(task),
+			     (unsigned long)task_stack_page(task) + THREAD_SIZE);
 	else
-		__show_trace(sp, S390_lowcore.thread_info,
+		__dump_trace(func, data, sp,
+			     S390_lowcore.thread_info,
 			     S390_lowcore.thread_info + THREAD_SIZE);
+}
+EXPORT_SYMBOL_GPL(dump_trace);
+
+static int show_address(void *data, unsigned long address)
+{
+	printk("([<%016lx>] %pSR)\n", address, (void *)address);
+	return 0;
+}
+
+static void show_trace(struct task_struct *task, unsigned long sp)
+{
+	if (!sp)
+		sp = task ? task->thread.ksp : current_stack_pointer();
+	printk("Call Trace:\n");
+	dump_trace(show_address, NULL, task, sp);
 	if (!task)
 		task = current;
 	debug_show_held_locks(task);
@@ -112,7 +125,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 		printk("%016lx ", *stack++);
 	}
 	printk("\n");
-	show_trace(task, sp);
+	show_trace(task, (unsigned long)sp);
 }
 
 static void show_last_breaking_event(struct pt_regs *regs)
@@ -152,7 +165,7 @@ void show_regs(struct pt_regs *regs)
 	show_registers(regs);
 	/* Show stack backtrace if pt_regs is from kernel mode */
 	if (!user_mode(regs))
-		show_trace(NULL, (unsigned long *) regs->gprs[15]);
+		show_trace(NULL, regs->gprs[15]);
 	show_last_breaking_event(regs);
 }
......
@@ -222,64 +222,20 @@ static int __init service_level_perf_register(void)
 }
 arch_initcall(service_level_perf_register);
 
-/* See also arch/s390/kernel/traps.c */
-static unsigned long __store_trace(struct perf_callchain_entry *entry,
-				   unsigned long sp,
-				   unsigned long low, unsigned long high)
+static int __perf_callchain_kernel(void *data, unsigned long address)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-
-	while (1) {
-		if (sp < low || sp > high - sizeof(*sf))
-			return sp;
-		sf = (struct stack_frame *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		/* Follow the backchain. */
-		while (1) {
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *) sp;
-			perf_callchain_store(entry, sf->gprs[8]);
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long) (sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *) sp;
-		perf_callchain_store(entry, sf->gprs[8]);
-		low = sp;
-		sp = regs->gprs[15];
-	}
+	struct perf_callchain_entry *entry = data;
+
+	perf_callchain_store(entry, address);
+	return 0;
 }
 
 void perf_callchain_kernel(struct perf_callchain_entry *entry,
 			   struct pt_regs *regs)
 {
-	unsigned long head, frame_size;
-	struct stack_frame *head_sf;
-
 	if (user_mode(regs))
 		return;
-
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	head = regs->gprs[15];
-	head_sf = (struct stack_frame *) head;
-
-	if (!head_sf || !head_sf->back_chain)
-		return;
-
-	head = head_sf->back_chain;
-	head = __store_trace(entry, head,
-			     S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			     S390_lowcore.async_stack + frame_size);
-
-	__store_trace(entry, head, S390_lowcore.thread_info,
-		      S390_lowcore.thread_info + THREAD_SIZE);
+	dump_trace(__perf_callchain_kernel, entry, NULL, regs->gprs[15]);
 }
 
 /* Perf defintions for PMU event attributes in sysfs */
......
@@ -10,69 +10,31 @@
 #include <linux/kallsyms.h>
 #include <linux/module.h>
 
-static unsigned long save_context_stack(struct stack_trace *trace,
-					unsigned long sp,
-					unsigned long low,
-					unsigned long high,
-					int nosched)
+static int __save_address(void *data, unsigned long address, int nosched)
 {
-	struct stack_frame *sf;
-	struct pt_regs *regs;
-	unsigned long addr;
+	struct stack_trace *trace = data;
 
-	while(1) {
-		if (sp < low || sp > high)
-			return sp;
-		sf = (struct stack_frame *)sp;
-		while(1) {
-			addr = sf->gprs[8];
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-			if (trace->nr_entries >= trace->max_entries)
-				return sp;
-			low = sp;
-			sp = sf->back_chain;
-			if (!sp)
-				break;
-			if (sp <= low || sp > high - sizeof(*sf))
-				return sp;
-			sf = (struct stack_frame *)sp;
-		}
-		/* Zero backchain detected, check for interrupt frame. */
-		sp = (unsigned long)(sf + 1);
-		if (sp <= low || sp > high - sizeof(*regs))
-			return sp;
-		regs = (struct pt_regs *)sp;
-		addr = regs->psw.addr;
-		if (!nosched || !in_sched_functions(addr)) {
-			if (!trace->skip)
-				trace->entries[trace->nr_entries++] = addr;
-			else
-				trace->skip--;
-		}
-		if (trace->nr_entries >= trace->max_entries)
-			return sp;
-		low = sp;
-		sp = regs->gprs[15];
+	if (nosched && in_sched_functions(address))
+		return 0;
+	if (trace->skip > 0) {
+		trace->skip--;
+		return 0;
 	}
+	if (trace->nr_entries < trace->max_entries) {
+		trace->entries[trace->nr_entries++] = address;
+		return 0;
+	}
+	return 1;
 }
 
-static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
+static int save_address(void *data, unsigned long address)
 {
-	unsigned long new_sp, frame_size;
+	return __save_address(data, address, 0);
+}
 
-	frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
-	new_sp = save_context_stack(trace, sp,
-			S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
-			S390_lowcore.panic_stack + frame_size, 0);
-	new_sp = save_context_stack(trace, new_sp,
-			S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
-			S390_lowcore.async_stack + frame_size, 0);
-	save_context_stack(trace, new_sp,
-			S390_lowcore.thread_info,
-			S390_lowcore.thread_info + THREAD_SIZE, 0);
+static int save_address_nosched(void *data, unsigned long address)
+{
+	return __save_address(data, address, 1);
 }
 
 void save_stack_trace(struct stack_trace *trace)
@@ -80,7 +42,7 @@ void save_stack_trace(struct stack_trace *trace)
 	unsigned long sp;
 
 	sp = current_stack_pointer();
-	__save_stack_trace(trace, sp);
+	dump_trace(save_address, trace, NULL, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -88,14 +50,12 @@ EXPORT_SYMBOL_GPL(save_stack_trace);
 
 void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
-	unsigned long sp, low, high;
+	unsigned long sp;
 
 	sp = tsk->thread.ksp;
 	if (tsk == current)
 		sp = current_stack_pointer();
-	low = (unsigned long) task_stack_page(tsk);
-	high = (unsigned long) task_pt_regs(tsk);
-	save_context_stack(trace, sp, low, high, 1);
+	dump_trace(save_address_nosched, trace, tsk, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
@@ -106,7 +66,7 @@ void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
 	unsigned long sp;
 
 	sp = kernel_stack_pointer(regs);
-	__save_stack_trace(trace, sp);
+	dump_trace(save_address, trace, NULL, sp);
 	if (trace->nr_entries < trace->max_entries)
 		trace->entries[trace->nr_entries++] = ULONG_MAX;
 }
......
@@ -6,5 +6,5 @@ DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
 		oprofilefs.o oprofile_stats.o \
 		timer_int.o )
 
-oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+oprofile-y := $(DRIVER_OBJS) init.o
 oprofile-y += hwsampler.o
/*
* S390 Version
* Copyright IBM Corp. 2005
* Author(s): Andreas Krebbel <Andreas.Krebbel@de.ibm.com>
*/
#include <linux/oprofile.h>
#include <asm/processor.h> /* for struct stack_frame */
static unsigned long
__show_trace(unsigned int *depth, unsigned long sp,
unsigned long low, unsigned long high)
{
struct stack_frame *sf;
struct pt_regs *regs;
while (*depth) {
if (sp < low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
(*depth)--;
oprofile_add_trace(sf->gprs[8]);
/* Follow the backchain. */
while (*depth) {
low = sp;
sp = sf->back_chain;
if (!sp)
break;
if (sp <= low || sp > high - sizeof(*sf))
return sp;
sf = (struct stack_frame *) sp;
(*depth)--;
oprofile_add_trace(sf->gprs[8]);
}
if (*depth == 0)
break;
/* Zero backchain detected, check for interrupt frame. */
sp = (unsigned long) (sf + 1);
if (sp <= low || sp > high - sizeof(*regs))
return sp;
regs = (struct pt_regs *) sp;
(*depth)--;
oprofile_add_trace(sf->gprs[8]);
low = sp;
sp = regs->gprs[15];
}
return sp;
}
void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
{
unsigned long head, frame_size;
struct stack_frame* head_sf;
if (user_mode(regs))
return;
frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
head = regs->gprs[15];
head_sf = (struct stack_frame*)head;
if (!head_sf->back_chain)
return;
head = head_sf->back_chain;
head = __show_trace(&depth, head,
S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
S390_lowcore.async_stack + frame_size);
__show_trace(&depth, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
}
@@ -20,8 +20,6 @@
 
 #include "../../../drivers/oprofile/oprof.h"
 
-extern void s390_backtrace(struct pt_regs * const regs, unsigned int depth);
-
 #include "hwsampler.h"
 #include "op_counter.h"
 
@@ -494,6 +492,24 @@ static void oprofile_hwsampler_exit(void)
 	hwsampler_shutdown();
 }
 
+static int __s390_backtrace(void *data, unsigned long address)
+{
+	unsigned int *depth = data;
+
+	if (*depth == 0)
+		return 1;
+	(*depth)--;
+	oprofile_add_trace(address);
+	return 0;
+}
+
+static void s390_backtrace(struct pt_regs *regs, unsigned int depth)
+{
+	if (user_mode(regs))
+		return;
+	dump_trace(__s390_backtrace, &depth, NULL, regs->gprs[15]);
+}
+
 int __init oprofile_arch_init(struct oprofile_operations *ops)
 {
 	ops->backtrace = s390_backtrace;
......