Commit a1e2e31d authored by Steven Rostedt, committed by Steven Rostedt

ftrace: Return pt_regs to function trace callback

Return the pt_regs as the 4th parameter to the function tracer callback.

Later patches that implement regs passing for the architectures will require
the ftrace_ops to set the SAVE_REGS flag, which tells the arch to take the
time to pass a full set of pt_regs to the ftrace_ops callback function. If
the arch does not support saving regs, it should pass NULL.

If an arch can pass full regs, then it should define:
 ARCH_SUPPORTS_FTRACE_SAVE_REGS to 1

Link: http://lkml.kernel.org/r/20120702201821.019966811@goodmis.org
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent ccf3672d
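For illustration only, here is a minimal sketch (not part of this commit) of what a callback using the new fourth parameter might look like. The names my_trace_callback and my_ops are hypothetical; the SAVE_REGS flag itself arrives with the later patches mentioned above, so any callback that wants regs must tolerate a NULL pointer on arches that do not pass them.

/* Illustrative sketch only -- my_trace_callback and my_ops are hypothetical. */
#include <linux/ftrace.h>
#include <linux/ptrace.h>

static void my_trace_callback(unsigned long ip, unsigned long parent_ip,
                              struct ftrace_ops *op, struct pt_regs *regs)
{
        /* Arches that do not (or cannot) save regs pass NULL here. */
        if (!regs)
                return;

        trace_printk("func %ps called from %ps, pc=%lx\n",
                     (void *)ip, (void *)parent_ip, instruction_pointer(regs));
}

static struct ftrace_ops my_ops = {
        .func = my_trace_callback,
};

/* Registered as usual with register_ftrace_function(&my_ops). */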
@@ -10,6 +10,7 @@
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/ptrace.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -54,7 +55,7 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 struct ftrace_ops;

 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
-                              struct ftrace_ops *op);
+                              struct ftrace_ops *op, struct pt_regs *regs);

 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -188,7 +189,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
         return *this_cpu_ptr(ops->disabled);
 }

-extern void ftrace_stub(unsigned long a0, unsigned long a1, struct ftrace_ops *op);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+                        struct ftrace_ops *op, struct pt_regs *regs);

 #else /* !CONFIG_FUNCTION_TRACER */
 /*
......
@@ -103,7 +103,7 @@ static struct ftrace_ops control_ops;
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                 struct ftrace_ops *op);
+                                 struct ftrace_ops *op, struct pt_regs *regs);
 #else
 /* See comment below, where ftrace_ops_list_func is defined */
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
@@ -121,7 +121,7 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
  */
 static void
 ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
-                        struct ftrace_ops *op)
+                        struct ftrace_ops *op, struct pt_regs *regs)
 {
         if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
                 return;
@@ -129,19 +129,19 @@ ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
         trace_recursion_set(TRACE_GLOBAL_BIT);
         op = rcu_dereference_raw(ftrace_global_list); /*see above*/
         while (op != &ftrace_list_end) {
-                op->func(ip, parent_ip, op);
+                op->func(ip, parent_ip, op, regs);
                 op = rcu_dereference_raw(op->next); /*see above*/
         };
         trace_recursion_clear(TRACE_GLOBAL_BIT);
 }

 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
-                            struct ftrace_ops *op)
+                            struct ftrace_ops *op, struct pt_regs *regs)
 {
         if (!test_tsk_trace_trace(current))
                 return;

-        ftrace_pid_function(ip, parent_ip, op);
+        ftrace_pid_function(ip, parent_ip, op, regs);
 }

 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -763,7 +763,7 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
 static void
 function_profile_call(unsigned long ip, unsigned long parent_ip,
-                      struct ftrace_ops *ops)
+                      struct ftrace_ops *ops, struct pt_regs *regs)
 {
         struct ftrace_profile_stat *stat;
         struct ftrace_profile *rec;
@@ -793,7 +793,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip,
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 static int profile_graph_entry(struct ftrace_graph_ent *trace)
 {
-        function_profile_call(trace->func, 0, NULL);
+        function_profile_call(trace->func, 0, NULL, NULL);
         return 1;
 }
@@ -2771,7 +2771,7 @@ static int __init ftrace_mod_cmd_init(void)
 device_initcall(ftrace_mod_cmd_init);

 static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
-                                      struct ftrace_ops *op)
+                                      struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct ftrace_func_probe *entry;
         struct hlist_head *hhd;
@@ -3923,7 +3923,7 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 static void
 ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
-                        struct ftrace_ops *op)
+                        struct ftrace_ops *op, struct pt_regs *regs)
 {
         if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
                 return;
@@ -3938,7 +3938,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
         while (op != &ftrace_list_end) {
                 if (!ftrace_function_local_disabled(op) &&
                     ftrace_ops_test(op, ip))
-                        op->func(ip, parent_ip, op);
+                        op->func(ip, parent_ip, op, regs);
                 op = rcu_dereference_raw(op->next);
         };
@@ -3952,7 +3952,7 @@ static struct ftrace_ops control_ops = {
 static inline void
 __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                       struct ftrace_ops *ignored)
+                       struct ftrace_ops *ignored, struct pt_regs *regs)
 {
         struct ftrace_ops *op;
@@ -3971,7 +3971,7 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
         op = rcu_dereference_raw(ftrace_ops_list);
         while (op != &ftrace_list_end) {
                 if (ftrace_ops_test(op, ip))
-                        op->func(ip, parent_ip, op);
+                        op->func(ip, parent_ip, op, regs);
                 op = rcu_dereference_raw(op->next);
         };
         preempt_enable_notrace();
@@ -3983,17 +3983,24 @@ __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
  * the list function ignores the op parameter, we do not want any
  * C side effects, where a function is called without the caller
  * sending a third parameter.
+ * Archs are to support both the regs and ftrace_ops at the same time.
+ * If they support ftrace_ops, it is assumed they support regs.
+ * If callbacks want to use regs, they must either check for regs
+ * being NULL, or ARCH_SUPPORTS_FTRACE_SAVE_REGS.
+ * Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full set of regs to be saved.
+ * An architecture can pass partial regs with ftrace_ops and still
+ * set ARCH_SUPPORTS_FTRACE_OPS.
  */
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
-                                 struct ftrace_ops *op)
+                                 struct ftrace_ops *op, struct pt_regs *regs)
 {
-        __ftrace_ops_list_func(ip, parent_ip, NULL);
+        __ftrace_ops_list_func(ip, parent_ip, NULL, regs);
 }
 #else
 static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 {
-        __ftrace_ops_list_func(ip, parent_ip, NULL);
+        __ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
 }
 #endif
......
@@ -259,7 +259,7 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
 #ifdef CONFIG_FUNCTION_TRACER
 static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *ops)
+                          struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
         struct ftrace_entry *entry;
         struct hlist_head *head;
......
@@ -1682,7 +1682,7 @@ static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
 static void
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *op)
+                          struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct ring_buffer_event *event;
         struct ring_buffer *buffer;
......
@@ -49,7 +49,7 @@ static void function_trace_start(struct trace_array *tr)
 static void
 function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
-                                 struct ftrace_ops *op)
+                                 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct trace_array *tr = func_trace;
         struct trace_array_cpu *data;
@@ -77,7 +77,8 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
 static void
 function_trace_call(unsigned long ip, unsigned long parent_ip,
-                    struct ftrace_ops *op)
+                    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct trace_array *tr = func_trace;
         struct trace_array_cpu *data;
@@ -109,7 +110,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
 static void
 function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
-                          struct ftrace_ops *op)
+                          struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct trace_array *tr = func_trace;
         struct trace_array_cpu *data;
......
@@ -137,7 +137,7 @@ static int func_prolog_dec(struct trace_array *tr,
  */
 static void
 irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
-                    struct ftrace_ops *op)
+                    struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct trace_array *tr = irqsoff_trace;
         struct trace_array_cpu *data;
......
@@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr,
  * wakeup uses its own tracer function to keep the overhead down:
  */
 static void
-wakeup_tracer_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
+wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
+                   struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         struct trace_array *tr = wakeup_trace;
         struct trace_array_cpu *data;
......
@@ -104,7 +104,8 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
 static int trace_selftest_test_probe1_cnt;
 static void trace_selftest_test_probe1_func(unsigned long ip,
                                             unsigned long pip,
-                                            struct ftrace_ops *op)
+                                            struct ftrace_ops *op,
+                                            struct pt_regs *pt_regs)
 {
         trace_selftest_test_probe1_cnt++;
 }
@@ -112,7 +113,8 @@ static void trace_selftest_test_probe1_func(unsigned long ip,
 static int trace_selftest_test_probe2_cnt;
 static void trace_selftest_test_probe2_func(unsigned long ip,
                                             unsigned long pip,
-                                            struct ftrace_ops *op)
+                                            struct ftrace_ops *op,
+                                            struct pt_regs *pt_regs)
 {
         trace_selftest_test_probe2_cnt++;
 }
@@ -120,7 +122,8 @@ static void trace_selftest_test_probe2_func(unsigned long ip,
 static int trace_selftest_test_probe3_cnt;
 static void trace_selftest_test_probe3_func(unsigned long ip,
                                             unsigned long pip,
-                                            struct ftrace_ops *op)
+                                            struct ftrace_ops *op,
+                                            struct pt_regs *pt_regs)
 {
         trace_selftest_test_probe3_cnt++;
 }
@@ -128,7 +131,8 @@ static void trace_selftest_test_probe3_func(unsigned long ip,
 static int trace_selftest_test_global_cnt;
 static void trace_selftest_test_global_func(unsigned long ip,
                                             unsigned long pip,
-                                            struct ftrace_ops *op)
+                                            struct ftrace_ops *op,
+                                            struct pt_regs *pt_regs)
 {
         trace_selftest_test_global_cnt++;
 }
@@ -136,7 +140,8 @@ static void trace_selftest_test_global_func(unsigned long ip,
 static int trace_selftest_test_dyn_cnt;
 static void trace_selftest_test_dyn_func(unsigned long ip,
                                          unsigned long pip,
-                                         struct ftrace_ops *op)
+                                         struct ftrace_ops *op,
+                                         struct pt_regs *pt_regs)
 {
         trace_selftest_test_dyn_cnt++;
 }
......
@@ -111,7 +111,8 @@ static inline void check_stack(void)
 }

 static void
-stack_trace_call(unsigned long ip, unsigned long parent_ip, struct ftrace_ops *op)
+stack_trace_call(unsigned long ip, unsigned long parent_ip,
+                 struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
         int cpu;
......