Commit 4740974a authored by Steven Rostedt, committed by Steven Rostedt

ftrace: Add default recursion protection for function tracing

As more users of the function tracer utility are added, they do not
always add the necessary recursion protection. To protect against
function recursion due to tracing, if the callback's ftrace_ops does
not explicitly state that it protects against recursion (by setting
the FTRACE_OPS_FL_RECURSION_SAFE flag), the mcount trampoline will
call the list operation, which adds recursion protection.

If the flag is set, then the callback is called directly with no
extra protection.
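
To make the guarded path concrete, here is a minimal sketch of the kind of
recursion protection the list operation provides. This is an illustrative
assumption, not the kernel's actual implementation: the function name
ftrace_ops_list_func_sketch and the per-CPU in_list_func flag are invented
for this sketch (the real list function uses the kernel's trace-recursion
helpers), and the callback signature is simplified to two arguments.

#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/preempt.h>

static DEFINE_PER_CPU(int, in_list_func);	/* hypothetical guard flag */

static void ftrace_ops_list_func_sketch(unsigned long ip,
					unsigned long parent_ip)
{
	struct ftrace_ops *op;

	preempt_disable_notrace();
	/* If a callback itself hits a traced function, the trampoline
	 * re-enters here; the per-CPU flag breaks that loop. */
	if (__this_cpu_read(in_list_func))
		goto out;
	__this_cpu_write(in_list_func, 1);

	/* Walk every registered ftrace_ops and invoke its callback. */
	for (op = ftrace_ops_list; op != &ftrace_list_end; op = op->next)
		op->func(ip, parent_ip);

	__this_cpu_write(in_list_func, 0);
out:
	preempt_enable_notrace();
}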

Note, the list operation is called if more than one function callback
is registered, or if the arch does not support all of the function
tracer features.
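
From a callback author's point of view, the flag works as in the sketch
below. The names my_callback and my_ops are hypothetical, the signature is
simplified, and the flag is only set because this callback is assumed to do
its own recursion handling:

#include <linux/ftrace.h>

/* Hypothetical callback that handles recursion itself
 * (e.g. it never calls anything that can be traced). */
static void my_callback(unsigned long ip, unsigned long parent_ip)
{
	/* ... must not recurse into traced code ... */
}

static struct ftrace_ops my_ops = {
	.func	= my_callback,
	/* Dropping this flag makes the trampoline route calls through
	 * the guarded list operation instead of calling us directly. */
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

/* Registered as usual: register_ftrace_function(&my_ops); */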
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 5767cfea
@@ -85,6 +85,10 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  *            passing regs to the handler.
  *            Note, if this flag is set, the SAVE_REGS flag will automatically
  *            get set upon registering the ftrace_ops, if the arch supports it.
+ * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
+ *            that the callback has its own recursion protection. If it does
+ *            not set this, then the ftrace infrastructure will add recursion
+ *            protection for the caller.
  */
 enum {
 	FTRACE_OPS_FL_ENABLED			= 1 << 0,
@@ -93,6 +97,7 @@ enum {
 	FTRACE_OPS_FL_CONTROL			= 1 << 3,
 	FTRACE_OPS_FL_SAVE_REGS			= 1 << 4,
 	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 5,
+	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 6,
 };
 
 struct ftrace_ops {
......
@@ -66,6 +66,7 @@
 static struct ftrace_ops ftrace_list_end __read_mostly = {
 	.func		= ftrace_stub,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 /* ftrace_enabled is a method to turn ftrace on or off */
@@ -221,12 +222,13 @@ static void update_ftrace_function(void)
 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic and the arch supports passing ops, then have the
-	 * mcount trampoline call the function directly.
+	 * recursion safe and not dynamic and the arch supports passing ops,
+	 * then have the mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
 	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
 	     !FTRACE_FORCE_LIST_FUNC)) {
 		/* Set the ftrace_ops that the arch callback uses */
 		if (ftrace_ops_list == &global_ops)
@@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
 #else
 static struct ftrace_ops ftrace_profile_ops __read_mostly = {
 	.func		= function_profile_call,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int register_ftrace_profiler(void)
@@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
 	.notrace_hash		= EMPTY_HASH,
 	.filter_hash		= EMPTY_HASH,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static DEFINE_MUTEX(ftrace_regex_lock);
@@ -3967,6 +3971,7 @@ void __init ftrace_init(void)
 static struct ftrace_ops global_ops = {
 	.func			= ftrace_stub,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static int __init ftrace_nodyn_init(void)
@@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops control_ops = {
 	.func	= ftrace_ops_control_func,
+	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static inline void
......
@@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __initdata  =
 {
 	.func = function_test_events_call,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static __init void event_trace_self_test_with_function(void)
......
...@@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = function_trace_call, .func = function_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops trace_stack_ops __read_mostly = static struct ftrace_ops trace_stack_ops __read_mostly =
{ {
.func = function_stack_trace_call, .func = function_stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
/* Our two options */ /* Our two options */
......
@@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = irqsoff_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
......
@@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = wakeup_tracer_call,
-	.flags = FTRACE_OPS_FL_GLOBAL,
+	.flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 #endif /* CONFIG_FUNCTION_TRACER */
......
@@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,
 static struct ftrace_ops test_probe1 = {
 	.func			= trace_selftest_test_probe1_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe2 = {
 	.func			= trace_selftest_test_probe2_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_probe3 = {
 	.func			= trace_selftest_test_probe3_func,
+	.flags			= FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static struct ftrace_ops test_global = {
 	.func			= trace_selftest_test_global_func,
-	.flags			= FTRACE_OPS_FL_GLOBAL,
+	.flags			= FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static void print_counts(void)
......
@@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
 static struct ftrace_ops trace_ops __read_mostly =
 {
 	.func = stack_trace_call,
+	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
 };
 
 static ssize_t
......