Commit 182e9f5f authored by Steven Rostedt, committed by Ingo Molnar

ftrace: insert in the ftrace_preempt_disable()/enable() functions

Impact: use new, consolidated APIs in ftrace plugins

This patch replaces the open-coded, schedule-safe preempt-disable sequences with the
ftrace_preempt_disable() and ftrace_preempt_enable() helper functions.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8f0a056f
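
For reference, the consolidated helpers that the call sites below switch to were added to
kernel/trace/trace.h by the parent commit. A rough sketch of what they look like (paraphrased
here for context, not part of this diff): the need_resched() state is sampled before preemption
is disabled, so the matching enable can avoid triggering a reschedule from inside the tracer.

static inline int ftrace_preempt_disable(void)
{
        int resched;

        /* Sample the resched state before preemption is disabled. */
        resched = need_resched();
        preempt_disable_notrace();

        return resched;
}

static inline void ftrace_preempt_enable(int resched)
{
        /*
         * If a reschedule was already pending when preemption was disabled,
         * re-enable without rescheduling here to avoid recursing into the
         * scheduler from the tracer.
         */
        if (resched)
                preempt_enable_no_resched_notrace();
        else
                preempt_enable_notrace();
}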
@@ -16,6 +16,8 @@
 #include <linux/list.h>
 #include <linux/fs.h>

+#include "trace.h"
+
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
@@ -1122,8 +1124,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
                 return NULL;

         /* If we are tracing schedule, we don't want to recurse */
-        resched = need_resched();
-        preempt_disable_notrace();
+        resched = ftrace_preempt_disable();

         cpu = raw_smp_processor_id();
@@ -1154,10 +1155,7 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer,
         return event;

 out:
-        if (resched)
-                preempt_enable_notrace();
-        else
-                preempt_enable_notrace();
+        ftrace_preempt_enable(resched);

         return NULL;
 }
@@ -1199,12 +1197,9 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
         /*
          * Only the last preempt count needs to restore preemption.
          */
-        if (preempt_count() == 1) {
-                if (per_cpu(rb_need_resched, cpu))
-                        preempt_enable_no_resched_notrace();
-                else
-                        preempt_enable_notrace();
-        } else
+        if (preempt_count() == 1)
+                ftrace_preempt_enable(per_cpu(rb_need_resched, cpu));
+        else
                 preempt_enable_no_resched_notrace();

         return 0;
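
One asymmetry worth noting in the hunk above: ring_buffer_lock_reserve() and
ring_buffer_unlock_commit() are separate calls, so the resched flag sampled at reserve time
cannot simply be passed as an argument. The surrounding ring_buffer.c code (not shown in this
diff) carries it in the per-cpu rb_need_resched variable; a hedged sketch of the assumed
pairing on the reserve side:

        /*
         * Sketch only: the reserve side is assumed to record the resched
         * state roughly like this before returning the reserved event.
         * The exact code lies outside the hunks shown here.
         */
        if (preempt_count() == 1)
                per_cpu(rb_need_resched, cpu) = resched;

The commit side, shown above, then hands the saved flag to ftrace_preempt_enable() only for the
outermost commit; nested commits just drop their preemption count without rescheduling.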
@@ -1237,8 +1232,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
         if (atomic_read(&buffer->record_disabled))
                 return -EBUSY;

-        resched = need_resched();
-        preempt_disable_notrace();
+        resched = ftrace_preempt_disable();

         cpu = raw_smp_processor_id();
@@ -1264,10 +1258,7 @@ int ring_buffer_write(struct ring_buffer *buffer,
         ret = 0;
 out:
-        if (resched)
-                preempt_enable_no_resched_notrace();
-        else
-                preempt_enable_notrace();
+        ftrace_preempt_enable(resched);

         return ret;
 }
...
@@ -904,8 +904,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                 return;

         pc = preempt_count();
-        resched = need_resched();
-        preempt_disable_notrace();
+        resched = ftrace_preempt_disable();
         local_save_flags(flags);
         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
@@ -915,10 +914,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
                 trace_function(tr, data, ip, parent_ip, flags, pc);

         atomic_dec(&data->disabled);
-        if (resched)
-                preempt_enable_no_resched_notrace();
-        else
-                preempt_enable_notrace();
+        ftrace_preempt_enable(resched);
 }

 static struct ftrace_ops trace_ops __read_mostly =
...
@@ -50,8 +50,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
                 return;

         pc = preempt_count();
-        resched = need_resched();
-        preempt_disable_notrace();
+        resched = ftrace_preempt_disable();

         cpu = raw_smp_processor_id();
         data = tr->data[cpu];
@@ -81,15 +80,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
 out:
         atomic_dec(&data->disabled);

-        /*
-         * To prevent recursion from the scheduler, if the
-         * resched flag was set before we entered, then
-         * don't reschedule.
-         */
-        if (resched)
-                preempt_enable_no_resched_notrace();
-        else
-                preempt_enable_notrace();
+        ftrace_preempt_enable(resched);
 }

 static struct ftrace_ops trace_ops __read_mostly =
...
@@ -107,8 +107,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
         if (unlikely(!ftrace_enabled || stack_trace_disabled))
                 return;

-        resched = need_resched();
-        preempt_disable_notrace();
+        resched = ftrace_preempt_disable();

         cpu = raw_smp_processor_id();
         /* no atomic needed, we only modify this variable by this cpu */
@@ -120,10 +119,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
 out:
         per_cpu(trace_active, cpu)--;
         /* prevent recursion in schedule */
-        if (resched)
-                preempt_enable_no_resched_notrace();
-        else
-                preempt_enable_notrace();
+        ftrace_preempt_enable(resched);
 }

 static struct ftrace_ops trace_ops __read_mostly =
...