Commit 3ea2e6d7 authored by Steven Rostedt, committed by Ingo Molnar

ftrace: make some tracers reentrant

Now that the ring buffer is reentrant, some of the ftrace tracers
(sched_switch, debugging traces) can also be made reentrant.

Note: Never make the function tracer reentrant; that can cause
  recursion problems all over the kernel. The function tracer
  must disable reentrancy.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bf41a158
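
For orientation, here is a minimal, self-contained sketch of the pattern change this commit makes, written against C11 <stdatomic.h> rather than the kernel's atomic_t API; the struct and helper names are illustrative stand-ins, not the kernel code itself:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the kernel's struct trace_array_cpu; only the field used here. */
struct trace_array_cpu {
	atomic_int disabled;
};

/*
 * Old pattern: the per-cpu "disabled" counter doubled as a recursion guard.
 * Only the outermost caller on a CPU saw the counter go 0 -> 1 and traced;
 * any nested entry saw a higher value and bailed out.
 */
static bool old_guard_enter(struct trace_array_cpu *data)
{
	return atomic_fetch_add(&data->disabled, 1) == 0; /* like atomic_inc_return() == 1 */
}

static void old_guard_exit(struct trace_array_cpu *data)
{
	atomic_fetch_sub(&data->disabled, 1); /* like atomic_dec() */
}

/*
 * New pattern: with a reentrant ring buffer the inc/dec pair is unnecessary.
 * The tracer only checks whether tracing is administratively disabled on
 * this CPU; nested events on the same CPU are recorded instead of dropped.
 */
static bool new_guard(struct trace_array_cpu *data)
{
	return atomic_load(&data->disabled) == 0; /* like !atomic_read() */
}

int main(void)
{
	struct trace_array_cpu data = { .disabled = 0 };

	if (old_guard_enter(&data))
		puts("old pattern: only the outermost entry traces");
	old_guard_exit(&data);

	if (new_guard(&data))
		puts("new pattern: traces whenever tracing is not disabled");
	return 0;
}
```

The per-cpu disabled counter is still honored, so tracing can still be switched off per CPU, but it is no longer bumped and re-checked around every event; that is what allows a nested event on the same CPU to be recorded rather than dropped.
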
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -839,7 +839,6 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 {
 	struct trace_array *tr = &global_trace;
 	struct trace_array_cpu *data;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -850,12 +849,10 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
 
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 }
@@ -2961,7 +2958,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	struct trace_array_cpu *data;
 	struct print_entry *entry;
 	unsigned long flags, irq_flags;
-	long disabled;
 	int cpu, len = 0, size, pc;
 
 	if (!tr->ctrl || tracing_disabled)
@@ -2971,9 +2967,8 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	preempt_disable_notrace();
 	cpu = raw_smp_processor_id();
 	data = tr->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (unlikely(disabled != 1))
+	if (unlikely(atomic_read(&data->disabled)))
 		goto out;
 
 	spin_lock_irqsave(&trace_buf_lock, flags);
@@ -2999,7 +2994,6 @@ int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
 	spin_unlock_irqrestore(&trace_buf_lock, flags);
 
  out:
-	atomic_dec(&data->disabled);
 	preempt_enable_notrace();
 
 	return len;
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -24,7 +24,6 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu;
 	int pc;
 
@@ -41,12 +40,10 @@ probe_sched_switch(struct rq *__rq, struct task_struct *prev,
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
 
@@ -55,7 +52,6 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 {
 	struct trace_array_cpu *data;
 	unsigned long flags;
-	long disabled;
 	int cpu, pc;
 
 	if (!likely(tracer_enabled))
@@ -67,13 +63,11 @@ probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
 	data = ctx_trace->data[cpu];
-	disabled = atomic_inc_return(&data->disabled);
 
-	if (likely(disabled == 1))
+	if (likely(!atomic_read(&data->disabled)))
 		tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
 					   flags, pc);
 
-	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
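
The commit's note about the function tracer is worth illustrating. A rough, self-contained model (hypothetical names, user-space C rather than kernel code) of why the counter-based guard has to stay there: anything on the trace path that is not marked notrace is itself a traced function, so a read-only check would recurse, while the inc/dec guard cuts the nesting off at depth one:

```c
#include <stdatomic.h>
#include <stdio.h>

/* Stand-in for per-cpu data; only the recursion/disable counter. */
struct trace_array_cpu {
	atomic_int disabled;
};

static struct trace_array_cpu cpu_data;

static void trace_function(unsigned long ip);

/*
 * Illustrative stand-in for the work done while recording an event.
 * In the kernel, any helper on this path that is not notrace would be
 * traced itself, which is modeled here by calling the tracer again.
 */
static void record_event(unsigned long ip)
{
	printf("traced ip=%#lx\n", ip);
	if (ip == 0x1000)
		trace_function(0x2000); /* nested, as if a helper were traced */
}

/*
 * Counter-based guard, as the function tracer keeps it: only the
 * outermost entry on this CPU records; the nested call above sees the
 * counter already raised and returns without recursing further.
 */
static void trace_function(unsigned long ip)
{
	if (atomic_fetch_add(&cpu_data.disabled, 1) == 0)
		record_event(ip);
	atomic_fetch_sub(&cpu_data.disabled, 1);
}

int main(void)
{
	trace_function(0x1000); /* prints only the outer event */
	return 0;
}
```

With the read-only check introduced by this patch in that spot instead, the nested call would also be recorded, and a callback that traces every function would never bottom out, which is the recursion problem the commit message warns about.
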