Commit 0f048701 authored by Steven Rostedt's avatar Steven Rostedt Committed by Ingo Molnar

ftrace: soft tracing stop and start

Impact: add a way to quickly start/stop tracing from the kernel

This patch adds a soft stop and start to the trace. This simply
disables function tracing via the ftrace_disabled flag, and
disables the trace buffers to prevent recording. The tracing
code may still be executed, but the trace will not be recorded.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 60a7ecf4
...@@ -216,6 +216,9 @@ static inline void __ftrace_enabled_restore(int enabled) ...@@ -216,6 +216,9 @@ static inline void __ftrace_enabled_restore(int enabled)
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
extern int ftrace_dump_on_oops; extern int ftrace_dump_on_oops;
extern void tracing_start(void);
extern void tracing_stop(void);
extern void
ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3);
...@@ -246,6 +249,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { } ...@@ -246,6 +249,8 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3) { }
static inline int static inline int
ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0))); ftrace_printk(const char *fmt, ...) __attribute__ ((format (printf, 1, 0)));
static inline void tracing_start(void) { }
static inline void tracing_stop(void) { }
static inline int static inline int
ftrace_printk(const char *fmt, ...) ftrace_printk(const char *fmt, ...)
{ {
......
...@@ -43,6 +43,15 @@ ...@@ -43,6 +43,15 @@
unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
unsigned long __read_mostly tracing_thresh; unsigned long __read_mostly tracing_thresh;
/*
* Kill all tracing for good (never come back).
* It is initialized to 1 but will turn to zero if the initialization
* of the tracer is successful. But that is the only place that sets
* this back to zero.
*/
int tracing_disabled = 1;
static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled); static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
static inline void ftrace_disable_cpu(void) static inline void ftrace_disable_cpu(void)
...@@ -62,8 +71,6 @@ static cpumask_t __read_mostly tracing_buffer_mask; ...@@ -62,8 +71,6 @@ static cpumask_t __read_mostly tracing_buffer_mask;
#define for_each_tracing_cpu(cpu) \ #define for_each_tracing_cpu(cpu) \
for_each_cpu_mask(cpu, tracing_buffer_mask) for_each_cpu_mask(cpu, tracing_buffer_mask)
static int tracing_disabled = 1;
/* /*
* ftrace_dump_on_oops - variable to dump ftrace buffer on oops * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
* *
...@@ -613,6 +620,76 @@ static void trace_init_cmdlines(void) ...@@ -613,6 +620,76 @@ static void trace_init_cmdlines(void)
cmdline_idx = 0; cmdline_idx = 0;
} }
/* Nesting depth of tracing_stop() calls; 0 means recording is enabled. */
static int trace_stop_count;
/* Serializes tracing_start()/tracing_stop() manipulation of trace_stop_count. */
static DEFINE_SPINLOCK(tracing_start_lock);
/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up. Starts are reference
 * counted: only the start matching the outermost tracing_stop
 * actually re-enables the ring buffers.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	/* tracing_disabled is the permanent kill switch; never come back. */
	if (tracing_disabled)
		return;

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/*
			 * Someone screwed up their debugging and called
			 * tracing_start() without a matching tracing_stop().
			 * Warn once and clamp the counter so the imbalance
			 * does not poison future start/stop pairs.
			 *
			 * Note: this check must be nested here; if it were
			 * placed after an unconditional "goto out" it would
			 * be unreachable (-1 is truthy).
			 */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Outermost start: re-enable recording on both trace buffers. */
	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	ftrace_start();
 out:
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
/**
 * tracing_stop - quick stop of the tracer
 *
 * Light weight way to stop tracing. Use in conjunction with
 * tracing_start. Stops nest: only the first (outermost) stop
 * disables the ring buffers; inner stops just bump the count.
 */
void tracing_stop(void)
{
	unsigned long flags;
	struct ring_buffer *buf;

	/* Stop function tracing before disabling buffer recording. */
	ftrace_stop();

	spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++ == 0) {
		/* Outermost stop: disable recording on both trace buffers. */
		buf = global_trace.buffer;
		if (buf)
			ring_buffer_record_disable(buf);

		buf = max_tr.buffer;
		if (buf)
			ring_buffer_record_disable(buf);
	}
	spin_unlock_irqrestore(&tracing_start_lock, flags);
}
void trace_stop_cmdline_recording(void); void trace_stop_cmdline_recording(void);
static void trace_save_cmdline(struct task_struct *tsk) static void trace_save_cmdline(struct task_struct *tsk)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment