Commit 45ad21ca authored by Steven Rostedt (Red Hat)'s avatar Steven Rostedt (Red Hat) Committed by Steven Rostedt

tracing: Have trace_array keep track if snapshot buffer is allocated

The snapshot buffer belongs to the trace array, not the tracer that is
running. The trace array should be the data structure that keeps track
of whether or not the snapshot buffer is allocated, not the tracer
descriptor. Having the trace array keep track of it makes modifications
so much easier.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 6de58e62
...@@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -667,7 +667,7 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (!tr->current_trace->allocated_snapshot) { if (!tr->allocated_snapshot) {
/* Only the nop tracer should hit this when disabling */ /* Only the nop tracer should hit this when disabling */
WARN_ON_ONCE(tr->current_trace != &nop_trace); WARN_ON_ONCE(tr->current_trace != &nop_trace);
return; return;
...@@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) ...@@ -700,7 +700,7 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
return; return;
WARN_ON_ONCE(!irqs_disabled()); WARN_ON_ONCE(!irqs_disabled());
if (WARN_ON_ONCE(!tr->current_trace->allocated_snapshot)) if (WARN_ON_ONCE(!tr->allocated_snapshot))
return; return;
arch_spin_lock(&ftrace_max_lock); arch_spin_lock(&ftrace_max_lock);
...@@ -802,7 +802,7 @@ int register_tracer(struct tracer *type) ...@@ -802,7 +802,7 @@ int register_tracer(struct tracer *type)
if (ring_buffer_expanded) if (ring_buffer_expanded)
ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size, ring_buffer_resize(tr->max_buffer.buffer, trace_buf_size,
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
type->allocated_snapshot = true; tr->allocated_snapshot = true;
} }
#endif #endif
...@@ -822,7 +822,7 @@ int register_tracer(struct tracer *type) ...@@ -822,7 +822,7 @@ int register_tracer(struct tracer *type)
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
if (type->use_max_tr) { if (type->use_max_tr) {
type->allocated_snapshot = false; tr->allocated_snapshot = false;
/* Shrink the max buffer again */ /* Shrink the max buffer again */
if (ring_buffer_expanded) if (ring_buffer_expanded)
...@@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m) ...@@ -2463,7 +2463,7 @@ static void show_snapshot_percpu_help(struct seq_file *m)
static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter) static void print_snapshot_help(struct seq_file *m, struct trace_iterator *iter)
{ {
if (iter->trace->allocated_snapshot) if (iter->tr->allocated_snapshot)
seq_printf(m, "#\n# * Snapshot is allocated *\n#\n"); seq_printf(m, "#\n# * Snapshot is allocated *\n#\n");
else else
seq_printf(m, "#\n# * Snapshot is freed *\n#\n"); seq_printf(m, "#\n# * Snapshot is freed *\n#\n");
...@@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf) ...@@ -3364,12 +3364,12 @@ static int tracing_set_tracer(const char *buf)
if (tr->current_trace->reset) if (tr->current_trace->reset)
tr->current_trace->reset(tr); tr->current_trace->reset(tr);
#ifdef CONFIG_TRACER_MAX_TRACE
had_max_tr = tr->current_trace->allocated_snapshot;
/* Current trace needs to be nop_trace before synchronize_sched */ /* Current trace needs to be nop_trace before synchronize_sched */
tr->current_trace = &nop_trace; tr->current_trace = &nop_trace;
#ifdef CONFIG_TRACER_MAX_TRACE
had_max_tr = tr->allocated_snapshot;
if (had_max_tr && !t->use_max_tr) { if (had_max_tr && !t->use_max_tr) {
/* /*
* We need to make sure that the update_max_tr sees that * We need to make sure that the update_max_tr sees that
...@@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf) ...@@ -3387,10 +3387,8 @@ static int tracing_set_tracer(const char *buf)
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS); ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1); set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer); tracing_reset_online_cpus(&tr->max_buffer);
tr->current_trace->allocated_snapshot = false; tr->allocated_snapshot = false;
} }
#else
tr->current_trace = &nop_trace;
#endif #endif
destroy_trace_option_files(topts); destroy_trace_option_files(topts);
...@@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf) ...@@ -3403,7 +3401,7 @@ static int tracing_set_tracer(const char *buf)
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
goto out; goto out;
t->allocated_snapshot = true; tr->allocated_snapshot = true;
} }
#endif #endif
...@@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -4275,13 +4273,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
if (tr->current_trace->allocated_snapshot) { if (tr->allocated_snapshot) {
/* free spare buffer */ /* free spare buffer */
ring_buffer_resize(tr->max_buffer.buffer, 1, ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS); RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1); set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer); tracing_reset_online_cpus(&tr->max_buffer);
tr->current_trace->allocated_snapshot = false; tr->allocated_snapshot = false;
} }
break; break;
case 1: case 1:
...@@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -4292,13 +4290,13 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
break; break;
} }
#endif #endif
if (!tr->current_trace->allocated_snapshot) { if (!tr->allocated_snapshot) {
/* allocate spare buffer */ /* allocate spare buffer */
ret = resize_buffer_duplicate_size(&tr->max_buffer, ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->trace_buffer, RING_BUFFER_ALL_CPUS); &tr->trace_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
break; break;
tr->current_trace->allocated_snapshot = true; tr->allocated_snapshot = true;
} }
local_irq_disable(); local_irq_disable();
/* Now, we're going to swap */ /* Now, we're going to swap */
...@@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -4309,7 +4307,7 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
local_irq_enable(); local_irq_enable();
break; break;
default: default:
if (tr->current_trace->allocated_snapshot) { if (tr->allocated_snapshot) {
if (iter->cpu_file == RING_BUFFER_ALL_CPUS) if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
tracing_reset_online_cpus(&tr->max_buffer); tracing_reset_online_cpus(&tr->max_buffer);
else else
......
...@@ -197,6 +197,7 @@ struct trace_array { ...@@ -197,6 +197,7 @@ struct trace_array {
* the trace_buffer so the tracing can continue. * the trace_buffer so the tracing can continue.
*/ */
struct trace_buffer max_buffer; struct trace_buffer max_buffer;
bool allocated_snapshot;
#endif #endif
int buffer_disabled; int buffer_disabled;
struct trace_cpu trace_cpu; /* place holder */ struct trace_cpu trace_cpu; /* place holder */
...@@ -367,7 +368,6 @@ struct tracer { ...@@ -367,7 +368,6 @@ struct tracer {
bool enabled; bool enabled;
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
bool use_max_tr; bool use_max_tr;
bool allocated_snapshot;
#endif #endif
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment