Commit 3209cff4 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

tracing: Add alloc/free_snapshot() to replace duplicate code

Add alloc_snapshot() and free_snapshot() to allocate and free the
snapshot buffer respectively, and use these to remove duplicate
code.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent e1df4cb6
...@@ -149,14 +149,14 @@ static int __init set_ftrace_dump_on_oops(char *str) ...@@ -149,14 +149,14 @@ static int __init set_ftrace_dump_on_oops(char *str)
} }
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops); __setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
static int __init alloc_snapshot(char *str) static int __init boot_alloc_snapshot(char *str)
{ {
allocate_snapshot = true; allocate_snapshot = true;
/* We also need the main ring buffer expanded */ /* We also need the main ring buffer expanded */
ring_buffer_expanded = true; ring_buffer_expanded = true;
return 1; return 1;
} }
__setup("alloc_snapshot", alloc_snapshot); __setup("alloc_snapshot", boot_alloc_snapshot);
static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata; static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
...@@ -470,6 +470,38 @@ EXPORT_SYMBOL_GPL(tracing_snapshot); ...@@ -470,6 +470,38 @@ EXPORT_SYMBOL_GPL(tracing_snapshot);
static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf, static int resize_buffer_duplicate_size(struct trace_buffer *trace_buf,
struct trace_buffer *size_buf, int cpu_id); struct trace_buffer *size_buf, int cpu_id);
static void set_buffer_entries(struct trace_buffer *buf, unsigned long val);
/*
 * alloc_snapshot - ensure @tr has a spare (max) snapshot buffer.
 *
 * Idempotent: if the snapshot buffer was already allocated, this is a
 * no-op. Otherwise the max buffer is resized to mirror the main trace
 * buffer on all CPUs. Returns 0 on success or a negative errno from
 * resize_buffer_duplicate_size() on failure.
 */
static int alloc_snapshot(struct trace_array *tr)
{
	int err;

	/* Nothing to do if the spare buffer already exists. */
	if (tr->allocated_snapshot)
		return 0;

	/* allocate spare buffer */
	err = resize_buffer_duplicate_size(&tr->max_buffer,
			&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
	if (err < 0)
		return err;

	tr->allocated_snapshot = true;
	return 0;
}
/*
 * free_snapshot - release the spare (max) snapshot buffer of @tr.
 *
 * Counterpart of alloc_snapshot(). Marks the snapshot as no longer
 * allocated so a later alloc_snapshot() will re-expand it on demand.
 */
void free_snapshot(struct trace_array *tr)
{
	/*
	 * We don't free the ring buffer. Instead, we resize it because
	 * the max_tr ring buffer has some state (e.g. ring->clock) and
	 * we want to preserve it.
	 */
	ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
	/* Shrink the per-CPU entry accounting to match the 1-page size. */
	set_buffer_entries(&tr->max_buffer, 1);
	/* Discard any stale snapshot contents. */
	tracing_reset_online_cpus(&tr->max_buffer);
	tr->allocated_snapshot = false;
}
/** /**
* trace_snapshot_alloc - allocate and take a snapshot of the current buffer. * trace_snapshot_alloc - allocate and take a snapshot of the current buffer.
...@@ -487,16 +519,9 @@ void tracing_snapshot_alloc(void) ...@@ -487,16 +519,9 @@ void tracing_snapshot_alloc(void)
struct trace_array *tr = &global_trace; struct trace_array *tr = &global_trace;
int ret; int ret;
if (!tr->allocated_snapshot) { ret = alloc_snapshot(tr);
if (WARN_ON(ret < 0))
/* allocate spare buffer */ return;
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
if (WARN_ON(ret < 0))
return;
tr->allocated_snapshot = true;
}
tracing_snapshot(); tracing_snapshot();
} }
...@@ -3581,15 +3606,7 @@ static int tracing_set_tracer(const char *buf) ...@@ -3581,15 +3606,7 @@ static int tracing_set_tracer(const char *buf)
* so a synchronized_sched() is sufficient. * so a synchronized_sched() is sufficient.
*/ */
synchronize_sched(); synchronize_sched();
/* free_snapshot(tr);
* We don't free the ring buffer. instead, resize it because
* The max_tr ring buffer has some state (e.g. ring->clock) and
* we want preserve it.
*/
ring_buffer_resize(tr->max_buffer.buffer, 1, RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
tr->allocated_snapshot = false;
} }
#endif #endif
destroy_trace_option_files(topts); destroy_trace_option_files(topts);
...@@ -3598,12 +3615,9 @@ static int tracing_set_tracer(const char *buf) ...@@ -3598,12 +3615,9 @@ static int tracing_set_tracer(const char *buf)
#ifdef CONFIG_TRACER_MAX_TRACE #ifdef CONFIG_TRACER_MAX_TRACE
if (t->use_max_tr && !had_max_tr) { if (t->use_max_tr && !had_max_tr) {
/* we need to make per cpu buffer sizes equivalent */ ret = alloc_snapshot(tr);
ret = resize_buffer_duplicate_size(&tr->max_buffer, &tr->trace_buffer,
RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
goto out; goto out;
tr->allocated_snapshot = true;
} }
#endif #endif
...@@ -4475,14 +4489,8 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -4475,14 +4489,8 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
ret = -EINVAL; ret = -EINVAL;
break; break;
} }
if (tr->allocated_snapshot) { if (tr->allocated_snapshot)
/* free spare buffer */ free_snapshot(tr);
ring_buffer_resize(tr->max_buffer.buffer, 1,
RING_BUFFER_ALL_CPUS);
set_buffer_entries(&tr->max_buffer, 1);
tracing_reset_online_cpus(&tr->max_buffer);
tr->allocated_snapshot = false;
}
break; break;
case 1: case 1:
/* Only allow per-cpu swap if the ring buffer supports it */ /* Only allow per-cpu swap if the ring buffer supports it */
...@@ -4493,12 +4501,9 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt, ...@@ -4493,12 +4501,9 @@ tracing_snapshot_write(struct file *filp, const char __user *ubuf, size_t cnt,
} }
#endif #endif
if (!tr->allocated_snapshot) { if (!tr->allocated_snapshot) {
/* allocate spare buffer */ ret = alloc_snapshot(tr);
ret = resize_buffer_duplicate_size(&tr->max_buffer,
&tr->trace_buffer, RING_BUFFER_ALL_CPUS);
if (ret < 0) if (ret < 0)
break; break;
tr->allocated_snapshot = true;
} }
local_irq_disable(); local_irq_disable();
/* Now, we're going to swap */ /* Now, we're going to swap */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment