Commit 30afdcb1 authored by Steven Rostedt, committed by Thomas Gleixner

ftrace: selftest protect against max flip

There is a slight race condition in the selftest: the wakeup and
irqs/preemption-off tracers can be doing a max update while their buffers
are being tested. If this happens, the system can crash with a GPF.

This patch adds the max update spinlock around the checking of the
buffers to prevent such a race.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent d15f57f2
@@ -82,10 +82,12 @@ trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
  */
 static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 {
-	unsigned long cnt = 0;
-	int cpu;
-	int ret = 0;
+	unsigned long flags, cnt = 0;
+	int cpu, ret = 0;
 
+	/* Don't allow flipping of max traces now */
+	raw_local_irq_save(flags);
+	__raw_spin_lock(&ftrace_max_lock);
 	for_each_possible_cpu(cpu) {
 		if (!head_page(tr->data[cpu]))
 			continue;
@@ -96,6 +98,8 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
 		if (ret)
 			break;
 	}
+	__raw_spin_unlock(&ftrace_max_lock);
+	raw_local_irq_restore(flags);
 
 	if (count)
 		*count = cnt;
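
For readers less familiar with the pattern, below is a minimal user-space sketch (illustrative only, not the kernel code) of why the check must hold the same lock as the buffer flip: a pthread mutex stands in for ftrace_max_lock, and a flipper thread stands in for the max-trace update that swaps buffers underneath the selftest. Every name in it (flip_lock, flipper, check_buffer, and so on) is a hypothetical placeholder.

/*
 * Sketch of the race being fixed: one thread flips which buffer is
 * "current" while another walks it. Both sides take the same lock, so
 * the flip can never happen in the middle of the walk -- the same idea
 * as taking ftrace_max_lock around the buffer check in the selftest.
 */
#include <pthread.h>
#include <stdio.h>

#define BUF_LEN 1024

static pthread_mutex_t flip_lock = PTHREAD_MUTEX_INITIALIZER;
static int buf_a[BUF_LEN], buf_b[BUF_LEN];
static int *current_buf = buf_a;
static int *max_buf = buf_b;

/* Stand-in for the tracer's max update: swap the current and max buffers. */
static void *flipper(void *unused)
{
	(void)unused;
	for (int i = 0; i < 100000; i++) {
		pthread_mutex_lock(&flip_lock);
		int *tmp = current_buf;
		current_buf = max_buf;
		max_buf = tmp;
		pthread_mutex_unlock(&flip_lock);
	}
	return NULL;
}

/*
 * Stand-in for trace_test_buffer(): walk the buffer with the lock held so
 * the pointer cannot be flipped out from under the walk.
 */
static long check_buffer(void)
{
	long cnt = 0;

	pthread_mutex_lock(&flip_lock);
	for (int i = 0; i < BUF_LEN; i++)
		cnt += current_buf[i];
	pthread_mutex_unlock(&flip_lock);

	return cnt;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, flipper, NULL);
	for (int i = 0; i < 1000; i++)
		check_buffer();
	pthread_join(tid, NULL);

	printf("final count: %ld\n", check_buffer());
	return 0;
}

In the kernel patch itself, the selftest additionally disables local interrupts with raw_local_irq_save() before taking the raw spinlock, presumably because the max update can be triggered from interrupt or scheduler context on the same CPU, where spinning on the lock with interrupts enabled could deadlock.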