Commit 0b8f1e2e authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Fix sideband list-iteration vs. event ordering NULL pointer dereference crash

Vegard Nossum reported that perf fuzzing generates a NULL
pointer dereference crash:

> Digging a bit deeper into this, it seems the event itself is getting
> created by perf_event_open() and it gets added to the pmu_event_list
> through:
>
> perf_event_open()
>  - perf_event_alloc()
>     - account_event()
>        - account_pmu_sb_event()
>           - attach_sb_event()
>
> so at this point the event is being attached but its ->ctx is still
> NULL. It seems like ->ctx is set just a bit later in
> perf_event_open(), though.
>
> But before that, __schedule() comes along and creates a stack trace
> similar to the one above:
>
> __schedule()
>  - __perf_event_task_sched_out()
>    - perf_iterate_sb()
>      - perf_iterate_sb_cpu()
>         - event_filter_match()
>           - perf_cgroup_match()
>             - __get_cpu_context()
>               - (dereference ctx which is NULL)
>
> So I guess the question is... should the event be attached (= put on
> the list) before ->ctx gets set? Or should the cgroup code check for a
> NULL ->ctx?

The latter seems like the simplest solution. Moving the list-add later
creates a bit of a mess.
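
The fix below therefore pairs smp_store_release() in perf_install_in_context()
with smp_load_acquire() in perf_iterate_sb_cpu(): an iterator that observes a
non-NULL event->ctx is guaranteed to also observe a sufficiently initialised
event, and a NULL ->ctx simply means "not published yet, skip it". As a rough
illustration of that publication pattern, here is a userspace sketch that uses
C11 atomics in place of the kernel barriers; the struct and field names are
made up for the example and are not the perf internals:

  /* Sketch only: C11 atomics stand in for smp_store_release()/smp_load_acquire(). */
  #include <stdatomic.h>
  #include <stddef.h>
  #include <stdio.h>

  struct ctx { int cpu; };                  /* illustrative stand-in for a context */

  struct event {
          int state;                        /* initialised before publication */
          _Atomic(struct ctx *) ctx;        /* published last, with release semantics */
  };

  /* Publisher: fill in the event, then make it visible with one release store. */
  static void install(struct event *e, struct ctx *c)
  {
          e->state = 1;                     /* plain stores first ...           */
          atomic_store_explicit(&e->ctx, c, /* ... then publish: anyone who     */
                                memory_order_release); /* sees ->ctx sees state */
  }

  /* Consumer: acquire load of ->ctx; NULL means "not fully formed yet", so skip. */
  static void iterate(struct event *e)
  {
          struct ctx *c = atomic_load_explicit(&e->ctx, memory_order_acquire);

          if (!c)
                  return;                   /* event not published yet; skip it */
          printf("state=%d cpu=%d\n", e->state, c->cpu); /* state is now visible */
  }

  int main(void)
  {
          static struct ctx c = { .cpu = 0 };
          static struct event e;            /* zero-initialised: ->ctx starts NULL */

          iterate(&e);                      /* skipped: not yet published */
          install(&e, &c);
          iterate(&e);                      /* now observes a complete event */
          return 0;
  }

With this pairing the list-add in attach_sb_event() can stay where it is;
sideband iteration simply skips entries whose ->ctx has not been published yet.
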
Reported-by: Vegard Nossum <vegard.nossum@gmail.com>
Tested-by: Vegard Nossum <vegard.nossum@gmail.com>
Tested-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: f2fb6bef ("perf/core: Optimize side-band event delivery")
Link: http://lkml.kernel.org/r/20160804123724.GN6862@twins.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 69766c40
@@ -1716,8 +1716,8 @@ static inline int pmu_filter_match(struct perf_event *event)
 static inline int
 event_filter_match(struct perf_event *event)
 {
-        return (event->cpu == -1 || event->cpu == smp_processor_id())
-            && perf_cgroup_match(event) && pmu_filter_match(event);
+        return (event->cpu == -1 || event->cpu == smp_processor_id()) &&
+               perf_cgroup_match(event) && pmu_filter_match(event);
 }
 
 static void
@@ -1737,8 +1737,8 @@ event_sched_out(struct perf_event *event,
          * maintained, otherwise bogus information is return
          * via read() for time_enabled, time_running:
          */
-        if (event->state == PERF_EVENT_STATE_INACTIVE
-            && !event_filter_match(event)) {
+        if (event->state == PERF_EVENT_STATE_INACTIVE &&
+            !event_filter_match(event)) {
                 delta = tstamp - event->tstamp_stopped;
                 event->tstamp_running += delta;
                 event->tstamp_stopped = tstamp;
@@ -2236,10 +2236,15 @@ perf_install_in_context(struct perf_event_context *ctx,
         lockdep_assert_held(&ctx->mutex);
 
-        event->ctx = ctx;
         if (event->cpu != -1)
                 event->cpu = cpu;
 
+        /*
+         * Ensures that if we can observe event->ctx, both the event and ctx
+         * will be 'complete'. See perf_iterate_sb_cpu().
+         */
+        smp_store_release(&event->ctx, ctx);
+
         if (!task) {
                 cpu_function_call(cpu, __perf_install_in_context, event);
                 return;
@@ -5969,6 +5974,14 @@ static void perf_iterate_sb_cpu(perf_iterate_f output, void *data)
         struct perf_event *event;
 
         list_for_each_entry_rcu(event, &pel->list, sb_list) {
+                /*
+                 * Skip events that are not fully formed yet; ensure that
+                 * if we observe event->ctx, both event and ctx will be
+                 * complete enough. See perf_install_in_context().
+                 */
+                if (!smp_load_acquire(&event->ctx))
+                        continue;
+
                 if (event->state < PERF_EVENT_STATE_INACTIVE)
                         continue;
                 if (!event_filter_match(event))
...