Commit 33238c50 authored by Peter Zijlstra, committed by Ingo Molnar

perf/core: Fix event cgroup tracking

Song reports that installing cgroup events is broken since:

  db0503e4 ("perf/core: Optimize perf_install_in_context()")

The problem is that cgroup events try to track cpuctx->cgrp even
for disabled events, which is pointless and actively harmful since the
above commit. Rework the code to have explicit enable/disable hooks
for cgroup events, such that we can limit cgroup tracking to active
events.

More specifically, since the above commit, disabled events are no
longer added to their context from the 'right' CPU, and we can't
access things like the current cgroup for a remote CPU.

Cc: <stable@vger.kernel.org> # v5.5+
Fixes: db0503e4 ("perf/core: Optimize perf_install_in_context()")
Reported-by: Song Liu <songliubraving@fb.com>
Tested-by: Song Liu <songliubraving@fb.com>
Reviewed-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lkml.kernel.org/r/20200318193337.GB20760@hirez.programming.kicks-ass.net
parent f5e94d10
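
The fix has the shape of a refcounted enable/disable pair: the first
cgroup event that becomes active on a CPU publishes the tracking
state, the last one tears it down, and disabled events never touch
either side. As a rough standalone model of that discipline (the toy
types and names below are hypothetical, not the kernel's), consider:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for perf_event_context / perf_cpu_context. */
	struct toy_ctx {
		int  nr_cgroups;   /* enabled cgroup events in this context */
		bool is_active;    /* context currently scheduled in */
		bool cgrp_tracked; /* models cpuctx->cgrp being set */
	};

	/* Models perf_cgroup_event_enable(): only reached for events
	 * in a state above OFF, so disabled events never get here. */
	static void toy_cgroup_enable(struct toy_ctx *ctx)
	{
		if (ctx->is_active && !ctx->cgrp_tracked)
			ctx->cgrp_tracked = true;
		if (ctx->nr_cgroups++)
			return;
		printf("first cgroup event: add ctx to per-cpu list\n");
	}

	/* Models perf_cgroup_event_disable(): called on every
	 * transition to OFF or ERROR, keeping the count balanced. */
	static void toy_cgroup_disable(struct toy_ctx *ctx)
	{
		if (--ctx->nr_cgroups)
			return;
		if (ctx->is_active && ctx->cgrp_tracked)
			ctx->cgrp_tracked = false;
		printf("last cgroup event: del ctx from per-cpu list\n");
	}

	int main(void)
	{
		struct toy_ctx ctx = { .is_active = true };

		toy_cgroup_enable(&ctx);  /* first event: starts tracking */
		toy_cgroup_enable(&ctx);  /* second event: refcount only */
		toy_cgroup_disable(&ctx); /* one enabled event remains */
		toy_cgroup_disable(&ctx); /* last one: tracking torn down */
		assert(ctx.nr_cgroups == 0 && !ctx.cgrp_tracked);
		return 0;
	}

The point of the pairing is that the count only moves on enable and
disable, never on plain add/remove, which is what lets a disabled
event be installed from a remote CPU without consulting that CPU's
current cgroup. The actual patch follows.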
@@ -983,16 +983,10 @@ perf_cgroup_set_shadow_time(struct perf_event *event, u64 now)
 	event->shadow_ctx_time = now - t->timestamp;
 }
 
-/*
- * Update cpuctx->cgrp so that it is set when first cgroup event is added and
- * cleared when last cgroup event is removed.
- */
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-			 struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx;
-	struct list_head *cpuctx_entry;
 
 	if (!is_cgroup_event(event))
 		return;
@@ -1009,28 +1003,41 @@ list_update_cgroup_event(struct perf_event *event,
 	 * because if the first would mismatch, the second would not try again
 	 * and we would leave cpuctx->cgrp unset.
 	 */
-	if (add && !cpuctx->cgrp) {
+	if (ctx->is_active && !cpuctx->cgrp) {
 		struct perf_cgroup *cgrp = perf_cgroup_from_task(current, ctx);
 
 		if (cgroup_is_descendant(cgrp->css.cgroup, event->cgrp->css.cgroup))
 			cpuctx->cgrp = cgrp;
 	}
 
-	if (add && ctx->nr_cgroups++)
+	if (ctx->nr_cgroups++)
 		return;
-	else if (!add && --ctx->nr_cgroups)
+
+	list_add(&cpuctx->cgrp_cpuctx_entry,
+		 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
+}
+
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
+{
+	struct perf_cpu_context *cpuctx;
+
+	if (!is_cgroup_event(event))
 		return;
 
-	/* no cgroup running */
-	if (!add)
+	/*
+	 * Because cgroup events are always per-cpu events,
+	 * @ctx == &cpuctx->ctx.
+	 */
+	cpuctx = container_of(ctx, struct perf_cpu_context, ctx);
+
+	if (--ctx->nr_cgroups)
+		return;
+
+	if (ctx->is_active && cpuctx->cgrp)
 		cpuctx->cgrp = NULL;
 
-	cpuctx_entry = &cpuctx->cgrp_cpuctx_entry;
-	if (add)
-		list_add(cpuctx_entry,
-			 per_cpu_ptr(&cgrp_cpuctx_list, event->cpu));
-	else
-		list_del(cpuctx_entry);
+	list_del(&cpuctx->cgrp_cpuctx_entry);
 }
 
 #else /* !CONFIG_CGROUP_PERF */
@@ -1096,11 +1103,14 @@ static inline u64 perf_cgroup_event_time(struct perf_event *event)
 }
 
 static inline void
-list_update_cgroup_event(struct perf_event *event,
-			 struct perf_event_context *ctx, bool add)
+perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx)
+{
+}
+
+static inline void
+perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx)
 {
 }
 
 #endif
 
 /*
@@ -1791,13 +1801,14 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx)
 		add_event_to_groups(event, ctx);
 	}
 
-	list_update_cgroup_event(event, ctx, true);
-
 	list_add_rcu(&event->event_entry, &ctx->event_list);
 	ctx->nr_events++;
 	if (event->attr.inherit_stat)
 		ctx->nr_stat++;
 
+	if (event->state > PERF_EVENT_STATE_OFF)
+		perf_cgroup_event_enable(event, ctx);
+
 	ctx->generation++;
 }
@@ -1976,8 +1987,6 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 
 	event->attach_state &= ~PERF_ATTACH_CONTEXT;
 
-	list_update_cgroup_event(event, ctx, false);
-
 	ctx->nr_events--;
 	if (event->attr.inherit_stat)
 		ctx->nr_stat--;
@@ -1994,8 +2003,10 @@ list_del_event(struct perf_event *event, struct perf_event_context *ctx)
 	 * of error state is by explicit re-enabling
 	 * of the event
 	 */
-	if (event->state > PERF_EVENT_STATE_OFF)
+	if (event->state > PERF_EVENT_STATE_OFF) {
+		perf_cgroup_event_disable(event, ctx);
 		perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+	}
 
 	ctx->generation++;
 }
@@ -2226,6 +2237,7 @@ event_sched_out(struct perf_event *event,
 
 	if (READ_ONCE(event->pending_disable) >= 0) {
 		WRITE_ONCE(event->pending_disable, -1);
+		perf_cgroup_event_disable(event, ctx);
 		state = PERF_EVENT_STATE_OFF;
 	}
 	perf_event_set_state(event, state);
@@ -2363,6 +2375,7 @@ static void __perf_event_disable(struct perf_event *event,
 		event_sched_out(event, cpuctx, ctx);
 
 	perf_event_set_state(event, PERF_EVENT_STATE_OFF);
+	perf_cgroup_event_disable(event, ctx);
 }
 
 /*
@@ -2746,7 +2759,7 @@ static int __perf_install_in_context(void *info)
 	}
 
 #ifdef CONFIG_CGROUP_PERF
-	if (is_cgroup_event(event)) {
+	if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) {
 		/*
 		 * If the current cgroup doesn't match the event's
 		 * cgroup, we should not try to schedule it.
@@ -2906,6 +2919,7 @@ static void __perf_event_enable(struct perf_event *event,
 		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
 
 	perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE);
+	perf_cgroup_event_enable(event, ctx);
 
 	if (!ctx->is_active)
 		return;
@@ -3616,8 +3630,10 @@ static int merge_sched_in(struct perf_event *event, void *data)
 	}
 
 	if (event->state == PERF_EVENT_STATE_INACTIVE) {
-		if (event->attr.pinned)
+		if (event->attr.pinned) {
+			perf_cgroup_event_disable(event, ctx);
 			perf_event_set_state(event, PERF_EVENT_STATE_ERROR);
+		}
 
 		*can_add_hw = 0;
 		ctx->rotate_necessary = 1;
...
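
Read as a whole, the call sites pair the new hooks with the event's
state transitions: perf_cgroup_event_enable() runs whenever an event
enters a state above PERF_EVENT_STATE_OFF (list_add_event() for
events created enabled, __perf_event_enable() for explicit enables),
while perf_cgroup_event_disable() runs on every transition to OFF or
ERROR (list_del_event(), the pending-disable path in
event_sched_out(), __perf_event_disable(), and the pinned-event error
path in merge_sched_in()). With that invariant in place,
__perf_install_in_context() can skip the cgroup match for still-OFF
events, which is what makes remote installation safe again.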