Commit fd7d5517 authored by Ian Rogers, committed by Ingo Molnar

perf/cgroups: Don't rotate events for cgroups unnecessarily

Currently perf_rotate_context() assumes that if the context's nr_events !=
nr_active, a rotation is necessary for perf event multiplexing. With
cgroups, nr_events is the total count of events for all cgroups, while
nr_active does not include events in a cgroup other than the current
task's. This makes rotation appear necessary for cgroups when it is not.

Add a perf_event_context flag that is set when rotation is necessary.
Clear the flag during sched_out and set it when a flexible sched_in
fails due to resources.
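
As an illustration (not part of the kernel patch: struct ctx and the
helpers below are simplified userspace stand-ins for perf_event_context
and the scheduling paths), a minimal sketch of the old heuristic versus
the new flag protocol:

  /* Simplified model: not kernel code. */
  #include <stdbool.h>
  #include <stdio.h>

  struct ctx {
          int nr_events;        /* all events, including other cgroups' */
          int nr_active;        /* events currently on the PMU */
          int rotate_necessary; /* the flag added by this patch */
  };

  /* Old heuristic: spurious when nr_events counts every cgroup. */
  static bool need_rotate_old(const struct ctx *c)
  {
          return c->nr_events && c->nr_events != c->nr_active;
  }

  /* New protocol: set only when a flexible group fails to schedule... */
  static void flexible_sched_in_failed(struct ctx *c)
  {
          c->rotate_necessary = 1;
  }

  /* ...and cleared once all events are scheduled out. */
  static void sched_out_all(struct ctx *c)
  {
          c->nr_active = 0;
          c->rotate_necessary = 0;
  }

  int main(void)
  {
          /* 4 events total, but only 1 belongs to the current task's
           * cgroup, and it fits on the PMU. */
          struct ctx c = { .nr_events = 4, .nr_active = 1,
                           .rotate_necessary = 0 };

          printf("old heuristic rotates: %d\n", need_rotate_old(&c)); /* 1 */
          printf("new flag rotates:      %d\n", c.rotate_necessary);  /* 0 */

          flexible_sched_in_failed(&c);
          printf("after failed sched_in: %d\n", c.rotate_necessary);  /* 1 */
          sched_out_all(&c);
          printf("after sched_out:       %d\n", c.rotate_necessary);  /* 0 */
          return 0;
  }

Here the old check reports a rotation because of the inactive cgroup
events, even though the only schedulable event fit on the PMU; the flag
only trips on a genuine scheduling failure.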
Signed-off-by: Ian Rogers <irogers@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: https://lkml.kernel.org/r/20190601082722.44543-1-irogers@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 637d97b5
include/linux/perf_event.h
@@ -749,6 +749,11 @@ struct perf_event_context {
 	int			nr_stat;
 	int			nr_freq;
 	int			rotate_disable;
+	/*
+	 * Set when nr_events != nr_active, except tolerant to events not
+	 * necessary to be active due to scheduling constraints, such as cgroups.
+	 */
+	int			rotate_necessary;
 	refcount_t		refcount;
 	struct task_struct	*task;
kernel/events/core.c
@@ -2952,6 +2952,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	if (!ctx->nr_active || !(is_active & EVENT_ALL))
 		return;

+	/*
+	 * If we had been multiplexing, no rotations are necessary, now no events
+	 * are active.
+	 */
+	ctx->rotate_necessary = 0;
+
 	perf_pmu_disable(ctx->pmu);
 	if (is_active & EVENT_PINNED) {
 		list_for_each_entry_safe(event, tmp, &ctx->pinned_active, active_list)
@@ -3319,10 +3325,13 @@ static int flexible_sched_in(struct perf_event *event, void *data)
 		return 0;

 	if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
-		if (!group_sched_in(event, sid->cpuctx, sid->ctx))
-			list_add_tail(&event->active_list, &sid->ctx->flexible_active);
-		else
+		int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+		if (ret) {
 			sid->can_add_hw = 0;
+			sid->ctx->rotate_necessary = 1;
+			return 0;
+		}
+		list_add_tail(&event->active_list, &sid->ctx->flexible_active);
 	}

 	return 0;
@@ -3690,24 +3699,17 @@ ctx_first_active(struct perf_event_context *ctx)
 static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event *cpu_event = NULL, *task_event = NULL;
-	bool cpu_rotate = false, task_rotate = false;
-	struct perf_event_context *ctx = NULL;
+	struct perf_event_context *task_ctx = NULL;
+	int cpu_rotate, task_rotate;

 	/*
 	 * Since we run this from IRQ context, nobody can install new
 	 * events, thus the event count values are stable.
 	 */

-	if (cpuctx->ctx.nr_events) {
-		if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
-			cpu_rotate = true;
-	}
-
-	ctx = cpuctx->task_ctx;
-	if (ctx && ctx->nr_events) {
-		if (ctx->nr_events != ctx->nr_active)
-			task_rotate = true;
-	}
+	cpu_rotate = cpuctx->ctx.rotate_necessary;
+	task_ctx = cpuctx->task_ctx;
+	task_rotate = task_ctx ? task_ctx->rotate_necessary : 0;

 	if (!(cpu_rotate || task_rotate))
 		return false;
@@ -3716,7 +3718,7 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 	perf_pmu_disable(cpuctx->ctx.pmu);

 	if (task_rotate)
-		task_event = ctx_first_active(ctx);
+		task_event = ctx_first_active(task_ctx);
 	if (cpu_rotate)
 		cpu_event = ctx_first_active(&cpuctx->ctx);
@@ -3724,17 +3726,17 @@ static bool perf_rotate_context(struct perf_cpu_context *cpuctx)
 	 * As per the order given at ctx_resched() first 'pop' task flexible
 	 * and then, if needed CPU flexible.
 	 */
-	if (task_event || (ctx && cpu_event))
-		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+	if (task_event || (task_ctx && cpu_event))
+		ctx_sched_out(task_ctx, cpuctx, EVENT_FLEXIBLE);
 	if (cpu_event)
 		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

 	if (task_event)
-		rotate_ctx(ctx, task_event);
+		rotate_ctx(task_ctx, task_event);
 	if (cpu_event)
 		rotate_ctx(&cpuctx->ctx, cpu_event);

-	perf_event_sched_in(cpuctx, ctx, current);
+	perf_event_sched_in(cpuctx, task_ctx, current);

 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, cpuctx->task_ctx);