Commit d7b629a3 authored by Peter Zijlstra, committed by Ingo Molnar

perf_counter: Solve the rotate_ctx vs inherit race differently

Instead of disabling RR scheduling of the counters, use a different list
that does not get rotated to iterate the counters on inheritance.

[ Impact: cleanup, optimization ]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.237504544@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c44d70a3
...@@ -508,7 +508,6 @@ struct perf_counter_context { ...@@ -508,7 +508,6 @@ struct perf_counter_context {
int nr_counters; int nr_counters;
int nr_active; int nr_active;
int is_active; int is_active;
int rr_allowed;
struct task_struct *task; struct task_struct *task;
/* /*
......
...@@ -1120,8 +1120,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu) ...@@ -1120,8 +1120,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
__perf_counter_task_sched_out(ctx); __perf_counter_task_sched_out(ctx);
rotate_ctx(&cpuctx->ctx); rotate_ctx(&cpuctx->ctx);
if (ctx->rr_allowed) rotate_ctx(ctx);
rotate_ctx(ctx);
perf_counter_cpu_sched_in(cpuctx, cpu); perf_counter_cpu_sched_in(cpuctx, cpu);
perf_counter_task_sched_in(curr, cpu); perf_counter_task_sched_in(curr, cpu);
...@@ -3109,7 +3108,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx, ...@@ -3109,7 +3108,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
mutex_init(&ctx->mutex); mutex_init(&ctx->mutex);
INIT_LIST_HEAD(&ctx->counter_list); INIT_LIST_HEAD(&ctx->counter_list);
INIT_LIST_HEAD(&ctx->event_list); INIT_LIST_HEAD(&ctx->event_list);
ctx->rr_allowed = 1;
ctx->task = task; ctx->task = task;
} }
...@@ -3350,14 +3348,14 @@ void perf_counter_init_task(struct task_struct *child) ...@@ -3350,14 +3348,14 @@ void perf_counter_init_task(struct task_struct *child)
*/ */
mutex_lock(&parent_ctx->mutex); mutex_lock(&parent_ctx->mutex);
parent_ctx->rr_allowed = 0;
barrier(); /* irqs */
/* /*
* We dont have to disable NMIs - we are only looking at * We dont have to disable NMIs - we are only looking at
* the list, not manipulating it: * the list, not manipulating it:
*/ */
list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) { list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
if (counter != counter->group_leader)
continue;
if (!counter->hw_event.inherit) if (!counter->hw_event.inherit)
continue; continue;
...@@ -3366,9 +3364,6 @@ void perf_counter_init_task(struct task_struct *child) ...@@ -3366,9 +3364,6 @@ void perf_counter_init_task(struct task_struct *child)
break; break;
} }
barrier(); /* irqs */
parent_ctx->rr_allowed = 1;
mutex_unlock(&parent_ctx->mutex); mutex_unlock(&parent_ctx->mutex);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment