Commit 7995888f authored by Ingo Molnar

perfcounters: tweak group scheduling

Impact: schedule in groups atomically

If there are multiple groups in a task, make sure they are scheduled
in and out atomically.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8fb93313
@@ -367,21 +367,26 @@ counter_sched_in(struct perf_counter *counter,
 	ctx->nr_active++;
 }
 
-static void
+static int
 group_sched_in(struct perf_counter *group_counter,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx,
 	       int cpu)
 {
 	struct perf_counter *counter;
+	int was_group = 0;
 
 	counter_sched_in(group_counter, cpuctx, ctx, cpu);
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
 		counter_sched_in(counter, cpuctx, ctx, cpu);
+		was_group = 1;
+	}
+
+	return was_group;
 }
 
 /*
@@ -416,7 +421,12 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		group_sched_in(counter, cpuctx, ctx, cpu);
+		/*
+		 * If we scheduled in a group atomically and
+		 * exclusively, break out:
+		 */
+		if (group_sched_in(counter, cpuctx, ctx, cpu))
+			break;
 	}
 	spin_unlock(&ctx->lock);
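The change, in short: group_sched_in() now returns nonzero when it scheduled a leader together with at least one sibling, and perf_counter_task_sched_in() stops iterating over further groups as soon as that happens, so a whole group goes onto the PMU as one unit. The standalone C sketch below illustrates only that control-flow pattern; toy_group, its fields, and the printf stubs are simplified stand-ins invented for illustration, not the kernel's actual types or helpers.

#include <stdio.h>

/* Simplified stand-in for a counter group: a leader plus N siblings. */
struct toy_group {
	const char *name;
	int nr_siblings;
};

/*
 * Mirrors the new contract of group_sched_in(): schedule the leader,
 * then the siblings as one unit, and return nonzero if this was a
 * real group (it had siblings), so the caller can break out.
 */
static int toy_group_sched_in(const struct toy_group *g)
{
	int was_group = 0;
	int i;

	printf("sched in leader of %s\n", g->name);
	for (i = 0; i < g->nr_siblings; i++) {
		printf("  sched in sibling %d of %s\n", i, g->name);
		was_group = 1;
	}
	return was_group;
}

int main(void)
{
	const struct toy_group groups[] = {
		{ "cycles-group", 2 },	/* leader plus two siblings */
		{ "faults",       0 },	/* lone counter, no siblings */
	};
	size_t i;

	for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
		/* Stop once a full group has been scheduled in atomically. */
		if (toy_group_sched_in(&groups[i]))
			break;
	}
	return 0;
}

Run as-is, this schedules cycles-group's leader and both siblings, then breaks before touching faults, which is exactly the behavior the new return value buys in the patched loop.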