Commit 2c29ef0f authored by Peter Zijlstra, committed by Ingo Molnar

perf: Simplify and fix __perf_install_in_context()

Currently __perf_install_in_context() will try and schedule in the
event irrespective of our event scheduling rules, that is, we try to
schedule CPU-pinned, TASK-pinned, CPU-flexible, TASK-flexible, but
when creating a new event we simply try and schedule it on top of
whatever is already on the PMU, this can lead to errors for pinned
events.

Therefore, simplify things and simply schedule everything out, add the
event to the corresponding context and schedule everything back in.

This also nicely handles the case where, with
__ARCH_WANT_INTERRUPTS_ON_CTXSW, the IPI can come right in the middle
of schedule(), before we managed to call perf_event_task_sched_in().
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.870894224@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 04dc2dbb
...@@ -1469,8 +1469,12 @@ static void add_event_to_ctx(struct perf_event *event, ...@@ -1469,8 +1469,12 @@ static void add_event_to_ctx(struct perf_event *event,
event->tstamp_stopped = tstamp; event->tstamp_stopped = tstamp;
} }
static void perf_event_context_sched_in(struct perf_event_context *ctx, static void task_ctx_sched_out(struct perf_event_context *ctx);
struct task_struct *tsk); static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
/* /*
* Cross CPU call to install and enable a performance event * Cross CPU call to install and enable a performance event
...@@ -1481,20 +1485,31 @@ static int __perf_install_in_context(void *info) ...@@ -1481,20 +1485,31 @@ static int __perf_install_in_context(void *info)
{ {
struct perf_event *event = info; struct perf_event *event = info;
struct perf_event_context *ctx = event->ctx; struct perf_event_context *ctx = event->ctx;
struct perf_event *leader = event->group_leader;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
int err; struct perf_event_context *task_ctx = cpuctx->task_ctx;
struct task_struct *task = current;
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
/* /*
* In case we're installing a new context to an already running task, * If there was an active task_ctx schedule it out.
* could also happen before perf_event_task_sched_in() on architectures
* which do context switches with IRQs enabled.
*/ */
if (ctx->task && !cpuctx->task_ctx) if (task_ctx) {
perf_event_context_sched_in(ctx, ctx->task); task_ctx_sched_out(task_ctx);
/*
* If the context we're installing events in is not the
* active task_ctx, flip them.
*/
if (ctx->task && task_ctx != ctx) {
raw_spin_unlock(&cpuctx->ctx.lock);
raw_spin_lock(&ctx->lock);
cpuctx->task_ctx = task_ctx = ctx;
}
task = task_ctx->task;
}
cpu_ctx_sched_out(cpuctx, EVENT_ALL);
raw_spin_lock(&ctx->lock);
ctx->is_active = 1;
update_context_time(ctx); update_context_time(ctx);
/* /*
* update cgrp time only if current cgrp * update cgrp time only if current cgrp
...@@ -1505,43 +1520,18 @@ static int __perf_install_in_context(void *info) ...@@ -1505,43 +1520,18 @@ static int __perf_install_in_context(void *info)
add_event_to_ctx(event, ctx); add_event_to_ctx(event, ctx);
if (!event_filter_match(event))
goto unlock;
/*
* Don't put the event on if it is disabled or if
* it is in a group and the group isn't on.
*/
if (event->state != PERF_EVENT_STATE_INACTIVE ||
(leader != event && leader->state != PERF_EVENT_STATE_ACTIVE))
goto unlock;
/* /*
* An exclusive event can't go on if there are already active * Schedule everything back in
* hardware events, and no hardware event can go on if there
* is already an exclusive event on.
*/ */
if (!group_can_go_on(event, cpuctx, 1)) cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
err = -EEXIST; if (task_ctx)
else ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
err = event_sched_in(event, cpuctx, ctx); cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
if (task_ctx)
if (err) { ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
/*
* This event couldn't go on. If it is in a group
* then we have to pull the whole group off.
* If the event group is pinned then put it in error state.
*/
if (leader != event)
group_sched_out(leader, cpuctx, ctx);
if (leader->attr.pinned) {
update_group_times(leader);
leader->state = PERF_EVENT_STATE_ERROR;
}
}
unlock: perf_pmu_enable(cpuctx->ctx.pmu);
raw_spin_unlock(&ctx->lock); perf_ctx_unlock(cpuctx, task_ctx);
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment