Commit 5862cc57 authored by Peter Zijlstra, committed by Greg Kroah-Hartman

perf: Fix PERF_EVENT_IOC_PERIOD migration race

commit c7999c6f upstream.

I ran the perf fuzzer, which triggered some WARN()s which are due to
trying to stop/restart an event on the wrong CPU.

Use the normal IPI pattern to ensure we run the code on the correct CPU.
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Fixes: bad7192b ("perf: Fix PERF_EVENT_IOC_PERIOD to force-reset the period")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent cf766f63
...@@ -3562,28 +3562,21 @@ static void perf_event_for_each(struct perf_event *event, ...@@ -3562,28 +3562,21 @@ static void perf_event_for_each(struct perf_event *event,
mutex_unlock(&ctx->mutex); mutex_unlock(&ctx->mutex);
} }
static int perf_event_period(struct perf_event *event, u64 __user *arg) struct period_event {
{ struct perf_event *event;
struct perf_event_context *ctx = event->ctx;
int ret = 0, active;
u64 value; u64 value;
};
if (!is_sampling_event(event)) static int __perf_event_period(void *info)
return -EINVAL; {
struct period_event *pe = info;
if (copy_from_user(&value, arg, sizeof(value))) struct perf_event *event = pe->event;
return -EFAULT; struct perf_event_context *ctx = event->ctx;
u64 value = pe->value;
if (!value) bool active;
return -EINVAL;
raw_spin_lock_irq(&ctx->lock); raw_spin_lock(&ctx->lock);
if (event->attr.freq) { if (event->attr.freq) {
if (value > sysctl_perf_event_sample_rate) {
ret = -EINVAL;
goto unlock;
}
event->attr.sample_freq = value; event->attr.sample_freq = value;
} else { } else {
event->attr.sample_period = value; event->attr.sample_period = value;
...@@ -3602,11 +3595,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg) ...@@ -3602,11 +3595,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
event->pmu->start(event, PERF_EF_RELOAD); event->pmu->start(event, PERF_EF_RELOAD);
perf_pmu_enable(ctx->pmu); perf_pmu_enable(ctx->pmu);
} }
raw_spin_unlock(&ctx->lock);
unlock: return 0;
}
/*
 * Update a sampling event's period (or frequency) from user space
 * (the PERF_EVENT_IOC_PERIOD ioctl).
 *
 * The actual update (__perf_event_period) must run on the CPU the event
 * is scheduled on, otherwise the pmu->stop()/start() calls operate on
 * the wrong CPU's PMU state.  Use the normal IPI pattern: cross-call the
 * event's CPU (or the owning task's current CPU) and retry if the
 * context migrates between the cross call and taking ctx->lock.
 *
 * Returns 0 on success, -EINVAL for non-sampling events / zero or
 * over-limit values, -EFAULT if the user pointer is bad.
 */
static int perf_event_period(struct perf_event *event, u64 __user *arg)
{
	struct period_event pe = { .event = event, };
	struct perf_event_context *ctx = event->ctx;
	struct task_struct *task;
	u64 value;

	/* Only sampling events have a period/frequency to adjust. */
	if (!is_sampling_event(event))
		return -EINVAL;

	if (copy_from_user(&value, arg, sizeof(value)))
		return -EFAULT;

	if (!value)
		return -EINVAL;

	/* Range-check up front so the remote callback cannot fail. */
	if (event->attr.freq && value > sysctl_perf_event_sample_rate)
		return -EINVAL;

	task = ctx->task;
	pe.value = value;

	if (!task) {
		/* CPU-bound event: run the update on that CPU via IPI. */
		cpu_function_call(event->cpu, __perf_event_period, &pe);
		return 0;
	}

retry:
	/* Task-bound event: run the update on whichever CPU runs the task. */
	if (!task_function_call(task, __perf_event_period, &pe))
		return 0;

	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		/*
		 * The context was scheduled in (possibly on another CPU)
		 * between the cross call and taking ctx->lock — re-read the
		 * owning task and retry the cross call.
		 */
		raw_spin_unlock_irq(&ctx->lock);
		task = ctx->task;
		goto retry;
	}

	/* Context is inactive and ctx->lock is held: safe to update locally. */
	__perf_event_period(&pe);

	raw_spin_unlock_irq(&ctx->lock);
	return 0;
}
static const struct file_operations perf_fops; static const struct file_operations perf_fops;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment