Commit 24cd7f54 authored by Peter Zijlstra, committed by Ingo Molnar

perf: Reduce perf_disable() usage

Since the current perf_disable() usage is only an optimization,
remove it for now. This eases the removal of the __weak
hw_perf_enable() interface.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9ed6060d
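
The change applies one pattern throughout: the perf_disable()/perf_enable() bracket moves out of the generic code in kernel/perf_event.c and into each architecture's pmu callbacks, which now disable and re-enable the PMU around their own critical section. A minimal sketch of the resulting driver-side shape (illustrative only, not verbatim from any of the files below; example_pmu_enable and the failure check are made up for the sketch):

static int example_pmu_enable(struct perf_event *event)
{
        int ret = -EAGAIN;

        /* Previously the core called perf_disable() before ->enable(). */
        perf_disable();

        if (event->hw.idx < 0)          /* illustrative failure check only */
                goto out;

        /* ... pick a counter, program it ... */
        perf_event_update_userpage(event);
        ret = 0;
out:
        perf_enable();                  /* re-enabled on every exit path */
        return ret;
}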
@@ -277,6 +277,8 @@ armpmu_enable(struct perf_event *event)
         int idx;
         int err = 0;
 
+        perf_disable();
+
         /* If we don't have a space for the counter then finish early. */
         idx = armpmu->get_event_idx(cpuc, hwc);
         if (idx < 0) {
@@ -303,6 +305,7 @@ armpmu_enable(struct perf_event *event)
         perf_event_update_userpage(event);
 
 out:
+        perf_enable();
         return err;
 }
......
@@ -861,6 +861,7 @@ void power_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuhw->group_flag |= PERF_EVENT_TXN;
         cpuhw->n_txn_start = cpuhw->n_events;
 }
@@ -875,6 +876,7 @@ void power_pmu_cancel_txn(struct pmu *pmu)
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
 }
 
 /*
@@ -901,6 +903,7 @@ int power_pmu_commit_txn(struct pmu *pmu)
                 cpuhw->event[i]->hw.config = cpuhw->events[i];
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
         return 0;
 }
......
@@ -262,7 +262,7 @@ static int collect_events(struct perf_event *group, int max_count,
         return n;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static int fsl_emb_pmu_enable(struct perf_event *event)
 {
         struct cpu_hw_events *cpuhw;
@@ -271,6 +271,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
         u64 val;
         int i;
 
+        perf_disable();
         cpuhw = &get_cpu_var(cpu_hw_events);
 
         if (event->hw.config & FSL_EMB_EVENT_RESTRICTED)
@@ -310,15 +311,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
         ret = 0;
  out:
         put_cpu_var(cpu_hw_events);
+        perf_enable();
         return ret;
 }
 
-/* perf must be disabled, context locked on entry */
+/* context locked on entry */
 static void fsl_emb_pmu_disable(struct perf_event *event)
 {
         struct cpu_hw_events *cpuhw;
         int i = event->hw.idx;
 
+        perf_disable();
         if (i < 0)
                 goto out;
@@ -346,6 +349,7 @@ static void fsl_emb_pmu_disable(struct perf_event *event)
         cpuhw->n_events--;
 
  out:
+        perf_enable();
         put_cpu_var(cpu_hw_events);
 }
......
@@ -230,11 +230,14 @@ static int sh_pmu_enable(struct perf_event *event)
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
         struct hw_perf_event *hwc = &event->hw;
         int idx = hwc->idx;
+        int ret = -EAGAIN;
+
+        perf_disable();
 
         if (test_and_set_bit(idx, cpuc->used_mask)) {
                 idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
                 if (idx == sh_pmu->num_events)
-                        return -EAGAIN;
+                        goto out;
 
                 set_bit(idx, cpuc->used_mask);
                 hwc->idx = idx;
@@ -248,8 +251,10 @@ static int sh_pmu_enable(struct perf_event *event)
         sh_pmu->enable(hwc, idx);
 
         perf_event_update_userpage(event);
-
-        return 0;
+        ret = 0;
+out:
+        perf_enable();
+        return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
......
@@ -1113,6 +1113,7 @@ static void sparc_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuhw->group_flag |= PERF_EVENT_TXN;
 }
@@ -1126,6 +1127,7 @@ static void sparc_pmu_cancel_txn(struct pmu *pmu)
         struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
 
         cpuhw->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
 }
 
 /*
@@ -1149,6 +1151,7 @@ static int sparc_pmu_commit_txn(struct pmu *pmu)
                 return -EAGAIN;
 
         cpuc->group_flag &= ~PERF_EVENT_TXN;
+        perf_enable();
         return 0;
 }
......
@@ -969,10 +969,11 @@ static int x86_pmu_enable(struct perf_event *event)
 
         hwc = &event->hw;
 
+        perf_disable();
         n0 = cpuc->n_events;
-        n = collect_events(cpuc, event, false);
-        if (n < 0)
-                return n;
+        ret = n = collect_events(cpuc, event, false);
+        if (ret < 0)
+                goto out;
 
         /*
          * If group events scheduling transaction was started,
@@ -980,23 +981,26 @@ static int x86_pmu_enable(struct perf_event *event)
          * at commit time(->commit_txn) as a whole
          */
         if (cpuc->group_flag & PERF_EVENT_TXN)
-                goto out;
+                goto done_collect;
 
         ret = x86_pmu.schedule_events(cpuc, n, assign);
         if (ret)
-                return ret;
+                goto out;
         /*
          * copy new assignment, now we know it is possible
          * will be used by hw_perf_enable()
          */
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
-out:
+done_collect:
         cpuc->n_events = n;
         cpuc->n_added += n - n0;
         cpuc->n_txn += n - n0;
 
-        return 0;
+        ret = 0;
+out:
+        perf_enable();
+        return ret;
 }
 
 static int x86_pmu_start(struct perf_event *event)
@@ -1432,6 +1436,7 @@ static void x86_pmu_start_txn(struct pmu *pmu)
 {
         struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
+        perf_disable();
         cpuc->group_flag |= PERF_EVENT_TXN;
         cpuc->n_txn = 0;
 }
@@ -1451,6 +1456,7 @@ static void x86_pmu_cancel_txn(struct pmu *pmu)
          */
         cpuc->n_added -= cpuc->n_txn;
         cpuc->n_events -= cpuc->n_txn;
+        perf_enable();
 }
 
 /*
@@ -1480,7 +1486,7 @@ static int x86_pmu_commit_txn(struct pmu *pmu)
         memcpy(cpuc->assign, assign, n*sizeof(int));
 
         cpuc->group_flag &= ~PERF_EVENT_TXN;
-
+        perf_enable();
         return 0;
 }
......
@@ -564,26 +564,26 @@ struct pmu {
         struct list_head entry;
 
         /*
-         * Should return -ENOENT when the @event doesn't match this pmu
+         * Should return -ENOENT when the @event doesn't match this PMU.
          */
         int (*event_init) (struct perf_event *event);
 
         int (*enable) (struct perf_event *event);
         void (*disable) (struct perf_event *event);
         int (*start) (struct perf_event *event);
         void (*stop) (struct perf_event *event);
         void (*read) (struct perf_event *event);
         void (*unthrottle) (struct perf_event *event);
 
         /*
-         * Group events scheduling is treated as a transaction, add group
-         * events as a whole and perform one schedulability test. If the test
-         * fails, roll back the whole group
+         * Group events scheduling is treated as a transaction, add
+         * group events as a whole and perform one schedulability test.
+         * If the test fails, roll back the whole group
          */
 
         /*
-         * Start the transaction, after this ->enable() doesn't need
-         * to do schedulability tests.
+         * Start the transaction, after this ->enable() doesn't need to
+         * do schedulability tests.
          */
         void (*start_txn) (struct pmu *pmu);
         /*
@@ -594,8 +594,8 @@ struct pmu {
          */
         int (*commit_txn) (struct pmu *pmu);
         /*
-         * Will cancel the transaction, assumes ->disable() is called for
-         * each successfull ->enable() during the transaction.
+         * Will cancel the transaction, assumes ->disable() is called
+         * for each successfull ->enable() during the transaction.
          */
         void (*cancel_txn) (struct pmu *pmu);
 };
......
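
The transaction hooks documented above are what the arch changes in this commit pair with perf_disable()/perf_enable(): the core drives them roughly as below when scheduling an event group (a simplified sketch of the calling sequence, not the literal group_sched_in() code; sketch_group_sched_in is a made-up name and the rollback loop is elided):

static int sketch_group_sched_in(struct perf_event *leader, struct pmu *pmu)
{
        struct perf_event *event;

        pmu->start_txn(pmu);    /* ->enable() may now skip schedulability tests */

        if (pmu->enable(leader))
                goto fail;

        list_for_each_entry(event, &leader->sibling_list, group_entry) {
                if (pmu->enable(event))
                        goto fail;
        }

        if (!pmu->commit_txn(pmu))      /* one schedulability test for the whole group */
                return 0;
fail:
        /* ->disable() each event that was successfully enabled (elided), then: */
        pmu->cancel_txn(pmu);
        return -EAGAIN;
}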
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
                 return;
 
         raw_spin_lock(&ctx->lock);
-        /*
-         * Protect the list operation against NMI by disabling the
-         * events on a global level.
-         */
-        perf_disable();
 
         event_sched_out(event, cpuctx, ctx);
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
                             perf_max_events - perf_reserved_percpu);
         }
 
-        perf_enable();
         raw_spin_unlock(&ctx->lock);
 }
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
         ctx->is_active = 1;
         update_context_time(ctx);
 
-        /*
-         * Protect the list operation against NMI by disabling the
-         * events on a global level. NOP for non NMI based events.
-         */
-        perf_disable();
-
         add_event_to_ctx(event, ctx);
 
         if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
                 cpuctx->max_pertask--;
 
 unlock:
-        perf_enable();
-
         raw_spin_unlock(&ctx->lock);
 }
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
         if (!group_can_go_on(event, cpuctx, 1)) {
                 err = -EEXIST;
         } else {
-                perf_disable();
                 if (event == leader)
                         err = group_sched_in(event, cpuctx, ctx);
                 else
                         err = event_sched_in(event, cpuctx, ctx);
-                perf_enable();
         }
 
         if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                 goto out;
 
         update_context_time(ctx);
-        perf_disable();
         if (!ctx->nr_active)
-                goto out_enable;
+                goto out;
 
         if (event_type & EVENT_PINNED) {
                 list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
                 list_for_each_entry(event, &ctx->flexible_groups, group_entry)
                         group_sched_out(event, cpuctx, ctx);
         }
-
- out_enable:
-        perf_enable();
 out:
         raw_spin_unlock(&ctx->lock);
 }
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
         ctx->timestamp = perf_clock();
 
-        perf_disable();
-
         /*
          * First go through the list and put on any pinned groups
          * in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
         if (event_type & EVENT_FLEXIBLE)
                 ctx_flexible_sched_in(ctx, cpuctx);
 
-        perf_enable();
 out:
         raw_spin_unlock(&ctx->lock);
 }
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
         if (cpuctx->task_ctx == ctx)
                 return;
 
-        perf_disable();
-
         /*
          * We want to keep the following priority order:
          * cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
         ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
         cpuctx->task_ctx = ctx;
-
-        perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
         hwc->sample_period = sample_period;
 
         if (local64_read(&hwc->period_left) > 8*sample_period) {
-                perf_disable();
                 perf_event_stop(event);
                 local64_set(&hwc->period_left, 0);
                 perf_event_start(event);
-                perf_enable();
         }
 }
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                  */
                 if (interrupts == MAX_INTERRUPTS) {
                         perf_log_throttle(event, 1);
-                        perf_disable();
                         event->pmu->unthrottle(event);
-                        perf_enable();
                 }
 
                 if (!event->attr.freq || !event->attr.sample_freq)
                         continue;
 
-                perf_disable();
                 event->pmu->read(event);
                 now = local64_read(&event->count);
                 delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
                 if (delta > 0)
                         perf_adjust_period(event, TICK_NSEC, delta);
-                perf_enable();
         }
 
         raw_spin_unlock(&ctx->lock);
 }
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
         if (!rotate)
                 return;
 
-        perf_disable();
         cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
         cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
         if (ctx)
                 task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-        perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
......