Commit 447a194b authored by Stephane Eranian, committed by Ingo Molnar

perf_events, x86: Fix bug in hw_perf_enable()

We cannot assume that because hwc->idx == assign[i], we can avoid
reprogramming the counter in hw_perf_enable().

The event may have been scheduled out and another event may have been
programmed into this counter. Thus, we need a more robust way of
verifying if the counter still contains config/data related to an event.

This patch adds a generation number to each counter on each CPU. Using
this mechanism we can reliably verify whether the content of a counter
still corresponds to an event.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <4b66dc67.0b38560a.1635.ffffae18@mx.google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent fce877e3
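For readers less familiar with the perf_events code, here is a minimal, self-contained user-space sketch of the generation-tag idea described in the commit message. All names in the sketch are hypothetical and only mirror the kernel structures; the authoritative code is in the diff below. The idea: a per-counter tag is bumped every time an event is programmed into that counter, and an event may skip reprogramming only if the counter index, the CPU, and the tag all still match what it recorded last time.

/*
 * Hypothetical stand-alone illustration, not kernel code: per-counter
 * generation tags detect whether another event has reused a counter.
 */
#include <stdio.h>

#define NR_COUNTERS 4

struct counter_state {
	unsigned long long tags[NR_COUNTERS];	/* one generation number per counter */
};

struct event_state {
	int idx;				/* counter the event last used */
	int last_cpu;				/* CPU it was last programmed on */
	unsigned long long last_tag;		/* generation observed at that time */
};

/* Program an event into counter @i on @cpu and bump that counter's generation. */
static void assign_event(struct event_state *ev, struct counter_state *cs,
			 int cpu, int i)
{
	ev->idx = i;
	ev->last_cpu = cpu;
	ev->last_tag = ++cs->tags[i];
}

/* True only if no other event has been programmed into counter @i since @ev. */
static int still_owns_counter(const struct event_state *ev,
			      const struct counter_state *cs, int cpu, int i)
{
	return ev->idx == i &&
	       ev->last_cpu == cpu &&
	       ev->last_tag == cs->tags[i];
}

int main(void)
{
	struct counter_state cs = { { 0 } };
	struct event_state a, b;

	assign_event(&a, &cs, 0, 2);
	printf("a still owns counter 2: %d\n", still_owns_counter(&a, &cs, 0, 2));

	/* Another event reuses counter 2: the tag moves on, so 'a' must reprogram. */
	assign_event(&b, &cs, 0, 2);
	printf("a still owns counter 2: %d\n", still_owns_counter(&a, &cs, 0, 2));

	return 0;
}

In this sketch, as in the patch, the common case (same event, same counter, untouched since last time) costs only three comparisons, while any reuse of the counter by another event invalidates the cached state automatically.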
@@ -90,6 +90,7 @@ struct cpu_hw_events {
 	int			n_events;
 	int			n_added;
 	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+	u64			tags[X86_PMC_IDX_MAX];
 	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
 };
@@ -1142,6 +1143,8 @@ static int __hw_perf_event_init(struct perf_event *event)
 	hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
 	hwc->idx = -1;
+	hwc->last_cpu = -1;
+	hwc->last_tag = ~0ULL;
 
 	/*
 	 * Count user and OS events unless requested not to.
@@ -1457,11 +1460,14 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
 	return n;
 }
 
 static inline void x86_assign_hw_event(struct perf_event *event,
-				struct hw_perf_event *hwc, int idx)
+				struct cpu_hw_events *cpuc, int i)
 {
-	hwc->idx = idx;
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->idx = cpuc->assign[i];
+	hwc->last_cpu = smp_processor_id();
+	hwc->last_tag = ++cpuc->tags[i];
 
 	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
 		hwc->config_base = 0;
@@ -1480,6 +1486,15 @@ static inline void x86_assign_hw_event(struct perf_event *event,
 	}
 }
 
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
+					struct cpu_hw_events *cpuc,
+					int i)
+{
+	return hwc->idx == cpuc->assign[i] &&
+		hwc->last_cpu == smp_processor_id() &&
+		hwc->last_tag == cpuc->tags[i];
+}
+
 static void __x86_pmu_disable(struct perf_event *event, struct cpu_hw_events *cpuc);
 
 void hw_perf_enable(void)
@@ -1508,7 +1523,14 @@ void hw_perf_enable(void)
 			event = cpuc->event_list[i];
 			hwc = &event->hw;
 
-			if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
+			/*
+			 * we can avoid reprogramming counter if:
+			 * - assigned same counter as last time
+			 * - running on same CPU as last time
+			 * - no other event has used the counter since
+			 */
+			if (hwc->idx == -1 ||
+			    match_prev_assignment(hwc, cpuc, i))
 				continue;
 
 			__x86_pmu_disable(event, cpuc);
@@ -1522,12 +1544,12 @@ void hw_perf_enable(void)
 			hwc = &event->hw;
 
 			if (hwc->idx == -1) {
-				x86_assign_hw_event(event, hwc, cpuc->assign[i]);
+				x86_assign_hw_event(event, cpuc, i);
 				x86_perf_event_set_period(event, hwc, hwc->idx);
 			}
 			/*
 			 * need to mark as active because x86_pmu_disable()
-			 * clear active_mask and eventsp[] yet it preserves
+			 * clear active_mask and events[] yet it preserves
 			 * idx
 			 */
 			set_bit(hwc->idx, cpuc->active_mask);
......
@@ -478,9 +478,11 @@ struct hw_perf_event {
 	union {
 		struct { /* hardware */
 			u64		config;
+			u64		last_tag;
 			unsigned long	config_base;
 			unsigned long	event_base;
 			int		idx;
+			int		last_cpu;
 		};
 		struct { /* software */
 			s64		remaining;
......