Commit d4369891 authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: rework counter disable functions

As was done for the enable function, this patch reworks the disable functions
and introduces x86_pmu_disable_counter(). The internal function interface in
struct x86_pmu changes as well.

[ Impact: refactor and generalize code ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 7c90cc45
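For context, the sketch below is a minimal user-space C mock (not the kernel code itself) of the pattern this patch moves to: one generic x86_pmu_disable_counter() helper that both vendor back-ends reuse, selected at runtime through the disable callback in struct x86_pmu, which now takes (struct hw_perf_counter *, int) like the enable callback. The struct layout, the MSR constants, and mock_wrmsrl() are illustrative stand-ins; only the function names and the new signature mirror the patch.

/*
 * Hedged sketch of the ops-struct refactoring: vendor-specific disable
 * functions become thin wrappers around a shared generic helper.
 * The MSR write is mocked with printf so the example runs in user space.
 */
#include <stdio.h>
#include <stdint.h>

struct hw_perf_counter {
	uint64_t config;       /* event-select bits programmed for this counter */
	unsigned config_base;  /* base event-select MSR for this PMU */
};

struct x86_pmu {
	/* after the patch: disable takes (counter, index), like enable */
	void (*disable)(struct hw_perf_counter *, int);
};

/* stand-in for checking_wrmsrl(): just log the register write */
static int mock_wrmsrl(unsigned msr, uint64_t val)
{
	printf("wrmsr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
	return 0;
}

/* generic helper: disable counter idx by rewriting its event-select MSR */
static void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	mock_wrmsrl(hwc->config_base + idx, hwc->config);
}

/* Intel wrapper (the fixed-counter special case is omitted in this sketch) */
static void intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

/* AMD wrapper: nothing vendor-specific left to do */
static void amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static struct x86_pmu intel_pmu = { .disable = intel_pmu_disable_counter };
static struct x86_pmu amd_pmu   = { .disable = amd_pmu_disable_counter };

int main(void)
{
	/* hypothetical config/base values, for illustration only */
	struct hw_perf_counter hwc = { .config = 0x41003c, .config_base = 0x186 };

	/* generic code only ever goes through the x86_pmu callback */
	intel_pmu.disable(&hwc, 0);
	amd_pmu.disable(&hwc, 1);
	return 0;
}

Centralizing the MSR write in one helper is what lets generic code call x86_pmu.disable() without knowing the vendor, and lets amd_pmu_enable_counter() fall back to x86_pmu_disable_counter() directly, as the diff below shows.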
@@ -45,7 +45,7 @@ struct x86_pmu {
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	void		(*enable)(struct hw_perf_counter *, int);
-	void		(*disable)(int, u64);
+	void		(*disable)(struct hw_perf_counter *, int);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 		      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void intel_pmu_disable_counter(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void amd_pmu_disable_counter(int idx, u64 config)
-{
-	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-}
-
-static void hw_perf_disable(int idx, u64 config)
-{
+	int err;
+
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	x86_pmu.disable(idx, config);
+	err = checking_wrmsrl(hwc->config_base + idx,
+			      hwc->config);
 }
 
 static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
-		    struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__x86_pmu_disable(struct perf_counter *counter,
-		  struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		__pmc_fixed_disable(counter, hwc, idx);
-	else
-		hw_perf_disable(idx, hwc->config);
+	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+		intel_pmu_disable_fixed(hwc, idx);
+		return;
+	}
+
+	x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	x86_pmu_disable_counter(hwc, idx);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
 	else
-		amd_pmu_disable_counter(idx, hwc->config);
+		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -622,7 +620,7 @@ static int x86_pmu_enable(struct perf_counter *counter)
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	set_bit(idx, cpuc->active);
@@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active);
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	/*
 	 * Make sure the cleared pointer becomes visible before we
@@ -762,7 +760,7 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 
 		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__x86_pmu_disable(counter, &counter->hw, bit);
+			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
 	intel_pmu_ack_status(ack);