Commit a29aa8a7 authored by Robert Richter, committed by Ingo Molnar

perf_counter, x86: implement the interrupt handler for AMD cpus

This patch implements the interrupt handler for AMD performance
counters. Unlike the Intel PMU, there is no single status register
and there are no fixed-function counters. This makes the handler
different enough that a vendor-specific handler is worthwhile. To
check whether a counter has overflowed, the upper bit of the counter
value is tested. Only counters whose active bit is set are checked.
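For illustration only (not part of the patch): a minimal, standalone sketch
of the overflow test described above. The 48-bit width and the names are
assumptions for the example; in the kernel the width comes from
x86_pmu.counter_bits.

    #include <stdint.h>
    #include <stdbool.h>

    #define COUNTER_BITS 48  /* assumed width; stands in for x86_pmu.counter_bits */

    /*
     * The counter is programmed with a negative offset so it counts up
     * towards overflow: while still armed, the top implemented bit reads
     * as set; once the counter wraps past zero the bit reads as clear,
     * which the handler treats as an overflow.
     */
    static bool counter_overflowed(uint64_t val)
    {
            return !(val & (1ULL << (COUNTER_BITS - 1)));
    }

A raw value with bit 47 still set means the counter is still counting
towards its period; a value with that bit clear means it wrapped and an
overflow should be processed.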

With this patch throttling is enabled for AMD performance counters.
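Also for illustration (not from the patch): a toy model of the throttling
scheme, using made-up names and an assumed per-tick interrupt budget. The
real code keeps the interrupt count in the per-CPU cpu_hw_counters
structure, compares it against PERFMON_MAX_INTERRUPTS in the handler, and
resets it periodically via perf_counter_unthrottle().

    #include <stdbool.h>

    #define MAX_PMU_IRQS_PER_TICK 1000  /* assumed budget; cf. PERFMON_MAX_INTERRUPTS */

    struct toy_pmu_state {
            unsigned int interrupts;    /* PMU interrupts seen since the last reset */
            bool throttled;
    };

    /* Called once per PMU interrupt. */
    static bool toy_pmu_interrupt(struct toy_pmu_state *s)
    {
            if (++s->interrupts >= MAX_PMU_IRQS_PER_TICK)
                    s->throttled = true;  /* stop re-enabling counters */
            return s->throttled;
    }

    /* Called periodically, like perf_counter_unthrottle(). */
    static void toy_pmu_unthrottle(struct toy_pmu_state *s)
    {
            s->interrupts = 0;
            s->throttled = false;         /* counters may be re-enabled again */
    }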

This patch also re-enables Linux performance counters on AMD CPUs.

[ Impact: re-enable perfcounters on AMD CPUs ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-25-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 85cf9dba
@@ -240,10 +240,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
-	/* disable temporarily */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return -ENOSYS;
-
 	if (!x86_pmu_initialized())
 		return -ENODEV;
@@ -773,7 +769,43 @@ static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	return ret;
 }
 
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+{
+	int cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+	u64 val;
+	int handled = 0;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx;
+
+	++cpuc->interrupts;
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active))
+			continue;
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+		x86_perf_counter_update(counter, hwc, idx);
+		val = atomic64_read(&hwc->prev_count);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+		/* counter overflow */
+		x86_perf_counter_set_period(counter, hwc, idx);
+		handled = 1;
+		inc_irq_stat(apic_perf_irqs);
+		if (perf_counter_overflow(counter, nmi, regs, 0))
+			amd_pmu_disable_counter(hwc, idx);
+		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			/*
+			 * do not reenable when throttled, but reload
+			 * the register
+			 */
+			amd_pmu_disable_counter(hwc, idx);
+		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+			amd_pmu_enable_counter(hwc, idx);
+	}
+
+	return handled;
+}
 void perf_counter_unthrottle(void)
 {
@@ -782,9 +814,6 @@ void perf_counter_unthrottle(void)
 	if (!x86_pmu_initialized())
 		return;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return;
-
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())
...