Commit 87b9cf46 authored by Ingo Molnar

x86, perfcounters: read out MSR_CORE_PERF_GLOBAL_STATUS with counters disabled

Impact: make perfcounter NMI and IRQ sequence more robust

Make __smp_perf_counter_interrupt() a bit more conservative: first disable
all counters, then read out the status. Most invocations happen because there
are real events to handle, so this has no performance impact.

The code flow also gets a bit simpler this way.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 241771ef
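To make the reordering concrete before reading the diff, here is a minimal, self-contained C sketch of the new handler flow. It is not the kernel code: the helper functions (disable_all_counters(), read_global_status(), handle_and_ack(), maybe_reenable()) are hypothetical stubs standing in for the real wrmsr()/rdmsrl()/ack_APIC_irq() calls, and the per-counter overflow handling is elided.

#include <stdint.h>
#include <stdio.h>

static uint64_t global_status = 0x1;	/* pretend counter 0 overflowed */

/* Hypothetical stubs standing in for wrmsr()/rdmsrl()/ack_APIC_irq(): */
static void disable_all_counters(void)   { puts("wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0)"); }
static void ack_apic_irq(void)           { puts("ack_APIC_irq()"); }
static uint64_t read_global_status(void) { return global_status; }
static void handle_and_ack(uint64_t st)
{
	printf("handle overflowed counters: 0x%llx\n", (unsigned long long)st);
	global_status = 0;	/* all pending overflows serviced */
}
static void maybe_reenable(void)         { puts("re-enable only if globally enabled"); }

static void perf_counter_interrupt(void)
{
	uint64_t status;

	/* 1. Disable counters globally and ack the interrupt first ... */
	disable_all_counters();
	ack_apic_irq();

	/* 2. ... then read GLOBAL_STATUS with the counters quiesced. */
	status = read_global_status();
	if (!status)
		goto out;	/* nothing pending: fall through to 'out' */
again:
	handle_and_ack(status);

	/* New overflows may have been raised while handling; re-check. */
	status = read_global_status();
	if (status)
		goto again;
out:
	maybe_reenable();
}

int main(void)
{
	perf_counter_interrupt();
	return 0;
}

The point of the new ordering is that GLOBAL_STATUS is only sampled after the counters are stopped, so a zero status can simply fall through to the common re-enable path instead of taking an early-return special case before the counters are disabled.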
@@ -383,18 +383,16 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	struct cpu_hw_counters *cpuc;
 	u64 ack, status;
 
-	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-	if (!status) {
-		ack_APIC_irq();
-		return;
-	}
-
 	/* Disable counters globally */
 	wrmsr(MSR_CORE_PERF_GLOBAL_CTRL, 0, 0);
 	ack_APIC_irq();
 
 	cpuc = &per_cpu(cpu_hw_counters, cpu);
 
+	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
+	if (!status)
+		goto out;
+
 again:
 	ack = status;
 	for_each_bit(bit, (unsigned long *) &status, nr_hw_counters) {
@@ -440,7 +438,7 @@ static void __smp_perf_counter_interrupt(struct pt_regs *regs, int nmi)
 	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
 	if (status)
 		goto again;
-
+out:
 	/*
 	 * Do not reenable when global enable is off:
 	 */
...