Commit 72db5596 authored by Andi Kleen, committed by Ingo Molnar

perf/x86/intel: Move NMI clearing to end of PMI handler

This avoids some problems with spurious PMIs on Haswell.
Haswell seems to behave more like P4 in this regard. Do
the same thing as the P4 perf handler by unmasking
the NMI only at the end. Shouldn't make any difference
for earlier family 6 cores.

(Tested on Haswell, IvyBridge, Westmere, Saltwell (Atom).)
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Cc: Andi Kleen <ak@linux.jf.intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Link: http://lkml.kernel.org/r/1371515812-9646-5-git-send-email-andi@firstfloor.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3044318f
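
For orientation before the diff, here is a condensed sketch of the control-flow change. This is a simplification, not the verbatim kernel source: apic_ack_nmi() and intel_pmu_handle_irq_sketch() are hypothetical stand-ins for apic_write(APIC_LVTPC, APIC_DM_NMI) and intel_pmu_handle_irq(), and all PMU bookkeeping is elided.

    /* Minimal sketch of the early-vs-late LVTPC ack ordering. */
    #include <stdbool.h>
    #include <stdio.h>

    static struct {
            bool late_ack;  /* the opt-in flag this commit adds to struct x86_pmu */
    } x86_pmu;

    static void apic_ack_nmi(void)
    {
            /* Stand-in: unmask the LVT performance counter entry,
             * which re-enables delivery of further PMIs. */
            puts("ack NMI");
    }

    static int intel_pmu_handle_irq_sketch(void)
    {
            int handled = 0;

            if (!x86_pmu.late_ack)
                    apic_ack_nmi();  /* old behaviour: ack before touching counters */

            /*
             * ... disable the PMU, drain the BTS buffer, handle and
             * reset the overflowed counters, re-enable the PMU ...
             */

            if (x86_pmu.late_ack)
                    apic_ack_nmi();  /* new behaviour: ack only after the
                                      * overflow counters were reset, which
                                      * avoids spurious NMIs on Haswell */
            return handled;
    }

    int main(void)
    {
            x86_pmu.late_ack = true;  /* as intel_pmu_init() does for Haswell */
            return intel_pmu_handle_irq_sketch();
    }
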
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -378,6 +378,7 @@ struct x86_pmu {
 	struct event_constraint *event_constraints;
 	struct x86_pmu_quirk *quirks;
 	int		perfctr_second_write;
+	bool		late_ack;
 
 	/*
 	 * sysfs attrs
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1185,15 +1185,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
 	/*
-	 * Some chipsets need to unmask the LVTPC in a particular spot
-	 * inside the nmi handler. As a result, the unmasking was pushed
-	 * into all the nmi handlers.
-	 *
-	 * This handler doesn't seem to have any issues with the unmasking
-	 * so it was left at the top.
+	 * No known reason to not always do late ACK,
+	 * but just in case do it opt-in.
 	 */
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	if (!x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	intel_pmu_disable_all();
 	handled = intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
@@ -1257,6 +1253,13 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 done:
 	intel_pmu_enable_all(0);
+	/*
+	 * Only unmask the NMI after the overflow counters
+	 * have been reset. This avoids spurious NMIs on
+	 * Haswell CPUs.
+	 */
+	if (x86_pmu.late_ack)
+		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	return handled;
 }
@@ -2260,6 +2263,7 @@ __init int intel_pmu_init(void)
 	case 70:
 	case 71:
 	case 63:
+		x86_pmu.late_ack = true;
 		memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
 		memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
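
A note on the last hunk: the case labels (70, 71, 63) are among the family 6 model numbers the kernel at the time treated as Haswell, so late_ack is set exactly on the parts where the spurious PMIs were observed; earlier family 6 cores keep acking the LVTPC at the top of the handler, consistent with the commit message.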