Commit 2b9e344d authored by Peter Zijlstra's avatar Peter Zijlstra Committed by Ingo Molnar

perf/x86/intel: Clean up checkpoint-interrupt bits

Clean up the weird CP interrupt exception code by keeping a CP mask.

Andi suggested this implementation but weirdly didn't actually
implement it himself, do so now because it removes the conditional in
the interrupt handler and avoids the assumption that it's only on cnt2.
Suggested-by: default avatarAndi Kleen <andi@firstfloor.org>
Signed-off-by: default avatarPeter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/n/tip-dvb4q0rydkfp00kqat4p5bah@git.kernel.org
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parent 4b2c4f1f
...@@ -163,6 +163,11 @@ struct cpu_hw_events { ...@@ -163,6 +163,11 @@ struct cpu_hw_events {
u64 intel_ctrl_host_mask; u64 intel_ctrl_host_mask;
struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX]; struct perf_guest_switch_msr guest_switch_msrs[X86_PMC_IDX_MAX];
/*
* Intel checkpoint mask
*/
u64 intel_cp_status;
/* /*
* manage shared (per-core, per-cpu) registers * manage shared (per-core, per-cpu) registers
* used on Intel NHM/WSM/SNB * used on Intel NHM/WSM/SNB
......
...@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc) ...@@ -1184,6 +1184,11 @@ static void intel_pmu_disable_fixed(struct hw_perf_event *hwc)
wrmsrl(hwc->config_base, ctrl_val); wrmsrl(hwc->config_base, ctrl_val);
} }
/*
 * True when the event was configured as a TSX checkpointed counter
 * (Haswell HSW_IN_TX_CHECKPOINTED bit set in its hardware config).
 */
static inline bool event_is_checkpointed(struct perf_event *event)
{
	return !!(event->hw.config & HSW_IN_TX_CHECKPOINTED);
}
static void intel_pmu_disable_event(struct perf_event *event) static void intel_pmu_disable_event(struct perf_event *event)
{ {
struct hw_perf_event *hwc = &event->hw; struct hw_perf_event *hwc = &event->hw;
...@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event) ...@@ -1197,6 +1202,7 @@ static void intel_pmu_disable_event(struct perf_event *event)
cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx); cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx); cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
cpuc->intel_cp_status &= ~(1ull << hwc->idx);
/* /*
* must disable before any actual event * must disable before any actual event
...@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event) ...@@ -1271,6 +1277,9 @@ static void intel_pmu_enable_event(struct perf_event *event)
if (event->attr.exclude_guest) if (event->attr.exclude_guest)
cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx); cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
if (unlikely(event_is_checkpointed(event)))
cpuc->intel_cp_status |= (1ull << hwc->idx);
if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) { if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
intel_pmu_enable_fixed(hwc); intel_pmu_enable_fixed(hwc);
return; return;
...@@ -1282,11 +1291,6 @@ static void intel_pmu_enable_event(struct perf_event *event) ...@@ -1282,11 +1291,6 @@ static void intel_pmu_enable_event(struct perf_event *event)
__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE); __x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
} }
/*
 * Returns true if the event has the Haswell TSX "checkpointed" bit
 * (HSW_IN_TX_CHECKPOINTED) set in its hardware config.
 * NOTE(review): this is the pre-move copy shown in the diff; the commit
 * relocates this helper above intel_pmu_disable_event().
 */
static inline bool event_is_checkpointed(struct perf_event *event)
{
return (event->hw.config & HSW_IN_TX_CHECKPOINTED) != 0;
}
/* /*
* Save and restart an expired event. Called by NMI contexts, * Save and restart an expired event. Called by NMI contexts,
* so it has to be careful about preempting normal event ops: * so it has to be careful about preempting normal event ops:
...@@ -1389,11 +1393,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs) ...@@ -1389,11 +1393,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
} }
/* /*
* To avoid spurious interrupts with perf stat always reset checkpointed * Checkpointed counters can lead to 'spurious' PMIs because the
* counters. * rollback caused by the PMI will have cleared the overflow status
* bit. Therefore always force probe these counters.
*/ */
if (cpuc->events[2] && event_is_checkpointed(cpuc->events[2])) status |= cpuc->intel_cp_status;
status |= (1ULL << 2);
for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) { for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
struct perf_event *event = cpuc->events[bit]; struct perf_event *event = cpuc->events[bit];
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment