Commit 7685665c authored by Sandipan Das, committed by Peter Zijlstra

perf/x86/amd/core: Add PerfMonV2 overflow handling

If AMD Performance Monitoring Version 2 (PerfMonV2) is
supported, use a new scheme to process Core PMC overflows
in the NMI handler using the new global control and status
registers. This will be bypassed on unsupported hardware
(x86_pmu.version < 2).
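
This selection happens once at init time: as the final hunk
below shows, amd_core_pmu_init() installs the v2 callbacks
when PerfMonV2 is enumerated. A condensed sketch of that
wiring, with the gating condition written in terms of the
version check mentioned above:

	/* Sketch: init-time wiring, condensed from the last hunk */
	if (x86_pmu.version >= 2) {
		x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
		static_call_update(amd_pmu_test_overflow,
				   amd_pmu_test_overflow_status);
	}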

In x86_pmu_handle_irq(), overflows are detected by testing
the contents of the PERF_CTR register for each active PMC in
a loop. The new scheme instead inspects the overflow bits of
the global status register.
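
Condensed from the helpers added below (the overflow test is
switched via a static call), the two detection schemes look
roughly like this; "overflowed" is an illustrative variable,
not part of the patch:

	/* Legacy: read each active PMC and infer overflow from the
	 * top bit (a freshly programmed counter holds a negative
	 * value, so the top bit is clear once it has wrapped)
	 */
	rdmsrl(x86_pmu_event_addr(idx), counter);
	overflowed = !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));

	/* PerfMonV2: one global status read reports all overflowed PMCs */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
	overflowed = status & BIT_ULL(idx);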

The Performance Counter Global Status (PerfCntrGlobalStatus)
register has overflow (PerfCntrOvfl) bits for each PMC. This
is, however, a read-only MSR. To acknowledge that overflows
have been processed, the NMI handler must clear the bits by
writing to the PerfCntrGlobalStatusClr register.
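
In essence, the acknowledge sequence pairs a read of the
read-only status MSR with a write-1-to-clear of its companion
register, using the MSR names from the diff below:

	u64 status;

	/* Read pending overflow bits (PerfCntrGlobalStatus is read-only) */
	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);

	/* Writing 1s to PerfCntrGlobalStatusClr clears the same bits */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);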

In x86_pmu_handle_irq(), PMCs counting the same event that
are started and stopped at the same time record slightly
different counts due to delays between reads of the
PERF_CTR registers. This is fixed by stopping and starting
the PMCs together, with a single write to the Performance
Counter Global Control (PerfCntrGlobalCtl) register upon
entering and before exiting the NMI handler.
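
A minimal sketch of that start/stop pattern, assuming
amd_pmu_global_cntr_mask holds the enable bits for the
counters (as the v2 enable/disable paths use it):

	/* One write stops all PMCs together on NMI entry... */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, 0);

	/* ...overflows are processed here... */

	/* ...and one write restarts them together before returning */
	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, amd_pmu_global_cntr_mask);
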
Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/f20b7e4da0b0a83bdbe05857f354146623bc63ab.1650515382.git.sandipan.das@amd.com
parent 9622e67e
@@ -8,6 +8,7 @@
 #include <linux/delay.h>
 #include <linux/jiffies.h>
 #include <asm/apicdef.h>
+#include <asm/apic.h>
 #include <asm/nmi.h>

 #include "../perf_event.h"
@@ -669,6 +670,45 @@ static inline void amd_pmu_set_global_ctl(u64 ctl)
 	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_CTL, ctl);
 }

+static inline u64 amd_pmu_get_global_status(void)
+{
+	u64 status;
+
+	/* PerfCntrGlobalStatus is read-only */
+	rdmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS, status);
+
+	return status & amd_pmu_global_cntr_mask;
+}
+
+static inline void amd_pmu_ack_global_status(u64 status)
+{
+	/*
+	 * PerfCntrGlobalStatus is read-only but an overflow acknowledgment
+	 * mechanism exists; writing 1 to a bit in PerfCntrGlobalStatusClr
+	 * clears the same bit in PerfCntrGlobalStatus
+	 */
+
+	/* Only allow modifications to PerfCntrGlobalStatus.PerfCntrOvfl */
+	status &= amd_pmu_global_cntr_mask;
+	wrmsrl(MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR, status);
+}
+
+static bool amd_pmu_test_overflow_topbit(int idx)
+{
+	u64 counter;
+
+	rdmsrl(x86_pmu_event_addr(idx), counter);
+
+	return !(counter & BIT_ULL(x86_pmu.cntval_bits - 1));
+}
+
+static bool amd_pmu_test_overflow_status(int idx)
+{
+	return amd_pmu_get_global_status() & BIT_ULL(idx);
+}
+
+DEFINE_STATIC_CALL(amd_pmu_test_overflow, amd_pmu_test_overflow_topbit);
+
 /*
  * When a PMC counter overflows, an NMI is used to process the event and
  * reset the counter. NMI latency can result in the counter being updated
@@ -681,7 +721,6 @@ static inline void amd_pmu_set_global_ctl(u64 ctl)
 static void amd_pmu_wait_on_overflow(int idx)
 {
 	unsigned int i;
-	u64 counter;

 	/*
 	 * Wait for the counter to be reset if it has overflowed. This loop
@@ -689,8 +728,7 @@ static void amd_pmu_wait_on_overflow(int idx)
 	 * forever...
 	 */
 	for (i = 0; i < OVERFLOW_WAIT_COUNT; i++) {
-		rdmsrl(x86_pmu_event_addr(idx), counter);
-		if (counter & (1ULL << (x86_pmu.cntval_bits - 1)))
+		if (!static_call(amd_pmu_test_overflow)(idx))
 			break;

 		/* Might be in IRQ context, so can't sleep */
@@ -830,6 +868,24 @@ static void amd_pmu_del_event(struct perf_event *event)
  * handled a counter. When an un-handled NMI is received, it will be claimed
  * only if arriving within that window.
  */
+static inline int amd_pmu_adjust_nmi_window(int handled)
+{
+	/*
+	 * If a counter was handled, record a timestamp such that un-handled
+	 * NMIs will be claimed if arriving within that window.
+	 */
+	if (handled) {
+		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
+
+		return handled;
+	}
+
+	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
+		return NMI_DONE;
+
+	return NMI_HANDLED;
+}
+
 static int amd_pmu_handle_irq(struct pt_regs *regs)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
@@ -857,20 +913,84 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 	if (pmu_enabled)
 		amd_pmu_enable_all(0);

-	/*
-	 * If a counter was handled, record a timestamp such that un-handled
-	 * NMIs will be claimed if arriving within that window.
-	 */
-	if (handled) {
-		this_cpu_write(perf_nmi_tstamp, jiffies + perf_nmi_window);
-
-		return handled;
-	}
-
-	if (time_after(jiffies, this_cpu_read(perf_nmi_tstamp)))
-		return NMI_DONE;
-
-	return NMI_HANDLED;
+	return amd_pmu_adjust_nmi_window(handled);
+}
+
+static int amd_pmu_v2_handle_irq(struct pt_regs *regs)
+{
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	struct perf_sample_data data;
+	struct hw_perf_event *hwc;
+	struct perf_event *event;
+	int handled = 0, idx;
+	u64 status, mask;
+	bool pmu_enabled;
+
+	/*
+	 * Save the PMU state as it needs to be restored when leaving the
+	 * handler
+	 */
+	pmu_enabled = cpuc->enabled;
+	cpuc->enabled = 0;
+
+	/* Stop counting */
+	amd_pmu_v2_disable_all();
+
+	status = amd_pmu_get_global_status();
+
+	/* Check if any overflows are pending */
+	if (!status)
+		goto done;
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+
+		event = cpuc->events[idx];
+		hwc = &event->hw;
+		x86_perf_event_update(event);
+		mask = BIT_ULL(idx);
+
+		if (!(status & mask))
+			continue;
+
+		/* Event overflow */
+		handled++;
+		perf_sample_data_init(&data, 0, hwc->last_period);
+
+		if (!x86_perf_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			x86_pmu_stop(event, 0);
+
+		status &= ~mask;
+	}
+
+	/*
+	 * It should never be the case that some overflows are not handled as
+	 * the corresponding PMCs are expected to be inactive according to the
+	 * active_mask
+	 */
+	WARN_ON(status > 0);
+
+	/* Clear overflow bits */
+	amd_pmu_ack_global_status(~status);
+
+	/*
+	 * Unmasking the LVTPC is not required as the Mask (M) bit of the LVT
+	 * PMI entry is not set by the local APIC when a PMC overflow occurs
+	 */
+	inc_irq_stat(apic_perf_irqs);
+
+done:
+	cpuc->enabled = pmu_enabled;
+
+	/* Resume counting only if PMU is active */
+	if (pmu_enabled)
+		amd_pmu_v2_enable_all(0);
+
+	return amd_pmu_adjust_nmi_window(handled);
 }

 static struct event_constraint *
@@ -1256,6 +1376,8 @@ static int __init amd_core_pmu_init(void)
 		x86_pmu.enable_all = amd_pmu_v2_enable_all;
 		x86_pmu.disable_all = amd_pmu_v2_disable_all;
 		x86_pmu.enable = amd_pmu_v2_enable_event;
+		x86_pmu.handle_irq = amd_pmu_v2_handle_irq;
+		static_call_update(amd_pmu_test_overflow, amd_pmu_test_overflow_status);
 	}

 	/*
...