Commit cc37e520 authored by Stephane Eranian, committed by Peter Zijlstra

perf/x86/amd: Make Zen3 branch sampling opt-in

Add a kernel config option, CONFIG_PERF_EVENTS_AMD_BRS, to make
support for AMD Zen3 Branch Sampling (BRS) an opt-in compile-time
option.
Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20220322221517.2510440-8-eranian@google.com
parent ba2fe750
arch/x86/events/Kconfig
@@ -44,4 +44,12 @@ config PERF_EVENTS_AMD_UNCORE
 	  To compile this driver as a module, choose M here: the
 	  module will be called 'amd-uncore'.
 
+config PERF_EVENTS_AMD_BRS
+	depends on PERF_EVENTS && CPU_SUP_AMD
+	bool "AMD Zen3 Branch Sampling support"
+	help
+	  Enable AMD Zen3 branch sampling support (BRS) which samples up to
+	  16 consecutive taken branches in registers.
+
 endmenu
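
Note (editor's illustration, not part of the commit): the new symbol defaults to off, so Zen3 BRS now has to be selected explicitly at build time. A minimal .config sketch, assuming only the dependencies named in the hunk above:

    CONFIG_PERF_EVENTS=y
    CONFIG_CPU_SUP_AMD=y
    # New opt-in switch introduced by this commit; leave it unset to build
    # the AMD PMU without brs.o (see the Makefile and perf_event.h hunks below).
    CONFIG_PERF_EVENTS_AMD_BRS=y
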
arch/x86/events/amd/Makefile
 # SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_CPU_SUP_AMD)		+= core.o brs.o
+obj-$(CONFIG_CPU_SUP_AMD)		+= core.o
+obj-$(CONFIG_PERF_EVENTS_AMD_BRS)	+= brs.o
 obj-$(CONFIG_PERF_EVENTS_AMD_POWER)	+= power.o
 obj-$(CONFIG_X86_LOCAL_APIC)		+= ibs.o
 obj-$(CONFIG_PERF_EVENTS_AMD_UNCORE)	+= amd-uncore.o
arch/x86/events/perf_event.h
@@ -1218,6 +1218,8 @@ static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
 #ifdef CONFIG_CPU_SUP_AMD
 
 int amd_pmu_init(void);
+
+#ifdef CONFIG_PERF_EVENTS_AMD_BRS
 int amd_brs_init(void);
 void amd_brs_disable(void);
 void amd_brs_enable(void);
@@ -1252,25 +1254,52 @@ static inline void amd_pmu_brs_del(struct perf_event *event)
 
 void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in);
 
-/*
- * check if BRS is activated on the CPU
- * active defined as it has non-zero users and DBG_EXT_CFG.BRSEN=1
- */
-static inline bool amd_brs_active(void)
+static inline s64 amd_brs_adjust_period(s64 period)
 {
-	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	if (period > x86_pmu.lbr_nr)
+		return period - x86_pmu.lbr_nr;
 
-	return cpuc->brs_active;
+	return period;
 }
-
-static inline s64 amd_brs_adjust_period(s64 period)
+#else
+static inline int amd_brs_init(void)
 {
-	if (period > x86_pmu.lbr_nr)
-		return period - x86_pmu.lbr_nr;
+	return 0;
+}
+static inline void amd_brs_disable(void) {}
+static inline void amd_brs_enable(void) {}
+static inline void amd_brs_drain(void) {}
+static inline void amd_brs_lopwr_init(void) {}
+static inline void amd_brs_disable_all(void) {}
+static inline int amd_brs_setup_filter(struct perf_event *event)
+{
+	return 0;
+}
+static inline void amd_brs_reset(void) {}
 
-	return period;
+static inline void amd_pmu_brs_add(struct perf_event *event)
+{
 }
 
+static inline void amd_pmu_brs_del(struct perf_event *event)
+{
+}
+
+static inline void amd_pmu_brs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+}
+
+static inline s64 amd_brs_adjust_period(s64 period)
+{
+	return period;
+}
+
+static inline void amd_brs_enable_all(void)
+{
+}
+
+#endif
+
 #else /* CONFIG_CPU_SUP_AMD */
 
 static inline int amd_pmu_init(void)
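
For context, a standalone sketch of the stub pattern used in the perf_event.h hunk above: when the config symbol is not defined, empty static inline helpers keep call sites free of #ifdefs and compile away entirely. This is ordinary user-space C for illustration only; the macro BRS_DEMO and the names brs_enable()/brs_adjust_period() are made up, and only the period arithmetic mirrors amd_brs_adjust_period() from the diff.

/*
 * Standalone illustration (not kernel code). Build with:
 *   cc -DBRS_DEMO demo.c    -> "opt-in" build
 *   cc demo.c               -> stub build, helpers compile away
 */
#include <stdio.h>

#ifdef BRS_DEMO
/* Stand-ins for the declarations guarded by CONFIG_PERF_EVENTS_AMD_BRS. */
static inline void brs_enable(void)
{
	printf("BRS enabled\n");
}
static inline long brs_adjust_period(long period, long lbr_nr)
{
	/* Same arithmetic as amd_brs_adjust_period() in the hunk above. */
	if (period > lbr_nr)
		return period - lbr_nr;
	return period;
}
#else
/* Stand-ins for the #else stubs in the hunk above. */
static inline void brs_enable(void) { }
static inline long brs_adjust_period(long period, long lbr_nr)
{
	(void)lbr_nr;
	return period;	/* unchanged when support is compiled out */
}
#endif

int main(void)
{
	/* Call sites need no #ifdef of their own. */
	brs_enable();
	printf("period = %ld\n", brs_adjust_period(100000, 16));
	return 0;
}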