Commit fc4b8fca authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Hybrid PMU support for intel_ctrl

The intel_ctrl is the counter mask of a PMU. The PMU counter information
may differ among hybrid PMUs, so each hybrid PMU should use its own
intel_ctrl to check and access its counters.

When handling a certain hybrid PMU, apply the intel_ctrl from the
corresponding hybrid PMU.

When checking for HW existence, apply the PMU and the number of counters
from the corresponding hybrid PMU as well. Perf will check HW existence
for each hybrid PMU before registration. Expose check_hw_exists() for a
later patch.

Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-6-git-send-email-kan.liang@linux.intel.com
parent d0946a88
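For context, the diff below replaces direct reads of x86_pmu.intel_ctrl with a per-PMU lookup through the hybrid() accessor used throughout this patch (added by an earlier patch in this series). The following is a minimal, standalone sketch of that lookup pattern only; the structure layout, helper name, and mask values are made up for illustration and are not the kernel code.

/*
 * Minimal standalone sketch (not kernel code): on a hybrid system each
 * PMU carries its own counter mask; otherwise the single global x86_pmu
 * copy is used.  All names and mask values here are illustrative.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct x86_hybrid_pmu {
	const char *name;
	uint64_t intel_ctrl;		/* counter mask owned by this PMU */
};

static struct {
	bool hybrid;
	uint64_t intel_ctrl;		/* legacy single global mask */
} x86_pmu = { .hybrid = true, .intel_ctrl = 0xffULL };

/* Stand-in for what hybrid(pmu, intel_ctrl) resolves to in the kernel. */
static uint64_t get_intel_ctrl(const struct x86_hybrid_pmu *pmu)
{
	if (x86_pmu.hybrid && pmu)
		return pmu->intel_ctrl;
	return x86_pmu.intel_ctrl;
}

int main(void)
{
	struct x86_hybrid_pmu core = { "cpu_core", 0x70000000ffULL };
	struct x86_hybrid_pmu atom = { "cpu_atom", 0x300000003fULL };

	printf("%s intel_ctrl=%#llx\n", core.name,
	       (unsigned long long)get_intel_ctrl(&core));
	printf("%s intel_ctrl=%#llx\n", atom.name,
	       (unsigned long long)get_intel_ctrl(&atom));
	return 0;
}

With this pattern, __intel_pmu_enable_all(), handle_pmi_common(), intel_guest_get_msrs() and fixed_counter_disabled() each pick up the counter mask of the hybrid PMU they are currently operating on rather than a single global value.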
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -231,7 +231,7 @@ static void release_pmc_hardware(void) {}
 #endif
 
-static bool check_hw_exists(void)
+bool check_hw_exists(struct pmu *pmu, int num_counters, int num_counters_fixed)
 {
 	u64 val, val_fail = -1, val_new= ~0;
 	int i, reg, reg_fail = -1, ret = 0;
@@ -242,7 +242,7 @@ static bool check_hw_exists(void)
 	 * Check to see if the BIOS enabled any of the counters, if so
 	 * complain and bail.
 	 */
-	for (i = 0; i < x86_pmu.num_counters; i++) {
+	for (i = 0; i < num_counters; i++) {
 		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
@@ -256,13 +256,13 @@ static bool check_hw_exists(void)
 		}
 	}
 
-	if (x86_pmu.num_counters_fixed) {
+	if (num_counters_fixed) {
 		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
-		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
-			if (fixed_counter_disabled(i))
+		for (i = 0; i < num_counters_fixed; i++) {
+			if (fixed_counter_disabled(i, pmu))
 				continue;
 			if (val & (0x03 << i*4)) {
 				bios_fail = 1;
@@ -1547,7 +1547,7 @@ void perf_event_print_debug(void)
 			cpu, idx, prev_left);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
-		if (fixed_counter_disabled(idx))
+		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 
 		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
@@ -1992,7 +1992,7 @@ static int __init init_hw_perf_events(void)
 	pmu_check_apic();
 
 	/* sanity check that the hardware exists or is emulated */
-	if (!check_hw_exists())
+	if (!check_hw_exists(&pmu, x86_pmu.num_counters, x86_pmu.num_counters_fixed))
 		return 0;
 
 	pr_cont("%s PMU driver.\n", x86_pmu.name);
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2153,10 +2153,11 @@ static void intel_pmu_disable_all(void)
 static void __intel_pmu_enable_all(int added, bool pmi)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	intel_pmu_lbr_enable_all(pmi);
 	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL,
-	       x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
+	       intel_ctrl & ~cpuc->intel_ctrl_guest_mask);
 
 	if (test_bit(INTEL_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
 		struct perf_event *event =
@@ -2709,6 +2710,7 @@ int intel_pmu_save_and_restart(struct perf_event *event)
 static void intel_pmu_reset(void)
 {
 	struct debug_store *ds = __this_cpu_read(cpu_hw_events.ds);
+	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	unsigned long flags;
 	int idx;
@@ -2724,7 +2726,7 @@ static void intel_pmu_reset(void)
 		wrmsrl_safe(x86_pmu_event_addr(idx), 0ull);
 	}
 	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
-		if (fixed_counter_disabled(idx))
+		if (fixed_counter_disabled(idx, cpuc->pmu))
 			continue;
 		wrmsrl_safe(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
 	}
@@ -2753,6 +2755,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	int bit;
 	int handled = 0;
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	inc_irq_stat(apic_perf_irqs);
@@ -2798,7 +2801,7 @@ static int handle_pmi_common(struct pt_regs *regs, u64 status)
 		handled++;
 		x86_pmu.drain_pebs(regs, &data);
-		status &= x86_pmu.intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
+		status &= intel_ctrl | GLOBAL_STATUS_TRACE_TOPAPMI;
 
 		/*
 		 * PMI throttle may be triggered, which stops the PEBS event.
@@ -3804,10 +3807,11 @@ static struct perf_guest_switch_msr *intel_guest_get_msrs(int *nr)
 {
 	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
 	struct perf_guest_switch_msr *arr = cpuc->guest_switch_msrs;
+	u64 intel_ctrl = hybrid(cpuc->pmu, intel_ctrl);
 
 	arr[0].msr = MSR_CORE_PERF_GLOBAL_CTRL;
-	arr[0].host = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
-	arr[0].guest = x86_pmu.intel_ctrl & ~cpuc->intel_ctrl_host_mask;
+	arr[0].host = intel_ctrl & ~cpuc->intel_ctrl_guest_mask;
+	arr[0].guest = intel_ctrl & ~cpuc->intel_ctrl_host_mask;
 	if (x86_pmu.flags & PMU_FL_PEBS_ALL)
 		arr[0].guest &= ~cpuc->pebs_enabled;
 	else
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -634,6 +634,7 @@ enum {
 struct x86_hybrid_pmu {
 	struct pmu			pmu;
 	union perf_capabilities		intel_cap;
+	u64				intel_ctrl;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)
@@ -998,6 +999,9 @@ static inline int x86_pmu_rdpmc_index(int index)
 	return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
 }
 
+bool check_hw_exists(struct pmu *pmu, int num_counters,
+		     int num_counters_fixed);
+
 int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
@@ -1102,9 +1106,11 @@ ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr,
 ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
 			  char *page);
 
-static inline bool fixed_counter_disabled(int i)
+static inline bool fixed_counter_disabled(int i, struct pmu *pmu)
 {
-	return !(x86_pmu.intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
+	u64 intel_ctrl = hybrid(pmu, intel_ctrl);
+
+	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
 }
 
 #ifdef CONFIG_CPU_SUP_AMD
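As a quick sanity check of the fixed_counter_disabled() logic above: the helper returns true once no bits remain at or above position i + INTEL_PMC_IDX_FIXED (32) in the per-PMU intel_ctrl. The tiny standalone snippet below uses an illustrative mask with eight general-purpose counters (bits 0-7) and three fixed counters (bits 32-34); the signature is simplified to take the mask directly instead of a struct pmu *, which is not how the kernel version is called.

/*
 * Standalone illustration (not kernel code) of the fixed_counter_disabled()
 * check.  The mask value is made up; INTEL_PMC_IDX_FIXED is 32 in the
 * kernel headers.  The kernel version takes a struct pmu * and looks the
 * mask up through hybrid(); here the mask is passed directly.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define INTEL_PMC_IDX_FIXED	32

static bool fixed_counter_disabled(int i, uint64_t intel_ctrl)
{
	return !(intel_ctrl >> (i + INTEL_PMC_IDX_FIXED));
}

int main(void)
{
	/* 8 general-purpose counters (bits 0-7), 3 fixed counters (bits 32-34) */
	uint64_t intel_ctrl = 0x70000000ffULL;

	assert(!fixed_counter_disabled(0, intel_ctrl));	/* fixed counter 0 exists */
	assert(!fixed_counter_disabled(2, intel_ctrl));	/* fixed counter 2 exists */
	assert(fixed_counter_disabled(3, intel_ctrl));	/* no fixed counter 3 */
	return 0;
}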