Commit bc14fe1b authored by Kan Liang, committed by Peter Zijlstra

perf/x86/intel: Factor out intel_pmu_check_event_constraints

Each hybrid PMU has to check and update its own event constraints before
registration.

intel_pmu_check_event_constraints() will be reused later to check the
event constraints of each hybrid PMU.
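
For illustration only, the anticipated reuse would look roughly like the
sketch below, running the same checks once per hybrid PMU. The
x86_hybrid_pmu descriptor and its fields are assumptions made for this
sketch; they are not introduced by this patch.

	/*
	 * Sketch of the intended per-hybrid-PMU reuse (hypothetical
	 * x86_hybrid_pmu descriptor and fields; not part of this patch).
	 */
	static void intel_pmu_check_hybrid_pmus_sketch(void)
	{
		struct x86_hybrid_pmu *pmu;
		int i;

		for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
			pmu = &x86_pmu.hybrid_pmu[i];
			intel_pmu_check_event_constraints(pmu->event_constraints,
							  pmu->num_counters,
							  pmu->num_counters_fixed,
							  pmu->intel_ctrl);
		}
	}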
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-13-git-send-email-kan.liang@linux.intel.com
parent b8c4d1a8
@@ -5084,6 +5084,49 @@ static void intel_pmu_check_num_counters(int *num_counters,
 	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
 }
 
+static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
+					      int num_counters,
+					      int num_counters_fixed,
+					      u64 intel_ctrl)
+{
+	struct event_constraint *c;
+
+	if (!event_constraints)
+		return;
+
+	/*
+	 * event on fixed counter2 (REF_CYCLES) only works on this
+	 * counter, so do not extend mask to generic counters
+	 */
+	for_each_event_constraint(c, event_constraints) {
+		/*
+		 * Don't extend the topdown slots and metrics
+		 * events to the generic counters.
+		 */
+		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+			/*
+			 * Disable topdown slots and metrics events,
+			 * if slots event is not in CPUID.
+			 */
+			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
+				c->idxmsk64 = 0;
+			c->weight = hweight64(c->idxmsk64);
+			continue;
+		}
+
+		if (c->cmask == FIXED_EVENT_FLAGS) {
+			/* Disabled fixed counters which are not in CPUID */
+			c->idxmsk64 &= intel_ctrl;
+
+			if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+				c->idxmsk64 |= (1ULL << num_counters) - 1;
+		}
+		c->idxmsk64 &=
+			~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
+		c->weight = hweight64(c->idxmsk64);
+	}
+}
+
 __init int intel_pmu_init(void)
 {
 	struct attribute **extra_skl_attr = &empty_attrs;
@@ -5094,7 +5137,6 @@ __init int intel_pmu_init(void)
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
-	struct event_constraint *c;
 	unsigned int fixed_mask;
 	struct extra_reg *er;
 	bool pmem = false;
@@ -5732,40 +5774,10 @@ __init int intel_pmu_init(void)
 	if (x86_pmu.intel_cap.anythread_deprecated)
 		x86_pmu.format_attrs = intel_arch_formats_attr;
 
-	if (x86_pmu.event_constraints) {
-		/*
-		 * event on fixed counter2 (REF_CYCLES) only works on this
-		 * counter, so do not extend mask to generic counters
-		 */
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			/*
-			 * Don't extend the topdown slots and metrics
-			 * events to the generic counters.
-			 */
-			if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
-				/*
-				 * Disable topdown slots and metrics events,
-				 * if slots event is not in CPUID.
-				 */
-				if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
-					c->idxmsk64 = 0;
-				c->weight = hweight64(c->idxmsk64);
-				continue;
-			}
-
-			if (c->cmask == FIXED_EVENT_FLAGS) {
-				/* Disabled fixed counters which are not in CPUID */
-				c->idxmsk64 &= x86_pmu.intel_ctrl;
-
-				if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
-					c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-			}
-			c->idxmsk64 &=
-				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
-			c->weight = hweight64(c->idxmsk64);
-		}
-	}
+	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
+					  x86_pmu.num_counters,
+					  x86_pmu.num_counters_fixed,
+					  x86_pmu.intel_ctrl);
 
 	/*
 	 * Access LBR MSR may cause #GP under certain circumstances.
 	 * E.g. KVM doesn't support LBR MSR
...
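
A note on the clamping arithmetic in the new helper: ~(~0ULL <<
(INTEL_PMC_IDX_FIXED + num_counters_fixed)) clears constraint bits for
fixed counters beyond those the CPU reports, since fixed-counter bits
start at INTEL_PMC_IDX_FIXED (bit 32). A minimal standalone
demonstration, using illustrative counter counts rather than values
from any specific CPU:

	#include <stdio.h>
	#include <stdint.h>

	#define INTEL_PMC_IDX_FIXED 32	/* matches the kernel headers */

	int main(void)
	{
		int num_counters_fixed = 4;	/* illustrative value */
		/* a constraint claiming fixed counters 0..15 (bits 32..47) */
		uint64_t idxmsk = 0xffffULL << INTEL_PMC_IDX_FIXED;

		/* clear fixed-counter bits the CPU lacks (bits >= 36 here) */
		idxmsk &= ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));

		printf("clamped mask = %#llx\n", (unsigned long long)idxmsk);
		/* prints 0xf00000000: only fixed counters 0..3 survive */
		return 0;
	}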