Commit a1eac7ac authored by Robert Richter, committed by Ingo Molnar

perf/x86: Move Intel specific code to intel_pmu_init()

There is some Intel specific code in the generic x86 path. Move it to
intel_pmu_init().

Since the p4 and p6 PMUs don't have fixed counters, the check can be
skipped when such a PMU is detected.
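A minimal sketch of what remains in the generic init path after the move
(taken from the diff below; this is not a verbatim copy of the file). On
Intel CPUs with architectural perfmon, intel_pmu_init() has already filled
in x86_pmu.intel_ctrl, including the fixed counter bits, before this point:

	/* Fallback for PMUs whose init did not set intel_ctrl (such as p4/p6). */
	if (!x86_pmu.intel_ctrl)
		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;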
Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1340217996-2254-3-git-send-email-robert.richter@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 15c7ad51
@@ -1307,7 +1307,6 @@ static struct attribute_group x86_pmu_format_group = {
 static int __init init_hw_perf_events(void)
 {
 	struct x86_pmu_quirk *quirk;
-	struct event_constraint *c;
 	int err;
 
 	pr_info("Performance Events: ");
@@ -1338,22 +1337,9 @@ static int __init init_hw_perf_events(void)
 	for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
 		quirk->func();
 
-	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
-		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
-		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
-		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
-	}
-	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
-
-	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
-		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
-		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
-		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
-	}
-
-	x86_pmu.intel_ctrl |=
-		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+	if (!x86_pmu.intel_ctrl)
+		x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
 
 	perf_events_lapic_init();
 	register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
@@ -1361,22 +1347,6 @@ static int __init init_hw_perf_events(void)
 		__EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
 				   0, x86_pmu.num_counters, 0);
 
-	if (x86_pmu.event_constraints) {
-		/*
-		 * event on fixed counter2 (REF_CYCLES) only works on this
-		 * counter, so do not extend mask to generic counters
-		 */
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			if (c->cmask != X86_RAW_EVENT_MASK
-			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-				continue;
-			}
-
-			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-			c->weight += x86_pmu.num_counters;
-		}
-	}
-
 	x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
 	x86_pmu_format_group.attrs = x86_pmu.format_attrs;
@@ -1765,6 +1765,7 @@ __init int intel_pmu_init(void)
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
+	struct event_constraint *c;
 	unsigned int unused;
 	int version;
 
@@ -1953,5 +1954,37 @@ __init int intel_pmu_init(void)
 		}
 	}
 
+	if (x86_pmu.num_counters > INTEL_PMC_MAX_GENERIC) {
+		WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
+		     x86_pmu.num_counters, INTEL_PMC_MAX_GENERIC);
+		x86_pmu.num_counters = INTEL_PMC_MAX_GENERIC;
+	}
+	x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
+
+	if (x86_pmu.num_counters_fixed > INTEL_PMC_MAX_FIXED) {
+		WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
+		     x86_pmu.num_counters_fixed, INTEL_PMC_MAX_FIXED);
+		x86_pmu.num_counters_fixed = INTEL_PMC_MAX_FIXED;
+	}
+
+	x86_pmu.intel_ctrl |=
+		((1LL << x86_pmu.num_counters_fixed)-1) << INTEL_PMC_IDX_FIXED;
+
+	if (x86_pmu.event_constraints) {
+		/*
+		 * event on fixed counter2 (REF_CYCLES) only works on this
+		 * counter, so do not extend mask to generic counters
+		 */
+		for_each_event_constraint(c, x86_pmu.event_constraints) {
+			if (c->cmask != X86_RAW_EVENT_MASK
+			    || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+				continue;
+			}
+
+			c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
+			c->weight += x86_pmu.num_counters;
+		}
+	}
+
 	return 0;
 }
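For a concrete sense of the intel_ctrl arithmetic now done in intel_pmu_init(),
here is a worked example with hypothetical counter counts, assuming
INTEL_PMC_IDX_FIXED is 32 (its usual value in the kernel headers):

	num_counters = 4, num_counters_fixed = 3
	(1 << 4) - 1               = 0x00000000f   /* generic counters 0-3 */
	((1LL << 3) - 1) << 32     = 0x700000000   /* fixed counters 0-2  */
	intel_ctrl                 = 0x70000000f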