Commit 183af736 authored by Kan Liang, committed by Peter Zijlstra

perf/x86: Hybrid PMU support for extra_regs

Different hybrid PMUs may have different extra registers: e.g. the Core
PMU may have offcore registers, a frontend register and a ldlat
register, while the Atom core may only have offcore registers and a
ldlat register. Each hybrid PMU should therefore use its own extra_regs.
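
For illustration only (the table names, event codes and valid masks
below are hypothetical, not taken from this patch; the
INTEL_UEVENT_EXTRA_REG()/INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG() and
EVENT_EXTRA_END helpers are the existing ones), the per-PMU tables
could look roughly like this:

	/*
	 * Hypothetical per-PMU extra_reg tables: the big-core PMU lists
	 * offcore, ldlat and frontend MSRs, while the Atom PMU omits the
	 * frontend entry it does not have.
	 */
	static struct extra_reg intel_big_core_extra_regs[] __read_mostly = {
		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
		INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
		INTEL_UEVENT_EXTRA_REG(0x01c6, MSR_PEBS_FRONTEND, 0x7fffffull, FE),
		EVENT_EXTRA_END
	};

	static struct extra_reg intel_atom_extra_regs[] __read_mostly = {
		INTEL_UEVENT_EXTRA_REG(0x01b7, MSR_OFFCORE_RSP_0, 0x3fffffffffull, RSP_0),
		INTEL_UEVENT_EXTRA_REG(0x02b7, MSR_OFFCORE_RSP_1, 0x3fffffffffull, RSP_1),
		INTEL_UEVENT_PEBS_LDLAT_EXTRA_REG(0x01cd),
		EVENT_EXTRA_END
	};

Each struct x86_hybrid_pmu instance then points its extra_regs member
at the table matching its core type, and the code below looks the table
up through the event's pmu.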

An Intel Hybrid system should always have extra registers, so
unconditionally allocate shared_regs on an Intel Hybrid system.
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Link: https://lkml.kernel.org/r/1618237865-33448-11-git-send-email-kan.liang@linux.intel.com
parent 24ee38ff
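
The change below relies on the hybrid() accessor added earlier in this
series. Roughly (a simplified sketch; see arch/x86/events/perf_event.h
for the real macro), it resolves a field either from the boot-time
global x86_pmu copy or from the x86_hybrid_pmu instance that contains
the given struct pmu:

	/*
	 * Simplified sketch of the hybrid() field accessor: use the global
	 * x86_pmu copy when the system is not hybrid or no pmu is given,
	 * otherwise use the field of the enclosing x86_hybrid_pmu.
	 */
	#define hybrid(_pmu, _field)					\
	(*({								\
		typeof(&x86_pmu._field) __Fp = &x86_pmu._field;		\
									\
		if (is_hybrid() && (_pmu))				\
			__Fp = &hybrid_pmu(_pmu)->_field;		\
									\
		__Fp;							\
	}))

Since the per-PMU tables mean x86_pmu.extra_regs itself may stay NULL
on a hybrid system, intel_cpuc_prepare() can no longer key the
shared_regs allocation off that field alone, hence the added
is_hybrid() check.
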
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -154,15 +154,16 @@ u64 x86_perf_event_update(struct perf_event *event)
  */
 static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
 {
+	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
 	struct hw_perf_event_extra *reg;
 	struct extra_reg *er;
 
 	reg = &event->hw.extra_reg;
 
-	if (!x86_pmu.extra_regs)
+	if (!extra_regs)
 		return 0;
 
-	for (er = x86_pmu.extra_regs; er->msr; er++) {
+	for (er = extra_regs; er->msr; er++) {
 		if (er->event != (config & er->config_mask))
 			continue;
 		if (event->attr.config1 & ~er->valid_mask)
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2966,8 +2966,10 @@ intel_vlbr_constraints(struct perf_event *event)
 	return NULL;
 }
 
-static int intel_alt_er(int idx, u64 config)
+static int intel_alt_er(struct cpu_hw_events *cpuc,
+			int idx, u64 config)
 {
+	struct extra_reg *extra_regs = hybrid(cpuc->pmu, extra_regs);
 	int alt_idx = idx;
 
 	if (!(x86_pmu.flags & PMU_FL_HAS_RSP_1))
@@ -2979,7 +2981,7 @@ static int intel_alt_er(int idx, u64 config)
 	if (idx == EXTRA_REG_RSP_1)
 		alt_idx = EXTRA_REG_RSP_0;
 
-	if (config & ~x86_pmu.extra_regs[alt_idx].valid_mask)
+	if (config & ~extra_regs[alt_idx].valid_mask)
 		return idx;
 
 	return alt_idx;
@@ -2987,15 +2989,16 @@ static int intel_alt_er(int idx, u64 config)
 
 static void intel_fixup_er(struct perf_event *event, int idx)
 {
+	struct extra_reg *extra_regs = hybrid(event->pmu, extra_regs);
 	event->hw.extra_reg.idx = idx;
 
 	if (idx == EXTRA_REG_RSP_0) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_0].event;
+		event->hw.config |= extra_regs[EXTRA_REG_RSP_0].event;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
 	} else if (idx == EXTRA_REG_RSP_1) {
 		event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
-		event->hw.config |= x86_pmu.extra_regs[EXTRA_REG_RSP_1].event;
+		event->hw.config |= extra_regs[EXTRA_REG_RSP_1].event;
 		event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
 	}
 }
@@ -3071,7 +3074,7 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
 		 */
 		c = NULL;
 	} else {
-		idx = intel_alt_er(idx, reg->config);
+		idx = intel_alt_er(cpuc, idx, reg->config);
 		if (idx != reg->idx) {
 			raw_spin_unlock_irqrestore(&era->lock, flags);
 			goto again;
@@ -4155,7 +4158,7 @@ int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
 {
 	cpuc->pebs_record_size = x86_pmu.pebs_record_size;
 
-	if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
+	if (is_hybrid() || x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
 		cpuc->shared_regs = allocate_shared_regs(cpu);
 		if (!cpuc->shared_regs)
 			goto err;
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -650,6 +650,7 @@ struct x86_hybrid_pmu {
 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
 	struct event_constraint		*event_constraints;
 	struct event_constraint		*pebs_constraints;
+	struct extra_reg		*extra_regs;
 };
 
 static __always_inline struct x86_hybrid_pmu *hybrid_pmu(struct pmu *pmu)