Commit 99132883 authored by Paolo Bonzini

Merge branch 'kvm-amd-pmu-fixes' into HEAD

parents 6ea6581f 5a1bde46
@@ -887,6 +887,11 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 union cpuid10_eax eax;
                 union cpuid10_edx edx;
 
+                if (!static_cpu_has(X86_FEATURE_ARCH_PERFMON)) {
+                        entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
+                        break;
+                }
+
                 perf_get_x86_pmu_capability(&cap);
 
                 /*
@@ -45,6 +45,22 @@ static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
         [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
 };
 
+/* duplicated from amd_f17h_perfmon_event_map. */
+static struct kvm_event_hw_type_mapping amd_f17h_event_mapping[] = {
+        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
+        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
+        [2] = { 0x60, 0xff, PERF_COUNT_HW_CACHE_REFERENCES },
+        [3] = { 0x64, 0x09, PERF_COUNT_HW_CACHE_MISSES },
+        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
+        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
+        [6] = { 0x87, 0x02, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
+        [7] = { 0x87, 0x01, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
+};
+
+/* amd_pmc_perf_hw_id depends on these being the same size */
+static_assert(ARRAY_SIZE(amd_event_mapping) ==
+              ARRAY_SIZE(amd_f17h_event_mapping));
+
 static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
 {
         struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
@@ -140,6 +156,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
 
 static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
 {
+        struct kvm_event_hw_type_mapping *event_mapping;
         u8 event_select = pmc->eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
         u8 unit_mask = (pmc->eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
         int i;
@@ -148,15 +165,20 @@ static unsigned int amd_pmc_perf_hw_id(struct kvm_pmc *pmc)
         if (WARN_ON(pmc_is_fixed(pmc)))
                 return PERF_COUNT_HW_MAX;
 
+        if (guest_cpuid_family(pmc->vcpu) >= 0x17)
+                event_mapping = amd_f17h_event_mapping;
+        else
+                event_mapping = amd_event_mapping;
+
         for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
-                if (amd_event_mapping[i].eventsel == event_select
-                    && amd_event_mapping[i].unit_mask == unit_mask)
+                if (event_mapping[i].eventsel == event_select
+                    && event_mapping[i].unit_mask == unit_mask)
                         break;
 
         if (i == ARRAY_SIZE(amd_event_mapping))
                 return PERF_COUNT_HW_MAX;
 
-        return amd_event_mapping[i].event_type;
+        return event_mapping[i].event_type;
 }
 
 /* check if a PMC is enabled by comparing it against global_ctrl bits. Because