Commit cda231cd authored by Paolo Bonzini

Merge tag 'kvm-x86-pmu-6.11' of https://github.com/kvm-x86/linux into HEAD

KVM x86/pmu changes for 6.11

 - Don't advertise IA32_PERF_GLOBAL_OVF_CTRL as an MSR-to-be-saved, as it reads
   '0' and writes from userspace are ignored.

 - Update to the newfangled Intel CPU FMS infrastructure.

 - Use macros instead of open-coded literals to clean up KVM's manipulation of
   FIXED_CTR_CTRL MSRs.
parents 5c5ddf71 f287bef6
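
For context on the FIXED_CTR_CTRL cleanup in the last bullet: each fixed counter owns a 4-bit field in IA32_FIXED_CTR_CTRL (bit 0 enables counting at CPL 0, bit 1 at CPL > 0, bit 3 raises a PMI on overflow), and the series replaces the open-coded 0x1/0x2/0x8 literals with the INTEL_FIXED_0_* and intel_fixed_bits_by_idx() macros. The standalone C sketch below only mirrors that layout to show how such helpers compose; the FIXED_* names here are local stand-ins for illustration, not the kernel's definitions.

	/*
	 * Illustrative only: models the IA32_FIXED_CTR_CTRL layout (one 4-bit
	 * field per fixed counter) outside the kernel.  Not KVM code.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define FIXED_BITS_STRIDE	4
	#define FIXED_BITS_MASK		0xfULL
	#define FIXED_0_KERNEL		(1ULL << 0)	/* count in CPL 0 */
	#define FIXED_0_USER		(1ULL << 1)	/* count in CPL > 0 */
	#define FIXED_0_ENABLE_PMI	(1ULL << 3)	/* raise PMI on overflow */

	/* Extract the 4-bit control field for fixed counter 'idx'. */
	static inline uint64_t fixed_ctrl_field(uint64_t ctrl, unsigned int idx)
	{
		return (ctrl >> (idx * FIXED_BITS_STRIDE)) & FIXED_BITS_MASK;
	}

	/* Shift per-counter control bits into place for counter 'idx'. */
	static inline uint64_t fixed_bits_by_idx(unsigned int idx, uint64_t bits)
	{
		return bits << (idx * FIXED_BITS_STRIDE);
	}

	int main(void)
	{
		/* Counter 0 counts kernel+user, counter 1 counts user and sends a PMI. */
		uint64_t fixed_ctr_ctrl =
			fixed_bits_by_idx(0, FIXED_0_KERNEL | FIXED_0_USER) |
			fixed_bits_by_idx(1, FIXED_0_USER | FIXED_0_ENABLE_PMI);

		for (unsigned int i = 0; i < 2; i++) {
			uint64_t f = fixed_ctrl_field(fixed_ctr_ctrl, i);

			printf("fixed ctr %u: kernel=%d user=%d pmi=%d\n", i,
			       !!(f & FIXED_0_KERNEL), !!(f & FIXED_0_USER),
			       !!(f & FIXED_0_ENABLE_PMI));
		}
		return 0;
	}
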
@@ -533,12 +533,16 @@ struct kvm_pmc {
 };
 
 /* More counters may conflict with other existing Architectural MSRs */
-#define KVM_INTEL_PMC_MAX_GENERIC	8
-#define MSR_ARCH_PERFMON_PERFCTR_MAX	(MSR_ARCH_PERFMON_PERFCTR0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
-#define MSR_ARCH_PERFMON_EVENTSEL_MAX	(MSR_ARCH_PERFMON_EVENTSEL0 + KVM_INTEL_PMC_MAX_GENERIC - 1)
-#define KVM_PMC_MAX_FIXED	3
-#define MSR_ARCH_PERFMON_FIXED_CTR_MAX	(MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_PMC_MAX_FIXED - 1)
-#define KVM_AMD_PMC_MAX_GENERIC	6
+#define KVM_MAX(a, b)	((a) >= (b) ? (a) : (b))
+#define KVM_MAX_NR_INTEL_GP_COUNTERS	8
+#define KVM_MAX_NR_AMD_GP_COUNTERS	6
+#define KVM_MAX_NR_GP_COUNTERS	KVM_MAX(KVM_MAX_NR_INTEL_GP_COUNTERS, \
+					KVM_MAX_NR_AMD_GP_COUNTERS)
+
+#define KVM_MAX_NR_INTEL_FIXED_COUTNERS	3
+#define KVM_MAX_NR_AMD_FIXED_COUTNERS	0
+#define KVM_MAX_NR_FIXED_COUNTERS	KVM_MAX(KVM_MAX_NR_INTEL_FIXED_COUTNERS, \
+					KVM_MAX_NR_AMD_FIXED_COUTNERS)
 
 struct kvm_pmu {
 	u8 version;
@@ -546,16 +550,16 @@ struct kvm_pmu {
 	unsigned nr_arch_fixed_counters;
 	unsigned available_event_types;
 	u64 fixed_ctr_ctrl;
-	u64 fixed_ctr_ctrl_mask;
+	u64 fixed_ctr_ctrl_rsvd;
 	u64 global_ctrl;
 	u64 global_status;
 	u64 counter_bitmask[2];
-	u64 global_ctrl_mask;
-	u64 global_status_mask;
+	u64 global_ctrl_rsvd;
+	u64 global_status_rsvd;
 	u64 reserved_bits;
 	u64 raw_event_mask;
-	struct kvm_pmc gp_counters[KVM_INTEL_PMC_MAX_GENERIC];
-	struct kvm_pmc fixed_counters[KVM_PMC_MAX_FIXED];
+	struct kvm_pmc gp_counters[KVM_MAX_NR_GP_COUNTERS];
+	struct kvm_pmc fixed_counters[KVM_MAX_NR_FIXED_COUNTERS];
 
 	/*
 	 * Overlay the bitmap with a 64-bit atomic so that all bits can be
@@ -571,9 +575,9 @@ struct kvm_pmu {
 	u64 ds_area;
 	u64 pebs_enable;
-	u64 pebs_enable_mask;
+	u64 pebs_enable_rsvd;
 	u64 pebs_data_cfg;
-	u64 pebs_data_cfg_mask;
+	u64 pebs_data_cfg_rsvd;
 
 	/*
 	 * If a guest counter is cross-mapped to host counter with different
...
@@ -34,16 +34,16 @@ EXPORT_SYMBOL_GPL(kvm_pmu_eventsel);
 
 /* Precise Distribution of Instructions Retired (PDIR) */
 static const struct x86_cpu_id vmx_pebs_pdir_cpu[] = {
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, NULL),
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, NULL),
+	X86_MATCH_VFM(INTEL_ICELAKE_D, NULL),
+	X86_MATCH_VFM(INTEL_ICELAKE_X, NULL),
 	/* Instruction-Accurate PDIR (PDIR++) */
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
 	{}
 };
 
 /* Precise Distribution (PDist) */
 static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, NULL),
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, NULL),
 	{}
 };
 
@@ -69,7 +69,7 @@ static const struct x86_cpu_id vmx_pebs_pdist_cpu[] = {
  * code. Each pmc, stored in kvm_pmc.idx field, is unique across
  * all perf counters (both gp and fixed). The mapping relationship
  * between pmc and perf counters is as the following:
- * * Intel: [0 .. KVM_INTEL_PMC_MAX_GENERIC-1] <=> gp counters
+ * * Intel: [0 .. KVM_MAX_NR_INTEL_GP_COUNTERS-1] <=> gp counters
  *          [KVM_FIXED_PMC_BASE_IDX .. KVM_FIXED_PMC_BASE_IDX + 2] <=> fixed
  * * AMD:   [0 .. AMD64_NUM_COUNTERS-1] and, for families 15H
  *          and later, [0 .. AMD64_NUM_COUNTERS_CORE-1] <=> gp counters
@@ -469,11 +469,11 @@ static int reprogram_counter(struct kvm_pmc *pmc)
 	if (pmc_is_fixed(pmc)) {
 		fixed_ctr_ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl,
 						  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
-		if (fixed_ctr_ctrl & 0x1)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_KERNEL)
 			eventsel |= ARCH_PERFMON_EVENTSEL_OS;
-		if (fixed_ctr_ctrl & 0x2)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_USER)
 			eventsel |= ARCH_PERFMON_EVENTSEL_USR;
-		if (fixed_ctr_ctrl & 0x8)
+		if (fixed_ctr_ctrl & INTEL_FIXED_0_ENABLE_PMI)
 			eventsel |= ARCH_PERFMON_EVENTSEL_INT;
 		new_config = (u64)fixed_ctr_ctrl;
 	}
@@ -681,13 +681,13 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		if (!msr_info->host_initiated)
 			break;
 
-		if (data & pmu->global_status_mask)
+		if (data & pmu->global_status_rsvd)
 			return 1;
 
 		pmu->global_status = data;
 		break;
 	case MSR_AMD64_PERF_CNTR_GLOBAL_CTL:
-		data &= ~pmu->global_ctrl_mask;
+		data &= ~pmu->global_ctrl_rsvd;
 		fallthrough;
 	case MSR_CORE_PERF_GLOBAL_CTRL:
 		if (!kvm_valid_perf_global_ctrl(pmu, data))
@@ -704,7 +704,7 @@ int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		 * GLOBAL_OVF_CTRL, a.k.a. GLOBAL STATUS_RESET, clears bits in
 		 * GLOBAL_STATUS, and so the set of reserved bits is the same.
 		 */
-		if (data & pmu->global_status_mask)
+		if (data & pmu->global_status_rsvd)
 			return 1;
 		fallthrough;
 	case MSR_AMD64_PERF_CNTR_GLOBAL_STATUS_CLR:
@@ -768,11 +768,11 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->reserved_bits = 0xffffffff00200000ull;
 	pmu->raw_event_mask = X86_RAW_EVENT_MASK;
-	pmu->global_ctrl_mask = ~0ull;
-	pmu->global_status_mask = ~0ull;
-	pmu->fixed_ctr_ctrl_mask = ~0ull;
-	pmu->pebs_enable_mask = ~0ull;
-	pmu->pebs_data_cfg_mask = ~0ull;
+	pmu->global_ctrl_rsvd = ~0ull;
+	pmu->global_status_rsvd = ~0ull;
+	pmu->fixed_ctr_ctrl_rsvd = ~0ull;
+	pmu->pebs_enable_rsvd = ~0ull;
+	pmu->pebs_data_cfg_rsvd = ~0ull;
 	bitmap_zero(pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX);
 
 	if (!vcpu->kvm->arch.enable_pmu)
@@ -846,8 +846,8 @@ static inline bool cpl_is_matched(struct kvm_pmc *pmc)
 	} else {
 		config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl,
 					  pmc->idx - KVM_FIXED_PMC_BASE_IDX);
-		select_os = config & 0x1;
-		select_user = config & 0x2;
+		select_os = config & INTEL_FIXED_0_KERNEL;
+		select_user = config & INTEL_FIXED_0_USER;
 	}
 
 	/*
...
@@ -14,7 +14,8 @@
 					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)
 
 /* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
-#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
+#define fixed_ctrl_field(ctrl_reg, idx) \
+	(((ctrl_reg) >> ((idx) * INTEL_FIXED_BITS_STRIDE)) & INTEL_FIXED_BITS_MASK)
 
 #define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
 #define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
@@ -129,7 +130,7 @@ static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
 static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
 					      u64 data)
 {
-	return !(pmu->global_ctrl_mask & data);
+	return !(pmu->global_ctrl_rsvd & data);
 }
 
 /* returns general purpose PMC with the specified MSR. Note that it can be
@@ -170,7 +171,8 @@ static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
 	if (pmc_is_fixed(pmc))
 		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
-					pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;
+					pmc->idx - KVM_FIXED_PMC_BASE_IDX) &
+					(INTEL_FIXED_0_KERNEL | INTEL_FIXED_0_USER);
 
 	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
 }
@@ -217,7 +219,7 @@ static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
 	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
 					  pmu_ops->MAX_NR_GP_COUNTERS);
 	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
-					     KVM_PMC_MAX_FIXED);
+					     KVM_MAX_NR_FIXED_COUNTERS);
 
 	kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
 		perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
...
@@ -199,8 +199,8 @@ static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
 					 kvm_pmu_cap.num_counters_gp);
 
 	if (pmu->version > 1) {
-		pmu->global_ctrl_mask = ~((1ull << pmu->nr_arch_gp_counters) - 1);
-		pmu->global_status_mask = pmu->global_ctrl_mask;
+		pmu->global_ctrl_rsvd = ~((1ull << pmu->nr_arch_gp_counters) - 1);
+		pmu->global_status_rsvd = pmu->global_ctrl_rsvd;
 	}
 
 	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
@@ -217,10 +217,9 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	int i;
 
-	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
-	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);
+	BUILD_BUG_ON(KVM_MAX_NR_AMD_GP_COUNTERS > AMD64_NUM_COUNTERS_CORE);
 
-	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC ; i++) {
+	for (i = 0; i < KVM_MAX_NR_AMD_GP_COUNTERS; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
@@ -238,6 +237,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
 	.refresh = amd_pmu_refresh,
 	.init = amd_pmu_init,
 	.EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
-	.MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
+	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_AMD_GP_COUNTERS,
 	.MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
 };
@@ -348,14 +348,14 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 
 	switch (msr) {
 	case MSR_CORE_PERF_FIXED_CTR_CTRL:
-		if (data & pmu->fixed_ctr_ctrl_mask)
+		if (data & pmu->fixed_ctr_ctrl_rsvd)
 			return 1;
 
 		if (pmu->fixed_ctr_ctrl != data)
 			reprogram_fixed_counters(pmu, data);
 		break;
 	case MSR_IA32_PEBS_ENABLE:
-		if (data & pmu->pebs_enable_mask)
+		if (data & pmu->pebs_enable_rsvd)
 			return 1;
 
 		if (pmu->pebs_enable != data) {
@@ -371,7 +371,7 @@ static int intel_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		pmu->ds_area = data;
 		break;
 	case MSR_PEBS_DATA_CFG:
-		if (data & pmu->pebs_data_cfg_mask)
+		if (data & pmu->pebs_data_cfg_rsvd)
 			return 1;
 
 		pmu->pebs_data_cfg = data;
@@ -436,8 +436,8 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
 	};
 	u64 eventsel;
 
-	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_PMC_MAX_FIXED);
-	BUILD_BUG_ON(index >= KVM_PMC_MAX_FIXED);
+	BUILD_BUG_ON(ARRAY_SIZE(fixed_pmc_perf_ids) != KVM_MAX_NR_INTEL_FIXED_COUTNERS);
+	BUILD_BUG_ON(index >= KVM_MAX_NR_INTEL_FIXED_COUTNERS);
 
 	/*
 	 * Yell if perf reports support for a fixed counter but perf doesn't
@@ -448,6 +448,14 @@ static __always_inline u64 intel_get_fixed_pmc_eventsel(unsigned int index)
 	return eventsel;
 }
 
+static void intel_pmu_enable_fixed_counter_bits(struct kvm_pmu *pmu, u64 bits)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
+		pmu->fixed_ctr_ctrl_rsvd &= ~intel_fixed_bits_by_idx(i, bits);
+}
+
 static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 {
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
@@ -456,8 +464,7 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	union cpuid10_eax eax;
 	union cpuid10_edx edx;
 	u64 perf_capabilities;
-	u64 counter_mask;
-	int i;
+	u64 counter_rsvd;
 
 	memset(&lbr_desc->records, 0, sizeof(lbr_desc->records));
 
@@ -501,22 +508,24 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 			((u64)1 << edx.split.bit_width_fixed) - 1;
 	}
 
-	for (i = 0; i < pmu->nr_arch_fixed_counters; i++)
-		pmu->fixed_ctr_ctrl_mask &= ~(0xbull << (i * 4));
-	counter_mask = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
+	intel_pmu_enable_fixed_counter_bits(pmu, INTEL_FIXED_0_KERNEL |
+						 INTEL_FIXED_0_USER |
+						 INTEL_FIXED_0_ENABLE_PMI);
+
+	counter_rsvd = ~(((1ull << pmu->nr_arch_gp_counters) - 1) |
 		(((1ull << pmu->nr_arch_fixed_counters) - 1) << KVM_FIXED_PMC_BASE_IDX));
-	pmu->global_ctrl_mask = counter_mask;
+	pmu->global_ctrl_rsvd = counter_rsvd;
 
 	/*
 	 * GLOBAL_STATUS and GLOBAL_OVF_CONTROL (a.k.a. GLOBAL_STATUS_RESET)
 	 * share reserved bit definitions.  The kernel just happens to use
 	 * OVF_CTRL for the names.
 	 */
-	pmu->global_status_mask = pmu->global_ctrl_mask
+	pmu->global_status_rsvd = pmu->global_ctrl_rsvd
 			& ~(MSR_CORE_PERF_GLOBAL_OVF_CTRL_OVF_BUF |
 			    MSR_CORE_PERF_GLOBAL_OVF_CTRL_COND_CHGD);
 	if (vmx_pt_mode_is_host_guest())
-		pmu->global_status_mask &=
+		pmu->global_status_rsvd &=
 			~MSR_CORE_PERF_GLOBAL_OVF_CTRL_TRACE_TOPA_PMI;
 
 	entry = kvm_find_cpuid_entry_index(vcpu, 7, 0);
@@ -544,15 +553,12 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 
 	if (perf_capabilities & PERF_CAP_PEBS_FORMAT) {
 		if (perf_capabilities & PERF_CAP_PEBS_BASELINE) {
-			pmu->pebs_enable_mask = counter_mask;
+			pmu->pebs_enable_rsvd = counter_rsvd;
 			pmu->reserved_bits &= ~ICL_EVENTSEL_ADAPTIVE;
-			for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
-				pmu->fixed_ctr_ctrl_mask &=
-					~(1ULL << (KVM_FIXED_PMC_BASE_IDX + i * 4));
-			}
-			pmu->pebs_data_cfg_mask = ~0xff00000full;
+			pmu->pebs_data_cfg_rsvd = ~0xff00000full;
+			intel_pmu_enable_fixed_counter_bits(pmu, ICL_FIXED_0_ADAPTIVE);
 		} else {
-			pmu->pebs_enable_mask =
+			pmu->pebs_enable_rsvd =
 				~((1ull << pmu->nr_arch_gp_counters) - 1);
 		}
 	}
@@ -564,14 +570,14 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 	struct lbr_desc *lbr_desc = vcpu_to_lbr_desc(vcpu);
 
-	for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
+	for (i = 0; i < KVM_MAX_NR_INTEL_GP_COUNTERS; i++) {
 		pmu->gp_counters[i].type = KVM_PMC_GP;
 		pmu->gp_counters[i].vcpu = vcpu;
 		pmu->gp_counters[i].idx = i;
 		pmu->gp_counters[i].current_config = 0;
 	}
 
-	for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
+	for (i = 0; i < KVM_MAX_NR_INTEL_FIXED_COUTNERS; i++) {
 		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
 		pmu->fixed_counters[i].vcpu = vcpu;
 		pmu->fixed_counters[i].idx = i + KVM_FIXED_PMC_BASE_IDX;
@@ -731,6 +737,6 @@ struct kvm_pmu_ops intel_pmu_ops __initdata = {
 	.deliver_pmi = intel_pmu_deliver_pmi,
 	.cleanup = intel_pmu_cleanup,
 	.EVENTSEL_EVENT = ARCH_PERFMON_EVENTSEL_EVENT,
-	.MAX_NR_GP_COUNTERS = KVM_INTEL_PMC_MAX_GENERIC,
+	.MAX_NR_GP_COUNTERS = KVM_MAX_NR_INTEL_GP_COUNTERS,
 	.MIN_NR_GP_COUNTERS = 1,
 };
@@ -2561,17 +2561,15 @@ static bool cpu_has_sgx(void)
  */
 static bool cpu_has_perf_global_ctrl_bug(void)
 {
-	if (boot_cpu_data.x86 == 0x6) {
-		switch (boot_cpu_data.x86_model) {
-		case INTEL_FAM6_NEHALEM_EP:	/* AAK155 */
-		case INTEL_FAM6_NEHALEM:	/* AAP115 */
-		case INTEL_FAM6_WESTMERE:	/* AAT100 */
-		case INTEL_FAM6_WESTMERE_EP:	/* BC86,AAY89,BD102 */
-		case INTEL_FAM6_NEHALEM_EX:	/* BA97 */
-			return true;
-		default:
-			break;
-		}
+	switch (boot_cpu_data.x86_vfm) {
+	case INTEL_NEHALEM_EP:		/* AAK155 */
+	case INTEL_NEHALEM:		/* AAP115 */
+	case INTEL_WESTMERE:		/* AAT100 */
+	case INTEL_WESTMERE_EP:		/* BC86,AAY89,BD102 */
+	case INTEL_NEHALEM_EX:		/* BA97 */
+		return true;
+	default:
+		break;
 	}
 
 	return false;
...
@@ -1448,10 +1448,10 @@ static const u32 msrs_to_save_pmu[] = {
 	MSR_ARCH_PERFMON_FIXED_CTR0, MSR_ARCH_PERFMON_FIXED_CTR1,
 	MSR_ARCH_PERFMON_FIXED_CTR0 + 2,
 	MSR_CORE_PERF_FIXED_CTR_CTRL, MSR_CORE_PERF_GLOBAL_STATUS,
-	MSR_CORE_PERF_GLOBAL_CTRL, MSR_CORE_PERF_GLOBAL_OVF_CTRL,
+	MSR_CORE_PERF_GLOBAL_CTRL,
 	MSR_IA32_PEBS_ENABLE, MSR_IA32_DS_AREA, MSR_PEBS_DATA_CFG,
 
-	/* This part of MSRs should match KVM_INTEL_PMC_MAX_GENERIC. */
+	/* This part of MSRs should match KVM_MAX_NR_INTEL_GP_COUNTERS. */
 	MSR_ARCH_PERFMON_PERFCTR0, MSR_ARCH_PERFMON_PERFCTR1,
 	MSR_ARCH_PERFMON_PERFCTR0 + 2, MSR_ARCH_PERFMON_PERFCTR0 + 3,
 	MSR_ARCH_PERFMON_PERFCTR0 + 4, MSR_ARCH_PERFMON_PERFCTR0 + 5,
@@ -1464,7 +1464,7 @@ static const u32 msrs_to_save_pmu[] = {
 	MSR_K7_EVNTSEL0, MSR_K7_EVNTSEL1, MSR_K7_EVNTSEL2, MSR_K7_EVNTSEL3,
 	MSR_K7_PERFCTR0, MSR_K7_PERFCTR1, MSR_K7_PERFCTR2, MSR_K7_PERFCTR3,
 
-	/* This part of MSRs should match KVM_AMD_PMC_MAX_GENERIC. */
+	/* This part of MSRs should match KVM_MAX_NR_AMD_GP_COUNTERS. */
 	MSR_F15H_PERF_CTL0, MSR_F15H_PERF_CTL1, MSR_F15H_PERF_CTL2,
 	MSR_F15H_PERF_CTL3, MSR_F15H_PERF_CTL4, MSR_F15H_PERF_CTL5,
 	MSR_F15H_PERF_CTR0, MSR_F15H_PERF_CTR1, MSR_F15H_PERF_CTR2,
@@ -7439,17 +7439,20 @@ static void kvm_probe_msr_to_save(u32 msr_index)
 		    intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2))
 			return;
 		break;
-	case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR_MAX:
+	case MSR_ARCH_PERFMON_PERFCTR0 ...
+	     MSR_ARCH_PERFMON_PERFCTR0 + KVM_MAX_NR_GP_COUNTERS - 1:
 		if (msr_index - MSR_ARCH_PERFMON_PERFCTR0 >=
 		    kvm_pmu_cap.num_counters_gp)
 			return;
 		break;
-	case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL_MAX:
+	case MSR_ARCH_PERFMON_EVENTSEL0 ...
+	     MSR_ARCH_PERFMON_EVENTSEL0 + KVM_MAX_NR_GP_COUNTERS - 1:
 		if (msr_index - MSR_ARCH_PERFMON_EVENTSEL0 >=
 		    kvm_pmu_cap.num_counters_gp)
 			return;
 		break;
-	case MSR_ARCH_PERFMON_FIXED_CTR0 ... MSR_ARCH_PERFMON_FIXED_CTR_MAX:
+	case MSR_ARCH_PERFMON_FIXED_CTR0 ...
+	     MSR_ARCH_PERFMON_FIXED_CTR0 + KVM_MAX_NR_FIXED_COUNTERS - 1:
 		if (msr_index - MSR_ARCH_PERFMON_FIXED_CTR0 >=
 		    kvm_pmu_cap.num_counters_fixed)
 			return;
@@ -7480,7 +7483,7 @@ static void kvm_init_msr_lists(void)
 {
 	unsigned i;
 
-	BUILD_BUG_ON_MSG(KVM_PMC_MAX_FIXED != 3,
+	BUILD_BUG_ON_MSG(KVM_MAX_NR_FIXED_COUNTERS != 3,
 			 "Please update the fixed PMCs in msrs_to_save_pmu[]");
 
 	num_msrs_to_save = 0;
...