Commit a7557539 authored by Paolo Bonzini

KVM: x86/pmu: preserve IA32_PERF_CAPABILITIES across CPUID refresh

Once MSR_IA32_PERF_CAPABILITIES is changed via vmx_set_msr(), the
value should not be changed by cpuid(). To ensure that the new value
is kept, the default initialization path is moved to intel_pmu_init().
The effective value of the MSR will be 0 if PDCM is clear, however.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 252e365e
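For illustration only, the ordering described in the commit message can be sketched as a small userspace toy model, not kernel code: the default capability value is set once at init (as intel_pmu_init() now does), a later MSR write (the vmx_set_msr() path) overrides it, a subsequent CPUID refresh leaves it alone, and the effective value collapses to 0 when the guest lacks PDCM. The helper names pmu_init, set_msr_perf_caps, pmu_refresh, and get_effective_caps are made up for this sketch and are not KVM's API; PMU_CAP_FW_WRITES (full-width counter writes, bit 13) is used only as an example value.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PMU_CAP_FW_WRITES (1ULL << 13)  /* full-width counter writes, example bit */

struct vcpu_model {
        uint64_t perf_capabilities;  /* stored MSR value */
        bool guest_has_pdcm;         /* CPUID.PDCM as seen by the guest */
};

/* Default taken from host capabilities once, at init. */
static void pmu_init(struct vcpu_model *v, uint64_t host_default_caps)
{
        v->perf_capabilities = host_default_caps;
}

/* A later MSR write overrides the default. */
static void set_msr_perf_caps(struct vcpu_model *v, uint64_t val)
{
        v->perf_capabilities = val;
}

/* A CPUID refresh no longer resets the stored value (the lines removed below). */
static void pmu_refresh(struct vcpu_model *v)
{
        (void)v;  /* intentionally leaves perf_capabilities untouched */
}

/* Effective value is 0 if PDCM is clear, per the commit message. */
static uint64_t get_effective_caps(const struct vcpu_model *v)
{
        return v->guest_has_pdcm ? v->perf_capabilities : 0;
}

int main(void)
{
        struct vcpu_model v = { .guest_has_pdcm = true };

        pmu_init(&v, PMU_CAP_FW_WRITES);  /* default from host */
        set_msr_perf_caps(&v, 0);         /* MSR write overrides the default */
        pmu_refresh(&v);                  /* CPUID update happens afterwards */

        /* Prints 0: the override survived the refresh. */
        printf("effective caps: %#llx\n",
               (unsigned long long)get_effective_caps(&v));
        return 0;
}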
@@ -152,12 +152,17 @@ static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
 	return &counters[array_index_nospec(idx, num_counters)];
 }
 
-static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+static inline u64 vcpu_get_perf_capabilities(struct kvm_vcpu *vcpu)
 {
 	if (!guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
-		return false;
+		return 0;
 
-	return vcpu->arch.perf_capabilities & PMU_CAP_FW_WRITES;
+	return vcpu->arch.perf_capabilities;
+}
+
+static inline bool fw_writes_is_enabled(struct kvm_vcpu *vcpu)
+{
+	return (vcpu_get_perf_capabilities(vcpu) & PMU_CAP_FW_WRITES) != 0;
 }
 
 static inline struct kvm_pmc *get_fw_gp_pmc(struct kvm_pmu *pmu, u32 msr)
@@ -327,7 +332,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
 	pmu->version = 0;
 	pmu->reserved_bits = 0xffffffff00200000ull;
-	vcpu->arch.perf_capabilities = 0;
 
 	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
 	if (!entry)
@@ -340,8 +344,6 @@ static void intel_pmu_refresh(struct kvm_vcpu *vcpu)
 		return;
 
 	perf_get_x86_pmu_capability(&x86_pmu);
-	if (guest_cpuid_has(vcpu, X86_FEATURE_PDCM))
-		vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
 
 	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
 					 x86_pmu.num_counters_gp);
@@ -405,6 +407,8 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
 		pmu->fixed_counters[i].current_config = 0;
 	}
+
+	vcpu->arch.perf_capabilities = vmx_get_perf_capabilities();
 }
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)