Commit 7a5ee6ed authored by Chenyi Qiang, committed by Paolo Bonzini

KVM: X86: Fix initialization of MSR lists

The three MSR lists (msrs_to_save[], emulated_msrs[] and
msr_based_features[]) are global arrays in kvm.ko. They are adjusted
(supported MSRs are copied forward to overwrite unsupported ones) when
kvm-{intel,amd}.ko is loaded, but they are not reset to their initial
values when kvm-{intel,amd}.ko is removed. On the next load,
kvm-{intel,amd}.ko therefore operates on the already-modified arrays,
with some MSRs lost and others duplicated.

So define three constant arrays to hold the initial MSR lists, and
initialize msrs_to_save[], emulated_msrs[] and msr_based_features[]
from those constant arrays at module load time.

Cc: stable@vger.kernel.org
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Chenyi Qiang <chenyi.qiang@intel.com>
[Remove now useless conditionals. - Paolo]
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent e2d3fcaf
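
Before the diff itself, a minimal userspace sketch of the pattern the commit adopts: a const master list plus a mutable filtered copy that is rebuilt from scratch on every init, instead of compacting a single global array in place (which leaves stale state behind across module reloads). All names here are hypothetical stand-ins, not the kernel code: supported() plays the role of rdmsr_safe()/has_emulated_msr(), and init_msr_list() mirrors what kvm_init_msr_list() does; unlike the kernel patch, this sketch also resets its counter explicitly to keep the demo idempotent.

/* Standalone sketch (plain C, not kernel code) of "const master list +
 * rebuilt filtered copy". Compile with: cc -o demo demo.c */
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Const master list: survives any number of "module reloads" unchanged. */
static const unsigned int msrs_all[] = { 0x174, 0x175, 0x176, 0xd90 };

/* Mutable filtered copy, rebuilt from msrs_all[] on every init. */
static unsigned int msrs[ARRAY_SIZE(msrs_all)];
static unsigned int num_msrs;

static int supported(unsigned int msr)
{
	return msr != 0xd90;	/* pretend one MSR is unsupported on this host */
}

static void init_msr_list(void)
{
	unsigned int i;

	num_msrs = 0;	/* reset stale state so repeated init is idempotent */
	for (i = 0; i < ARRAY_SIZE(msrs_all); i++)
		if (supported(msrs_all[i]))
			msrs[num_msrs++] = msrs_all[i];
}

int main(void)
{
	init_msr_list();
	init_msr_list();	/* a second "insmod": same result as the first */

	for (unsigned int i = 0; i < num_msrs; i++)
		printf("0x%x\n", msrs[i]);	/* 0x174 0x175 0x176 */
	return 0;
}

The buggy pattern this replaces did msrs_to_save[j] = msrs_to_save[i] on the one global array, so after rmmod/insmod the second pass filtered an array that had already been compacted, dropping tail entries and duplicating others.
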
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1132,13 +1132,15 @@ EXPORT_SYMBOL_GPL(kvm_rdpmc);
 /*
  * List of msr numbers which we expose to userspace through KVM_GET_MSRS
  * and KVM_SET_MSRS, and KVM_GET_MSR_INDEX_LIST.
  *
- * This list is modified at module load time to reflect the
+ * The three MSR lists(msrs_to_save, emulated_msrs, msr_based_features)
+ * extract the supported MSRs from the related const lists.
+ * msrs_to_save is selected from the msrs_to_save_all to reflect the
  * capabilities of the host cpu. This capabilities test skips MSRs that are
- * kvm-specific. Those are put in emulated_msrs; filtering of emulated_msrs
+ * kvm-specific. Those are put in emulated_msrs_all; filtering of emulated_msrs
  * may depend on host virtualization features rather than host cpu features.
  */
-static u32 msrs_to_save[] = {
+static const u32 msrs_to_save_all[] = {
 	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
 	MSR_STAR,
 #ifdef CONFIG_X86_64
@@ -1179,9 +1181,10 @@ static u32 msrs_to_save[] = {
 	MSR_ARCH_PERFMON_EVENTSEL0 + 16, MSR_ARCH_PERFMON_EVENTSEL0 + 17,
 };
 
+static u32 msrs_to_save[ARRAY_SIZE(msrs_to_save_all)];
 static unsigned num_msrs_to_save;
 
-static u32 emulated_msrs[] = {
+static const u32 emulated_msrs_all[] = {
 	MSR_KVM_SYSTEM_TIME, MSR_KVM_WALL_CLOCK,
 	MSR_KVM_SYSTEM_TIME_NEW, MSR_KVM_WALL_CLOCK_NEW,
 	HV_X64_MSR_GUEST_OS_ID, HV_X64_MSR_HYPERCALL,
@@ -1220,7 +1223,7 @@ static u32 emulated_msrs[] = {
 	 * by arch/x86/kvm/vmx/nested.c based on CPUID or other MSRs.
 	 * We always support the "true" VMX control MSRs, even if the host
 	 * processor does not, so I am putting these registers here rather
-	 * than in msrs_to_save.
+	 * than in msrs_to_save_all.
 	 */
 	MSR_IA32_VMX_BASIC,
 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
@@ -1239,13 +1242,14 @@ static u32 emulated_msrs[] = {
 	MSR_KVM_POLL_CONTROL,
 };
 
+static u32 emulated_msrs[ARRAY_SIZE(emulated_msrs_all)];
 static unsigned num_emulated_msrs;
 
 /*
  * List of msr numbers which are used to expose MSR-based features that
  * can be used by a hypervisor to validate requested CPU features.
  */
-static u32 msr_based_features[] = {
+static const u32 msr_based_features_all[] = {
 	MSR_IA32_VMX_BASIC,
 	MSR_IA32_VMX_TRUE_PINBASED_CTLS,
 	MSR_IA32_VMX_PINBASED_CTLS,
@@ -1270,6 +1274,7 @@ static u32 msr_based_features[] = {
 	MSR_IA32_ARCH_CAPABILITIES,
 };
 
+static u32 msr_based_features[ARRAY_SIZE(msr_based_features_all)];
 static unsigned int num_msr_based_features;
 
 static u64 kvm_get_arch_capabilities(void)
@@ -5090,22 +5095,22 @@ static void kvm_init_msr_list(void)
 {
 	struct x86_pmu_capability x86_pmu;
 	u32 dummy[2];
-	unsigned i, j;
+	unsigned i;
 
 	BUILD_BUG_ON_MSG(INTEL_PMC_MAX_FIXED != 4,
-			 "Please update the fixed PMCs in msrs_to_save[]");
+			 "Please update the fixed PMCs in msrs_to_saved_all[]");
 
 	perf_get_x86_pmu_capability(&x86_pmu);
 
-	for (i = j = 0; i < ARRAY_SIZE(msrs_to_save); i++) {
-		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
+	for (i = 0; i < ARRAY_SIZE(msrs_to_save_all); i++) {
+		if (rdmsr_safe(msrs_to_save_all[i], &dummy[0], &dummy[1]) < 0)
 			continue;
 
 		/*
 		 * Even MSRs that are valid in the host may not be exposed
 		 * to the guests in some cases.
 		 */
-		switch (msrs_to_save[i]) {
+		switch (msrs_to_save_all[i]) {
 		case MSR_IA32_BNDCFGS:
 			if (!kvm_mpx_supported())
 				continue;
@@ -5133,17 +5138,17 @@ static void kvm_init_msr_list(void)
 			break;
 		case MSR_IA32_RTIT_ADDR0_A ... MSR_IA32_RTIT_ADDR3_B: {
 			if (!kvm_x86_ops->pt_supported() ||
-				msrs_to_save[i] - MSR_IA32_RTIT_ADDR0_A >=
+				msrs_to_save_all[i] - MSR_IA32_RTIT_ADDR0_A >=
 				intel_pt_validate_hw_cap(PT_CAP_num_address_ranges) * 2)
 				continue;
 			break;
 		case MSR_ARCH_PERFMON_PERFCTR0 ... MSR_ARCH_PERFMON_PERFCTR0 + 17:
-			if (msrs_to_save[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
+			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_PERFCTR0 >=
 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
 				continue;
 			break;
 		case MSR_ARCH_PERFMON_EVENTSEL0 ... MSR_ARCH_PERFMON_EVENTSEL0 + 17:
-			if (msrs_to_save[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
+			if (msrs_to_save_all[i] - MSR_ARCH_PERFMON_EVENTSEL0 >=
 			    min(INTEL_PMC_MAX_GENERIC, x86_pmu.num_counters_gp))
 				continue;
 		}
@@ -5151,34 +5156,25 @@ static void kvm_init_msr_list(void)
 			break;
 		}
 
-		if (j < i)
-			msrs_to_save[j] = msrs_to_save[i];
-		j++;
+		msrs_to_save[num_msrs_to_save++] = msrs_to_save_all[i];
 	}
-	num_msrs_to_save = j;
 
-	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+	for (i = 0; i < ARRAY_SIZE(emulated_msrs_all); i++) {
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs_all[i]))
 			continue;
 
-		if (j < i)
-			emulated_msrs[j] = emulated_msrs[i];
-		j++;
+		emulated_msrs[num_emulated_msrs++] = emulated_msrs_all[i];
 	}
-	num_emulated_msrs = j;
 
-	for (i = j = 0; i < ARRAY_SIZE(msr_based_features); i++) {
+	for (i = 0; i < ARRAY_SIZE(msr_based_features_all); i++) {
 		struct kvm_msr_entry msr;
 
-		msr.index = msr_based_features[i];
+		msr.index = msr_based_features_all[i];
 		if (kvm_get_msr_feature(&msr))
 			continue;
 
-		if (j < i)
-			msr_based_features[j] = msr_based_features[i];
-		j++;
+		msr_based_features[num_msr_based_features++] = msr_based_features_all[i];
 	}
-	num_msr_based_features = j;
 }
 
 static int vcpu_mmio_write(struct kvm_vcpu *vcpu, gpa_t addr, int len,