Commit aedbaf4f authored by Xiaoyao Li, committed by Paolo Bonzini

KVM: x86: Extract kvm_update_cpuid_runtime() from kvm_update_cpuid()

Besides being called from kvm_vcpu_ioctl_set_cpuid*(), kvm_update_cpuid()
is also called from five other places in x86.c and one in lapic.c. All
six of those call sites only need the part that updates the guest's
CPUID bits (OSXSAVE, OSPKE, APIC, KVM_FEATURE_PV_UNHALT, ...) based on
runtime vCPU state, so extract that part into a separate
kvm_update_cpuid_runtime().
Signed-off-by: Xiaoyao Li <xiaoyao.li@intel.com>
Message-Id: <20200709043426.92712-3-xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent a76733a9
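Before the diff, a minimal standalone sketch of the call pattern this split produces. Everything here (the vcpu_stub type, the set_cr4/set_cpuid_ioctl wrappers, the printf bodies) is invented for illustration and is not kernel code; only the division of labor between the two update functions mirrors the patch.

#include <stdio.h>

/* Hypothetical stand-in for struct kvm_vcpu (illustration only). */
struct vcpu_stub {
	unsigned long cr4;
};

/* Plays the role of kvm_update_cpuid_runtime(): refresh only the CPUID
 * bits derived from runtime vCPU state (OSXSAVE, OSPKE, APIC, ...). */
static void update_cpuid_runtime(struct vcpu_stub *v)
{
	printf("refresh runtime CPUID bits (cr4=%#lx)\n", v->cr4);
}

/* Plays the role of the remaining kvm_update_cpuid(): recompute cached
 * state such as guest_supported_xcr0, the timer mode mask, maxphyaddr. */
static void update_cpuid(struct vcpu_stub *v)
{
	(void)v;
	printf("recompute cached CPUID-derived state\n");
}

/* Runtime-state writes (CR4, XCR0, APIC base, MISC_ENABLE) now need
 * only the cheap runtime refresh... */
static void set_cr4(struct vcpu_stub *v, unsigned long cr4)
{
	v->cr4 = cr4;
	update_cpuid_runtime(v);
}

/* ...while the set_cpuid ioctls, where the CPUID table itself changes,
 * call both, as the patch does. */
static void set_cpuid_ioctl(struct vcpu_stub *v)
{
	update_cpuid_runtime(v);
	update_cpuid(v);
}

int main(void)
{
	struct vcpu_stub v = { 0 };

	set_cr4(&v, 0x40000UL);	/* e.g. guest sets CR4.OSXSAVE (bit 18) */
	set_cpuid_ioctl(&v);
	return 0;
}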
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -73,10 +73,9 @@ static int kvm_check_cpuid(struct kvm_vcpu *vcpu)
 	return 0;
 }
 
-void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
-	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
 	if (best) {
@@ -89,28 +88,14 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
 			   vcpu->arch.apic_base & MSR_IA32_APICBASE_ENABLE);
 	}
 
-	if (best && apic) {
-		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
-			apic->lapic_timer.timer_mode_mask = 3 << 17;
-		else
-			apic->lapic_timer.timer_mode_mask = 1 << 17;
-
-		kvm_apic_set_version(vcpu);
-	}
-
 	best = kvm_find_cpuid_entry(vcpu, 7, 0);
 	if (best && boot_cpu_has(X86_FEATURE_PKU) && best->function == 0x7)
 		cpuid_entry_change(best, X86_FEATURE_OSPKE,
 				   kvm_read_cr4_bits(vcpu, X86_CR4_PKE));
 
 	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
-	if (!best) {
-		vcpu->arch.guest_supported_xcr0 = 0;
-	} else {
-		vcpu->arch.guest_supported_xcr0 =
-			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
+	if (best)
 		best->ebx = xstate_required_size(vcpu->arch.xcr0, false);
-	}
 
 	best = kvm_find_cpuid_entry(vcpu, 0xD, 1);
 	if (best && (cpuid_entry_has(best, X86_FEATURE_XSAVES) ||
@@ -129,6 +114,29 @@ void kvm_update_cpuid(struct kvm_vcpu *vcpu)
 					   vcpu->arch.ia32_misc_enable_msr &
 					   MSR_IA32_MISC_ENABLE_MWAIT);
 	}
+}
+
+static void kvm_update_cpuid(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 1, 0);
+
+	if (best && apic) {
+		if (cpuid_entry_has(best, X86_FEATURE_TSC_DEADLINE_TIMER))
+			apic->lapic_timer.timer_mode_mask = 3 << 17;
+		else
+			apic->lapic_timer.timer_mode_mask = 1 << 17;
+
+		kvm_apic_set_version(vcpu);
+	}
+
+	best = kvm_find_cpuid_entry(vcpu, 0xD, 0);
+	if (!best)
+		vcpu->arch.guest_supported_xcr0 = 0;
+	else
+		vcpu->arch.guest_supported_xcr0 =
+			(best->eax | ((u64)best->edx << 32)) & supported_xcr0;
 
 	/* Note, maxphyaddr must be updated before tdp_level. */
 	vcpu->arch.maxphyaddr = cpuid_query_maxphyaddr(vcpu);
@@ -221,6 +229,7 @@ int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
 
 	cpuid_fix_nx_cap(vcpu);
 	kvm_x86_ops.cpuid_update(vcpu);
+	kvm_update_cpuid_runtime(vcpu);
 	kvm_update_cpuid(vcpu);
 
 	kvfree(cpuid_entries);
@@ -249,6 +258,7 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
 	}
 
 	kvm_x86_ops.cpuid_update(vcpu);
+	kvm_update_cpuid_runtime(vcpu);
 	kvm_update_cpuid(vcpu);
 out:
 	return r;
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -9,7 +9,7 @@
 extern u32 kvm_cpu_caps[NCAPINTS] __read_mostly;
 
 void kvm_set_cpu_caps(void);
-void kvm_update_cpuid(struct kvm_vcpu *vcpu);
+void kvm_update_cpuid_runtime(struct kvm_vcpu *vcpu);
 struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
 					      u32 function, u32 index);
 int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -2230,7 +2230,7 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 	vcpu->arch.apic_base = value;
 
 	if ((old_value ^ value) & MSR_IA32_APICBASE_ENABLE)
-		kvm_update_cpuid(vcpu);
+		kvm_update_cpuid_runtime(vcpu);
 
 	if (!apic)
 		return;
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -940,7 +940,7 @@ static int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr)
 	vcpu->arch.xcr0 = xcr0;
 
 	if ((xcr0 ^ old_xcr0) & XFEATURE_MASK_EXTEND)
-		kvm_update_cpuid(vcpu);
+		kvm_update_cpuid_runtime(vcpu);
 	return 0;
 }
 
@@ -1004,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		kvm_mmu_reset_context(vcpu);
 
 	if ((cr4 ^ old_cr4) & (X86_CR4_OSXSAVE | X86_CR4_PKE))
-		kvm_update_cpuid(vcpu);
+		kvm_update_cpuid_runtime(vcpu);
 
 	return 0;
 }
 
@@ -2916,7 +2916,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 			if (!guest_cpuid_has(vcpu, X86_FEATURE_XMM3))
 				return 1;
 			vcpu->arch.ia32_misc_enable_msr = data;
-			kvm_update_cpuid(vcpu);
+			kvm_update_cpuid_runtime(vcpu);
 		} else {
 			vcpu->arch.ia32_misc_enable_msr = data;
 		}
@@ -8170,7 +8170,7 @@ static void enter_smm(struct kvm_vcpu *vcpu)
 	kvm_x86_ops.set_efer(vcpu, 0);
 #endif
 
-	kvm_update_cpuid(vcpu);
+	kvm_update_cpuid_runtime(vcpu);
 	kvm_mmu_reset_context(vcpu);
 }
 
@@ -9194,7 +9194,7 @@ static int __set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
 				(X86_CR4_OSXSAVE | X86_CR4_PKE));
 	kvm_x86_ops.set_cr4(vcpu, sregs->cr4);
 	if (cpuid_update_needed)
-		kvm_update_cpuid(vcpu);
+		kvm_update_cpuid_runtime(vcpu);
 
 	idx = srcu_read_lock(&vcpu->kvm->srcu);
 	if (is_pae_paging(vcpu)) {