Commit bd791999 authored by Sean Christopherson and committed by Paolo Bonzini

KVM: x86: Override host CPUID results with kvm_cpu_caps

Override CPUID entries with kvm_cpu_caps during KVM_GET_SUPPORTED_CPUID
instead of masking the host CPUID result, which is redundant now that
the host CPUID is incorporated into kvm_cpu_caps at runtime.

No functional change intended.
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent d8577a4c
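
The equivalence the commit message relies on can be shown with a minimal standalone sketch (hypothetical values and variable names, not the kernel code): once the host CPUID has already been folded into kvm_cpu_caps at setup, overriding a CPUID register with the cap word yields the same bits as masking the host-reported value with it.

/*
 * Minimal standalone sketch (hypothetical values and names, not kernel code):
 * once the host's CPUID bits have been ANDed into the capability word at
 * setup time, overriding the guest-visible register with that word gives the
 * same result as masking the host value with it again.
 */
#include <assert.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t host_cpuid   = 0xf0f0f0f0u;  /* feature bits the host CPU reports */
        uint32_t kvm_features = 0xff00ff00u;  /* feature bits KVM is able to expose */

        /* Setup time: the host CPUID is incorporated into the cap word once. */
        uint32_t kvm_cpu_cap = host_cpuid & kvm_features;

        /* Old scheme: start from the host-reported value, then mask with the caps. */
        uint32_t masked = host_cpuid;
        masked &= kvm_cpu_cap;

        /* New scheme: override the register with the cap word outright. */
        uint32_t overridden = kvm_cpu_cap;

        assert(masked == overridden);  /* no functional change intended */
        printf("masked=0x%08" PRIx32 " overridden=0x%08" PRIx32 "\n",
               masked, overridden);
        return 0;
}
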
@@ -485,8 +485,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 entry->eax = min(entry->eax, 0x1fU);
                 break;
         case 1:
-                cpuid_entry_mask(entry, CPUID_1_EDX);
-                cpuid_entry_mask(entry, CPUID_1_ECX);
+                cpuid_entry_override(entry, CPUID_1_EDX);
+                cpuid_entry_override(entry, CPUID_1_ECX);
                 /* we support x2apic emulation even if host does not support
                  * it since we emulate x2apic in software */
                 cpuid_entry_set(entry, X86_FEATURE_X2APIC);
@@ -531,9 +531,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
         /* function 7 has additional index. */
         case 7:
                 entry->eax = min(entry->eax, 1u);
-                cpuid_entry_mask(entry, CPUID_7_0_EBX);
-                cpuid_entry_mask(entry, CPUID_7_ECX);
-                cpuid_entry_mask(entry, CPUID_7_EDX);
+                cpuid_entry_override(entry, CPUID_7_0_EBX);
+                cpuid_entry_override(entry, CPUID_7_ECX);
+                cpuid_entry_override(entry, CPUID_7_EDX);

                 /* TSC_ADJUST and ARCH_CAPABILITIES are emulated in software. */
                 cpuid_entry_set(entry, X86_FEATURE_TSC_ADJUST);
@@ -552,7 +552,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                         if (!entry)
                                 goto out;

-                        cpuid_entry_mask(entry, CPUID_7_1_EAX);
+                        cpuid_entry_override(entry, CPUID_7_1_EAX);
                         entry->ebx = 0;
                         entry->ecx = 0;
                         entry->edx = 0;
@@ -618,7 +618,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 if (!entry)
                         goto out;

-                cpuid_entry_mask(entry, CPUID_D_1_EAX);
+                cpuid_entry_override(entry, CPUID_D_1_EAX);
                 if (entry->eax & (F(XSAVES)|F(XSAVEC)))
                         entry->ebx = xstate_required_size(supported_xcr0, true);
                 else
@@ -697,11 +697,8 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 entry->eax = min(entry->eax, 0x8000001f);
                 break;
         case 0x80000001:
-                cpuid_entry_mask(entry, CPUID_8000_0001_EDX);
-                /* Add it manually because it may not be in host CPUID. */
-                if (!tdp_enabled)
-                        cpuid_entry_set(entry, X86_FEATURE_GBPAGES);
-                cpuid_entry_mask(entry, CPUID_8000_0001_ECX);
+                cpuid_entry_override(entry, CPUID_8000_0001_EDX);
+                cpuid_entry_override(entry, CPUID_8000_0001_ECX);
                 break;
         case 0x80000007: /* Advanced power management */
                 /* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -719,7 +716,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                         g_phys_as = phys_as;
                 entry->eax = g_phys_as | (virt_as << 8);
                 entry->edx = 0;
-                cpuid_entry_mask(entry, CPUID_8000_0008_EBX);
+                cpuid_entry_override(entry, CPUID_8000_0008_EBX);
                 /*
                  * AMD has separate bits for each SPEC_CTRL bit.
                  * arch/x86/kernel/cpu/bugs.c is kind enough to
@@ -761,7 +758,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
                 entry->eax = min(entry->eax, 0xC0000004);
                 break;
         case 0xC0000001:
-                cpuid_entry_mask(entry, CPUID_C000_0001_EDX);
+                cpuid_entry_override(entry, CPUID_C000_0001_EDX);
                 break;
         case 3: /* Processor serial number */
         case 5: /* MONITOR/MWAIT */
...
@@ -170,13 +170,13 @@ static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
         *reg &= ~__feature_bit(x86_feature);
 }

-static __always_inline void cpuid_entry_mask(struct kvm_cpuid_entry2 *entry,
-                                             enum cpuid_leafs leaf)
+static __always_inline void cpuid_entry_override(struct kvm_cpuid_entry2 *entry,
+                                                 enum cpuid_leafs leaf)
 {
         u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);

         BUILD_BUG_ON(leaf >= ARRAY_SIZE(kvm_cpu_caps));
-        *reg &= kvm_cpu_caps[leaf];
+        *reg = kvm_cpu_caps[leaf];
 }

 static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
...
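The hunk above is the crux of the rename: cpuid_entry_override() assigns kvm_cpu_caps[leaf] to the register outright instead of ANDing it into the host-reported value, which, per the commit message, is safe because the host CPUID has already been incorporated into kvm_cpu_caps at runtime.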
@@ -6072,8 +6072,7 @@ static void svm_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
                 entry->ebx = 8; /* Lets support 8 ASIDs in case we add proper
                                    ASID emulation to nested SVM */
                 entry->ecx = 0; /* Reserved */
-                /* Note, 0x8000000A.EDX is managed via kvm_cpu_caps. */;
-                cpuid_entry_mask(entry, CPUID_8000_000A_EDX);
+                cpuid_entry_override(entry, CPUID_8000_000A_EDX);
                 break;
         }
 }
...
@@ -7130,18 +7130,6 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
  */
 static void vmx_set_supported_cpuid(struct kvm_cpuid_entry2 *entry)
 {
-        switch (entry->function) {
-        case 0x7:
-                /*
-                 * UMIP needs to be manually set even though vmx_set_cpu_caps()
-                 * also sets UMIP since do_host_cpuid() will drop it.
-                 */
-                if (vmx_umip_emulated())
-                        cpuid_entry_set(entry, X86_FEATURE_UMIP);
-                break;
-        default:
-                break;
-        }
 }

 static __init void vmx_set_cpu_caps(void)
...
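As the removed comment in the vmx.c hunk explains, UMIP previously had to be set by hand because do_host_cpuid() would drop it; with the override semantics the function-7 registers are taken from kvm_cpu_caps, where vmx_set_cpu_caps() has already set UMIP, so the manual fixup becomes unnecessary and the hook is left empty.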