Commit e745e37d authored by Sean Christopherson, committed by Paolo Bonzini

KVM: x86: Refactor cpuid_mask() to auto-retrieve the register

Use the recently introduced cpuid_entry_get_reg() to automatically get
the appropriate register when masking a CPUID entry.

No functional change intended.
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent b32666b1
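To make the refactor concrete, here is a minimal, self-contained sketch of the pattern the commit moves to. It is illustrative only and not the kernel's actual code: demo_cpuid_entry, demo_host_caps, demo_reg_for_leaf() and demo_cpuid_entry_mask() are hypothetical stand-ins for kvm_cpuid_entry2, boot_cpu_data.x86_capability[], cpuid_entry_get_reg() and cpuid_entry_mask(). The point is that the new helper derives the register from the leaf, so a caller can no longer pair the wrong register with a capability word.

/* Illustrative sketch only; simplified stand-ins for the kernel's
 * kvm_cpuid_entry2 and reverse-CPUID lookup. All names are hypothetical. */
#include <stdio.h>
#include <stdint.h>

struct demo_cpuid_entry {
	uint32_t eax, ebx, ecx, edx;
};

enum demo_leaf { DEMO_CPUID_1_EDX, DEMO_CPUID_1_ECX, DEMO_NR_LEAFS };

/* Host capability words, indexed by leaf, mirroring the role of
 * boot_cpu_data.x86_capability[]. Values are arbitrary for the demo. */
static uint32_t demo_host_caps[DEMO_NR_LEAFS] = {
	[DEMO_CPUID_1_EDX] = 0x0fffffff,
	[DEMO_CPUID_1_ECX] = 0x80000001,
};

/* Old-style helper: the caller must pass the matching register by hand. */
static void demo_cpuid_mask(uint32_t *word, enum demo_leaf leaf)
{
	*word &= demo_host_caps[leaf];
}

/* New-style helper: resolve the register from the leaf, so callers
 * cannot pair the wrong register with a capability word. */
static uint32_t *demo_reg_for_leaf(struct demo_cpuid_entry *entry,
				   enum demo_leaf leaf)
{
	switch (leaf) {
	case DEMO_CPUID_1_EDX: return &entry->edx;
	case DEMO_CPUID_1_ECX: return &entry->ecx;
	default:               return NULL;
	}
}

static void demo_cpuid_entry_mask(struct demo_cpuid_entry *entry,
				  enum demo_leaf leaf)
{
	uint32_t *reg = demo_reg_for_leaf(entry, leaf);

	if (reg)
		*reg &= demo_host_caps[leaf];
}

int main(void)
{
	struct demo_cpuid_entry e = { .ecx = 0xffffffff, .edx = 0xffffffff };

	demo_cpuid_mask(&e.edx, DEMO_CPUID_1_EDX);   /* old calling pattern */
	demo_cpuid_entry_mask(&e, DEMO_CPUID_1_ECX); /* new calling pattern */
	printf("edx=%08x ecx=%08x\n", (unsigned)e.edx, (unsigned)e.ecx);
	return 0;
}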
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -254,12 +254,6 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
 	return r;
 }
 
-static __always_inline void cpuid_mask(u32 *word, int wordnum)
-{
-	reverse_cpuid_check(wordnum);
-	*word &= boot_cpu_data.x86_capability[wordnum];
-}
-
 struct kvm_cpuid_array {
 	struct kvm_cpuid_entry2 *entries;
 	const int maxnent;
@@ -373,13 +367,13 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 	case 0:
 		entry->eax = min(entry->eax, 1u);
 		entry->ebx &= kvm_cpuid_7_0_ebx_x86_features;
-		cpuid_mask(&entry->ebx, CPUID_7_0_EBX);
+		cpuid_entry_mask(entry, CPUID_7_0_EBX);
 		/* TSC_ADJUST is emulated */
 		cpuid_entry_set(entry, X86_FEATURE_TSC_ADJUST);
 
 		entry->ecx &= kvm_cpuid_7_0_ecx_x86_features;
 		f_la57 = cpuid_entry_get(entry, X86_FEATURE_LA57);
-		cpuid_mask(&entry->ecx, CPUID_7_ECX);
+		cpuid_entry_mask(entry, CPUID_7_ECX);
 		/* Set LA57 based on hardware capability. */
 		entry->ecx |= f_la57;
 		entry->ecx |= f_umip;
@@ -389,7 +383,7 @@ static inline void do_cpuid_7_mask(struct kvm_cpuid_entry2 *entry)
 			cpuid_entry_clear(entry, X86_FEATURE_PKU);
 
 		entry->edx &= kvm_cpuid_7_0_edx_x86_features;
-		cpuid_mask(&entry->edx, CPUID_7_EDX);
+		cpuid_entry_mask(entry, CPUID_7_EDX);
 		if (boot_cpu_has(X86_FEATURE_IBPB) && boot_cpu_has(X86_FEATURE_IBRS))
 			cpuid_entry_set(entry, X86_FEATURE_SPEC_CTRL);
 		if (boot_cpu_has(X86_FEATURE_STIBP))
@@ -507,9 +501,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		break;
 	case 1:
 		entry->edx &= kvm_cpuid_1_edx_x86_features;
-		cpuid_mask(&entry->edx, CPUID_1_EDX);
+		cpuid_entry_mask(entry, CPUID_1_EDX);
 		entry->ecx &= kvm_cpuid_1_ecx_x86_features;
-		cpuid_mask(&entry->ecx, CPUID_1_ECX);
+		cpuid_entry_mask(entry, CPUID_1_ECX);
 		/* we support x2apic emulation even if host does not support
 		 * it since we emulate x2apic in software */
 		cpuid_entry_set(entry, X86_FEATURE_X2APIC);
@@ -619,7 +613,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 			goto out;
 
 		entry->eax &= kvm_cpuid_D_1_eax_x86_features;
-		cpuid_mask(&entry->eax, CPUID_D_1_EAX);
+		cpuid_entry_mask(entry, CPUID_D_1_EAX);
 		if (entry->eax & (F(XSAVES)|F(XSAVEC)))
 			entry->ebx = xstate_required_size(supported_xcr0, true);
 		else
@@ -699,9 +693,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		break;
 	case 0x80000001:
 		entry->edx &= kvm_cpuid_8000_0001_edx_x86_features;
-		cpuid_mask(&entry->edx, CPUID_8000_0001_EDX);
+		cpuid_entry_mask(entry, CPUID_8000_0001_EDX);
 		entry->ecx &= kvm_cpuid_8000_0001_ecx_x86_features;
-		cpuid_mask(&entry->ecx, CPUID_8000_0001_ECX);
+		cpuid_entry_mask(entry, CPUID_8000_0001_ECX);
 		break;
 	case 0x80000007: /* Advanced power management */
 		/* invariant TSC is CPUID.80000007H:EDX[8] */
@@ -720,7 +714,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
-		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		cpuid_entry_mask(entry, CPUID_8000_0008_EBX);
 		/*
 		 * AMD has separate bits for each SPEC_CTRL bit.
 		 * arch/x86/kernel/cpu/bugs.c is kind enough to
@@ -763,7 +757,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
 		break;
 	case 0xC0000001:
 		entry->edx &= kvm_cpuid_C000_0001_edx_x86_features;
-		cpuid_mask(&entry->edx, CPUID_C000_0001_EDX);
+		cpuid_entry_mask(entry, CPUID_C000_0001_EDX);
 		break;
 	case 3: /* Processor serial number */
 	case 5: /* MONITOR/MWAIT */
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -167,6 +167,14 @@ static __always_inline void cpuid_entry_change(struct kvm_cpuid_entry2 *entry,
 	*reg &= ~__feature_bit(x86_feature);
 }
 
+static __always_inline void cpuid_entry_mask(struct kvm_cpuid_entry2 *entry,
+					     enum cpuid_leafs leaf)
+{
+	u32 *reg = cpuid_entry_get_reg(entry, leaf * 32);
+
+	*reg &= boot_cpu_data.x86_capability[leaf];
+}
+
 static __always_inline u32 *guest_cpuid_get_register(struct kvm_vcpu *vcpu,
 						     unsigned int x86_feature)
 {