Commit cfc48181 authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: x86: Calculate the supported xcr0 mask at load time

Add a new global variable, supported_xcr0, to track which xcr0 bits can
be exposed to the guest instead of calculating the mask on every call.
The supported bits are constant for a given instance of KVM.

This paves the way toward eliminating the ->mpx_supported() call in
kvm_mpx_supported(), e.g. eliminates multiple retpolines in VMX's nested
VM-Enter path, and eventually toward eliminating ->mpx_supported()
altogether.

No functional change intended.
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 2ef7619d
...@@ -52,16 +52,6 @@ bool kvm_mpx_supported(void) ...@@ -52,16 +52,6 @@ bool kvm_mpx_supported(void)
} }
EXPORT_SYMBOL_GPL(kvm_mpx_supported); EXPORT_SYMBOL_GPL(kvm_mpx_supported);
u64 kvm_supported_xcr0(void)
{
u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
if (!kvm_mpx_supported())
xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
return xcr0;
}
#define F feature_bit #define F feature_bit
int kvm_update_cpuid(struct kvm_vcpu *vcpu) int kvm_update_cpuid(struct kvm_vcpu *vcpu)
...@@ -107,8 +97,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu) ...@@ -107,8 +97,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET; vcpu->arch.guest_xstate_size = XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET;
} else { } else {
vcpu->arch.guest_supported_xcr0 = vcpu->arch.guest_supported_xcr0 =
(best->eax | ((u64)best->edx << 32)) & (best->eax | ((u64)best->edx << 32)) & supported_xcr0;
kvm_supported_xcr0();
vcpu->arch.guest_xstate_size = best->ebx = vcpu->arch.guest_xstate_size = best->ebx =
xstate_required_size(vcpu->arch.xcr0, false); xstate_required_size(vcpu->arch.xcr0, false);
} }
...@@ -633,14 +622,12 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) ...@@ -633,14 +622,12 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out; goto out;
} }
break; break;
case 0xd: { case 0xd:
u64 supported = kvm_supported_xcr0(); entry->eax &= supported_xcr0;
entry->ebx = xstate_required_size(supported_xcr0, false);
entry->eax &= supported;
entry->ebx = xstate_required_size(supported, false);
entry->ecx = entry->ebx; entry->ecx = entry->ebx;
entry->edx &= supported >> 32; entry->edx &= supported_xcr0 >> 32;
if (!supported) if (!supported_xcr0)
break; break;
entry = do_host_cpuid(array, function, 1); entry = do_host_cpuid(array, function, 1);
...@@ -650,7 +637,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) ...@@ -650,7 +637,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax &= kvm_cpuid_D_1_eax_x86_features; entry->eax &= kvm_cpuid_D_1_eax_x86_features;
cpuid_mask(&entry->eax, CPUID_D_1_EAX); cpuid_mask(&entry->eax, CPUID_D_1_EAX);
if (entry->eax & (F(XSAVES)|F(XSAVEC))) if (entry->eax & (F(XSAVES)|F(XSAVEC)))
entry->ebx = xstate_required_size(supported, true); entry->ebx = xstate_required_size(supported_xcr0, true);
else else
entry->ebx = 0; entry->ebx = 0;
/* Saving XSS controlled state via XSAVES isn't supported. */ /* Saving XSS controlled state via XSAVES isn't supported. */
...@@ -658,7 +645,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) ...@@ -658,7 +645,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0; entry->edx = 0;
for (i = 2; i < 64; ++i) { for (i = 2; i < 64; ++i) {
if (!(supported & BIT_ULL(i))) if (!(supported_xcr0 & BIT_ULL(i)))
continue; continue;
entry = do_host_cpuid(array, function, i); entry = do_host_cpuid(array, function, i);
...@@ -666,7 +653,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) ...@@ -666,7 +653,7 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
goto out; goto out;
/* /*
* The @supported check above should have filtered out * The supported check above should have filtered out
* invalid sub-leafs as well as sub-leafs managed by * invalid sub-leafs as well as sub-leafs managed by
* IA32_XSS MSR. Only XCR0-managed sub-leafs should * IA32_XSS MSR. Only XCR0-managed sub-leafs should
* reach this point, and they should have a non-zero * reach this point, and they should have a non-zero
...@@ -681,7 +668,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function) ...@@ -681,7 +668,6 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->edx = 0; entry->edx = 0;
} }
break; break;
}
/* Intel PT */ /* Intel PT */
case 0x14: case 0x14:
if (!f_intel_pt) if (!f_intel_pt)
......
...@@ -1385,6 +1385,8 @@ static __init int svm_hardware_setup(void) ...@@ -1385,6 +1385,8 @@ static __init int svm_hardware_setup(void)
init_msrpm_offsets(); init_msrpm_offsets();
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
if (boot_cpu_has(X86_FEATURE_NX)) if (boot_cpu_has(X86_FEATURE_NX))
kvm_enable_efer_bits(EFER_NX); kvm_enable_efer_bits(EFER_NX);
......
...@@ -7660,6 +7660,10 @@ static __init int hardware_setup(void) ...@@ -7660,6 +7660,10 @@ static __init int hardware_setup(void)
WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost"); WARN_ONCE(host_bndcfgs, "KVM: BNDCFGS in host will be lost");
} }
if (!kvm_mpx_supported())
supported_xcr0 &= ~(XFEATURE_MASK_BNDREGS |
XFEATURE_MASK_BNDCSR);
if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() || if (!cpu_has_vmx_vpid() || !cpu_has_vmx_invvpid() ||
!(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global())) !(cpu_has_vmx_invvpid_single() || cpu_has_vmx_invvpid_global()))
enable_vpid = 0; enable_vpid = 0;
......
...@@ -181,6 +181,11 @@ struct kvm_shared_msrs { ...@@ -181,6 +181,11 @@ struct kvm_shared_msrs {
static struct kvm_shared_msrs_global __read_mostly shared_msrs_global; static struct kvm_shared_msrs_global __read_mostly shared_msrs_global;
static struct kvm_shared_msrs __percpu *shared_msrs; static struct kvm_shared_msrs __percpu *shared_msrs;
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
| XFEATURE_MASK_PKRU)
static u64 __read_mostly host_xss; static u64 __read_mostly host_xss;
struct kvm_stats_debugfs_item debugfs_entries[] = { struct kvm_stats_debugfs_item debugfs_entries[] = {
...@@ -227,6 +232,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = { ...@@ -227,6 +232,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
}; };
u64 __read_mostly host_xcr0; u64 __read_mostly host_xcr0;
u64 __read_mostly supported_xcr0;
EXPORT_SYMBOL_GPL(supported_xcr0);
struct kmem_cache *x86_fpu_cache; struct kmem_cache *x86_fpu_cache;
EXPORT_SYMBOL_GPL(x86_fpu_cache); EXPORT_SYMBOL_GPL(x86_fpu_cache);
...@@ -4114,8 +4121,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu, ...@@ -4114,8 +4121,7 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
* CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility * CPUID leaf 0xD, index 0, EDX:EAX. This is for compatibility
* with old userspace. * with old userspace.
*/ */
if (xstate_bv & ~kvm_supported_xcr0() || if (xstate_bv & ~supported_xcr0 || mxcsr & ~mxcsr_feature_mask)
mxcsr & ~mxcsr_feature_mask)
return -EINVAL; return -EINVAL;
load_xsave(vcpu, (u8 *)guest_xsave->region); load_xsave(vcpu, (u8 *)guest_xsave->region);
} else { } else {
...@@ -7352,8 +7358,10 @@ int kvm_arch_init(void *opaque) ...@@ -7352,8 +7358,10 @@ int kvm_arch_init(void *opaque)
perf_register_guest_info_callbacks(&kvm_guest_cbs); perf_register_guest_info_callbacks(&kvm_guest_cbs);
if (boot_cpu_has(X86_FEATURE_XSAVE)) if (boot_cpu_has(X86_FEATURE_XSAVE)) {
host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK); host_xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
supported_xcr0 = host_xcr0 & KVM_SUPPORTED_XCR0;
}
kvm_lapic_init(); kvm_lapic_init();
if (pi_inject_timer == -1) if (pi_inject_timer == -1)
......
...@@ -270,13 +270,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ...@@ -270,13 +270,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
int emulation_type, void *insn, int insn_len); int emulation_type, void *insn, int insn_len);
enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu); enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
#define KVM_SUPPORTED_XCR0 (XFEATURE_MASK_FP | XFEATURE_MASK_SSE \
| XFEATURE_MASK_YMM | XFEATURE_MASK_BNDREGS \
| XFEATURE_MASK_BNDCSR | XFEATURE_MASK_AVX512 \
| XFEATURE_MASK_PKRU)
extern u64 host_xcr0; extern u64 host_xcr0;
extern u64 supported_xcr0;
extern u64 kvm_supported_xcr0(void);
extern unsigned int min_timer_period_us; extern unsigned int min_timer_period_us;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment