Commit b965592a authored by Tom Lendacky, committed by Greg Kroah-Hartman

KVM: SVM: Implement VIRT_SPEC_CTRL support for SSBD

commit bc226f07 upstream

Expose the new virtualized architectural mechanism, VIRT_SSBD, for using
speculative store bypass disable (SSBD) under SVM.  This will allow guests
to use SSBD on hardware that uses non-architectural mechanisms for enabling
SSBD.

[ tglx: Folded the migration fixup from Paolo Bonzini ]
Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent b0ef8c72
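
[ Note: a minimal guest-side sketch, not part of this patch, of how a
  guest kernel might set SSBD once VIRT_SSBD is advertised in CPUID
  0x80000008.EBX. The helper name guest_enable_ssbd() is hypothetical;
  the MSR index and bit value follow the kernel's msr-index.h
  definitions, and svm_set_msr() below rejects any bit other than
  SPEC_CTRL_SSBD. ]

	/* Assumes Linux guest-kernel context: u64, rdmsrl(), wrmsrl(). */
	#include <asm/msr.h>

	#define MSR_AMD64_VIRT_SPEC_CTRL	0xc001011f
	#define SPEC_CTRL_SSBD			(1UL << 2)

	static void guest_enable_ssbd(void)	/* hypothetical helper */
	{
		u64 val;

		rdmsrl(MSR_AMD64_VIRT_SPEC_CTRL, val);
		val |= SPEC_CTRL_SSBD;	/* request store bypass disable */
		wrmsrl(MSR_AMD64_VIRT_SPEC_CTRL, val);
	}
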
arch/x86/include/asm/kvm_host.h

@@ -864,7 +864,7 @@ struct kvm_x86_ops {
 	int (*hardware_setup)(void); /* __init */
 	void (*hardware_unsetup)(void); /* __exit */
 	bool (*cpu_has_accelerated_tpr)(void);
-	bool (*cpu_has_high_real_mode_segbase)(void);
+	bool (*has_emulated_msr)(int index);
 	void (*cpuid_update)(struct kvm_vcpu *vcpu);
 	int (*vm_init)(struct kvm *kvm);
arch/x86/kernel/cpu/common.c

@@ -735,7 +735,8 @@ static void init_speculation_control(struct cpuinfo_x86 *c)
 	if (cpu_has(c, X86_FEATURE_INTEL_STIBP))
 		set_cpu_cap(c, X86_FEATURE_STIBP);
-	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD))
+	if (cpu_has(c, X86_FEATURE_SPEC_CTRL_SSBD) ||
+	    cpu_has(c, X86_FEATURE_VIRT_SSBD))
 		set_cpu_cap(c, X86_FEATURE_SSBD);
 	if (cpu_has(c, X86_FEATURE_AMD_IBRS)) {
arch/x86/kvm/cpuid.c

@@ -357,7 +357,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 	/* cpuid 0x80000008.ebx */
 	const u32 kvm_cpuid_8000_0008_ebx_x86_features =
-		F(AMD_IBPB) | F(AMD_IBRS);
+		F(AMD_IBPB) | F(AMD_IBRS) | F(VIRT_SSBD);

 	/* cpuid 0xC0000001.edx */
 	const u32 kvm_cpuid_C000_0001_edx_x86_features =

@@ -618,13 +618,20 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 			g_phys_as = phys_as;
 		entry->eax = g_phys_as | (virt_as << 8);
 		entry->edx = 0;
-		/* IBRS and IBPB aren't necessarily present in hardware cpuid */
+		/*
+		 * IBRS, IBPB and VIRT_SSBD aren't necessarily present in
+		 * hardware cpuid
+		 */
 		if (boot_cpu_has(X86_FEATURE_AMD_IBPB))
 			entry->ebx |= F(AMD_IBPB);
 		if (boot_cpu_has(X86_FEATURE_AMD_IBRS))
 			entry->ebx |= F(AMD_IBRS);
+		if (boot_cpu_has(X86_FEATURE_VIRT_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		entry->ebx &= kvm_cpuid_8000_0008_ebx_x86_features;
 		cpuid_mask(&entry->ebx, CPUID_8000_0008_EBX);
+		if (boot_cpu_has(X86_FEATURE_LS_CFG_SSBD))
+			entry->ebx |= F(VIRT_SSBD);
 		break;
 	}
 	case 0x80000019:
arch/x86/kvm/cpuid.h

@@ -190,6 +190,15 @@ static inline bool guest_cpuid_has_arch_capabilities(struct kvm_vcpu *vcpu)
 	return best && (best->edx & bit(X86_FEATURE_ARCH_CAPABILITIES));
 }

+static inline bool guest_cpuid_has_virt_ssbd(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 0x80000008, 0);
+	return best && (best->ebx & bit(X86_FEATURE_VIRT_SSBD));
+}
+
 /*
  * NRIPS is provided through cpuidfn 0x8000000a.edx bit 3
arch/x86/kvm/svm.c

@@ -3557,6 +3557,13 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		msr_info->data = svm->spec_ctrl;
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr_info->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		msr_info->data = svm->virt_spec_ctrl;
+		break;
 	case MSR_IA32_UCODE_REV:
 		msr_info->data = 0x01000065;
 		break;

@@ -3691,6 +3698,16 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 			break;
 		set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
 		break;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		if (!msr->host_initiated &&
+		    !guest_cpuid_has_virt_ssbd(vcpu))
+			return 1;
+
+		if (data & ~SPEC_CTRL_SSBD)
+			return 1;
+
+		svm->virt_spec_ctrl = data;
+		break;
 	case MSR_STAR:
 		svm->vmcb->save.star = data;
 		break;

@@ -5150,7 +5167,7 @@ static bool svm_cpu_has_accelerated_tpr(void)
 	return false;
 }

-static bool svm_has_high_real_mode_segbase(void)
+static bool svm_has_emulated_msr(int index)
 {
 	return true;
 }

@@ -5467,7 +5484,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.hardware_enable = svm_hardware_enable,
 	.hardware_disable = svm_hardware_disable,
 	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
-	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,
+	.has_emulated_msr = svm_has_emulated_msr,

 	.vcpu_create = svm_create_vcpu,
 	.vcpu_free = svm_free_vcpu,
arch/x86/kvm/vmx.c

@@ -8691,9 +8691,21 @@ static void vmx_handle_external_intr(struct kvm_vcpu *vcpu)
 	}
 }

-static bool vmx_has_high_real_mode_segbase(void)
+static bool vmx_has_emulated_msr(int index)
 {
-	return enable_unrestricted_guest || emulate_invalid_guest_state;
+	switch (index) {
+	case MSR_IA32_SMBASE:
+		/*
+		 * We cannot do SMM unless we can run the guest in big
+		 * real mode.
+		 */
+		return enable_unrestricted_guest || emulate_invalid_guest_state;
+	case MSR_AMD64_VIRT_SPEC_CTRL:
+		/* This is AMD only. */
+		return false;
+	default:
+		return true;
+	}
 }

 static bool vmx_mpx_supported(void)

@@ -11346,7 +11358,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.hardware_enable = hardware_enable,
 	.hardware_disable = hardware_disable,
 	.cpu_has_accelerated_tpr = report_flexpriority,
-	.cpu_has_high_real_mode_segbase = vmx_has_high_real_mode_segbase,
+	.has_emulated_msr = vmx_has_emulated_msr,

 	.vcpu_create = vmx_create_vcpu,
 	.vcpu_free = vmx_free_vcpu,
arch/x86/kvm/x86.c

@@ -1002,6 +1002,7 @@ static u32 emulated_msrs[] = {
 	MSR_IA32_MCG_CTL,
 	MSR_IA32_MCG_EXT_CTL,
 	MSR_IA32_SMBASE,
+	MSR_AMD64_VIRT_SPEC_CTRL,
 };

 static unsigned num_emulated_msrs;

@@ -2664,7 +2665,7 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 		 * fringe case that is not enabled except via specific settings
 		 * of the module parameters.
 		 */
-		r = kvm_x86_ops->cpu_has_high_real_mode_segbase();
+		r = kvm_x86_ops->has_emulated_msr(MSR_IA32_SMBASE);
 		break;
 	case KVM_CAP_COALESCED_MMIO:
 		r = KVM_COALESCED_MMIO_PAGE_OFFSET;

@@ -4226,14 +4227,8 @@ static void kvm_init_msr_list(void)
 	num_msrs_to_save = j;

 	for (i = j = 0; i < ARRAY_SIZE(emulated_msrs); i++) {
-		switch (emulated_msrs[i]) {
-		case MSR_IA32_SMBASE:
-			if (!kvm_x86_ops->cpu_has_high_real_mode_segbase())
-				continue;
-			break;
-		default:
-			break;
-		}
+		if (!kvm_x86_ops->has_emulated_msr(emulated_msrs[i]))
+			continue;

 		if (j < i)
 			emulated_msrs[j] = emulated_msrs[i];
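
[ Note: the host_initiated checks in svm_get_msr()/svm_set_msr() let a
  VMM save and restore MSR_AMD64_VIRT_SPEC_CTRL across migration even
  when the guest's CPUID does not advertise VIRT_SSBD; this is the
  migration fixup folded in above. A rough userspace sketch, assuming
  an open KVM vCPU fd; the helper name save_virt_spec_ctrl() and the
  error handling are illustrative only. ]

	#include <linux/kvm.h>
	#include <string.h>
	#include <sys/ioctl.h>

	#define MSR_AMD64_VIRT_SPEC_CTRL 0xc001011f

	/* Illustrative only: read the MSR via the host_initiated path. */
	static __u64 save_virt_spec_ctrl(int vcpu_fd)
	{
		struct {
			struct kvm_msrs hdr;
			struct kvm_msr_entry entry;
		} msrs;

		memset(&msrs, 0, sizeof(msrs));
		msrs.hdr.nmsrs = 1;
		msrs.entry.index = MSR_AMD64_VIRT_SPEC_CTRL;

		/* KVM_GET_MSRS returns the number of MSRs actually read. */
		if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) != 1)
			return 0;	/* MSR not emulated on this host */
		return msrs.entry.data;
	}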