Commit b5ec2b3f authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/bugs, KVM: Extend speculation control for VIRT_SPEC_CTRL

commit ccbcd267 upstream.

AMD is proposing a VIRT_SPEC_CTRL MSR to handle the Speculative Store
Bypass Disable via MSR_AMD64_LS_CFG so that guests do not have to care
about the bit position of the SSBD bit and thus facilitate migration.
Also, the sibling coordination on Family 17H CPUs can only be done on
the host.
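
To illustrate what the indirection buys (a sketch only, not part of this patch:
the helper name and the LS_CFG bit position below are placeholders, since the
actual SSBD bit in MSR_AMD64_LS_CFG differs between CPU families), the host can
translate the architectural SSBD bit a guest writes to VIRT_SPEC_CTRL into
whatever LS_CFG bit the current family uses:

    #include <stdint.h>

    #define SPEC_CTRL_SSBD          (1ULL << 2)   /* architectural SSBD bit */
    #define LS_CFG_SSBD_EXAMPLE     (1ULL << 10)  /* placeholder; family dependent */

    /* Fold the guest's SSBD request into the host's LS_CFG image. */
    static uint64_t ls_cfg_from_virt_spec_ctrl(uint64_t virt_spec_ctrl,
                                               uint64_t host_ls_cfg)
    {
            if (virt_spec_ctrl & SPEC_CTRL_SSBD)
                    return host_ls_cfg | LS_CFG_SSBD_EXAMPLE;
            return host_ls_cfg & ~LS_CFG_SSBD_EXAMPLE;
    }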

Extend x86_spec_ctrl_set_guest() and x86_spec_ctrl_restore_host() with an
extra argument for the VIRT_SPEC_CTRL MSR.

Hand in 0 from VMX and in SVM add a new virt_spec_ctrl member to the CPU
data structure which is going to be used in later patches for the actual
implementation.
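
For reference, the resulting helper declarations look roughly like this (a
sketch, not part of this diff; per the bwh backport note below, the common
header/bugs.c portion of the upstream change had already been applied to this
4.4 tree, and the parameter names here are illustrative):

    /*
     * guest_virt_spec_ctrl carries the guest's VIRT_SPEC_CTRL bits; on AMD
     * hosts they may get translated into MSR_AMD64_LS_CFG bits.
     */
    extern void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl,
                                        u64 guest_virt_spec_ctrl);
    extern void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl,
                                           u64 guest_virt_spec_ctrl);
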
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
[bwh: Backported to 4.4: This was partly applied before; apply just the
 missing bits]
Signed-off-by: Ben Hutchings <ben.hutchings@codethink.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7f77d36a
arch/x86/kvm/svm.c

@@ -149,6 +149,12 @@ struct vcpu_svm {
         } host;

         u64 spec_ctrl;
+        /*
+         * Contains guest-controlled bits of VIRT_SPEC_CTRL, which will be
+         * translated into the appropriate L2_CFG bits on the host to
+         * perform speculative control.
+         */
+        u64 virt_spec_ctrl;

         u32 *msrpm;

@@ -1146,6 +1152,7 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
         u32 eax = 1;

         svm->spec_ctrl = 0;
+        svm->virt_spec_ctrl = 0;

         if (!init_event) {
                 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |

@@ -3904,7 +3911,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
          * is no need to worry about the conditional branch over the wrmsr
          * being speculatively taken.
          */
-        x86_spec_ctrl_set_guest(svm->spec_ctrl);
+        x86_spec_ctrl_set_guest(svm->spec_ctrl, svm->virt_spec_ctrl);

         asm volatile (
                 "push %%" _ASM_BP "; \n\t"

@@ -4028,7 +4035,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
         if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
                 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

-        x86_spec_ctrl_restore_host(svm->spec_ctrl);
+        x86_spec_ctrl_restore_host(svm->spec_ctrl, svm->virt_spec_ctrl);

         reload_tss(vcpu);

arch/x86/kvm/vmx.c

@@ -8658,9 +8658,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
          * is no need to worry about the conditional branch over the wrmsr
          * being speculatively taken.
          */
-        x86_spec_ctrl_set_guest(vmx->spec_ctrl);
+        x86_spec_ctrl_set_guest(vmx->spec_ctrl, 0);

         vmx->__launched = vmx->loaded_vmcs->launched;
         asm(
                 /* Store host registers */
                 "push %%" _ASM_DX "; push %%" _ASM_BP ";"

@@ -8796,7 +8797,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         if (!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL))
                 vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

-        x86_spec_ctrl_restore_host(vmx->spec_ctrl);
+        x86_spec_ctrl_restore_host(vmx->spec_ctrl, 0);

         /* Eliminate branch target predictions from guest mode */
         vmexit_fill_RSB();