Commit cf21f58a authored by Konrad Rzeszutek Wilk, committed by Greg Kroah-Hartman

x86/bugs, KVM: Support the combination of guest and host IBRS

commit 5cf68754 upstream

A guest may modify the SPEC_CTRL MSR from the value used by the
kernel. Since the kernel doesn't use IBRS, a value of zero is what
the host needs.

But Intel's 336996 document (Speculative-Execution-Side-Channel-Mitigations.pdf)
refers to the other bits as reserved, so the kernel should respect the
boot-time SPEC_CTRL value and use that.

This allows dealing with future extensions to the SPEC_CTRL interface,
if any arise at all.
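
For reference, the boot-time value mentioned above is captured early in the
CPU bug mitigation setup; a simplified sketch, assuming the shape of the
related change to arch/x86/kernel/cpu/bugs.c elsewhere in this series:

	/* Read SPEC_CTRL once at boot so any reserved bits set by firmware
	 * are preserved; x86_spec_ctrl_base is the value the restore-host
	 * helper introduced below writes back after VMEXIT.
	 */
	if (boot_cpu_has(X86_FEATURE_IBRS))
		rdmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);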

Note: This uses wrmsrl() instead of native_wrmsrl(). It does not make any
difference, as paravirt will overwrite the callq *0xfff.. with the wrmsrl
assembler code.
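
For illustration, native_wrmsrl() boils down to the raw WRMSR instruction;
a minimal sketch of the native path (simplified; the exact kernel macros
vary by version):

	/* WRMSR takes the MSR index in ECX and the 64-bit value split
	 * across EDX:EAX. With CONFIG_PARAVIRT, wrmsrl() compiles to an
	 * indirect call slot that boot-time patching rewrites to this same
	 * sequence on bare metal, which is why the choice is immaterial here.
	 */
	static inline void native_wrmsrl(unsigned int msr, u64 val)
	{
		asm volatile("wrmsr"
			     : /* no outputs */
			     : "c" (msr), "a" ((u32)val), "d" ((u32)(val >> 32))
			     : "memory");
	}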
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0f5dd651
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -228,6 +228,16 @@ enum spectre_v2_mitigation {
 extern void x86_spec_ctrl_set(u64);
 extern u64 x86_spec_ctrl_get_default(void);
 
+/*
+ * On VMENTER we must preserve whatever view of the SPEC_CTRL MSR
+ * the guest has, while on VMEXIT we restore the host view. This
+ * would be easier if SPEC_CTRL were architecturally maskable or
+ * shadowable for guests but this is not (currently) the case.
+ * Takes the guest view of SPEC_CTRL MSR as a parameter.
+ */
+extern void x86_spec_ctrl_set_guest(u64);
+extern void x86_spec_ctrl_restore_host(u64);
+
 extern char __indirect_thunk_start[];
 extern char __indirect_thunk_end[];
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -122,6 +122,24 @@ u64 x86_spec_ctrl_get_default(void)
 }
 EXPORT_SYMBOL_GPL(x86_spec_ctrl_get_default);
 
+void x86_spec_ctrl_set_guest(u64 guest_spec_ctrl)
+{
+	if (!boot_cpu_has(X86_FEATURE_IBRS))
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, guest_spec_ctrl);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_set_guest);
+
+void x86_spec_ctrl_restore_host(u64 guest_spec_ctrl)
+{
+	if (!boot_cpu_has(X86_FEATURE_IBRS))
+		return;
+	if (x86_spec_ctrl_base != guest_spec_ctrl)
+		wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+EXPORT_SYMBOL_GPL(x86_spec_ctrl_restore_host);
+
 #ifdef RETPOLINE
 static bool spectre_v2_bad_module;
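
Taken together, the two helpers give the vcpu-run paths a single pattern; an
illustrative sketch of that pattern (vcpu_run_sketch and enter_guest are
hypothetical stand-ins, the rest mirrors the SVM/VMX hunks that follow):

	static void vcpu_run_sketch(struct vcpu_svm *svm)	/* hypothetical wrapper */
	{
		/* Load the guest's view of SPEC_CTRL; no wrmsr if it matches the host. */
		x86_spec_ctrl_set_guest(svm->spec_ctrl);

		enter_guest(svm);	/* VMRUN/VMLAUNCH world switch, elided */

		/* If the guest can write the MSR directly, re-read its latest value. */
		if (unlikely(!msr_write_intercepted(&svm->vcpu, MSR_IA32_SPEC_CTRL)))
			svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);

		/* Put the host's boot-time view back, again skipping a redundant write. */
		x86_spec_ctrl_restore_host(svm->spec_ctrl);
	}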
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4917,8 +4917,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
+	x86_spec_ctrl_set_guest(svm->spec_ctrl);
 
 	asm volatile (
 		"push %%" _ASM_BP "; \n\t"
@@ -5030,8 +5029,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (svm->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(svm->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8916,8 +8916,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * is no need to worry about the conditional branch over the wrmsr
 	 * being speculatively taken.
 	 */
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, vmx->spec_ctrl);
+	x86_spec_ctrl_set_guest(vmx->spec_ctrl);
 
 	vmx->__launched = vmx->loaded_vmcs->launched;
 	asm(
@@ -9055,8 +9054,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
 		vmx->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 
-	if (vmx->spec_ctrl)
-		native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
+	x86_spec_ctrl_restore_host(vmx->spec_ctrl);
 
 	/* Eliminate branch target predictions from guest mode */
 	vmexit_fill_RSB();
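
A short worked example of the comparison logic above (values assumed for
illustration): with the host booted at SPEC_CTRL=0 and a guest that enables
IBRS, x86_spec_ctrl_set_guest() sees base (0) != guest (1) and writes 1 on
VMENTER; x86_spec_ctrl_restore_host() sees the same mismatch and writes the
boot value 0 back on VMEXIT. If neither side touches the MSR, both values are
0 and no wrmsr is executed on either transition, preserving the old fast path
while still honouring a non-zero boot-time value.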