Commit 4186ae81 authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/l1tf: Handle EPT disabled state proper

commit a7b9020b upstream

If Extended Page Tables (EPT) are disabled or not supported, no L1D
flushing is required. The setup function can just avoid setting up the L1D
flush for the EPT=n case.

Invoke it after the hardware setup has been done and enable_ept has the
correct state, and expose the EPT disabled state in the mitigation status
as well.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.612160168@linutronix.de
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 31282cf4
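For context, the ordering constraint described in the commit message can be modeled in isolation. The toy C program below is an illustration only, not kernel code: it assumes that enable_ept gets its final value during the hardware setup triggered from kvm_init(), and the *_model helper names are invented for the sketch.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the init ordering this patch relies on; not kernel code. */

enum vmx_l1d_flush_state {
	VMENTER_L1D_FLUSH_AUTO,
	VMENTER_L1D_FLUSH_NEVER,
	VMENTER_L1D_FLUSH_COND,
	VMENTER_L1D_FLUSH_ALWAYS,
	VMENTER_L1D_FLUSH_EPT_DISABLED,
};

static bool enable_ept;
static enum vmx_l1d_flush_state l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;

/* Stands in for the hardware setup done from kvm_init(): decides whether
 * EPT is usable from CPU capabilities and the ept module parameter. */
static void hardware_setup_model(bool cpu_has_ept, bool ept_param)
{
	enable_ept = cpu_has_ept && ept_param;
}

/* Stands in for vmx_setup_l1d_flush(): with EPT off no L1D flush is
 * needed, so record that state and skip the rest of the setup. */
static void setup_l1d_flush_model(enum vmx_l1d_flush_state requested)
{
	if (!enable_ept) {
		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
		return;
	}
	l1tf_vmx_mitigation = requested;
}

int main(void)
{
	/* Correct order: hardware setup first, then the L1D flush decision. */
	hardware_setup_model(true, false);	/* e.g. kvm_intel loaded with ept=0 */
	setup_l1d_flush_model(VMENTER_L1D_FLUSH_COND);

	printf("l1tf_vmx_mitigation = %d (4 == EPT disabled)\n",
	       (int)l1tf_vmx_mitigation);
	return 0;
}

Run as-is, the model ends up in state 4 (VMENTER_L1D_FLUSH_EPT_DISABLED); calling setup_l1d_flush_model() before hardware_setup_model() would instead latch a flush mode based on a stale enable_ept value, which is the situation the reordering in the diff below avoids.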
@@ -504,6 +504,7 @@ enum vmx_l1d_flush_state {
 	VMENTER_L1D_FLUSH_NEVER,
 	VMENTER_L1D_FLUSH_COND,
 	VMENTER_L1D_FLUSH_ALWAYS,
+	VMENTER_L1D_FLUSH_EPT_DISABLED,
 };
 
 extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
@@ -679,6 +679,7 @@ static const char *l1tf_vmx_states[] = {
 	[VMENTER_L1D_FLUSH_NEVER]	= "vulnerable",
 	[VMENTER_L1D_FLUSH_COND]	= "conditional cache flushes",
 	[VMENTER_L1D_FLUSH_ALWAYS]	= "cache flushes",
+	[VMENTER_L1D_FLUSH_EPT_DISABLED] = "EPT disabled",
 };
 
 static ssize_t l1tf_show_state(char *buf)
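The string added above feeds the L1TF entry under /sys/devices/system/cpu/vulnerabilities/. As a rough sketch of how such a table is consumed, consider the function below; the real l1tf_show_state() in bugs.c differs in detail, and the exact format string is an assumption:

/* Illustrative only; not the actual bugs.c implementation. */
static ssize_t l1tf_show_state_sketch(char *buf)
{
	/* e.g. "Mitigation: PTE Inversion; VMX: EPT disabled" */
	return sprintf(buf, "Mitigation: PTE Inversion; VMX: %s\n",
		       l1tf_vmx_states[l1tf_vmx_mitigation]);
}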
@@ -11659,6 +11659,11 @@ static int __init vmx_setup_l1d_flush(void)
 	if (!boot_cpu_has_bug(X86_BUG_L1TF))
 		return 0;
 
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
+	}
+
 	l1tf_vmx_mitigation = vmentry_l1d_flush;
 
 	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
@@ -11685,18 +11690,35 @@ static void vmx_cleanup_l1d_flush(void)
 	l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
 }
 
+static void vmx_exit(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
+	synchronize_rcu();
+#endif
+
+	kvm_exit();
+
+	vmx_cleanup_l1d_flush();
+}
+module_exit(vmx_exit)
+
 static int __init vmx_init(void)
 {
 	int r;
 
-	r = vmx_setup_l1d_flush();
+	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
+		     __alignof__(struct vcpu_vmx), THIS_MODULE);
 	if (r)
 		return r;
 
-	r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
-		     __alignof__(struct vcpu_vmx), THIS_MODULE);
+	/*
+	 * Must be called after kvm_init() so enable_ept is properly set up
+	 */
+	r = vmx_setup_l1d_flush();
 	if (r) {
-		vmx_cleanup_l1d_flush();
+		vmx_exit();
 		return r;
 	}
@@ -11707,18 +11729,4 @@ static int __init vmx_init(void)
 	return 0;
 }
 
-static void __exit vmx_exit(void)
-{
-#ifdef CONFIG_KEXEC_CORE
-	RCU_INIT_POINTER(crash_vmclear_loaded_vmcss, NULL);
-	synchronize_rcu();
-#endif
-
-	kvm_exit();
-
-	vmx_cleanup_l1d_flush();
-}
-
 module_init(vmx_init)
-module_exit(vmx_exit)
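A user-visible way to verify the new state is to read the L1TF entry in sysfs once kvm_intel is loaded with EPT off on an affected CPU; the reported line is expected to mention "EPT disabled", with the exact wording coming from the l1tf_vmx_states[] table above. A minimal userspace reader, sketched here only as a convenience:

#include <stdio.h>

/* Print the kernel's reported L1TF mitigation status. */
int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/l1tf", "r");

	if (!f) {
		perror("l1tf sysfs attribute");
		return 1;
	}
	if (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}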