Commit 4757e268 authored by Thomas Gleixner's avatar Thomas Gleixner Committed by Stefan Bader

x86/litf: Introduce vmx status variable

Store the effective mitigation of VMX in a status variable and use it to
report the VMX state in the l1tf sysfs file.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.433098358@linutronix.de

CVE-2018-3620
CVE-2018-3646

[smb: Minor context adjustment in last hunk]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent eea468d2
...@@ -500,4 +500,13 @@ enum vm_instruction_error_number { ...@@ -500,4 +500,13 @@ enum vm_instruction_error_number {
VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28, VMXERR_INVALID_OPERAND_TO_INVEPT_INVVPID = 28,
}; };
enum vmx_l1d_flush_state {
VMENTER_L1D_FLUSH_AUTO,
VMENTER_L1D_FLUSH_NEVER,
VMENTER_L1D_FLUSH_COND,
VMENTER_L1D_FLUSH_ALWAYS,
};
extern enum vmx_l1d_flush_state l1tf_vmx_mitigation;
#endif #endif
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <asm/processor-flags.h> #include <asm/processor-flags.h>
#include <asm/fpu/internal.h> #include <asm/fpu/internal.h>
#include <asm/msr.h> #include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h> #include <asm/paravirt.h>
#include <asm/alternative.h> #include <asm/alternative.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
...@@ -685,6 +686,12 @@ void x86_spec_ctrl_setup_ap(void) ...@@ -685,6 +686,12 @@ void x86_spec_ctrl_setup_ap(void)
#undef pr_fmt #undef pr_fmt
#define pr_fmt(fmt) "L1TF: " fmt #define pr_fmt(fmt) "L1TF: " fmt
#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * Effective L1D flush mitigation chosen for VMX; written by the KVM/VMX
 * setup code and reported through the l1tf sysfs file via l1tf_show_state().
 */
enum vmx_l1d_flush_state l1tf_vmx_mitigation __ro_after_init = VMENTER_L1D_FLUSH_AUTO;
EXPORT_SYMBOL_GPL(l1tf_vmx_mitigation);
#endif
static void __init l1tf_select_mitigation(void) static void __init l1tf_select_mitigation(void)
{ {
u64 half_pa; u64 half_pa;
...@@ -714,6 +721,32 @@ static void __init l1tf_select_mitigation(void) ...@@ -714,6 +721,32 @@ static void __init l1tf_select_mitigation(void)
#ifdef CONFIG_SYSFS #ifdef CONFIG_SYSFS
#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
#if IS_ENABLED(CONFIG_KVM_INTEL)
/*
 * Human-readable state strings, indexed by enum vmx_l1d_flush_state;
 * consumed by l1tf_show_state() when building the sysfs report.
 */
static const char *l1tf_vmx_states[] = {
[VMENTER_L1D_FLUSH_AUTO] = "auto",
[VMENTER_L1D_FLUSH_NEVER] = "vulnerable",
[VMENTER_L1D_FLUSH_COND] = "conditional cache flushes",
[VMENTER_L1D_FLUSH_ALWAYS] = "cache flushes",
};
/*
 * Build the L1TF line for sysfs.  While the VMX mitigation is still in the
 * AUTO state only the default PTE-inversion message is emitted; once a mode
 * is in effect, the SMT state and the L1D flush mode are appended.
 */
static ssize_t l1tf_show_state(char *buf)
{
	const char *smt_state;

	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO)
		return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);

	smt_state = (cpu_smt_control == CPU_SMT_ENABLED) ? "vulnerable"
							 : "disabled";
	return sprintf(buf, "%s; VMX: SMT %s, L1D %s\n", L1TF_DEFAULT_MSG,
		       smt_state, l1tf_vmx_states[l1tf_vmx_mitigation]);
}
#else
/* Without CONFIG_KVM_INTEL there is no VMX state to report; emit only the
 * default PTE-inversion mitigation message. */
static ssize_t l1tf_show_state(char *buf)
{
return sprintf(buf, "%s\n", L1TF_DEFAULT_MSG);
}
#endif
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr, static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug) char *buf, unsigned int bug)
{ {
...@@ -741,9 +774,8 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr ...@@ -741,9 +774,8 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
case X86_BUG_L1TF: case X86_BUG_L1TF:
if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV)) if (boot_cpu_has(X86_FEATURE_L1TF_PTEINV))
return sprintf(buf, "Mitigation: Page Table Inversion\n"); return l1tf_show_state(buf);
break; break;
default: default:
break; break;
} }
......
...@@ -178,19 +178,13 @@ extern const ulong vmx_return; ...@@ -178,19 +178,13 @@ extern const ulong vmx_return;
static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush); static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
/* These MUST be in sync with vmentry_l1d_param order. */
enum vmx_l1d_flush_state {
VMENTER_L1D_FLUSH_NEVER,
VMENTER_L1D_FLUSH_COND,
VMENTER_L1D_FLUSH_ALWAYS,
};
static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND; static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
static const struct { static const struct {
const char *option; const char *option;
enum vmx_l1d_flush_state cmd; enum vmx_l1d_flush_state cmd;
} vmentry_l1d_param[] = { } vmentry_l1d_param[] = {
{"auto", VMENTER_L1D_FLUSH_AUTO},
{"never", VMENTER_L1D_FLUSH_NEVER}, {"never", VMENTER_L1D_FLUSH_NEVER},
{"cond", VMENTER_L1D_FLUSH_COND}, {"cond", VMENTER_L1D_FLUSH_COND},
{"always", VMENTER_L1D_FLUSH_ALWAYS}, {"always", VMENTER_L1D_FLUSH_ALWAYS},
...@@ -11204,8 +11198,12 @@ static int __init vmx_setup_l1d_flush(void) ...@@ -11204,8 +11198,12 @@ static int __init vmx_setup_l1d_flush(void)
{ {
struct page *page; struct page *page;
if (!boot_cpu_has_bug(X86_BUG_L1TF))
return 0;
l1tf_vmx_mitigation = vmentry_l1d_flush;
if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER || if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
!boot_cpu_has_bug(X86_BUG_L1TF) ||
vmx_l1d_use_msr_save_list()) vmx_l1d_use_msr_save_list())
return 0; return 0;
...@@ -11220,12 +11218,14 @@ static int __init vmx_setup_l1d_flush(void) ...@@ -11220,12 +11218,14 @@ static int __init vmx_setup_l1d_flush(void)
return 0; return 0;
} }
static void vmx_free_l1d_flush_pages(void) static void vmx_cleanup_l1d_flush(void)
{ {
if (vmx_l1d_flush_pages) { if (vmx_l1d_flush_pages) {
free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER); free_pages((unsigned long)vmx_l1d_flush_pages, L1D_CACHE_ORDER);
vmx_l1d_flush_pages = NULL; vmx_l1d_flush_pages = NULL;
} }
/* Restore state so sysfs ignores VMX */
l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_AUTO;
} }
static int __init vmx_init(void) static int __init vmx_init(void)
...@@ -11239,7 +11239,7 @@ static int __init vmx_init(void) ...@@ -11239,7 +11239,7 @@ static int __init vmx_init(void)
r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx),
__alignof__(struct vcpu_vmx), THIS_MODULE); __alignof__(struct vcpu_vmx), THIS_MODULE);
if (r) { if (r) {
vmx_free_l1d_flush_pages(); vmx_cleanup_l1d_flush();
return r; return r;
} }
...@@ -11260,7 +11260,7 @@ static void __exit vmx_exit(void) ...@@ -11260,7 +11260,7 @@ static void __exit vmx_exit(void)
kvm_exit(); kvm_exit();
vmx_free_l1d_flush_pages(); vmx_cleanup_l1d_flush();
} }
module_init(vmx_init) module_init(vmx_init)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment