Commit 641a2117 authored by Thomas Gleixner, committed by Greg Kroah-Hartman

x86/kvm: Move l1tf setup function

commit 7db92e16 upstream

In preparation for allowing run-time control of L1D flushing, move the
setup code to the module parameter handler.

In the case of pre module init parsing, just store the value and let vmx_init()
do the actual setup after running kvm_init(), so that enable_ept has the
correct state.

At run time, invoke it directly from the parameter setter to prepare for
run-time control (a stand-alone sketch of this pattern follows the commit metadata).
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Tested-by: Jiri Kosina <jkosina@suse.cz>
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Reviewed-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lkml.kernel.org/r/20180713142322.694063239@linutronix.de
Signed-off-by: David Woodhouse <dwmw@amazon.co.uk>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 4186ae81
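
The deferred-setup pattern the patch describes can be illustrated outside of KVM with a minimal, hypothetical module: the parameter's set handler only records the value until the module's init function has run, and applies it directly afterwards. This is a sketch, not part of the patch; module_param_cb() and struct kernel_param_ops are the real kernel interfaces (the same ones vmx.c uses), while every demo_* name is invented for illustration.

/*
 * Hypothetical stand-alone module, not part of the patch: the parameter's
 * set handler only stores the value until the init function has run, and
 * applies it directly afterwards. All demo_* names are invented.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static bool demo_initialized;		/* true once demo_init() has run */
static int demo_param_value = -1;	/* storage for pre module init parsing */

/* Placeholder for the real setup work (cf. vmx_setup_l1d_flush()). */
static int demo_apply(int val)
{
	pr_info("demo: applying value %d\n", val);
	return 0;
}

static int demo_param_set(const char *s, const struct kernel_param *kp)
{
	int val, ret;

	ret = kstrtoint(s, 10, &val);
	if (ret)
		return ret;

	/* Pre module init: just store the value for demo_init(). */
	if (!demo_initialized) {
		demo_param_value = val;
		return 0;
	}
	/* Run time: do the setup right away. */
	return demo_apply(val);
}

static int demo_param_get(char *s, const struct kernel_param *kp)
{
	return sprintf(s, "%d\n", demo_param_value);
}

static const struct kernel_param_ops demo_param_ops = {
	.set = demo_param_set,
	.get = demo_param_get,
};
module_param_cb(demo_param, &demo_param_ops, NULL, 0444);

static int __init demo_init(void)
{
	int ret = 0;

	/* Other init work (the kvm_init() equivalent) runs first, then the
	 * value stored by pre init parsing is applied. */
	if (demo_param_value >= 0)
		ret = demo_apply(demo_param_value);
	demo_initialized = true;
	return ret;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");

In the patch itself the same split appears as vmentry_l1d_flush_set() storing into vmentry_l1d_flush_param while l1tf_vmx_mitigation is still VMENTER_L1D_FLUSH_AUTO, with vmx_init() later handing the stored value to vmx_setup_l1d_flush(). At this stage the parameter remains read-only (S_IRUGO); writable run-time control is wired up by a later patch in the series.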
arch/x86/kvm/vmx.c

@@ -194,7 +194,8 @@ extern const ulong vmx_return;
 static DEFINE_STATIC_KEY_FALSE(vmx_l1d_should_flush);
 
-static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush = VMENTER_L1D_FLUSH_COND;
+/* Storage for pre module init parameter parsing */
+static enum vmx_l1d_flush_state __read_mostly vmentry_l1d_flush_param = VMENTER_L1D_FLUSH_AUTO;
 
 static const struct {
 	const char *option;
 	enum vmx_l1d_flush_state cmd;
@@ -206,33 +207,85 @@ static const struct {
 	{"always", VMENTER_L1D_FLUSH_ALWAYS},
 };
 
-static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+#define L1D_CACHE_ORDER 4
+static void *vmx_l1d_flush_pages;
+
+static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
-	unsigned int i;
-
-	if (!s)
-		return -EINVAL;
+	struct page *page;
 
-	for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
-		if (!strcmp(s, vmentry_l1d_param[i].option)) {
-			vmentry_l1d_flush = vmentry_l1d_param[i].cmd;
-			return 0;
-		}
+	/* If set to 'auto' select 'cond' */
+	if (l1tf == VMENTER_L1D_FLUSH_AUTO)
+		l1tf = VMENTER_L1D_FLUSH_COND;
+
+	if (!enable_ept) {
+		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
+		return 0;
 	}
 
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER && !vmx_l1d_flush_pages &&
+	    !boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
+		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
+		if (!page)
+			return -ENOMEM;
+		vmx_l1d_flush_pages = page_address(page);
+	}
+
+	l1tf_vmx_mitigation = l1tf;
+
+	if (l1tf != VMENTER_L1D_FLUSH_NEVER)
+		static_branch_enable(&vmx_l1d_should_flush);
+	return 0;
+}
+
+static int vmentry_l1d_flush_parse(const char *s)
+{
+	unsigned int i;
+
+	if (s) {
+		for (i = 0; i < ARRAY_SIZE(vmentry_l1d_param); i++) {
+			if (!strcmp(s, vmentry_l1d_param[i].option))
+				return vmentry_l1d_param[i].cmd;
+		}
+	}
 	return -EINVAL;
 }
 
+static int vmentry_l1d_flush_set(const char *s, const struct kernel_param *kp)
+{
+	int l1tf;
+
+	if (!boot_cpu_has(X86_BUG_L1TF))
+		return 0;
+
+	l1tf = vmentry_l1d_flush_parse(s);
+	if (l1tf < 0)
+		return l1tf;
+
+	/*
+	 * Has vmx_init() run already? If not then this is the pre init
+	 * parameter parsing. In that case just store the value and let
+	 * vmx_init() do the proper setup after enable_ept has been
+	 * established.
+	 */
+	if (l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_AUTO) {
+		vmentry_l1d_flush_param = l1tf;
+		return 0;
+	}
+
+	return vmx_setup_l1d_flush(l1tf);
+}
+
 static int vmentry_l1d_flush_get(char *s, const struct kernel_param *kp)
 {
-	return sprintf(s, "%s\n", vmentry_l1d_param[vmentry_l1d_flush].option);
+	return sprintf(s, "%s\n", vmentry_l1d_param[l1tf_vmx_mitigation].option);
 }
 
 static const struct kernel_param_ops vmentry_l1d_flush_ops = {
 	.set = vmentry_l1d_flush_set,
 	.get = vmentry_l1d_flush_get,
 };
-module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, &vmentry_l1d_flush, S_IRUGO);
+module_param_cb(vmentry_l1d_flush, &vmentry_l1d_flush_ops, NULL, S_IRUGO);
 
 #define NR_AUTOLOAD_MSRS 8
@@ -8582,7 +8635,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	 * it. The flush bit gets set again either from vcpu_run() or from
 	 * one of the unsafe VMEXIT handlers.
 	 */
-	always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+	always = l1tf_vmx_mitigation == VMENTER_L1D_FLUSH_ALWAYS;
 	vcpu->arch.l1tf_flush_l1d = always;
 
 	vcpu->stat.l1d_flush++;
@@ -11652,34 +11705,6 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.setup_mce = vmx_setup_mce,
 };
 
-static int __init vmx_setup_l1d_flush(void)
-{
-	struct page *page;
-
-	if (!boot_cpu_has_bug(X86_BUG_L1TF))
-		return 0;
-
-	if (!enable_ept) {
-		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
-		return 0;
-	}
-
-	l1tf_vmx_mitigation = vmentry_l1d_flush;
-
-	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER)
-		return 0;
-
-	if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {
-		page = alloc_pages(GFP_KERNEL, L1D_CACHE_ORDER);
-		if (!page)
-			return -ENOMEM;
-		vmx_l1d_flush_pages = page_address(page);
-	}
-
-	static_branch_enable(&vmx_l1d_should_flush);
-	return 0;
-}
-
 static void vmx_cleanup_l1d_flush(void)
 {
 	if (vmx_l1d_flush_pages) {
@@ -11714,12 +11739,18 @@ static int __init vmx_init(void)
 		return r;
 
 	/*
-	 * Must be called after kvm_init() so enable_ept is properly set up
+	 * Must be called after kvm_init() so enable_ept is properly set
+	 * up. Hand the parameter mitigation value in which was stored in
+	 * the pre module init parser. If no parameter was given, it will
+	 * contain 'auto' which will be turned into the default 'cond'
+	 * mitigation mode.
 	 */
-	r = vmx_setup_l1d_flush();
-	if (r) {
-		vmx_exit();
-		return r;
+	if (boot_cpu_has(X86_BUG_L1TF)) {
+		r = vmx_setup_l1d_flush(vmentry_l1d_flush_param);
+		if (r) {
+			vmx_exit();
+			return r;
+		}
 	}
 
 #ifdef CONFIG_KEXEC_CORE
...