Commit 1c67bf4c authored by Konrad Rzeszutek Wilk, committed by Greg Kroah-Hartman

x86/KVM/VMX: Use MSR save list for IA32_FLUSH_CMD if required

commit 390d975e upstream

If the L1D flush module parameter is set to 'always' and the IA32_FLUSH_CMD
MSR is available, optimize the VMENTER code with the MSR save list.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7b0cdac5
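
To make the diff below easier to follow, here is a minimal user-space sketch (not kernel code) of what a VM-entry MSR-load ("MSR save") list is: a small table of MSR index/value pairs that the CPU itself loads on every VMENTER, so no software write is needed in the entry path. The entry layout follows the Intel SDM (32-bit MSR index, 32 reserved bits, 64-bit value); the list size, struct and helper names are illustrative assumptions, not the kernel's add_atomic_switch_msr() implementation.

/*
 * Illustrative model of a VM-entry MSR-load list.  Names other than
 * MSR_IA32_FLUSH_CMD and L1D_FLUSH are made up for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define MSR_IA32_FLUSH_CMD	0x0000010b	/* IA32_FLUSH_CMD MSR number */
#define L1D_FLUSH		(1ULL << 0)	/* writeback & invalidate L1D */

#define NR_AUTOLOAD_MSRS	8		/* illustrative list limit */

struct msr_autoload_entry {
	uint32_t index;		/* MSR number */
	uint32_t reserved;	/* must be zero per the SDM layout */
	uint64_t value;		/* value the CPU loads on VM entry */
};

struct msr_autoload_list {
	unsigned int nr;
	struct msr_autoload_entry entries[NR_AUTOLOAD_MSRS];
};

/* Add or update an entry; returns 0 on success, -1 if the list is full. */
static int autoload_add_msr(struct msr_autoload_list *list,
			    uint32_t index, uint64_t value)
{
	unsigned int i;

	for (i = 0; i < list->nr; i++) {
		if (list->entries[i].index == index) {
			list->entries[i].value = value;	/* update in place */
			return 0;
		}
	}
	if (list->nr == NR_AUTOLOAD_MSRS)
		return -1;

	list->entries[list->nr].index = index;
	list->entries[list->nr].reserved = 0;
	list->entries[list->nr].value = value;
	list->nr++;
	return 0;
}

int main(void)
{
	struct msr_autoload_list guest_load = { 0 };

	/* Ask the CPU to write L1D_FLUSH to IA32_FLUSH_CMD on every entry. */
	if (autoload_add_msr(&guest_load, MSR_IA32_FLUSH_CMD, L1D_FLUSH))
		return 1;

	printf("entry 0: MSR 0x%x <- 0x%llx on VMENTER\n",
	       guest_load.entries[0].index,
	       (unsigned long long)guest_load.entries[0].value);
	return 0;
}

With MSR_IA32_FLUSH_CMD on that list and a value of L1D_FLUSH, the CPU flushes the L1D cache on every VM entry, which is why the software flush path no longer needs to handle the 'always' mode when the MSR is present.
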
@@ -5714,6 +5714,16 @@ static void ept_set_mmio_spte_mask(void)
 				   VMX_EPT_MISCONFIG_WX_VALUE);
 }
 
+static bool vmx_l1d_use_msr_save_list(void)
+{
+	if (!enable_ept || !boot_cpu_has_bug(X86_BUG_L1TF) ||
+	    static_cpu_has(X86_FEATURE_HYPERVISOR) ||
+	    !static_cpu_has(X86_FEATURE_FLUSH_L1D))
+		return false;
+
+	return vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+}
+
 #define VMX_XSS_EXIT_BITMAP 0
 /*
  * Sets up the vmcs for emulated real mode.
@@ -6061,6 +6071,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 			vmcs_clear_bits(GUEST_INTERRUPTIBILITY_INFO,
 					GUEST_INTR_STATE_NMI);
 	}
+	/*
+	 * If flushing the L1D cache on every VMENTER is enforced and the
+	 * MSR is available, use the MSR save list.
+	 */
+	if (vmx_l1d_use_msr_save_list())
+		add_atomic_switch_msr(vmx, MSR_IA32_FLUSH_CMD, L1D_FLUSH, 0, true);
 }
 
 static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
@@ -9082,11 +9098,26 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 	bool always;
 
 	/*
-	 * If the mitigation mode is 'flush always', keep the flush bit
-	 * set, otherwise clear it. It gets set again either from
-	 * vcpu_run() or from one of the unsafe VMEXIT handlers.
+	 * This code is only executed when:
+	 * - the flush mode is 'cond'
+	 * - the flush mode is 'always' and the flush MSR is not
+	 *   available
+	 *
+	 * If the CPU has the flush MSR then clear the flush bit because
+	 * 'always' mode is handled via the MSR save list.
+	 *
+	 * If the MSR is not available then act depending on the mitigation
+	 * mode: If 'flush always', keep the flush bit set, otherwise clear
+	 * it.
+	 *
+	 * The flush bit gets set again either from vcpu_run() or from one
+	 * of the unsafe VMEXIT handlers.
 	 */
-	always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+	if (static_cpu_has(X86_FEATURE_FLUSH_L1D))
+		always = false;
+	else
+		always = vmentry_l1d_flush == VMENTER_L1D_FLUSH_ALWAYS;
+
 	vcpu->arch.l1tf_flush_l1d = always;
 
 	vcpu->stat.l1d_flush++;
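
The comment in the hunk above encodes a small decision table. As a hedged, stand-alone restatement (the enum and function names below are illustrative, not the kernel's), the software flush path only stays permanently armed when the mode is 'always' and the CPU lacks the flush MSR; otherwise the per-vCPU flush bit is cleared after each flush and re-armed from vcpu_run() or an unsafe VMEXIT handler.

#include <stdbool.h>
#include <stdio.h>

enum l1d_flush_mode { FLUSH_NEVER, FLUSH_COND, FLUSH_ALWAYS };

/*
 * True when the software flush path must keep the flush bit permanently
 * set; false when it is cleared after each flush (either because the mode
 * is 'cond', or because the flush MSR exists and 'always' is serviced by
 * the MSR save list instead).
 */
static bool software_flush_stays_armed(enum l1d_flush_mode mode,
				       bool cpu_has_flush_msr)
{
	if (cpu_has_flush_msr)
		return false;

	return mode == FLUSH_ALWAYS;
}

int main(void)
{
	printf("always + MSR:   %d\n", software_flush_stays_armed(FLUSH_ALWAYS, true));
	printf("always, no MSR: %d\n", software_flush_stays_armed(FLUSH_ALWAYS, false));
	printf("cond,   no MSR: %d\n", software_flush_stays_armed(FLUSH_COND, false));
	return 0;
}
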
@@ -12503,7 +12534,8 @@ static int __init vmx_setup_l1d_flush(void)
 	struct page *page;
 
 	if (vmentry_l1d_flush == VMENTER_L1D_FLUSH_NEVER ||
-	    !boot_cpu_has_bug(X86_BUG_L1TF))
+	    !boot_cpu_has_bug(X86_BUG_L1TF) ||
+	    vmx_l1d_use_msr_save_list())
 		return 0;
 
 	if (!boot_cpu_has(X86_FEATURE_FLUSH_L1D)) {