Commit 0502ecb7 authored by Paolo Bonzini, committed by Stefan Bader

KVM: VMX: Tell the nested hypervisor to skip L1D flush on vmentry

When nested virtualization is in use, VMENTER operations from the nested
hypervisor into the nested guest will always be processed by the bare metal
hypervisor, and KVM's "conditional cache flushes" mode in particular does a
flush on nested vmentry.  Therefore, include the "skip L1D flush on
vmentry" bit in KVM's suggested ARCH_CAPABILITIES setting.

Add the relevant Documentation.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

CVE-2018-3620
CVE-2018-3646

[tyhicks: Adjust for the missing MSR_F10H_DECFG and MSR_IA32_UCODE_REV
 feature MSRs which do not exist in 4.15]
Signed-off-by: Tyler Hicks <tyhicks@canonical.com>
[smb: Minor context and adjusted documentation path]
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent e5559836
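Note (not part of the patch): the hint this commit advertises lives in IA32_ARCH_CAPABILITIES (MSR 0x10a), bit 3. A nested (L1) hypervisor that honours it reads the MSR and skips its own L1D flush before VMENTER, since the bare metal (L0) hypervisor already flushes on every nested vmentry. A minimal user-space sketch of that decision follows; l1_should_flush_l1d() is a hypothetical helper name, the MSR and bit definitions are the architectural ones.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Architectural definitions (Intel SDM / arch/x86/include/asm/msr-index.h). */
#define MSR_IA32_ARCH_CAPABILITIES	0x0000010a
#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1ULL << 3)

/*
 * Hypothetical helper for an L1 (nested) hypervisor: if the bare metal
 * hypervisor advertises SKIP_VMENTRY_L1DFLUSH, it already flushes the
 * L1D cache on every nested vmentry, so L1 can skip its own flush.
 */
static bool l1_should_flush_l1d(uint64_t arch_capabilities)
{
	return !(arch_capabilities & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH);
}

int main(void)
{
	/* Value an L1 guest running on a patched KVM host would observe. */
	uint64_t caps = ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	printf("L1 flushes L1D before VMENTER: %s\n",
	       l1_should_flush_l1d(caps) ? "yes" : "no (L0 flushes for us)");
	return 0;
}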
...@@ -546,6 +546,27 @@ available:
EPT can be disabled in the hypervisor via the 'kvm-intel.ept'
parameter.
3.4. Nested virtual machines
""""""""""""""""""""""""""""
When nested virtualization is in use, three operating systems are involved:
the bare metal hypervisor, the nested hypervisor and the nested virtual
machine. VMENTER operations from the nested hypervisor into the nested
guest will always be processed by the bare metal hypervisor. If KVM is the
bare metal hypervisor it will:
- Flush the L1D cache on every switch from the nested hypervisor to the
nested virtual machine, so that the nested hypervisor's secrets are not
exposed to the nested virtual machine;
- Flush the L1D cache on every switch from the nested virtual machine to
the nested hypervisor; this is a complex operation, and flushing the L1D
cache avoids that the bare metal hypervisor's secrets are exposed to the
nested virtual machine;
- Instruct the nested hypervisor to not perform any L1D cache flush. This
is an optimization to avoid double L1D flushing.
.. _default_mitigations:
......
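Note (not part of the patch): the three behaviours documented above can be summarised in a small stand-alone model. It is illustrative only; flush_l1d(), l0_arch_capabilities() and l0_run_nested_guest() are hypothetical names, not KVM functions.

#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1ULL << 3)

/* Stand-in for the real flush (MSR_IA32_FLUSH_CMD or the software sequence). */
static void flush_l1d(const char *why)
{
	printf("L0: L1D flush on %s\n", why);
}

/* Third bullet: L0 tells L1 to skip its own flush via ARCH_CAPABILITIES. */
static uint64_t l0_arch_capabilities(void)
{
	return ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
}

/* First two bullets: L0 flushes on both directions of a nested switch. */
static void l0_run_nested_guest(void)
{
	flush_l1d("nested hypervisor -> nested VM switch");
	/* ... the nested VM runs until its next exit ... */
	flush_l1d("nested VM -> nested hypervisor switch");
}

int main(void)
{
	printf("ARCH_CAPABILITIES exposed to L1: 0x%llx\n",
	       (unsigned long long)l0_arch_capabilities());
	l0_run_nested_guest();
	return 0;
}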
...@@ -1268,6 +1268,7 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu);
void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
unsigned long address);
u64 kvm_get_arch_capabilities(void);
void kvm_define_shared_msr(unsigned index, u32 msr);
int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
......
...@@ -5106,8 +5106,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
++vmx->nmsrs;
}
-	if (boot_cpu_has(X86_FEATURE_ARCH_CAPABILITIES))
-		rdmsrl(MSR_IA32_ARCH_CAPABILITIES, vmx->arch_capabilities);
+	vmx->arch_capabilities = kvm_get_arch_capabilities();
vm_exit_controls_init(vmx, vmcs_config.vmexit_ctrl);
......
...@@ -1003,11 +1003,33 @@ static u32 msr_based_features[] = {
static unsigned int num_msr_based_features;
u64 kvm_get_arch_capabilities(void)
{
	u64 data;

	rdmsrl_safe(MSR_IA32_ARCH_CAPABILITIES, &data);

	/*
	 * If we're doing cache flushes (either "always" or "cond")
	 * we will do one whenever the guest does a vmlaunch/vmresume.
	 *
	 * If an outer hypervisor is doing the cache flush for us
	 * (VMENTER_L1D_FLUSH_NESTED_VM), we can safely pass that
	 * capability to the guest too, and if EPT is disabled we're not
	 * vulnerable.  Overall, only VMENTER_L1D_FLUSH_NEVER will
	 * require a nested hypervisor to do a flush of its own.
	 */
	if (l1tf_vmx_mitigation != VMENTER_L1D_FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;

	return data;
}
EXPORT_SYMBOL_GPL(kvm_get_arch_capabilities);
static int kvm_get_msr_feature(struct kvm_msr_entry *msr)
{
switch (msr->index) {
case MSR_IA32_ARCH_CAPABILITIES:
-		rdmsrl_safe(msr->index, &msr->data);
+		msr->data = kvm_get_arch_capabilities();
break;
default:
if (kvm_x86_ops->get_msr_feature(msr))
......
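Note (not part of the patch): the rule in kvm_get_arch_capabilities() above is that every l1tf mitigation mode except "never" advertises the skip bit, since in the "cond" and "always" modes L0 flushes on nested vmentry anyway, in the nested-VM case an outer hypervisor does, and with EPT disabled the host is not vulnerable. A stand-alone model of that mapping, with simplified enum names standing in for the kvm-intel vmentry_l1d_flush states:

#include <stdint.h>
#include <stdio.h>

#define ARCH_CAP_SKIP_VMENTRY_L1DFLUSH	(1ULL << 3)

/* Simplified stand-ins for the vmentry_l1d_flush mitigation states. */
enum l1d_flush_mode {
	FLUSH_NEVER,
	FLUSH_COND,
	FLUSH_ALWAYS,
	FLUSH_EPT_DISABLED,
};

/* Same rule as kvm_get_arch_capabilities(): only "never" leaves the bit clear. */
static uint64_t arch_capabilities_for(enum l1d_flush_mode mode, uint64_t host_caps)
{
	uint64_t data = host_caps;

	if (mode != FLUSH_NEVER)
		data |= ARCH_CAP_SKIP_VMENTRY_L1DFLUSH;
	return data;
}

int main(void)
{
	static const char *const names[] = {
		"never", "cond", "always", "ept-disabled",
	};

	for (int m = FLUSH_NEVER; m <= FLUSH_EPT_DISABLED; m++)
		printf("%-12s -> skip bit %s\n", names[m],
		       arch_capabilities_for(m, 0) & ARCH_CAP_SKIP_VMENTRY_L1DFLUSH ?
		       "set" : "clear");
	return 0;
}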