Commit bbd637c6 authored by Nicolai Stange, committed by Stefan Bader

x86/KVM/VMX: Initialize the vmx_l1d_flush_pages' content

The slow path in vmx_l1d_flush() reads from vmx_l1d_flush_pages in order
to evict the L1d cache.
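
As an aside for readers who prefer C to the inline asm in the hunks below, here is a minimal user-space sketch of that access pattern: touch one byte per page so the buffer is in the TLB, then read one byte per 64-byte cache line to displace the L1D. The SKETCH_* constants and l1d_flush_sw() are made up for illustration (assuming 4 KiB pages and 64-byte lines); this is not the kernel's code.

#include <stdint.h>
#include <stddef.h>

#define SKETCH_PAGE_SIZE	4096u
#define SKETCH_CACHE_ORDER	4u	/* assumed: 16 pages, 64 KiB buffer */

static void l1d_flush_sw(const volatile uint8_t *flush_pages)
{
	size_t size = SKETCH_PAGE_SIZE << SKETCH_CACHE_ORDER;
	size_t off;

	/* First ensure the pages are in the TLB: one read per page. */
	for (off = 0; off < size; off += SKETCH_PAGE_SIZE)
		(void)flush_pages[off];

	/* Now fill the cache: one read per 64-byte cache line. */
	for (off = 0; off < size; off += 64)
		(void)flush_pages[off];
}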

However, these pages are never cleared and, in theory, their data could be
leaked.

More importantly, KSM could merge a nested hypervisor's vmx_l1d_flush_pages
to fewer than 1 << L1D_CACHE_ORDER host physical pages and this would break
the L1d flushing algorithm: L1D on x86_64 is tagged by physical addresses.
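
A back-of-the-envelope illustration of the size argument (values assumed for the example, not taken from this patch: 4 KiB pages, L1D_CACHE_ORDER of 4 giving a 16-page flush buffer, and a typical 32 KiB L1D):

#include <stdio.h>

int main(void)
{
	const unsigned int page_size = 4096;		/* assumed 4 KiB pages */
	const unsigned int cache_order = 4;		/* assumed: 16 flush pages */
	const unsigned int l1d_size = 32 * 1024;	/* typical L1D capacity */

	unsigned int intended = page_size << cache_order;	/* 64 KiB */
	unsigned int merged_worst_case = page_size;		/* all pages KSM-merged */

	printf("intended distinct footprint: %u KiB\n", intended / 1024);
	printf("worst case after KSM merge:  %u KiB (L1D holds %u KiB)\n",
	       merged_worst_case / 1024, l1d_size / 1024);
	return 0;
}

With everything merged onto a single host physical page, the fill loop touches only 4 KiB of distinct physical memory and can no longer displace the whole physically tagged L1D.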

Fix this by initializing the individual vmx_l1d_flush_pages with a
different pattern each.

Rename the "empty_zp" asm constraint identifier in vmx_l1d_flush() to
"flush_pages" to reflect this change.

Fixes: a47dd5f0 ("x86/KVM/VMX: Add L1D flush algorithm")
Signed-off-by: Nicolai Stange <nstange@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

CVE-2018-3620
CVE-2018-3646
Signed-off-by: Stefan Bader <stefan.bader@canonical.com>
parent fa535113
@@ -196,6 +196,7 @@ static void *vmx_l1d_flush_pages;
 static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 {
 	struct page *page;
+	unsigned int i;
 
 	if (!enable_ept) {
 		l1tf_vmx_mitigation = VMENTER_L1D_FLUSH_EPT_DISABLED;
@@ -228,6 +229,16 @@ static int vmx_setup_l1d_flush(enum vmx_l1d_flush_state l1tf)
 		if (!page)
 			return -ENOMEM;
 		vmx_l1d_flush_pages = page_address(page);
+
+		/*
+		 * Initialize each page with a different pattern in
+		 * order to protect against KSM in the nested
+		 * virtualization case.
+		 */
+		for (i = 0; i < 1u << L1D_CACHE_ORDER; ++i) {
+			memset(vmx_l1d_flush_pages + i * PAGE_SIZE, i + 1,
+			       PAGE_SIZE);
+		}
 	}
 
 	l1tf_vmx_mitigation = l1tf;
@@ -8404,7 +8415,7 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* First ensure the pages are in the TLB */
 		"xorl %%eax, %%eax\n"
 		".Lpopulate_tlb:\n\t"
-		"movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl $4096, %%eax\n\t"
 		"cmpl %%eax, %[size]\n\t"
 		"jne .Lpopulate_tlb\n\t"
@@ -8413,12 +8424,12 @@ static void vmx_l1d_flush(struct kvm_vcpu *vcpu)
 		/* Now fill the cache */
 		"xorl %%eax, %%eax\n"
 		".Lfill_cache:\n"
-		"movzbl (%[empty_zp], %%" _ASM_AX "), %%ecx\n\t"
+		"movzbl (%[flush_pages], %%" _ASM_AX "), %%ecx\n\t"
 		"addl $64, %%eax\n\t"
 		"cmpl %%eax, %[size]\n\t"
 		"jne .Lfill_cache\n\t"
 		"lfence\n"
-		:: [empty_zp] "r" (vmx_l1d_flush_pages),
+		:: [flush_pages] "r" (vmx_l1d_flush_pages),
 		   [size] "r" (size)
 		: "eax", "ebx", "ecx", "edx");
 }