Commit 995f00a6 authored by Peter Feiner, committed by Paolo Bonzini

x86: kvm: mmu: use ept a/d in vmcs02 iff used in vmcs12

EPT A/D was enabled in the vmcs02 EPTP regardless of vmcs12's EPTP
value. The problem is that enabling A/D changes the behavior of L2's
x86 page table walks as seen by L1. With A/D enabled, x86 page table
walks are always treated as EPT writes.
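
As a minimal standalone sketch of that hardware rule (EPTP_AD_ENABLE
mirrors VMX_EPT_AD_ENABLE_BIT, i.e. EPTP bit 6; the function name is
hypothetical):

    #include <stdbool.h>
    #include <stdint.h>

    #define EPTP_AD_ENABLE (1ull << 6)  /* EPTP bit 6: EPT A/D flags on */

    /*
     * With EPT A/D enabled, processor accesses to guest paging-structure
     * entries are treated as writes, so every x86 page-table walk done
     * on behalf of L2 needs EPT write permission on the guest page tables.
     */
    static bool walk_treated_as_ept_write(uint64_t eptp)
    {
            return eptp & EPTP_AD_ENABLE;
    }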

Commit ae1e2d10 ("kvm: nVMX: support EPT accessed/dirty bits",
2017-03-30) tried to work around this problem by clearing the write
bit in the exit qualification for EPT violations triggered by page
walks.  However, that fixup introduced the opposite bug: page-table walks
that actually set x86 A/D bits were *missing* the write bit in the exit
qualification.
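
Condensed into a standalone sketch (the helper name and bool parameters
are hypothetical; the bit positions match EPT_VIOLATION_ACC_WRITE and
EPT_VIOLATION_GVA_TRANSLATED), the removed workaround did roughly this:

    #include <stdbool.h>
    #include <stdint.h>

    #define ACC_WRITE      (1ull << 1)  /* exit qualification: write access */
    #define GVA_TRANSLATED (1ull << 8)  /* clear: fault was a page walk */

    /* l2: vCPU in guest mode; vmcs12_ad: vmcs12's EPTP enables A/D */
    static uint64_t buggy_fixup(uint64_t exit_qual, bool l2, bool vmcs12_ad)
    {
            if (l2 && !vmcs12_ad && !(exit_qual & GVA_TRANSLATED))
                    /*
                     * Wrong for a walk that sets an x86 A/D bit: that walk
                     * really writes the guest page table, yet L1 sees the
                     * violation without ACC_WRITE.
                     */
                    exit_qual &= ~ACC_WRITE;
            return exit_qual;
    }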

This patch fixes the problem by disabling EPT A/D in the shadow MMU
when EPT A/D is disabled in vmcs12's EPTP.
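
The resulting vmcs02 rule boils down to this predicate (a sketch; the
function and parameter names are hypothetical):

    #include <stdbool.h>

    /*
     * vmcs02 enables EPT A/D iff the host has A/D enabled and, when the
     * vCPU is running L2, vmcs12's EPTP requested A/D as well.
     */
    static bool vmcs02_ad_enabled(bool host_ad, bool in_l2, bool vmcs12_ad)
    {
            return host_ad && (!in_l2 || vmcs12_ad);
    }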
Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent ac8d57e5
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4419,6 +4419,7 @@ void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
 	context->root_level = context->shadow_root_level;
 	context->root_hpa = INVALID_PAGE;
 	context->direct_map = false;
+	context->base_role.ad_disabled = !accessed_dirty;
 
 	update_permission_bitmask(vcpu, context, true);
 	update_pkru_bitmask(vcpu, context, true);
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -910,8 +910,9 @@ static void nested_release_page_clean(struct page *page)
 	kvm_release_page_clean(page);
 }
 
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
-static u64 construct_eptp(unsigned long root_hpa);
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 static bool vmx_xsaves_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -4013,7 +4014,7 @@ static inline void __vmx_flush_tlb(struct kvm_vcpu *vcpu, int vpid)
 	if (enable_ept) {
 		if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
 			return;
-		ept_sync_context(construct_eptp(vcpu->arch.mmu.root_hpa));
+		ept_sync_context(construct_eptp(vcpu, vcpu->arch.mmu.root_hpa));
 	} else {
 		vpid_sync_context(vpid);
 	}
@@ -4188,14 +4189,15 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmx->emulation_required = emulation_required(vcpu);
 }
 
-static u64 construct_eptp(unsigned long root_hpa)
+static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa)
 {
 	u64 eptp;
 
 	/* TODO write the value reading from MSR */
 	eptp = VMX_EPT_DEFAULT_MT |
 		VMX_EPT_DEFAULT_GAW << VMX_EPT_GAW_EPTP_SHIFT;
-	if (enable_ept_ad_bits)
+	if (enable_ept_ad_bits &&
+	    (!is_guest_mode(vcpu) || nested_ept_ad_enabled(vcpu)))
 		eptp |= VMX_EPT_AD_ENABLE_BIT;
 	eptp |= (root_hpa & PAGE_MASK);
 
@@ -4209,7 +4211,7 @@ static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3)
 
 	guest_cr3 = cr3;
 	if (enable_ept) {
-		eptp = construct_eptp(cr3);
+		eptp = construct_eptp(vcpu, cr3);
 		vmcs_write64(EPT_POINTER, eptp);
 		if (is_paging(vcpu) || is_guest_mode(vcpu))
 			guest_cr3 = kvm_read_cr3(vcpu);
@@ -6214,17 +6216,6 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
 
-	if (is_guest_mode(vcpu)
-	    && !(exit_qualification & EPT_VIOLATION_GVA_TRANSLATED)) {
-		/*
-		 * Fix up exit_qualification according to whether guest
-		 * page table accesses are reads or writes.
-		 */
-		u64 eptp = nested_ept_get_cr3(vcpu);
-		if (!(eptp & VMX_EPT_AD_ENABLE_BIT))
-			exit_qualification &= ~EPT_VIOLATION_ACC_WRITE;
-	}
-
 	/*
 	 * EPT violation happened while executing iret from NMI,
 	 * "blocked by NMI" bit has to be set before next VM entry.
@@ -6447,7 +6438,7 @@ void vmx_enable_tdp(void)
 		enable_ept_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull,
 		0ull, VMX_EPT_EXECUTABLE_MASK,
 		cpu_has_vmx_ept_execute_only() ? 0ull : VMX_EPT_READABLE_MASK,
-		enable_ept_ad_bits ? 0ull : VMX_EPT_RWX_MASK);
+		VMX_EPT_RWX_MASK);
 
 	ept_set_mmio_spte_mask();
 	kvm_enable_tdp();
@@ -9393,6 +9384,11 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 	vmcs12->guest_physical_address = fault->address;
 }
 
+static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu)
+{
+	return nested_ept_get_cr3(vcpu) & VMX_EPT_AD_ENABLE_BIT;
+}
+
 /* Callbacks for nested_ept_init_mmu_context: */
 
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
@@ -9403,18 +9399,18 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu)
 
 static int nested_ept_init_mmu_context(struct kvm_vcpu *vcpu)
 {
-	u64 eptp;
+	bool wants_ad;
 
 	WARN_ON(mmu_is_nested(vcpu));
-	eptp = nested_ept_get_cr3(vcpu);
-	if ((eptp & VMX_EPT_AD_ENABLE_BIT) && !enable_ept_ad_bits)
+	wants_ad = nested_ept_ad_enabled(vcpu);
+	if (wants_ad && !enable_ept_ad_bits)
 		return 1;
 
 	kvm_mmu_unload(vcpu);
 	kvm_init_shadow_ept_mmu(vcpu,
 			to_vmx(vcpu)->nested.nested_vmx_ept_caps &
 			VMX_EPT_EXECUTE_ONLY_BIT,
-			eptp & VMX_EPT_AD_ENABLE_BIT);
+			wants_ad);
 	vcpu->arch.mmu.set_cr3 = vmx_set_cr3;
 	vcpu->arch.mmu.get_cr3 = nested_ept_get_cr3;
 	vcpu->arch.mmu.inject_page_fault = nested_ept_inject_page_fault;