Commit 85aa8889 authored by Junaid Shahid, committed by Paolo Bonzini

kvm: vmx: Sync all matching EPTPs when injecting nested EPT fault

When a nested EPT violation/misconfig is injected into the guest,
the shadow EPT PTEs associated with that address need to be synced.
This is done by kvm_inject_emulated_page_fault() before it calls
nested_ept_inject_page_fault(). However, that will only sync the
shadow EPT PTE associated with the current L1 EPTP. Since the ASID
is based on EP4TA rather than the full EPTP, syncing the current
EPTP is not enough. The SPTEs associated with any other L1 EPTPs
in the prev_roots cache with the same EP4TA also need to be synced.
Signed-off-by: Junaid Shahid <junaids@google.com>
Message-Id: <20210806222229.1645356-1-junaids@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 375d1ade
@@ -330,6 +330,31 @@ void nested_vmx_free_vcpu(struct kvm_vcpu *vcpu)
 	vcpu_put(vcpu);
 }
 
+#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
+
+static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
+{
+	return VALID_PAGE(root_hpa) &&
+	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
+}
+
+static void nested_ept_invalidate_addr(struct kvm_vcpu *vcpu, gpa_t eptp,
+				       gpa_t addr)
+{
+	uint i;
+	struct kvm_mmu_root_info *cached_root;
+
+	WARN_ON_ONCE(!mmu_is_nested(vcpu));
+
+	for (i = 0; i < KVM_MMU_NUM_PREV_ROOTS; i++) {
+		cached_root = &vcpu->arch.mmu->prev_roots[i];
+
+		if (nested_ept_root_matches(cached_root->hpa, cached_root->pgd,
+					    eptp))
+			vcpu->arch.mmu->invlpg(vcpu, addr, cached_root->hpa);
+	}
+}
+
 static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 					 struct x86_exception *fault)
 {
@@ -342,10 +367,22 @@ static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
 		vm_exit_reason = EXIT_REASON_PML_FULL;
 		vmx->nested.pml_full = false;
 		exit_qualification &= INTR_INFO_UNBLOCK_NMI;
-	} else if (fault->error_code & PFERR_RSVD_MASK)
-		vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
-	else
-		vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+	} else {
+		if (fault->error_code & PFERR_RSVD_MASK)
+			vm_exit_reason = EXIT_REASON_EPT_MISCONFIG;
+		else
+			vm_exit_reason = EXIT_REASON_EPT_VIOLATION;
+
+		/*
+		 * Although the caller (kvm_inject_emulated_page_fault) would
+		 * have already synced the faulting address in the shadow EPT
+		 * tables for the current EPTP12, we also need to sync it for
+		 * any other cached EPTP02s based on the same EP4TA, since the
+		 * TLB associates mappings to the EP4TA rather than the full EPTP.
+		 */
+		nested_ept_invalidate_addr(vcpu, vmcs12->ept_pointer,
+					   fault->address);
+	}
 
 	nested_vmx_vmexit(vcpu, vm_exit_reason, 0, exit_qualification);
 	vmcs12->guest_physical_address = fault->address;
@@ -5325,14 +5362,6 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	return nested_vmx_succeed(vcpu);
 }
 
-#define EPTP_PA_MASK   GENMASK_ULL(51, 12)
-
-static bool nested_ept_root_matches(hpa_t root_hpa, u64 root_eptp, u64 eptp)
-{
-	return VALID_PAGE(root_hpa) &&
-	       ((root_eptp & EPTP_PA_MASK) == (eptp & EPTP_PA_MASK));
-}
-
 /* Emulate the INVEPT instruction */
 static int handle_invept(struct kvm_vcpu *vcpu)
 {
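
For readers unfamiliar with the terminology: the EP4TA is bits 51:12 of the EPTP, i.e. the physical address of the EPT PML4 table, while the low bits of the EPTP hold attributes such as memory type, page-walk length, and accessed/dirty enable. Because the TLB tags translations by EP4TA alone, two cached roots whose EPTPs differ only in those attribute bits must be synced together. Below is a minimal standalone sketch of the masked comparison the patch performs; it is ordinary userspace C, not kernel code, and the EPTP constants are made-up values for illustration.

#include <stdint.h>
#include <stdio.h>

/* Same mask as the patch: GENMASK_ULL(51, 12), i.e. bits 51..12 set. */
#define EPTP_PA_MASK (((1ULL << 40) - 1) << 12)

/* Mirrors the comparison in nested_ept_root_matches(), minus the
 * VALID_PAGE() check on the cached root. */
static int same_ep4ta(uint64_t eptp_a, uint64_t eptp_b)
{
	return (eptp_a & EPTP_PA_MASK) == (eptp_b & EPTP_PA_MASK);
}

int main(void)
{
	/* Hypothetical EPTPs: identical PML4 address (0x123456000),
	 * differing only in the low attribute bits (A/D enable toggled). */
	uint64_t eptp_ad    = 0x123456000ULL | 0x5e;
	uint64_t eptp_no_ad = 0x123456000ULL | 0x1e;

	/* Prints 1: both share an EP4TA, so the TLB tags them identically
	 * and a sync of one root must also sync the other cached root. */
	printf("%d\n", same_ep4ta(eptp_ad, eptp_no_ad));
	return 0;
}

This is exactly why nested_ept_invalidate_addr() walks every entry in prev_roots instead of stopping at the current root: any cached EPTP02 built from an L1 EPTP with the same EP4TA is reachable through the same TLB tag and would otherwise retain a stale translation.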