Commit ee1fa209 authored by Junaid Shahid, committed by Paolo Bonzini

KVM: x86: Sync SPTEs when injecting page/EPT fault into L1

When injecting a page fault or EPT violation/misconfiguration, KVM is
not syncing any shadow PTEs associated with the faulting address,
including those in previous MMUs that are associated with L1's current
EPTP (in a nested EPT scenario), nor is it flushing any hardware TLB
entries.  All of this is handled by kvm_mmu_invalidate_gva(), so call it when
injecting an emulated page fault.

Page faults that are either !PRESENT or RSVD are exempt from the flushing,
as the CPU is not allowed to cache such translations.
Signed-off-by: Junaid Shahid <junaids@google.com>
Co-developed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com>
Message-Id: <20200320212833.3507-8-sean.j.christopherson@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 0cd665bd
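
The exemption described above (no flush for !PRESENT or RSVD faults) boils down
to a single predicate on the page-fault error code. Below is a minimal,
standalone C sketch of that predicate, assuming only the architectural x86
error-code layout (P in bit 0, RSVD in bit 3); it is a model of the condition
the patch adds in front of kvm_mmu_invalidate_gva(), not the kernel code itself.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PFERR_PRESENT_MASK (1u << 0)  /* P: fault on a present translation        */
#define PFERR_RSVD_MASK    (1u << 3)  /* RSVD: reserved bit set in a paging entry */

/*
 * Model of the patch's filter: only a PRESENT && !RSVD fault can correspond
 * to a translation the CPU may have cached, so only those need the
 * SPTE sync / TLB invalidation before the fault is injected.
 */
static bool fault_needs_invalidation(uint32_t error_code)
{
	return (error_code & PFERR_PRESENT_MASK) &&
	       !(error_code & PFERR_RSVD_MASK);
}

int main(void)
{
	printf("%d\n", fault_needs_invalidation(0x3)); /* write to a present page -> 1, flush */
	printf("%d\n", fault_needs_invalidation(0x2)); /* not-present fault       -> 0, skip  */
	printf("%d\n", fault_needs_invalidation(0x9)); /* reserved-bit fault      -> 0, skip  */
	return 0;
}
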
@@ -4560,7 +4560,7 @@ static int nested_vmx_get_vmptr(struct kvm_vcpu *vcpu, gpa_t *vmpointer)
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, vmpointer, sizeof(*vmpointer), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -4869,7 +4869,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 			return 1;
 		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		if (kvm_write_guest_virt_system(vcpu, gva, &value, len, &e)) {
-			kvm_inject_page_fault(vcpu, &e);
+			kvm_inject_emulated_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
@@ -4943,7 +4943,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 					instr_info, false, len, &gva))
 			return 1;
 		if (kvm_read_guest_virt(vcpu, gva, &value, len, &e)) {
-			kvm_inject_page_fault(vcpu, &e);
+			kvm_inject_emulated_page_fault(vcpu, &e);
 			return 1;
 		}
 	}
@@ -5108,7 +5108,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
 	if (kvm_write_guest_virt_system(vcpu, gva, (void *)&current_vmptr,
 					sizeof(gpa_t), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
 	return nested_vmx_succeed(vcpu);
@@ -5152,7 +5152,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -5220,7 +5220,7 @@ static int handle_invvpid(struct kvm_vcpu *vcpu)
 			vmx_instruction_info, false, sizeof(operand), &gva))
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
 	if (operand.vpid >> 16)
@@ -5393,7 +5393,7 @@ static int handle_invpcid(struct kvm_vcpu *vcpu)
 		return 1;
 	if (kvm_read_guest_virt(vcpu, gva, &operand, sizeof(operand), &e)) {
-		kvm_inject_page_fault(vcpu, &e);
+		kvm_inject_emulated_page_fault(vcpu, &e);
 		return 1;
 	}
@@ -619,8 +619,17 @@ bool kvm_inject_emulated_page_fault(struct kvm_vcpu *vcpu,
 	fault_mmu = fault->nested_page_fault ? vcpu->arch.mmu :
 					       vcpu->arch.walk_mmu;
-	fault_mmu->inject_page_fault(vcpu, fault);
+	/*
+	 * Invalidate the TLB entry for the faulting address, if it exists,
+	 * else the access will fault indefinitely (and to emulate hardware).
+	 */
+	if ((fault->error_code & PFERR_PRESENT_MASK) &&
+	    !(fault->error_code & PFERR_RSVD_MASK))
+		kvm_mmu_invalidate_gva(vcpu, fault_mmu, fault->address,
+				       fault_mmu->root_hpa);
+
+	fault_mmu->inject_page_fault(vcpu, fault);

 	return fault->nested_page_fault;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_emulated_page_fault);