Commit 92daa48b authored by Sean Christopherson's avatar Sean Christopherson Committed by Paolo Bonzini

KVM: x86: Add EMULTYPE_PF when emulation is triggered by a page fault

Add a new emulation type flag to explicitly mark emulation related to a
page fault.  Move the propagation of the GPA into the emulator from the
page fault handler into x86_emulate_instruction, using EMULTYPE_PF as an
indicator that cr2 is valid.  Similarly, don't propagate cr2 into the
exception.address when it's *not* valid.
Signed-off-by: default avatarSean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: default avatarPaolo Bonzini <pbonzini@redhat.com>
parent 999eabcc
...@@ -1381,8 +1381,9 @@ extern u64 kvm_mce_cap_supported; ...@@ -1381,8 +1381,9 @@ extern u64 kvm_mce_cap_supported;
* decode the instruction length. For use *only* by * decode the instruction length. For use *only* by
* kvm_x86_ops->skip_emulated_instruction() implementations. * kvm_x86_ops->skip_emulated_instruction() implementations.
* *
* EMULTYPE_ALLOW_RETRY - Set when the emulator should resume the guest to * EMULTYPE_ALLOW_RETRY_PF - Set when the emulator should resume the guest to
* retry native execution under certain conditions. * retry native execution under certain conditions,
* Can only be set in conjunction with EMULTYPE_PF.
* *
* EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was * EMULTYPE_TRAP_UD_FORCED - Set when emulating an intercepted #UD that was
* triggered by KVM's magic "force emulation" prefix, * triggered by KVM's magic "force emulation" prefix,
...@@ -1395,13 +1396,18 @@ extern u64 kvm_mce_cap_supported; ...@@ -1395,13 +1396,18 @@ extern u64 kvm_mce_cap_supported;
* backdoor emulation, which is opt in via module param. * backdoor emulation, which is opt in via module param.
* VMware backdoor emulation handles select instructions * VMware backdoor emulation handles select instructions
* and reinjects the #GP for all other cases. * and reinjects the #GP for all other cases.
*
* EMULTYPE_PF - Set when emulating MMIO by way of an intercepted #PF, in which
* case the CR2/GPA value passed on the stack is valid.
*/ */
#define EMULTYPE_NO_DECODE (1 << 0) #define EMULTYPE_NO_DECODE (1 << 0)
#define EMULTYPE_TRAP_UD (1 << 1) #define EMULTYPE_TRAP_UD (1 << 1)
#define EMULTYPE_SKIP (1 << 2) #define EMULTYPE_SKIP (1 << 2)
#define EMULTYPE_ALLOW_RETRY (1 << 3) #define EMULTYPE_ALLOW_RETRY_PF (1 << 3)
#define EMULTYPE_TRAP_UD_FORCED (1 << 4) #define EMULTYPE_TRAP_UD_FORCED (1 << 4)
#define EMULTYPE_VMWARE_GP (1 << 5) #define EMULTYPE_VMWARE_GP (1 << 5)
#define EMULTYPE_PF (1 << 6)
int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type); int kvm_emulate_instruction(struct kvm_vcpu *vcpu, int emulation_type);
int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu, int kvm_emulate_instruction_from_buffer(struct kvm_vcpu *vcpu,
void *insn, int insn_len); void *insn, int insn_len);
......
...@@ -5415,18 +5415,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt); ...@@ -5415,18 +5415,12 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
void *insn, int insn_len) void *insn, int insn_len)
{ {
int r, emulation_type = 0; int r, emulation_type = EMULTYPE_PF;
bool direct = vcpu->arch.mmu->direct_map; bool direct = vcpu->arch.mmu->direct_map;
if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa))) if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
return RET_PF_RETRY; return RET_PF_RETRY;
/* With shadow page tables, fault_address contains a GVA or nGPA. */
if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2_or_gpa;
}
r = RET_PF_INVALID; r = RET_PF_INVALID;
if (unlikely(error_code & PFERR_RSVD_MASK)) { if (unlikely(error_code & PFERR_RSVD_MASK)) {
r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct); r = handle_mmio_page_fault(vcpu, cr2_or_gpa, direct);
...@@ -5470,7 +5464,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code, ...@@ -5470,7 +5464,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
* for L1 isn't going to magically fix whatever issue caused L2 to fail. * for L1 isn't going to magically fix whatever issue caused L2 to fail.
*/ */
if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu)) if (!mmio_info_in_cache(vcpu, cr2_or_gpa, direct) && !is_guest_mode(vcpu))
emulation_type = EMULTYPE_ALLOW_RETRY; emulation_type |= EMULTYPE_ALLOW_RETRY_PF;
emulate: emulate:
/* /*
* On AMD platforms, under certain conditions insn_len may be zero on #NPF. * On AMD platforms, under certain conditions insn_len may be zero on #NPF.
......
...@@ -6492,10 +6492,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ...@@ -6492,10 +6492,11 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
gpa_t gpa = cr2_or_gpa; gpa_t gpa = cr2_or_gpa;
kvm_pfn_t pfn; kvm_pfn_t pfn;
if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false; return false;
if (WARN_ON_ONCE(is_guest_mode(vcpu))) if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false; return false;
if (!vcpu->arch.mmu->direct_map) { if (!vcpu->arch.mmu->direct_map) {
...@@ -6583,10 +6584,11 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt, ...@@ -6583,10 +6584,11 @@ static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
*/ */
vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0; vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
if (!(emulation_type & EMULTYPE_ALLOW_RETRY)) if (!(emulation_type & EMULTYPE_ALLOW_RETRY_PF))
return false; return false;
if (WARN_ON_ONCE(is_guest_mode(vcpu))) if (WARN_ON_ONCE(is_guest_mode(vcpu)) ||
WARN_ON_ONCE(!(emulation_type & EMULTYPE_PF)))
return false; return false;
if (x86_page_table_writing_insn(ctxt)) if (x86_page_table_writing_insn(ctxt))
...@@ -6839,9 +6841,20 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, ...@@ -6839,9 +6841,20 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
} }
restart: restart:
if (emulation_type & EMULTYPE_PF) {
/* Save the faulting GPA (cr2) in the address field */ /* Save the faulting GPA (cr2) in the address field */
ctxt->exception.address = cr2_or_gpa; ctxt->exception.address = cr2_or_gpa;
/* With shadow page tables, cr2 contains a GVA or nGPA. */
if (vcpu->arch.mmu->direct_map) {
vcpu->arch.gpa_available = true;
vcpu->arch.gpa_val = cr2_or_gpa;
}
} else {
/* Sanitize the address out of an abundance of paranoia. */
ctxt->exception.address = 0;
}
r = x86_emulate_insn(ctxt); r = x86_emulate_insn(ctxt);
if (r == EMULATION_INTERCEPTED) if (r == EMULATION_INTERCEPTED)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment