Commit dc25e89e authored by Andre Przywara, committed by Avi Kivity

KVM: SVM: copy instruction bytes from VMCB

In case of a nested page fault or an intercepted #PF, newer SVM
implementations provide a copy of the faulting instruction bytes
in the VMCB.
Use these bytes to feed the instruction emulator and avoid the costly
guest instruction fetch in this case.
Signed-off-by: Andre Przywara <andre.przywara@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent df4f3108
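
The shape of the change: kvm_mmu_page_fault(), x86_emulate_instruction() and x86_decode_insn() each grow a (void *insn, int insn_len) parameter pair, SVM's #PF intercept passes the VMCB's decode-assist bytes, and callers without such bytes (VMX, and SVM hardware lacking the feature) pass NULL/0. A minimal sketch of that threading follows; all names here are illustrative mocks, not KVM API:

#include <stdio.h>

typedef unsigned char u8;

/* Mock of the deepest layer: with bytes present, decode can skip the
 * guest fetch; with insn_len == 0 it behaves as before the patch. */
static int decode_insn_mock(const void *insn, int insn_len)
{
	if (insn_len > 0) {
		printf("decoding %d pre-fetched byte(s), no guest read\n",
		       insn_len);
		return 0;
	}
	printf("no bytes supplied, fetching from guest memory\n");
	return 0;
}

/* The middle layers just thread the pair through unchanged. */
static int emulate_instruction_mock(unsigned long cr2,
				    const void *insn, int insn_len)
{
	return decode_insn_mock(insn, insn_len);
}

static int mmu_page_fault_mock(unsigned long cr2,
			       const void *insn, int insn_len)
{
	return emulate_instruction_mock(cr2, insn, insn_len);
}

int main(void)
{
	const u8 vmcb_insn_bytes[] = { 0x89, 0x08 };	/* example bytes */

	/* SVM with decode assist: hand the VMCB copy down the stack. */
	mmu_page_fault_mock(0x1000, vmcb_insn_bytes,
			    sizeof(vmcb_insn_bytes));

	/* VMX (and pre-decode-assist SVM): nothing available. */
	mmu_page_fault_mock(0x2000, NULL, 0);
	return 0;
}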
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,7 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT64
 #endif
-int x86_decode_insn(struct x86_emulate_ctxt *ctxt);
+int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len);
 #define EMULATION_FAILED -1
 #define EMULATION_OK 0
 #define EMULATION_RESTART 1
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -634,13 +634,13 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	(1 << 0)
 #define EMULTYPE_TRAP_UD	(1 << 1)
 #define EMULTYPE_SKIP		(1 << 2)
-int x86_emulate_instruction(struct kvm_vcpu *vcpu,
-			    unsigned long cr2, int emulation_type);
+int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
+			    int emulation_type, void *insn, int insn_len);
 static inline int emulate_instruction(struct kvm_vcpu *vcpu,
			int emulation_type)
 {
-	return x86_emulate_instruction(vcpu, 0, emulation_type);
+	return x86_emulate_instruction(vcpu, 0, emulation_type, NULL, 0);
 }
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
@@ -721,7 +721,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 int kvm_fix_hypercall(struct kvm_vcpu *vcpu);
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code);
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
+		       void *insn, int insn_len);
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva);
 void kvm_enable_tdp(void);
--- a/arch/x86/include/asm/svm.h
+++ b/arch/x86/include/asm/svm.h
@@ -83,7 +83,9 @@ struct __attribute__ ((__packed__)) vmcb_control_area {
 	u32 clean;
 	u32 reserved_5;
 	u64 next_rip;
-	u8 reserved_6[816];
+	u8 insn_len;
+	u8 insn_bytes[15];
+	u8 reserved_6[800];
 };
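
The new fields are carved out of reserved space, so the packed structure's size and every later offset stay the same: 1 byte of insn_len plus 15 bytes of insn_bytes replace exactly 16 of the former 816 reserved bytes. A compile-time check of that accounting on a mock layout (illustrative tail only, not the real svm.h):

typedef unsigned char u8;

/* Mock of the tail of the VMCB control area, before and after the patch;
 * the field names follow the diff, everything else is illustrative. */
struct tail_before {
	u8 reserved_6[816];
} __attribute__((packed));

struct tail_after {
	u8 insn_len;		/*  1 byte			*/
	u8 insn_bytes[15];	/* 15 bytes			*/
	u8 reserved_6[800];	/* 816 - 16 bytes remaining	*/
} __attribute__((packed));

/* The carve-out must not move anything behind it: 1 + 15 + 800 == 816. */
_Static_assert(sizeof(struct tail_before) == sizeof(struct tail_after),
	       "insn_len/insn_bytes must fit in the old reserved space");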
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2610,7 +2610,7 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
 }
 int
-x86_decode_insn(struct x86_emulate_ctxt *ctxt)
+x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
@@ -2621,7 +2621,10 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt)
 	struct operand memop = { .type = OP_NONE };
 	c->eip = ctxt->eip;
-	c->fetch.start = c->fetch.end = c->eip;
+	c->fetch.start = c->eip;
+	c->fetch.end = c->fetch.start + insn_len;
+	if (insn_len > 0)
+		memcpy(c->fetch.data, insn, insn_len);
 	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 	switch (mode) {
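
The pre-seeding works because the emulator's fetch cache is a window [fetch.start, fetch.end) over guest code: a byte inside the window is served from fetch.data, anything outside it triggers a guest memory read. Setting end = start + insn_len yields an empty window when insn_len == 0, which reproduces the old behaviour exactly. A simplified, self-contained model of that window logic (mock names; not the kernel's actual fetch routine):

#include <assert.h>
#include <string.h>

typedef unsigned char u8;

struct fetch_cache_mock {
	u8 data[15];		/* 15 = x86's maximum instruction length */
	unsigned long start;	/* guest RIP the window begins at	  */
	unsigned long end;	/* first address NOT covered		  */
};

static int guest_reads;	/* how often we had to touch guest memory */

static u8 read_guest_byte_mock(unsigned long rip)
{
	guest_reads++;
	return 0x90;	/* pretend the guest code is NOPs */
}

/* Mirrors the setup the patched x86_decode_insn() performs. */
static void preseed(struct fetch_cache_mock *fc, unsigned long rip,
		    const void *insn, int insn_len)
{
	fc->start = rip;
	fc->end = rip + insn_len;
	if (insn_len > 0)
		memcpy(fc->data, insn, insn_len);
}

static u8 fetch_byte(struct fetch_cache_mock *fc, unsigned long rip)
{
	if (rip >= fc->start && rip < fc->end)
		return fc->data[rip - fc->start];	/* window hit  */
	return read_guest_byte_mock(rip);		/* window miss */
}

int main(void)
{
	struct fetch_cache_mock fc;
	const u8 bytes[] = { 0x89, 0x08 };

	preseed(&fc, 0x1000, bytes, sizeof(bytes));
	fetch_byte(&fc, 0x1000);
	fetch_byte(&fc, 0x1001);
	assert(guest_reads == 0);	/* served entirely from the copy */

	preseed(&fc, 0x2000, NULL, 0);	/* empty window: old behaviour	 */
	fetch_byte(&fc, 0x2000);
	assert(guest_reads == 1);
	return 0;
}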
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3330,7 +3330,8 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 	}
 }
-int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
+int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
+		       void *insn, int insn_len)
 {
 	int r;
 	enum emulation_result er;
@@ -3348,7 +3349,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	if (r)
 		goto out;
-	er = x86_emulate_instruction(vcpu, cr2, 0);
+	er = x86_emulate_instruction(vcpu, cr2, 0, insn, insn_len);
 	switch (er) {
 	case EMULATE_DONE:
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1527,7 +1527,9 @@ static int pf_interception(struct vcpu_svm *svm)
 		trace_kvm_page_fault(fault_address, error_code);
 		if (!npt_enabled && kvm_event_needs_reinjection(&svm->vcpu))
 			kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
-		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
+		r = kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+				       svm->vmcb->control.insn_bytes,
+				       svm->vmcb->control.insn_len);
 		break;
 	case KVM_PV_REASON_PAGE_NOT_PRESENT:
 		svm->apf_reason = 0;
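
Note that the SVM call site passes the VMCB fields unconditionally. Assuming the VMCB is zero-initialized (as KVM allocates it) and that hardware without decode assist never writes this reserved area, insn_len simply reads as 0 there and the emulator falls back to the ordinary guest fetch, so no explicit capability check is needed. Sketched contract (mock struct, illustrative names):

typedef unsigned char u8;

/* Illustrative tail of the VMCB control area; on hardware without
 * decode assist these bytes stay zero, so insn_len == 0. */
struct vmcb_control_mock {
	u8 insn_len;
	u8 insn_bytes[15];
};

/* insn_len == 0 means "no bytes available": the emulator then fetches
 * from guest memory, exactly the pre-patch behaviour, so the caller
 * never needs an explicit CPU feature check. */
static int decode_assist_available(const struct vmcb_control_mock *ctl)
{
	return ctl->insn_len != 0;
}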
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3055,7 +3055,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		if (kvm_event_needs_reinjection(vcpu))
 			kvm_mmu_unprotect_page_virt(vcpu, cr2);
-		return kvm_mmu_page_fault(vcpu, cr2, error_code);
+		return kvm_mmu_page_fault(vcpu, cr2, error_code, NULL, 0);
 	}
 	if (vmx->rmode.vm86_active &&
@@ -3502,7 +3502,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
 	trace_kvm_page_fault(gpa, exit_qualification);
-	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3);
+	return kvm_mmu_page_fault(vcpu, gpa, exit_qualification & 0x3, NULL, 0);
 }
 static u64 ept_rsvd_mask(u64 spte, int level)
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4365,7 +4365,9 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			    unsigned long cr2,
-			    int emulation_type)
+			    int emulation_type,
+			    void *insn,
+			    int insn_len)
 {
 	int r;
 	struct decode_cache *c = &vcpu->arch.emulate_ctxt.decode;
@@ -4386,7 +4388,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		vcpu->arch.emulate_ctxt.have_exception = false;
 		vcpu->arch.emulate_ctxt.perm_ok = false;
-		r = x86_decode_insn(&vcpu->arch.emulate_ctxt);
+		r = x86_decode_insn(&vcpu->arch.emulate_ctxt, insn, insn_len);
 		if (r == X86EMUL_PROPAGATE_FAULT)
 			goto done;