Commit 3427318f authored by Laurent Vivier, committed by Avi Kivity

KVM: Call x86_decode_insn() only when needed

Move emulate_ctxt into kvm_vcpu so that the emulation context is preserved when we
exit from the kvm module. Call x86_decode_insn() only when needed. Modify
x86_emulate_insn() so that it does not modify the context when it must be re-entered.
Signed-off-by: Laurent Vivier <Laurent.Vivier@bull.net>
Signed-off-by: Avi Kivity <avi@qumranet.com>
parent 1be3aa47
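
For context, a minimal, self-contained C sketch of the decode-once / re-enter pattern this commit introduces (this is not part of the patch, and all toy_* names are hypothetical): the first emulation attempt decodes and then executes, while the MMIO-completion path re-enters with no_decode set so the decode state kept in the vcpu is reused instead of being rebuilt.

/* Toy model of the control flow only; these are not KVM APIs. */
#include <stdio.h>

struct toy_ctxt {
	int decoded_opcode;	/* stands in for the cached decode state */
};

struct toy_vcpu {
	struct toy_ctxt emulate_ctxt;	/* kept across exits, like vcpu->emulate_ctxt */
};

static int toy_decode(struct toy_ctxt *ctxt, int opcode)
{
	ctxt->decoded_opcode = opcode;	/* the expensive step, done only once */
	return 0;
}

static int toy_execute(struct toy_ctxt *ctxt)
{
	printf("executing opcode %d\n", ctxt->decoded_opcode);
	return 0;
}

/* Mirrors the reshaped emulate_instruction(): decode only when asked to. */
static int toy_emulate(struct toy_vcpu *vcpu, int opcode, int no_decode)
{
	int r = 0;

	if (!no_decode)
		r = toy_decode(&vcpu->emulate_ctxt, opcode);
	if (r == 0)
		r = toy_execute(&vcpu->emulate_ctxt);
	return r;
}

int main(void)
{
	struct toy_vcpu vcpu = { { 0 } };

	toy_emulate(&vcpu, 42, 0);	/* first attempt: decode + execute */
	toy_emulate(&vcpu, 0, 1);	/* re-entry after MMIO: reuse the cached decode */
	return 0;
}
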
@@ -207,6 +207,8 @@ enum {
 	VCPU_SREG_LDTR,
 };

+#include "x86_emulate.h"
+
 struct kvm_pio_request {
 	unsigned long count;
 	int cur_count;
@@ -380,6 +382,10 @@ struct kvm_vcpu {
 	int cpuid_nent;
 	struct kvm_cpuid_entry cpuid_entries[KVM_MAX_CPUID_ENTRIES];
+
+	/* emulate context */
+	struct x86_emulate_ctxt emulate_ctxt;
 };

 struct kvm_mem_alias {
@@ -555,7 +561,7 @@ enum emulation_result {
 };

 int emulate_instruction(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			unsigned long cr2, u16 error_code);
+			unsigned long cr2, u16 error_code, int no_decode);
 void kvm_report_emulation_failure(struct kvm_vcpu *cvpu, const char *context);
 void realmode_lgdt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
 void realmode_lidt(struct kvm_vcpu *vcpu, u16 size, unsigned long address);
...
@@ -1251,45 +1251,56 @@ struct x86_emulate_ops emulate_ops = {
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			struct kvm_run *run,
 			unsigned long cr2,
-			u16 error_code)
+			u16 error_code,
+			int no_decode)
 {
-	struct x86_emulate_ctxt emulate_ctxt;
-	int r;
-	int cs_db, cs_l;
+	int r = 0;

 	vcpu->mmio_fault_cr2 = cr2;
 	kvm_x86_ops->cache_regs(vcpu);
-	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
-
-	emulate_ctxt.vcpu = vcpu;
-	emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
-	emulate_ctxt.cr2 = cr2;
-	emulate_ctxt.mode = (emulate_ctxt.eflags & X86_EFLAGS_VM)
-		? X86EMUL_MODE_REAL : cs_l
-		? X86EMUL_MODE_PROT64 : cs_db
-		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
-
-	if (emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
-		emulate_ctxt.cs_base = 0;
-		emulate_ctxt.ds_base = 0;
-		emulate_ctxt.es_base = 0;
-		emulate_ctxt.ss_base = 0;
-	} else {
-		emulate_ctxt.cs_base = get_segment_base(vcpu, VCPU_SREG_CS);
-		emulate_ctxt.ds_base = get_segment_base(vcpu, VCPU_SREG_DS);
-		emulate_ctxt.es_base = get_segment_base(vcpu, VCPU_SREG_ES);
-		emulate_ctxt.ss_base = get_segment_base(vcpu, VCPU_SREG_SS);
-	}
-
-	emulate_ctxt.gs_base = get_segment_base(vcpu, VCPU_SREG_GS);
-	emulate_ctxt.fs_base = get_segment_base(vcpu, VCPU_SREG_FS);
-
 	vcpu->mmio_is_write = 0;
 	vcpu->pio.string = 0;
-	r = x86_decode_insn(&emulate_ctxt, &emulate_ops);
+
+	if (!no_decode) {
+		int cs_db, cs_l;
+		kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
+
+		vcpu->emulate_ctxt.vcpu = vcpu;
+		vcpu->emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+		vcpu->emulate_ctxt.cr2 = cr2;
+		vcpu->emulate_ctxt.mode =
+			(vcpu->emulate_ctxt.eflags & X86_EFLAGS_VM)
+			? X86EMUL_MODE_REAL : cs_l
+			? X86EMUL_MODE_PROT64 : cs_db
+			? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+
+		if (vcpu->emulate_ctxt.mode == X86EMUL_MODE_PROT64) {
+			vcpu->emulate_ctxt.cs_base = 0;
+			vcpu->emulate_ctxt.ds_base = 0;
+			vcpu->emulate_ctxt.es_base = 0;
+			vcpu->emulate_ctxt.ss_base = 0;
+		} else {
+			vcpu->emulate_ctxt.cs_base =
+					get_segment_base(vcpu, VCPU_SREG_CS);
+			vcpu->emulate_ctxt.ds_base =
+					get_segment_base(vcpu, VCPU_SREG_DS);
+			vcpu->emulate_ctxt.es_base =
+					get_segment_base(vcpu, VCPU_SREG_ES);
+			vcpu->emulate_ctxt.ss_base =
+					get_segment_base(vcpu, VCPU_SREG_SS);
+		}
+
+		vcpu->emulate_ctxt.gs_base =
+					get_segment_base(vcpu, VCPU_SREG_GS);
+		vcpu->emulate_ctxt.fs_base =
+					get_segment_base(vcpu, VCPU_SREG_FS);
+
+		r = x86_decode_insn(&vcpu->emulate_ctxt, &emulate_ops);
+	}

 	if (r == 0)
-		r = x86_emulate_insn(&emulate_ctxt, &emulate_ops);
+		r = x86_emulate_insn(&vcpu->emulate_ctxt, &emulate_ops);

 	if (vcpu->pio.string)
 		return EMULATE_DO_MMIO;
@@ -1313,7 +1324,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	}

 	kvm_x86_ops->decache_regs(vcpu);
-	kvm_x86_ops->set_rflags(vcpu, emulate_ctxt.eflags);
+	kvm_x86_ops->set_rflags(vcpu, vcpu->emulate_ctxt.eflags);

 	if (vcpu->mmio_is_write) {
 		vcpu->mmio_needed = 0;
@@ -2055,7 +2066,7 @@ static int kvm_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;
 		r = emulate_instruction(vcpu, kvm_run,
-					vcpu->mmio_fault_cr2, 0);
+					vcpu->mmio_fault_cr2, 0, 1);
 		if (r == EMULATE_DO_MMIO) {
 			/*
 			 * Read-modify-write.  Back to userspace.
...
@@ -960,7 +960,7 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 		return 1;
 	}
 	er = emulate_instruction(&svm->vcpu, kvm_run, fault_address,
-				 error_code);
+				 error_code, 0);
 	mutex_unlock(&kvm->lock);

 	switch (er) {
@@ -984,7 +984,7 @@ static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	int er;

-	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0);
+	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0);
 	if (er != EMULATE_DONE)
 		inject_ud(&svm->vcpu);
@@ -1027,7 +1027,8 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	string = (io_info & SVM_IOIO_STR_MASK) != 0;

 	if (string) {
-		if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+		if (emulate_instruction(&svm->vcpu,
+					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
 			return 0;
 		return 1;
 	}
@@ -1086,7 +1087,7 @@ static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int emulate_on_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
-	if (emulate_instruction(&svm->vcpu, NULL, 0, 0) != EMULATE_DONE)
+	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
 		pr_unimpl(&svm->vcpu, "%s: failed\n", __FUNCTION__);

 	return 1;
 }
...
@@ -1750,7 +1750,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * Cause the #SS fault with 0 error code in VM86 mode.
 	 */
 	if (((vec == GP_VECTOR) || (vec == SS_VECTOR)) && err_code == 0)
-		if (emulate_instruction(vcpu, NULL, 0, 0) == EMULATE_DONE)
+		if (emulate_instruction(vcpu, NULL, 0, 0, 0) == EMULATE_DONE)
 			return 1;
 	return 0;
 }
@@ -1787,7 +1787,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}

 	if (is_invalid_opcode(intr_info)) {
-		er = emulate_instruction(vcpu, kvm_run, 0, 0);
+		er = emulate_instruction(vcpu, kvm_run, 0, 0, 0);
 		if (er != EMULATE_DONE)
 			vmx_inject_ud(vcpu);
@@ -1812,7 +1812,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 			return 1;
 		}

-		er = emulate_instruction(vcpu, kvm_run, cr2, error_code);
+		er = emulate_instruction(vcpu, kvm_run, cr2, error_code, 0);
 		mutex_unlock(&vcpu->kvm->lock);

 		switch (er) {
@@ -1873,7 +1873,8 @@ static int handle_io(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	string = (exit_qualification & 16) != 0;
 	if (string) {
-		if (emulate_instruction(vcpu, kvm_run, 0, 0) == EMULATE_DO_MMIO)
+		if (emulate_instruction(vcpu,
+					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
 			return 0;
 		return 1;
 	}
...
@@ -913,10 +913,19 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	unsigned long cr2 = ctxt->cr2;
 	int no_wb = 0;
 	u64 msr_data;
+	unsigned long saved_eip = 0;
 	unsigned long _eflags = ctxt->eflags;
 	struct decode_cache *c = &ctxt->decode;
 	int rc = 0;

+	/* Shadow copy of register state. Committed on successful emulation.
+	 * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
+	 * modify them.
+	 */
+
+	memcpy(c->regs, ctxt->vcpu->regs, sizeof c->regs);
+	saved_eip = c->eip;
+
 	if ((c->d & ModRM) && (c->modrm_mod != 3))
 		cr2 = c->modrm_ea;
@@ -1250,7 +1259,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	ctxt->vcpu->rip = c->eip;

 done:
-	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
+	if (rc == X86EMUL_UNHANDLEABLE) {
+		c->eip = saved_eip;
+		return -1;
+	}
+	return 0;

 special_insn:
 	if (c->twobyte)
@@ -1305,8 +1318,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 				register_address(ctxt->es_base,
 						 c->regs[VCPU_REGS_RDI]),
 				c->rep_prefix,
-				c->regs[VCPU_REGS_RDX]) == 0)
+				c->regs[VCPU_REGS_RDX]) == 0) {
+			c->eip = saved_eip;
 			return -1;
+		}
 		return 0;
 	case 0x6e:		/* outsb */
 	case 0x6f:		/* outsw/outsd */
@@ -1321,8 +1336,10 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 						ctxt->ds_base,
 						c->regs[VCPU_REGS_RSI]),
 				c->rep_prefix,
-				c->regs[VCPU_REGS_RDX]) == 0)
+				c->regs[VCPU_REGS_RDX]) == 0) {
+			c->eip = saved_eip;
 			return -1;
+		}
 		return 0;
 	case 0x70 ... 0x7f: /* jcc (short) */ {
 		int rel = insn_fetch(s8, 1, c->eip);
@@ -1711,5 +1728,6 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 cannot_emulate:
 	DPRINTF("Cannot emulate %02x\n", c->b);
+	c->eip = saved_eip;
 	return -1;
 }