Commit 9de41573 authored by Gleb Natapov's avatar Gleb Natapov Committed by Avi Kivity

KVM: x86 emulator: introduce read cache

Introduce a read cache, which is needed for instructions that require more
than one exit to userspace. After returning from userspace the instruction
will be re-executed with cached read value.
Signed-off-by: default avatarGleb Natapov <gleb@redhat.com>
Signed-off-by: default avatarAvi Kivity <avi@redhat.com>
parent 1c11e713
...@@ -186,6 +186,7 @@ struct decode_cache { ...@@ -186,6 +186,7 @@ struct decode_cache {
unsigned long modrm_val; unsigned long modrm_val;
struct fetch_cache fetch; struct fetch_cache fetch;
struct read_cache io_read; struct read_cache io_read;
struct read_cache mem_read;
}; };
struct x86_emulate_ctxt { struct x86_emulate_ctxt {
......
...@@ -1263,6 +1263,33 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1263,6 +1263,33 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
} }
/*
 * Read from emulated memory, caching the result in ctxt->decode.mem_read.
 * If the instruction is restarted (e.g. after an exit to userspace), any
 * chunk already fetched is replayed from the cache instead of being re-read.
 * Reads are performed in chunks of at most 8 bytes.
 */
static int read_emulated(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long addr, void *dest, unsigned size)
{
	struct read_cache *mc = &ctxt->decode.mem_read;
	int rc;

	while (size) {
		int chunk = min(size, 8u);

		size -= chunk;
		/* Fill the cache only when the requested bytes are not
		 * already present from a previous (interrupted) attempt. */
		if (mc->pos >= mc->end) {
			rc = ops->read_emulated(addr, mc->data + mc->end,
						chunk, ctxt->vcpu);
			if (rc != X86EMUL_CONTINUE)
				return rc;
			mc->end += chunk;
		}
		memcpy(dest, mc->data + mc->pos, chunk);
		mc->pos += chunk;
		dest += chunk;
		addr += chunk;
	}
	return X86EMUL_CONTINUE;
}
static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
struct x86_emulate_ops *ops, struct x86_emulate_ops *ops,
unsigned int size, unsigned short port, unsigned int size, unsigned short port,
...@@ -1504,9 +1531,9 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, ...@@ -1504,9 +1531,9 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
struct decode_cache *c = &ctxt->decode; struct decode_cache *c = &ctxt->decode;
int rc; int rc;
rc = ops->read_emulated(register_address(c, ss_base(ctxt), rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]), c->regs[VCPU_REGS_RSP]),
dest, len, ctxt->vcpu); dest, len);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
...@@ -2475,6 +2502,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2475,6 +2502,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
int saved_dst_type = c->dst.type; int saved_dst_type = c->dst.type;
ctxt->interruptibility = 0; ctxt->interruptibility = 0;
ctxt->decode.mem_read.pos = 0;
/* Shadow copy of register state. Committed on successful emulation. /* Shadow copy of register state. Committed on successful emulation.
* NOTE: we can copy them from vcpu as x86_decode_insn() doesn't * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
...@@ -2529,20 +2557,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2529,20 +2557,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
} }
if (c->src.type == OP_MEM) { if (c->src.type == OP_MEM) {
rc = ops->read_emulated((unsigned long)c->src.ptr, rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr,
&c->src.val, &c->src.val, c->src.bytes);
c->src.bytes,
ctxt->vcpu);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
goto done; goto done;
c->src.orig_val = c->src.val; c->src.orig_val = c->src.val;
} }
if (c->src2.type == OP_MEM) { if (c->src2.type == OP_MEM) {
rc = ops->read_emulated((unsigned long)c->src2.ptr, rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr,
&c->src2.val, &c->src2.val, c->src2.bytes);
c->src2.bytes,
ctxt->vcpu);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
goto done; goto done;
} }
...@@ -2553,8 +2577,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2553,8 +2577,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
/* optimisation - avoid slow emulated read if Mov */ /* optimisation - avoid slow emulated read if Mov */
rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val, rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr,
c->dst.bytes, ctxt->vcpu); &c->dst.val, c->dst.bytes);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
goto done; goto done;
} }
...@@ -2981,7 +3005,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2981,7 +3005,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
(rc->end != 0 && rc->end == rc->pos)) (rc->end != 0 && rc->end == rc->pos))
ctxt->restart = false; ctxt->restart = false;
} }
	/*
	 * Reset the read cache here in case a string instruction is restarted
	 * without decoding.
	 */
ctxt->decode.mem_read.end = 0;
/* Commit shadow register state. */ /* Commit shadow register state. */
memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs); memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
kvm_rip_write(ctxt->vcpu, c->eip); kvm_rip_write(ctxt->vcpu, c->eip);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment