Commit 285ca9e9 authored by Paolo Bonzini

KVM: emulate: speed up do_insn_fetch

Hoist the common case up from do_insn_fetch_byte to do_insn_fetch,
and prime the fetch_cache in x86_decode_insn.  This helps the
compiler and the branch predictor a bit, but above all it lays the
groundwork for further changes in the next few patches.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 41061cdb
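
For orientation, the reshaped fast path that the diff below introduces looks roughly like this (a simplified sketch of the new do_insn_fetch loop, using the names from the patch; the refill slow path and the cache priming in x86_decode_insn appear in full in the diff itself):

	/* fc->data already holds previously fetched bytes; src points at the next one */
	while (size--) {
		if (unlikely(ctxt->_eip == fc->end)) {
			/* slow path: refill fetch_cache, never crossing a page boundary */
			rc = do_insn_fetch_bytes(ctxt);
			if (rc != X86EMUL_CONTINUE)
				return rc;
		}
		/* common case: copy straight out of the fetch_cache */
		*dest++ = *src++;
		ctxt->_eip++;
	}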
@@ -705,25 +705,23 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 }
 
 /*
- * Fetch the next byte of the instruction being emulated which is pointed to
- * by ctxt->_eip, then increment ctxt->_eip.
- *
- * Also prefetch the remaining bytes of the instruction without crossing page
+ * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
  */
-static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
+static int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt)
 {
 	struct fetch_cache *fc = &ctxt->fetch;
 	int rc;
 	int size, cur_size;
-
-	if (ctxt->_eip == fc->end) {
 	unsigned long linear;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
-					  .ea = ctxt->_eip };
+					  .ea = fc->end };
+
 	cur_size = fc->end - fc->start;
 	size = min(15UL - cur_size,
-		   PAGE_SIZE - offset_in_page(ctxt->_eip));
+		   PAGE_SIZE - offset_in_page(fc->end));
+	if (unlikely(size == 0))
+		return X86EMUL_UNHANDLEABLE;
 	rc = __linearize(ctxt, addr, size, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
@@ -732,25 +730,27 @@ static int do_insn_fetch_byte(struct x86_emulate_ctxt *ctxt, u8 *dest)
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 	fc->end += size;
-	}
-	*dest = fc->data[ctxt->_eip - fc->start];
-	ctxt->_eip++;
 	return X86EMUL_CONTINUE;
 }
 
 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
-			 void *dest, unsigned size)
+			 void *__dest, unsigned size)
 {
 	int rc;
+	struct fetch_cache *fc = &ctxt->fetch;
+	u8 *dest = __dest;
+	u8 *src = &fc->data[ctxt->_eip - fc->start];
 
-	/* x86 instructions are limited to 15 bytes. */
-	if (unlikely(ctxt->_eip + size - ctxt->eip > 15))
-		return X86EMUL_UNHANDLEABLE;
 	while (size--) {
-		rc = do_insn_fetch_byte(ctxt, dest++);
+		if (unlikely(ctxt->_eip == fc->end)) {
+			rc = do_insn_fetch_bytes(ctxt);
 			if (rc != X86EMUL_CONTINUE)
 				return rc;
 		}
+		*dest++ = *src++;
+		ctxt->_eip++;
+		continue;
+	}
 	return X86EMUL_CONTINUE;
 }
 
@@ -4227,6 +4227,11 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	ctxt->opcode_len = 1;
 	if (insn_len > 0)
 		memcpy(ctxt->fetch.data, insn, insn_len);
+	else {
+		rc = do_insn_fetch_bytes(ctxt);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
+	}
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL: