Commit dde7e6d1 authored by Avi Kivity

KVM: x86 emulator: move x86_decode_insn() downwards

No code changes.
Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent ef65c889
@@ -945,917 +945,545 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
	return rc;
}

[Hunk 1: the body of x86_decode_insn() is deleted from its old position directly after decode_abs(). The helper functions that followed it — read_emulated(), pio_in_emulated(), desc_limit_scaled(), get_descriptor_table_ptr(), read_segment_descriptor(), write_segment_descriptor(), load_segment_descriptor(), writeback(), emulate_push(), emulate_pop(), emulate_popf(), emulate_push_sreg(), emulate_pop_sreg(), emulate_pusha(), emulate_popa(), emulate_iret_real(), emulate_iret(), emulate_grp1a() and emulate_grp2() — are unchanged and simply shift up. The deleted body is re-added verbatim in the hunk below.]
@@ -2624,6 +2252,378 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
}
int
x86_decode_insn(struct x86_emulate_ctxt *ctxt)
{
struct x86_emulate_ops *ops = ctxt->ops;
struct decode_cache *c = &ctxt->decode;
int rc = X86EMUL_CONTINUE;
int mode = ctxt->mode;
int def_op_bytes, def_ad_bytes, dual, goffset;
struct opcode opcode, *g_mod012, *g_mod3;
/* we cannot decode insn before we complete previous rep insn */
WARN_ON(ctxt->restart);
c->eip = ctxt->eip;
c->fetch.start = c->fetch.end = c->eip;
ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
switch (mode) {
case X86EMUL_MODE_REAL:
case X86EMUL_MODE_VM86:
case X86EMUL_MODE_PROT16:
def_op_bytes = def_ad_bytes = 2;
break;
case X86EMUL_MODE_PROT32:
def_op_bytes = def_ad_bytes = 4;
break;
#ifdef CONFIG_X86_64
case X86EMUL_MODE_PROT64:
def_op_bytes = 4;
def_ad_bytes = 8;
break;
#endif
default:
return -1;
}
c->op_bytes = def_op_bytes;
c->ad_bytes = def_ad_bytes;
/* Legacy prefixes. */
for (;;) {
switch (c->b = insn_fetch(u8, 1, c->eip)) {
case 0x66: /* operand-size override */
/* switch between 2/4 bytes */
c->op_bytes = def_op_bytes ^ 6;
break;
case 0x67: /* address-size override */
if (mode == X86EMUL_MODE_PROT64)
/* switch between 4/8 bytes */
c->ad_bytes = def_ad_bytes ^ 12;
else
/* switch between 2/4 bytes */
c->ad_bytes = def_ad_bytes ^ 6;
break;
case 0x26: /* ES override */
case 0x2e: /* CS override */
case 0x36: /* SS override */
case 0x3e: /* DS override */
set_seg_override(c, (c->b >> 3) & 3);
break;
case 0x64: /* FS override */
case 0x65: /* GS override */
set_seg_override(c, c->b & 7);
break;
case 0x40 ... 0x4f: /* REX */
if (mode != X86EMUL_MODE_PROT64)
goto done_prefixes;
c->rex_prefix = c->b;
continue;
case 0xf0: /* LOCK */
c->lock_prefix = 1;
break;
case 0xf2: /* REPNE/REPNZ */
c->rep_prefix = REPNE_PREFIX;
break;
case 0xf3: /* REP/REPE/REPZ */
c->rep_prefix = REPE_PREFIX;
break;
default:
goto done_prefixes;
}
/* Any legacy prefix after a REX prefix nullifies its effect. */
c->rex_prefix = 0;
}
done_prefixes:
/* REX prefix. */
if (c->rex_prefix)
if (c->rex_prefix & 8)
c->op_bytes = 8; /* REX.W */
/* Opcode byte(s). */
opcode = opcode_table[c->b];
if (opcode.flags == 0) {
/* Two-byte opcode? */
if (c->b == 0x0f) {
c->twobyte = 1;
c->b = insn_fetch(u8, 1, c->eip);
opcode = twobyte_table[c->b];
}
}
c->d = opcode.flags;
if (c->d & Group) {
dual = c->d & GroupDual;
c->modrm = insn_fetch(u8, 1, c->eip);
--c->eip;
if (c->d & GroupDual) {
g_mod012 = opcode.u.gdual->mod012;
g_mod3 = opcode.u.gdual->mod3;
} else
g_mod012 = g_mod3 = opcode.u.group;
c->d &= ~(Group | GroupDual);
goffset = (c->modrm >> 3) & 7;
if ((c->modrm >> 6) == 3)
opcode = g_mod3[goffset];
else
opcode = g_mod012[goffset];
c->d |= opcode.flags;
}
c->execute = opcode.u.execute;
/* Unrecognised? */
if (c->d == 0 || (c->d & Undefined)) {
DPRINTF("Cannot emulate %02x\n", c->b);
return -1;
}
if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack))
c->op_bytes = 8;
/* ModRM and SIB bytes. */
if (c->d & ModRM)
rc = decode_modrm(ctxt, ops);
else if (c->d & MemAbs)
rc = decode_abs(ctxt, ops);
if (rc != X86EMUL_CONTINUE)
goto done;
if (!c->has_seg_override)
set_seg_override(c, VCPU_SREG_DS);
if (!(!c->twobyte && c->b == 0x8d))
c->modrm_ea += seg_override_base(ctxt, ops, c);
if (c->ad_bytes != 8)
c->modrm_ea = (u32)c->modrm_ea;
if (c->rip_relative)
c->modrm_ea += c->eip;
/*
* Decode and fetch the source operand: register, memory
* or immediate.
*/
switch (c->d & SrcMask) {
case SrcNone:
break;
case SrcReg:
decode_register_operand(&c->src, c, 0);
break;
case SrcMem16:
c->src.bytes = 2;
goto srcmem_common;
case SrcMem32:
c->src.bytes = 4;
goto srcmem_common;
case SrcMem:
c->src.bytes = (c->d & ByteOp) ? 1 :
c->op_bytes;
/* Don't fetch the address for invlpg: it could be unmapped. */
if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7)
break;
srcmem_common:
/*
* For instructions with a ModR/M byte, switch to register
* access if Mod = 3.
*/
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->src.type = OP_REG;
c->src.val = c->modrm_val;
c->src.ptr = c->modrm_ptr;
break;
}
c->src.type = OP_MEM;
c->src.ptr = (unsigned long *)c->modrm_ea;
c->src.val = 0;
break;
case SrcImm:
case SrcImmU:
c->src.type = OP_IMM;
c->src.ptr = (unsigned long *)c->eip;
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
if (c->src.bytes == 8)
c->src.bytes = 4;
/* NB. Immediates are sign-extended as necessary. */
switch (c->src.bytes) {
case 1:
c->src.val = insn_fetch(s8, 1, c->eip);
break;
case 2:
c->src.val = insn_fetch(s16, 2, c->eip);
break;
case 4:
c->src.val = insn_fetch(s32, 4, c->eip);
break;
}
if ((c->d & SrcMask) == SrcImmU) {
switch (c->src.bytes) {
case 1:
c->src.val &= 0xff;
break;
case 2:
c->src.val &= 0xffff;
break;
case 4:
c->src.val &= 0xffffffff;
break;
}
}
break;
case SrcImmByte:
case SrcImmUByte:
c->src.type = OP_IMM;
c->src.ptr = (unsigned long *)c->eip;
c->src.bytes = 1;
if ((c->d & SrcMask) == SrcImmByte)
c->src.val = insn_fetch(s8, 1, c->eip);
else
c->src.val = insn_fetch(u8, 1, c->eip);
break;
case SrcAcc:
c->src.type = OP_REG;
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->src.ptr = &c->regs[VCPU_REGS_RAX];
switch (c->src.bytes) {
case 1:
c->src.val = *(u8 *)c->src.ptr;
break;
case 2:
c->src.val = *(u16 *)c->src.ptr;
break;
case 4:
c->src.val = *(u32 *)c->src.ptr;
break;
case 8:
c->src.val = *(u64 *)c->src.ptr;
break;
}
break;
case SrcOne:
c->src.bytes = 1;
c->src.val = 1;
break;
case SrcSI:
c->src.type = OP_MEM;
c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->src.ptr = (unsigned long *)
register_address(c, seg_override_base(ctxt, ops, c),
c->regs[VCPU_REGS_RSI]);
c->src.val = 0;
break;
case SrcImmFAddr:
c->src.type = OP_IMM;
c->src.ptr = (unsigned long *)c->eip;
c->src.bytes = c->op_bytes + 2;
insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
break;
case SrcMemFAddr:
c->src.type = OP_MEM;
c->src.ptr = (unsigned long *)c->modrm_ea;
c->src.bytes = c->op_bytes + 2;
break;
}
/*
* Decode and fetch the second source operand: register, memory
* or immediate.
*/
switch (c->d & Src2Mask) {
case Src2None:
break;
case Src2CL:
c->src2.bytes = 1;
c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8;
break;
case Src2ImmByte:
c->src2.type = OP_IMM;
c->src2.ptr = (unsigned long *)c->eip;
c->src2.bytes = 1;
c->src2.val = insn_fetch(u8, 1, c->eip);
break;
case Src2One:
c->src2.bytes = 1;
c->src2.val = 1;
break;
}
/* Decode and fetch the destination operand: register or memory. */
switch (c->d & DstMask) {
case ImplicitOps:
/* Special instructions do their own operand decoding. */
return 0;
case DstReg:
decode_register_operand(&c->dst, c,
c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
break;
case DstMem:
case DstMem64:
if ((c->d & ModRM) && c->modrm_mod == 3) {
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.type = OP_REG;
c->dst.val = c->dst.orig_val = c->modrm_val;
c->dst.ptr = c->modrm_ptr;
break;
}
c->dst.type = OP_MEM;
c->dst.ptr = (unsigned long *)c->modrm_ea;
if ((c->d & DstMask) == DstMem64)
c->dst.bytes = 8;
else
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.val = 0;
if (c->d & BitOp) {
unsigned long mask = ~(c->dst.bytes * 8 - 1);
c->dst.ptr = (void *)c->dst.ptr +
(c->src.val & mask) / 8;
}
break;
case DstAcc:
c->dst.type = OP_REG;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = &c->regs[VCPU_REGS_RAX];
switch (c->dst.bytes) {
case 1:
c->dst.val = *(u8 *)c->dst.ptr;
break;
case 2:
c->dst.val = *(u16 *)c->dst.ptr;
break;
case 4:
c->dst.val = *(u32 *)c->dst.ptr;
break;
case 8:
c->dst.val = *(u64 *)c->dst.ptr;
break;
}
c->dst.orig_val = c->dst.val;
break;
case DstDI:
c->dst.type = OP_MEM;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)
register_address(c, es_base(ctxt, ops),
c->regs[VCPU_REGS_RDI]);
c->dst.val = 0;
break;
}
done:
return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
}
int
x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
{
...