Commit b60d513c authored by Takuya Yoshikawa, committed by Marcelo Tosatti

KVM: x86: Use macros for x86_emulate_ops to avoid future mistakes

The return values from x86_emulate_ops are defined
in kvm_emulate.h as macros X86EMUL_*.

But in emulate.c, we are comparing the return values
from these ops with 0 to check if they're X86EMUL_CONTINUE
or not: X86EMUL_CONTINUE is defined as 0 now.

To avoid possible mistakes in the future, this patch
substitutes "X86EMUL_CONTINUE" for "0" that are being
compared with the return values from x86_emulate_ops.

  We think that there are more places we should use these
  macros, but the meanings of rc values in x86_emulate_insn()
  were not so clear at a glance. If we use proper macros in
  this function, we would be able to follow the flow of each
  emulation more easily and, maybe, more securely.
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
parent 64749204
...@@ -1196,7 +1196,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, ...@@ -1196,7 +1196,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
rc = ops->read_emulated(register_address(c, ss_base(ctxt), rc = ops->read_emulated(register_address(c, ss_base(ctxt),
c->regs[VCPU_REGS_RSP]), c->regs[VCPU_REGS_RSP]),
dest, len, ctxt->vcpu); dest, len, ctxt->vcpu);
if (rc != 0) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
...@@ -1370,7 +1370,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, ...@@ -1370,7 +1370,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
int rc; int rc;
rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu); rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
if (rc != 0) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
...@@ -1385,7 +1385,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, ...@@ -1385,7 +1385,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
(u32) c->regs[VCPU_REGS_RBX]; (u32) c->regs[VCPU_REGS_RBX];
rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu); rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
if (rc != 0) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
ctxt->eflags |= EFLG_ZF; ctxt->eflags |= EFLG_ZF;
} }
...@@ -1451,7 +1451,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, ...@@ -1451,7 +1451,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
&c->dst.val, &c->dst.val,
c->dst.bytes, c->dst.bytes,
ctxt->vcpu); ctxt->vcpu);
if (rc != 0) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
break; break;
case OP_NONE: case OP_NONE:
...@@ -1749,7 +1749,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1749,7 +1749,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
&c->src.val, &c->src.val,
c->src.bytes, c->src.bytes,
ctxt->vcpu); ctxt->vcpu);
if (rc != 0) if (rc != X86EMUL_CONTINUE)
goto done; goto done;
c->src.orig_val = c->src.val; c->src.orig_val = c->src.val;
} }
...@@ -1768,13 +1768,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -1768,13 +1768,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->dst.ptr = (void *)c->dst.ptr + c->dst.ptr = (void *)c->dst.ptr +
(c->src.val & mask) / 8; (c->src.val & mask) / 8;
} }
if (!(c->d & Mov) && if (!(c->d & Mov)) {
/* optimisation - avoid slow emulated read */ /* optimisation - avoid slow emulated read */
((rc = ops->read_emulated((unsigned long)c->dst.ptr, rc = ops->read_emulated((unsigned long)c->dst.ptr,
&c->dst.val, &c->dst.val,
c->dst.bytes, ctxt->vcpu)) != 0)) c->dst.bytes,
ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
goto done; goto done;
} }
}
c->dst.orig_val = c->dst.val; c->dst.orig_val = c->dst.val;
special_insn: special_insn:
...@@ -2039,11 +2042,12 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2039,11 +2042,12 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->dst.ptr = (unsigned long *)register_address(c, c->dst.ptr = (unsigned long *)register_address(c,
es_base(ctxt), es_base(ctxt),
c->regs[VCPU_REGS_RDI]); c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated(register_address(c, rc = ops->read_emulated(register_address(c,
seg_override_base(ctxt, c), seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]), c->regs[VCPU_REGS_RSI]),
&c->dst.val, &c->dst.val,
c->dst.bytes, ctxt->vcpu)) != 0) c->dst.bytes, ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
goto done; goto done;
register_address_increment(c, &c->regs[VCPU_REGS_RSI], register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
...@@ -2058,10 +2062,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2058,10 +2062,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->src.ptr = (unsigned long *)register_address(c, c->src.ptr = (unsigned long *)register_address(c,
seg_override_base(ctxt, c), seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]); c->regs[VCPU_REGS_RSI]);
if ((rc = ops->read_emulated((unsigned long)c->src.ptr, rc = ops->read_emulated((unsigned long)c->src.ptr,
&c->src.val, &c->src.val,
c->src.bytes, c->src.bytes,
ctxt->vcpu)) != 0) ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
goto done; goto done;
c->dst.type = OP_NONE; /* Disable writeback. */ c->dst.type = OP_NONE; /* Disable writeback. */
...@@ -2069,10 +2074,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2069,10 +2074,11 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->dst.ptr = (unsigned long *)register_address(c, c->dst.ptr = (unsigned long *)register_address(c,
es_base(ctxt), es_base(ctxt),
c->regs[VCPU_REGS_RDI]); c->regs[VCPU_REGS_RDI]);
if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, rc = ops->read_emulated((unsigned long)c->dst.ptr,
&c->dst.val, &c->dst.val,
c->dst.bytes, c->dst.bytes,
ctxt->vcpu)) != 0) ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
goto done; goto done;
DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
...@@ -2102,12 +2108,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) ...@@ -2102,12 +2108,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
c->dst.type = OP_REG; c->dst.type = OP_REG;
c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
if ((rc = ops->read_emulated(register_address(c, rc = ops->read_emulated(register_address(c,
seg_override_base(ctxt, c), seg_override_base(ctxt, c),
c->regs[VCPU_REGS_RSI]), c->regs[VCPU_REGS_RSI]),
&c->dst.val, &c->dst.val,
c->dst.bytes, c->dst.bytes,
ctxt->vcpu)) != 0) ctxt->vcpu);
if (rc != X86EMUL_CONTINUE)
goto done; goto done;
register_address_increment(c, &c->regs[VCPU_REGS_RSI], register_address_increment(c, &c->regs[VCPU_REGS_RSI],
(ctxt->eflags & EFLG_DF) ? -c->dst.bytes (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment