Commit 71ca9dc3 authored by Nadav Amit, committed by Ben Hutchings

KVM: x86: Emulator fixes for eip canonical checks on near branches

commit 234f3ce4 upstream.

Before changing rip (during jmp, call, ret, etc.) the target should be asserted
to be canonical one, as real CPUs do.  During sysret, both target rsp and rip
should be canonical. If any of these values is noncanonical, a #GP exception
should occur.  The exception to this rule are syscall and sysenter instructions
in which the assigned rip is checked during the assignment to the relevant
MSRs.

This patch fixes the emulator to behave as real CPUs do for near branches.
Far branches are handled by the next patch.

This fixes CVE-2014-3647.
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
[bwh: Backported to 3.2:
 - Adjust context
 - Use ctxt->regs[] instead of reg_read(), reg_write(), reg_rmw()]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
parent ea8064a2
...@@ -529,7 +529,8 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt) ...@@ -529,7 +529,8 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
return emulate_exception(ctxt, NM_VECTOR, 0, false); return emulate_exception(ctxt, NM_VECTOR, 0, false);
} }
static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
int cs_l)
{ {
switch (ctxt->op_bytes) { switch (ctxt->op_bytes) {
case 2: case 2:
...@@ -539,16 +540,25 @@ static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst) ...@@ -539,16 +540,25 @@ static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
ctxt->_eip = (u32)dst; ctxt->_eip = (u32)dst;
break; break;
case 8: case 8:
if ((cs_l && is_noncanonical_address(dst)) ||
(!cs_l && (dst & ~(u32)-1)))
return emulate_gp(ctxt, 0);
ctxt->_eip = dst; ctxt->_eip = dst;
break; break;
default: default:
WARN(1, "unsupported eip assignment size\n"); WARN(1, "unsupported eip assignment size\n");
} }
return X86EMUL_CONTINUE;
}
static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
{
return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
} }
static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel) static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
{ {
assign_eip_near(ctxt, ctxt->_eip + rel); return assign_eip_near(ctxt, ctxt->_eip + rel);
} }
static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
...@@ -1787,13 +1797,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt) ...@@ -1787,13 +1797,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
case 2: /* call near abs */ { case 2: /* call near abs */ {
long int old_eip; long int old_eip;
old_eip = ctxt->_eip; old_eip = ctxt->_eip;
ctxt->_eip = ctxt->src.val; rc = assign_eip_near(ctxt, ctxt->src.val);
if (rc != X86EMUL_CONTINUE)
break;
ctxt->src.val = old_eip; ctxt->src.val = old_eip;
rc = em_push(ctxt); rc = em_push(ctxt);
break; break;
} }
case 4: /* jmp abs */ case 4: /* jmp abs */
ctxt->_eip = ctxt->src.val; rc = assign_eip_near(ctxt, ctxt->src.val);
break; break;
case 5: /* jmp far */ case 5: /* jmp far */
rc = em_jmp_far(ctxt); rc = em_jmp_far(ctxt);
...@@ -1825,10 +1837,14 @@ static int em_grp9(struct x86_emulate_ctxt *ctxt) ...@@ -1825,10 +1837,14 @@ static int em_grp9(struct x86_emulate_ctxt *ctxt)
static int em_ret(struct x86_emulate_ctxt *ctxt) static int em_ret(struct x86_emulate_ctxt *ctxt)
{ {
ctxt->dst.type = OP_REG; int rc;
ctxt->dst.addr.reg = &ctxt->_eip; unsigned long eip;
ctxt->dst.bytes = ctxt->op_bytes;
return em_pop(ctxt); rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
if (rc != X86EMUL_CONTINUE)
return rc;
return assign_eip_near(ctxt, eip);
} }
static int em_ret_far(struct x86_emulate_ctxt *ctxt) static int em_ret_far(struct x86_emulate_ctxt *ctxt)
...@@ -2060,7 +2076,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ...@@ -2060,7 +2076,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
{ {
struct x86_emulate_ops *ops = ctxt->ops; struct x86_emulate_ops *ops = ctxt->ops;
struct desc_struct cs, ss; struct desc_struct cs, ss;
u64 msr_data; u64 msr_data, rcx, rdx;
int usermode; int usermode;
u16 cs_sel = 0, ss_sel = 0; u16 cs_sel = 0, ss_sel = 0;
...@@ -2076,6 +2092,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ...@@ -2076,6 +2092,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
else else
usermode = X86EMUL_MODE_PROT32; usermode = X86EMUL_MODE_PROT32;
rcx = ctxt->regs[VCPU_REGS_RCX];
rdx = ctxt->regs[VCPU_REGS_RDX];
cs.dpl = 3; cs.dpl = 3;
ss.dpl = 3; ss.dpl = 3;
ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
...@@ -2093,6 +2112,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ...@@ -2093,6 +2112,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ss_sel = cs_sel + 8; ss_sel = cs_sel + 8;
cs.d = 0; cs.d = 0;
cs.l = 1; cs.l = 1;
if (is_noncanonical_address(rcx) ||
is_noncanonical_address(rdx))
return emulate_gp(ctxt, 0);
break; break;
} }
cs_sel |= SELECTOR_RPL_MASK; cs_sel |= SELECTOR_RPL_MASK;
...@@ -2101,8 +2123,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt) ...@@ -2101,8 +2123,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
ctxt->_eip = ctxt->regs[VCPU_REGS_RDX]; ctxt->_eip = rdx;
ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX]; ctxt->regs[VCPU_REGS_RSP] = rcx;
return X86EMUL_CONTINUE; return X86EMUL_CONTINUE;
} }
...@@ -2555,10 +2577,13 @@ static int em_das(struct x86_emulate_ctxt *ctxt) ...@@ -2555,10 +2577,13 @@ static int em_das(struct x86_emulate_ctxt *ctxt)
static int em_call(struct x86_emulate_ctxt *ctxt) static int em_call(struct x86_emulate_ctxt *ctxt)
{ {
int rc;
long rel = ctxt->src.val; long rel = ctxt->src.val;
ctxt->src.val = (unsigned long)ctxt->_eip; ctxt->src.val = (unsigned long)ctxt->_eip;
jmp_rel(ctxt, rel); rc = jmp_rel(ctxt, rel);
if (rc != X86EMUL_CONTINUE)
return rc;
return em_push(ctxt); return em_push(ctxt);
} }
...@@ -2590,11 +2615,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) ...@@ -2590,11 +2615,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
{ {
int rc; int rc;
unsigned long eip;
ctxt->dst.type = OP_REG; rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
ctxt->dst.addr.reg = &ctxt->_eip; if (rc != X86EMUL_CONTINUE)
ctxt->dst.bytes = ctxt->op_bytes; return rc;
rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); rc = assign_eip_near(ctxt, eip);
if (rc != X86EMUL_CONTINUE) if (rc != X86EMUL_CONTINUE)
return rc; return rc;
register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val); register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], ctxt->src.val);
...@@ -2840,20 +2866,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt) ...@@ -2840,20 +2866,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
static int em_loop(struct x86_emulate_ctxt *ctxt) static int em_loop(struct x86_emulate_ctxt *ctxt)
{ {
int rc = X86EMUL_CONTINUE;
register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1); register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) && if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
(ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
jmp_rel(ctxt, ctxt->src.val); rc = jmp_rel(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE; return rc;
} }
static int em_jcxz(struct x86_emulate_ctxt *ctxt) static int em_jcxz(struct x86_emulate_ctxt *ctxt)
{ {
int rc = X86EMUL_CONTINUE;
if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
jmp_rel(ctxt, ctxt->src.val); rc = jmp_rel(ctxt, ctxt->src.val);
return X86EMUL_CONTINUE; return rc;
} }
static int em_cli(struct x86_emulate_ctxt *ctxt) static int em_cli(struct x86_emulate_ctxt *ctxt)
...@@ -3946,7 +3976,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3946,7 +3976,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
break; break;
case 0x70 ... 0x7f: /* jcc (short) */ case 0x70 ... 0x7f: /* jcc (short) */
if (test_cc(ctxt->b, ctxt->eflags)) if (test_cc(ctxt->b, ctxt->eflags))
jmp_rel(ctxt, ctxt->src.val); rc = jmp_rel(ctxt, ctxt->src.val);
break; break;
case 0x8d: /* lea r16/r32, m */ case 0x8d: /* lea r16/r32, m */
ctxt->dst.val = ctxt->src.addr.mem.ea; ctxt->dst.val = ctxt->src.addr.mem.ea;
...@@ -3994,7 +4024,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -3994,7 +4024,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
goto do_io_out; goto do_io_out;
case 0xe9: /* jmp rel */ case 0xe9: /* jmp rel */
case 0xeb: /* jmp rel short */ case 0xeb: /* jmp rel short */
jmp_rel(ctxt, ctxt->src.val); rc = jmp_rel(ctxt, ctxt->src.val);
ctxt->dst.type = OP_NONE; /* Disable writeback. */ ctxt->dst.type = OP_NONE; /* Disable writeback. */
break; break;
case 0xec: /* in al,dx */ case 0xec: /* in al,dx */
...@@ -4160,7 +4190,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) ...@@ -4160,7 +4190,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
break; break;
case 0x80 ... 0x8f: /* jnz rel, etc*/ case 0x80 ... 0x8f: /* jnz rel, etc*/
if (test_cc(ctxt->b, ctxt->eflags)) if (test_cc(ctxt->b, ctxt->eflags))
jmp_rel(ctxt, ctxt->src.val); rc = jmp_rel(ctxt, ctxt->src.val);
break; break;
case 0x90 ... 0x9f: /* setcc r/m8 */ case 0x90 ... 0x9f: /* setcc r/m8 */
ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment