Commit 129a72a0 authored by Steve Rutherford, committed by Paolo Bonzini

KVM: x86: Introduce segmented_write_std

Introduces segmented_write_std.

Switches from emulated reads/writes to standard read/writes in fxsave,
fxrstor, sgdt, and sidt.  This fixes CVE-2017-2584, a longstanding
kernel memory leak.

Since commit 283c95d0 ("KVM: x86: emulate FXSAVE and FXRSTOR",
2016-11-09), which is luckily not yet in any final release, this would
also be an exploitable kernel memory *write*!
Reported-by: Dmitry Vyukov <dvyukov@google.com>
Cc: stable@vger.kernel.org
Fixes: 96051572
Fixes: 283c95d0
Suggested-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Steve Rutherford <srutherford@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent cef84c30
@@ -818,6 +818,20 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
 }
 
+static int segmented_write_std(struct x86_emulate_ctxt *ctxt,
+			       struct segmented_address addr,
+			       void *data,
+			       unsigned int size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 /*
  * Prefetch the remaining bytes of the instruction without crossing page
  * boundary if they are not in fetch_cache yet.
@@ -3685,8 +3699,8 @@ static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
 	}
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, ctxt->dst.addr.mem,
-			       &desc_ptr, 2 + ctxt->op_bytes);
+	return segmented_write_std(ctxt, ctxt->dst.addr.mem,
+				   &desc_ptr, 2 + ctxt->op_bytes);
 }
 
 static int em_sgdt(struct x86_emulate_ctxt *ctxt)
@@ -3932,7 +3946,7 @@ static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 	else
 		size = offsetof(struct fxregs_state, xmm_space[0]);
 
-	return segmented_write(ctxt, ctxt->memop.addr.mem, &fx_state, size);
+	return segmented_write_std(ctxt, ctxt->memop.addr.mem, &fx_state, size);
 }
 
 static int fxrstor_fixup(struct x86_emulate_ctxt *ctxt,
@@ -3974,7 +3988,7 @@ static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = segmented_read(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
+	rc = segmented_read_std(ctxt, ctxt->memop.addr.mem, &fx_state, 512);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
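
For context, the new segmented_write_std mirrors segmented_read_std directly above it: resolve the segmented address to a linear address with linearize(), then hand the buffer to the ->write_std callback, which copies the bytes straight to guest memory instead of taking the emulated-write path that segmented_write uses. The stand-alone C sketch below illustrates only that linearize-then-write_std shape; the demo_* names and the simplified types are hypothetical stand-ins, not the real KVM emulator structures.

/*
 * Minimal stand-alone sketch of the linearize-then-write_std pattern used by
 * segmented_write_std().  All demo_* names and types are hypothetical
 * simplifications, not the real KVM emulator interfaces.
 */
#include <stdio.h>
#include <string.h>

#define X86EMUL_CONTINUE	0
#define X86EMUL_PROPAGATE_FAULT	1

struct demo_segmented_address {
	unsigned long ea;		/* effective address within the segment */
};

struct demo_ctxt {
	unsigned char guest_mem[64];	/* stand-in for guest address space */
};

/* Stand-in for linearize(): flat zero-based segment, only a range check. */
static int demo_linearize(struct demo_ctxt *ctxt,
			  struct demo_segmented_address addr,
			  unsigned int size, unsigned long *linear)
{
	if (addr.ea + size > sizeof(ctxt->guest_mem))
		return X86EMUL_PROPAGATE_FAULT;	/* the real code injects a fault */
	*linear = addr.ea;
	return X86EMUL_CONTINUE;
}

/* Stand-in for ->write_std(): copy the buffer straight into guest memory. */
static int demo_write_std(struct demo_ctxt *ctxt, unsigned long linear,
			  const void *data, unsigned int size)
{
	memcpy(ctxt->guest_mem + linear, data, size);
	return X86EMUL_CONTINUE;
}

/* Same shape as segmented_write_std(): linearize, then write_std. */
static int demo_segmented_write_std(struct demo_ctxt *ctxt,
				    struct demo_segmented_address addr,
				    const void *data, unsigned int size)
{
	unsigned long linear;
	int rc = demo_linearize(ctxt, addr, size, &linear);

	if (rc != X86EMUL_CONTINUE)
		return rc;
	return demo_write_std(ctxt, linear, data, size);
}

int main(void)
{
	struct demo_ctxt ctxt = { { 0 } };
	struct demo_segmented_address dst = { .ea = 8 };
	unsigned char desc_ptr[10] = { 0xff, 0xff };	/* e.g. a 10-byte SIDT image */
	int rc = demo_segmented_write_std(&ctxt, dst, desc_ptr, sizeof(desc_ptr));

	printf("rc=%d, first byte at guest offset 8: %#x\n", rc, ctxt.guest_mem[8]);
	return 0;
}

In the actual helper, faults from linearize() propagate as X86EMUL return codes and write_std reports exceptions through &ctxt->exception, exactly as shown in the first hunk above.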