Commit fd56e154 authored by Paolo Bonzini

KVM: emulator: fix execution close to the segment limit

Emulation of code that lies within 14 bytes of the segment limit (e.g.
RIP = 0xFFFFFFF2 after reset) is broken, because the emulator tries to read
as many as 15 bytes from the start of the instruction, and __linearize
fails when the passed (address, size) pair reaches beyond the segment.

To fix this, let __linearize return the maximum accessible size (clamped
to 2^32-1) for use in __do_insn_fetch_bytes, and avoid __linearize's own
limit check by passing zero as the desired size.
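
To make the arithmetic concrete, here is a small standalone sketch (plain
user-space C, not the kernel code). The values assume the real-mode reset
state, CS.base = 0xFFFF0000 and CS.limit = 0xFFFF, so EIP = 0xFFF2
corresponds to the RIP = 0xFFFFFFF2 example above: the old check rejects a
15-byte fetch outright, while the new scheme reports that 14 bytes are
still accessible.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lim  = 0xffff;   /* CS.limit after reset (expand-up segment) */
            uint32_t ea   = 0xfff2;   /* EIP; CS.base 0xFFFF0000 makes RIP 0xFFFFFFF2 */
            unsigned size = 15;       /* the fetcher used to ask for up to 15 bytes */

            /* Old behaviour: the whole 15-byte window must fit below the limit. */
            int old_bad = ea > lim || (uint32_t)(ea + size - 1) > lim;

            /*
             * New behaviour: report how many bytes are accessible and let the
             * caller fetch only that much (14 bytes here).
             */
            uint64_t max_size = (uint64_t)lim + 1 - ea;

            printf("old check faults: %d, max_size = %llu\n",
                   old_bad, (unsigned long long)max_size);
            return 0;
    }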

For expand-down segments, __linearize is performing a redundant check:
(u32)(addr.ea + size - 1) <= lim can only happen if addr.ea is close to
4GB, since size is tiny compared to 4GB and the u32 addition can only wrap
past 2^32 when addr.ea is within a few bytes of it; in that case,
addr.ea + size - 1 will also fail the check against the upper bound of the
segment (which is provided by the D/B bit). After eliminating the
redundant check, it is simple to compute *max_size for expand-down
segments too.
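
A similar standalone sketch for the expand-down case, using made-up
segment values (descriptor limit 0x0fff, D/B set, an access at effective
address 0xffffff00), just to show how *max_size comes out; this mirrors
the bookkeeping in the patch but is not the kernel code itself.

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t lim = 0x0fff;        /* descriptor limit: bytes 0..lim are invalid */
            int      db  = 1;             /* D/B bit selects the upper bound */
            uint32_t ea  = 0xffffff00;

            if (ea <= lim) {              /* below the valid window -> fault */
                    puts("bad");
                    return 1;
            }
            uint32_t upper = db ? 0xffffffffu : 0xffffu;
            if (ea > upper) {             /* above the valid window -> fault */
                    puts("bad");
                    return 1;
            }
            /* bytes ea..upper are accessible; clamp to 2^32-1 as the patch does */
            uint64_t max_size = (uint64_t)upper + 1 - ea;
            if (max_size > 0xffffffffull)
                    max_size = 0xffffffffull;

            printf("max_size = %llu\n", (unsigned long long)max_size);  /* 256 */
            return 0;
    }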

Now that the limit check is done in __do_insn_fetch_bytes, we want
to inject a general protection fault there if size < op_size (like
__linearize would have done), instead of just aborting.

This fixes booting Tiano Core from emulated flash with EPT disabled.

Cc: stable@vger.kernel.org
Fixes: 719d5a9b
Reported-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
parent 3606189f
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -641,7 +641,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		       struct segmented_address addr,
-		       unsigned size, bool write, bool fetch,
+		       unsigned *max_size, unsigned size,
+		       bool write, bool fetch,
 		       ulong *linear)
 {
 	struct desc_struct desc;
@@ -652,10 +653,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -673,20 +679,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
@@ -721,7 +732,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
 
@@ -746,17 +758,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible.  We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself.  Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
@@ -766,7 +788,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
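
For a feel of how the pieces combine in __do_insn_fetch_bytes, here is an
illustrative user-space sketch of the three clamps (15-byte instruction
limit, segment limit via max_size, page boundary); the numbers are made up
to match the reset example, not taken from the emulator.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    int main(void)
    {
            unsigned cur_size = 0;            /* bytes already in the fetch cache */
            unsigned max_size = 14;           /* what __linearize reported for the segment */
            uint64_t linear   = 0xfffffff2;   /* linear address of the next byte */
            unsigned op_size  = 2;            /* bytes the decoder needs right now */

            /* 15 ^ cur_size equals 15 - cur_size for cur_size in 0..15 */
            unsigned size = min_u(15u ^ cur_size, max_size);
            size = min_u(size, PAGE_SIZE - (unsigned)(linear & (PAGE_SIZE - 1)));

            if (size < op_size)
                    puts("#GP(0)");           /* instruction crosses the segment limit */
            else
                    printf("fetch %u bytes\n", size);   /* 14 here */
            return 0;
    }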