Commit ac61a757 authored by Vegard Nossum

kmemcheck: add opcode self-testing at boot

We've had some trouble in the past with weird instructions. This
patch adds a self-test framework which can be used to verify that
a given set of opcodes is decoded correctly. Of course, opcodes
that are not tested can still give wrong results.

In short, this is just a safeguard to catch unintentional changes
in the opcode decoder. It does not mean that errors can't still
occur!

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegard.nossum@gmail.com>
parent eb63657e
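
The self-test added below is table-driven: each entry in selftest_opcodes[]
pairs a literal opcode byte sequence with the memory-operand size the decoder
is expected to report, and kmemcheck_init() walks the table at boot, disabling
kmemcheck if any entry mismatches. As a rough stand-alone sketch of the same
pattern (not the kernel code: the decode_mem_size() helper and its two table
entries are made-up stand-ins for kmemcheck_opcode_decode() and the real
table):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the kernel's opcode decoder: report the size
 * (in bytes) of the memory operand for the two encodings used below.
 * This is NOT kmemcheck_opcode_decode().
 */
static void decode_mem_size(const uint8_t *insn, unsigned int *size)
{
        if (insn[0] == 0xf3 && insn[1] == 0xa4)
                *size = 1;      /* f3 a4 = rep movsb: 1-byte accesses */
        else if (insn[0] == 0xf3 && insn[1] == 0xa5)
                *size = 4;      /* f3 a5 = rep movsl: 4-byte accesses */
        else
                *size = 0;      /* unknown encoding */
}

struct selftest_opcode {
        unsigned int expected_size;
        const uint8_t *insn;
        const char *desc;
};

static const struct selftest_opcode tests[] = {
        {1, (const uint8_t *) "\xf3\xa4", "rep movsb"},
        {4, (const uint8_t *) "\xf3\xa5", "rep movsl"},
};

int main(void)
{
        bool pass = true;
        unsigned int i;

        for (i = 0; i < sizeof(tests) / sizeof(tests[0]); ++i) {
                unsigned int size;

                decode_mem_size(tests[i].insn, &size);
                if (size != tests[i].expected_size) {
                        printf("opcode %s: expected size %u, got %u\n",
                               tests[i].desc, tests[i].expected_size, size);
                        pass = false;
                }
        }

        /* Mirror the boot-time behaviour: a failing self-test is an error. */
        return pass ? 0 : 1;
}

In the patch itself the equivalent loop lives in kmemcheck_selftest(), and
kmemcheck_init() turns a failure into kmemcheck_enabled = 0 plus -EINVAL.
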
arch/x86/mm/kmemcheck/Makefile

-obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o
+obj-y := error.o kmemcheck.o opcode.o pte.o selftest.o shadow.o
arch/x86/mm/kmemcheck/kmemcheck.c

@@ -29,8 +29,10 @@
#include "error.h" #include "error.h"
#include "opcode.h" #include "opcode.h"
#include "pte.h" #include "pte.h"
#include "selftest.h"
#include "shadow.h" #include "shadow.h"
#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT #ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
# define KMEMCHECK_ENABLED 0 # define KMEMCHECK_ENABLED 0
#endif #endif
@@ -47,8 +49,6 @@ int kmemcheck_enabled = KMEMCHECK_ENABLED;
 int __init kmemcheck_init(void)
 {
-        printk(KERN_INFO "kmemcheck: Initialized\n");
-
 #ifdef CONFIG_SMP
         /*
          * Limit SMP to use a single CPU. We rely on the fact that this code
@@ -61,25 +61,18 @@ int __init kmemcheck_init(void)
         }
 #endif

+        if (!kmemcheck_selftest()) {
+                printk(KERN_INFO "kmemcheck: self-tests failed; disabling\n");
+                kmemcheck_enabled = 0;
+                return -EINVAL;
+        }
+
+        printk(KERN_INFO "kmemcheck: Initialized\n");
         return 0;
 }

 early_initcall(kmemcheck_init);
-#ifdef CONFIG_KMEMCHECK_DISABLED_BY_DEFAULT
-# define KMEMCHECK_ENABLED 0
-#endif
-
-#ifdef CONFIG_KMEMCHECK_ENABLED_BY_DEFAULT
-# define KMEMCHECK_ENABLED 1
-#endif
-
-#ifdef CONFIG_KMEMCHECK_ONESHOT_BY_DEFAULT
-# define KMEMCHECK_ENABLED 2
-#endif
-
-int kmemcheck_enabled = KMEMCHECK_ENABLED;
-
 /*
  * We need to parse the kmemcheck= option before any memory is allocated.
  */
...

arch/x86/mm/kmemcheck/selftest.c (new file)
#include <linux/kernel.h>

#include "opcode.h"
#include "selftest.h"

struct selftest_opcode {
        unsigned int expected_size;
        const uint8_t *insn;
        const char *desc;
};

static const struct selftest_opcode selftest_opcodes[] = {
        /* REP MOVS */
        {1, "\xf3\xa4",                 "rep movsb <mem8>, <mem8>"},
        {4, "\xf3\xa5",                 "rep movsl <mem32>, <mem32>"},

        /* MOVZX / MOVZXD */
        {1, "\x66\x0f\xb6\x51\xf8",     "movzwq <mem8>, <reg16>"},
        {1, "\x0f\xb6\x51\xf8",         "movzwq <mem8>, <reg32>"},

        /* MOVSX / MOVSXD */
        {1, "\x66\x0f\xbe\x51\xf8",     "movswq <mem8>, <reg16>"},
        {1, "\x0f\xbe\x51\xf8",         "movswq <mem8>, <reg32>"},

#ifdef CONFIG_X86_64
        /* MOVZX / MOVZXD */
        {1, "\x49\x0f\xb6\x51\xf8",     "movzbq <mem8>, <reg64>"},
        {2, "\x49\x0f\xb7\x51\xf8",     "movzbq <mem16>, <reg64>"},

        /* MOVSX / MOVSXD */
        {1, "\x49\x0f\xbe\x51\xf8",     "movsbq <mem8>, <reg64>"},
        {2, "\x49\x0f\xbf\x51\xf8",     "movsbq <mem16>, <reg64>"},
        {4, "\x49\x63\x51\xf8",         "movslq <mem32>, <reg64>"},
#endif
};

static bool selftest_opcode_one(const struct selftest_opcode *op)
{
        unsigned size;

        kmemcheck_opcode_decode(op->insn, &size);

        if (size == op->expected_size)
                return true;

        printk(KERN_WARNING "kmemcheck: opcode %s: expected size %d, got %d\n",
                op->desc, op->expected_size, size);
        return false;
}

static bool selftest_opcodes_all(void)
{
        bool pass = true;
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(selftest_opcodes); ++i)
                pass = pass && selftest_opcode_one(&selftest_opcodes[i]);

        return pass;
}

bool kmemcheck_selftest(void)
{
        bool pass = true;

        pass = pass && selftest_opcodes_all();

        return pass;
}
arch/x86/mm/kmemcheck/selftest.h (new file)

#ifndef ARCH_X86_MM_KMEMCHECK_SELFTEST_H
#define ARCH_X86_MM_KMEMCHECK_SELFTEST_H

bool kmemcheck_selftest(void);

#endif