Commit 3cc56bbb authored by Elena Reshetova, committed by Kleber Sacilotto de Souza

x86, bpf, jit: prevent speculative execution when JIT is enabled

CVE-2017-5753 (Spectre v1 Intel)

When constant blinding is enabled (bpf_jit_harden = 1), this adds
an observable speculation barrier before emitting x86 jitted code
for the BPF_ALU(64)_OR_X and BPF_ALU_LSH_X
(for the BPF_REG_AX register) eBPF instructions. This is needed in order
to prevent speculative execution on out-of-bounds BPF_MAP array
indexes when the JIT is enabled. This way arbitrary kernel memory is
not exposed through side-channel attacks.
Signed-off-by: Elena Reshetova <elena.reshetova@intel.com>
Signed-off-by: Andy Whitcroft <apw@canonical.com>
Signed-off-by: Kleber Sacilotto de Souza <kleber.souza@canonical.com>
parent 81774d48
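For context, a minimal user-space sketch of the bounds-check-then-load pattern this barrier targets. Everything here is illustrative (the array, its size and the function are made up); in the kernel the equivalent gadget is reached through JITed BPF map accesses, not through code like this.

#include <stdint.h>

uint8_t array[16];
volatile uint8_t sink;

/* A bounds check followed by a dependent load: the CPU may speculate past
 * the check with an out-of-bounds idx and leave a cache footprint behind.
 * An lfence between the check and the access keeps the load from issuing
 * until the branch is resolved. */
void read_element(uint64_t idx, uint64_t len)
{
	if (idx < len) {
		asm volatile("lfence" ::: "memory");	/* speculation barrier */
		sink = array[idx];
	}
}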
@@ -15,6 +15,7 @@
#include <linux/bpf.h>
int bpf_jit_enable __read_mostly;
u8 bpf_jit_fence = 0;
/*
* assembly code in arch/x86/net/bpf_jit.S
@@ -106,6 +107,18 @@ static void bpf_flush_icache(void *start, void *end)
set_fs(old_fs);
}
static void emit_memory_barrier(u8 **pprog)
{
u8 *prog = *pprog;
int cnt = 0;
if (bpf_jit_fence)
EMIT3(0x0f, 0xae, 0xe8);
*pprog = prog;
return;
}
#define CHOOSE_LOAD_FUNC(K, func) \
((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
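The three bytes passed to EMIT3() in emit_memory_barrier() above are the x86 encoding of LFENCE (0F AE E8). A standalone sketch, purely illustrative, showing that the raw bytes and the mnemonic are the same barrier:

#include <stdio.h>

int main(void)
{
	/* Both statements execute an LFENCE; the .byte form uses the same
	 * 0F AE E8 encoding that emit_memory_barrier() writes into the
	 * JITed image. */
	asm volatile(".byte 0x0f, 0xae, 0xe8" ::: "memory");
	asm volatile("lfence" ::: "memory");
	puts("lfence executed");
	return 0;
}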
@@ -379,7 +392,7 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ADD: b2 = 0x01; break;
case BPF_SUB: b2 = 0x29; break;
case BPF_AND: b2 = 0x21; break;
case BPF_OR: b2 = 0x09; break;
case BPF_OR: b2 = 0x09; emit_memory_barrier(&prog); break;
case BPF_XOR: b2 = 0x31; break;
}
if (BPF_CLASS(insn->code) == BPF_ALU64)
@@ -607,6 +620,16 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
case BPF_ALU64 | BPF_RSH | BPF_X:
case BPF_ALU64 | BPF_ARSH | BPF_X:
/* If blinding is enabled, each
* BPF_LD | BPF_IMM | BPF_DW instruction
* is converted to 4 eBPF instructions with
* BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32)
* always present (number 3). Detect such cases
* and insert memory barriers. */
if ((BPF_CLASS(insn->code) == BPF_ALU64)
&& (BPF_OP(insn->code) == BPF_LSH)
&& (src_reg == BPF_REG_AX))
emit_memory_barrier(&prog);
/* check for bad case when dst_reg == rcx */
if (dst_reg == BPF_REG_4) {
/* mov r11, dst_reg */
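For reference, the constant-blinding rewrite that the comment in the hunk above relies on turns the upper half of a BPF_LD | BPF_IMM | BPF_DW load into roughly the following four instructions. This is a sketch of what bpf_jit_blind_insn() produces, not a copy of it: rnd stands for the per-program random blinding constant, upper32 for the high 32 bits of the immediate, and dst for the destination register of the original load.

#include <linux/filter.h>

/* Sketch only: the blinded expansion whose third instruction, the LSH by 32
 * on BPF_REG_AX, is what the check above keys on. */
static void sketch_blinded_ld_imm64_hi(struct bpf_insn *to, u32 rnd,
				       u32 upper32, u8 dst)
{
	to[0] = BPF_ALU64_IMM(BPF_MOV, BPF_REG_AX, rnd ^ upper32);
	to[1] = BPF_ALU64_IMM(BPF_XOR, BPF_REG_AX, rnd);
	to[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_AX, 32);	/* insn number 3 */
	to[3] = BPF_ALU64_REG(BPF_MOV, dst, BPF_REG_AX);
}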
@@ -1059,6 +1082,9 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
if (!prog || !prog->len)
return;
if (bpf_jit_fence_present() && bpf_jit_blinding_enabled())
bpf_jit_fence = 1;
addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
if (!addrs)
return prog;
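Since the fence is only armed when constant blinding is active, the mitigation is effectively gated by the net.core.bpf_jit_harden sysctl. A small illustrative user-space helper that switches it on (error handling kept minimal):

#include <stdio.h>

int main(void)
{
	/* 1 = blind constants for unprivileged BPF programs,
	 * 2 = blind constants for all programs. Needs root. */
	FILE *f = fopen("/proc/sys/net/core/bpf_jit_harden", "w");

	if (!f) {
		perror("bpf_jit_harden");
		return 1;
	}
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}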
@@ -535,6 +535,14 @@ static inline bool bpf_jit_blinding_enabled(void)
return true;
}
static inline bool bpf_jit_fence_present(void)
{
/* Check if a serializing lfence is present on the CPU */
if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
return true;
return false;
}
#else
static inline void bpf_jit_compile(struct bpf_prog *fp)
{