Commit 8aa2d4b4 authored by Alexei Starovoitov, committed by Daniel Borkmann

selftests/bpf: synthetic tests to push verifier limits

Add a test to generate 1m ld_imm64 insns to stress the verifier.

Bump the size of fill_ld_abs_vlan_push_pop test from 4k to 29k
and jump_around_ld_abs from 4k to 5.5k.
Larger sizes are not possible due to 16-bit offset encoding
in jump instructions.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent e5e7a8f2
...@@ -50,6 +50,7 @@ ...@@ -50,6 +50,7 @@
#include "../../../include/linux/filter.h" #include "../../../include/linux/filter.h"
#define MAX_INSNS BPF_MAXINSNS #define MAX_INSNS BPF_MAXINSNS
#define MAX_TEST_INSNS 1000000
#define MAX_FIXUPS 8 #define MAX_FIXUPS 8
#define MAX_NR_MAPS 14 #define MAX_NR_MAPS 14
#define MAX_TEST_RUNS 8 #define MAX_TEST_RUNS 8
...@@ -66,6 +67,7 @@ static int skips; ...@@ -66,6 +67,7 @@ static int skips;
struct bpf_test { struct bpf_test {
const char *descr; const char *descr;
struct bpf_insn insns[MAX_INSNS]; struct bpf_insn insns[MAX_INSNS];
struct bpf_insn *fill_insns;
int fixup_map_hash_8b[MAX_FIXUPS]; int fixup_map_hash_8b[MAX_FIXUPS];
int fixup_map_hash_48b[MAX_FIXUPS]; int fixup_map_hash_48b[MAX_FIXUPS];
int fixup_map_hash_16b[MAX_FIXUPS]; int fixup_map_hash_16b[MAX_FIXUPS];
...@@ -83,6 +85,7 @@ struct bpf_test { ...@@ -83,6 +85,7 @@ struct bpf_test {
const char *errstr; const char *errstr;
const char *errstr_unpriv; const char *errstr_unpriv;
uint32_t retval, retval_unpriv, insn_processed; uint32_t retval, retval_unpriv, insn_processed;
int prog_len;
enum { enum {
UNDEF, UNDEF,
ACCEPT, ACCEPT,
...@@ -119,10 +122,11 @@ struct other_val { ...@@ -119,10 +122,11 @@ struct other_val {
static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
{ {
/* test: {skb->data[0], vlan_push} x 68 + {skb->data[0], vlan_pop} x 68 */ /* test: {skb->data[0], vlan_push} x 51 + {skb->data[0], vlan_pop} x 51 */
#define PUSH_CNT 51 #define PUSH_CNT 51
unsigned int len = BPF_MAXINSNS; /* jump range is limited to 16 bit. PUSH_CNT of ld_abs needs room */
struct bpf_insn *insn = self->insns; unsigned int len = (1 << 15) - PUSH_CNT * 2 * 5 * 6;
struct bpf_insn *insn = self->fill_insns;
int i = 0, j, k = 0; int i = 0, j, k = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
...@@ -156,12 +160,14 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self) ...@@ -156,12 +160,14 @@ static void bpf_fill_ld_abs_vlan_push_pop(struct bpf_test *self)
for (; i < len - 1; i++) for (; i < len - 1; i++)
insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef); insn[i] = BPF_ALU32_IMM(BPF_MOV, BPF_REG_0, 0xbef);
insn[len - 1] = BPF_EXIT_INSN(); insn[len - 1] = BPF_EXIT_INSN();
self->prog_len = len;
} }
static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
{ {
struct bpf_insn *insn = self->insns; struct bpf_insn *insn = self->fill_insns;
unsigned int len = BPF_MAXINSNS; /* jump range is limited to 16 bit. every ld_abs is replaced by 6 insns */
unsigned int len = (1 << 15) / 6;
int i = 0; int i = 0;
insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1); insn[i++] = BPF_MOV64_REG(BPF_REG_6, BPF_REG_1);
...@@ -171,11 +177,12 @@ static void bpf_fill_jump_around_ld_abs(struct bpf_test *self) ...@@ -171,11 +177,12 @@ static void bpf_fill_jump_around_ld_abs(struct bpf_test *self)
while (i < len - 1) while (i < len - 1)
insn[i++] = BPF_LD_ABS(BPF_B, 1); insn[i++] = BPF_LD_ABS(BPF_B, 1);
insn[i] = BPF_EXIT_INSN(); insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
} }
static void bpf_fill_rand_ld_dw(struct bpf_test *self) static void bpf_fill_rand_ld_dw(struct bpf_test *self)
{ {
struct bpf_insn *insn = self->insns; struct bpf_insn *insn = self->fill_insns;
uint64_t res = 0; uint64_t res = 0;
int i = 0; int i = 0;
...@@ -193,6 +200,7 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self) ...@@ -193,6 +200,7 @@ static void bpf_fill_rand_ld_dw(struct bpf_test *self)
insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32); insn[i++] = BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32);
insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1); insn[i++] = BPF_ALU64_REG(BPF_XOR, BPF_REG_0, BPF_REG_1);
insn[i] = BPF_EXIT_INSN(); insn[i] = BPF_EXIT_INSN();
self->prog_len = i + 1;
res ^= (res >> 32); res ^= (res >> 32);
self->retval = (uint32_t)res; self->retval = (uint32_t)res;
} }
...@@ -520,8 +528,10 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type, ...@@ -520,8 +528,10 @@ static void do_test_fixup(struct bpf_test *test, enum bpf_prog_type prog_type,
int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage; int *fixup_percpu_cgroup_storage = test->fixup_percpu_cgroup_storage;
int *fixup_map_spin_lock = test->fixup_map_spin_lock; int *fixup_map_spin_lock = test->fixup_map_spin_lock;
if (test->fill_helper) if (test->fill_helper) {
test->fill_insns = calloc(MAX_TEST_INSNS, sizeof(struct bpf_insn));
test->fill_helper(test); test->fill_helper(test);
}
/* Allocating HTs with 1 elem is fine here, since we only test /* Allocating HTs with 1 elem is fine here, since we only test
* for verifier and not do a runtime lookup, so the only thing * for verifier and not do a runtime lookup, so the only thing
...@@ -718,12 +728,17 @@ static void do_test_single(struct bpf_test *test, bool unpriv, ...@@ -718,12 +728,17 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
prog_type = BPF_PROG_TYPE_SOCKET_FILTER; prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
fixup_skips = skips; fixup_skips = skips;
do_test_fixup(test, prog_type, prog, map_fds); do_test_fixup(test, prog_type, prog, map_fds);
if (test->fill_insns) {
prog = test->fill_insns;
prog_len = test->prog_len;
} else {
prog_len = probe_filter_length(prog);
}
/* If there were some map skips during fixup due to missing bpf /* If there were some map skips during fixup due to missing bpf
* features, skip this test. * features, skip this test.
*/ */
if (fixup_skips != skips) if (fixup_skips != skips)
return; return;
prog_len = probe_filter_length(prog);
pflags = 0; pflags = 0;
if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT) if (test->flags & F_LOAD_WITH_STRICT_ALIGNMENT)
...@@ -731,7 +746,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv, ...@@ -731,7 +746,7 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS) if (test->flags & F_NEEDS_EFFICIENT_UNALIGNED_ACCESS)
pflags |= BPF_F_ANY_ALIGNMENT; pflags |= BPF_F_ANY_ALIGNMENT;
fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags, fd_prog = bpf_verify_program(prog_type, prog, prog_len, pflags,
"GPL", 0, bpf_vlog, sizeof(bpf_vlog), 1); "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 4);
if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) { if (fd_prog < 0 && !bpf_probe_prog_type(prog_type, 0)) {
printf("SKIP (unsupported program type %d)\n", prog_type); printf("SKIP (unsupported program type %d)\n", prog_type);
skips++; skips++;
...@@ -830,6 +845,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv, ...@@ -830,6 +845,8 @@ static void do_test_single(struct bpf_test *test, bool unpriv,
goto fail_log; goto fail_log;
} }
close_fds: close_fds:
if (test->fill_insns)
free(test->fill_insns);
close(fd_prog); close(fd_prog);
for (i = 0; i < MAX_NR_MAPS; i++) for (i = 0; i < MAX_NR_MAPS; i++)
close(map_fds[i]); close(map_fds[i]);
......
...@@ -34,3 +34,12 @@ ...@@ -34,3 +34,12 @@
.result = ACCEPT, .result = ACCEPT,
.retval = 5, .retval = 5,
}, },
{
"ld_dw: xor semi-random 64 bit imms, test 5",
.insns = { },
.data = { },
.fill_helper = bpf_fill_rand_ld_dw,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1000000 - 6,
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment