Commit 20600f3e authored by Alexei Starovoitov

Merge branch 'split-test_verifier'

Jakub Kicinski says:

====================
The tools/testing/selftests/bpf/test_verifier.c file is
way too large, and since most people add their tests at
the end of the list it's very prone to conflicts.

Break it up in the simplest possible way - slice the
array up into smaller C files and include them in the
right spot.

Tested:
$ make -C tools/testing/selftests/bpf/
$ cd tools/testing/selftests/bpf/ ; make

v2:

The indentation is reduced further, as discussed, and long lines are folded.
The conversion was scripted and double-checked by hand.
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents ae575c8a 48729226
@@ -212,4 +212,17 @@ ifeq ($(DWARF2BTF),y)
	$(BTF_PAHOLE) -J $@
endif

$(OUTPUT)/test_verifier: $(OUTPUT)/verifier/tests.h
$(OUTPUT)/test_verifier: CFLAGS += -I$(OUTPUT)

VERIFIER_TEST_FILES := $(wildcard verifier/*.c)
$(OUTPUT)/verifier/tests.h: $(VERIFIER_TEST_FILES)
	$(shell ( cd verifier/; \
		  echo '/* Generated header, do not edit */'; \
		  echo '#ifdef FILL_ARRAY'; \
		  ls *.c 2> /dev/null | \
			sed -e 's@\(.*\)@#include \"\1\"@'; \
		  echo '#endif' \
		 ) > $(OUTPUT)/verifier/tests.h)

EXTRA_CLEAN := $(TEST_CUSTOM_PROGS) $(ALU32_BUILD_DIR)
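The generated header is consumed from inside test_verifier.c's test array — the '#ifdef FILL_ARRAY' guard emitted above makes the #includes visible only at that one spot. A minimal sketch of the consuming side (field layout abbreviated; the real struct bpf_test has many more members):

/* test_verifier.c: each verifier/*.c fragment is a comma-terminated
 * list of struct bpf_test initializers, so including the generated
 * header inside the array literal slots every file's tests in place.
 */
static struct bpf_test tests[] = {
#define FILL_ARRAY
#include <verifier/tests.h>
#undef FILL_ARRAY
};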
{
"invalid and of negative number",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_1, -4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
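/* The test above shows that ANDing with a negative constant (-4) is not a
 * usable bounds check: unlike a small positive mask, it does not narrow the
 * register's max value enough, so after the shift the computed offset cannot
 * be proven to stay inside the 48-byte map value. The companion field
 * .fixup_map_hash_48b = { 3 } tells the harness to patch instruction 3 (the
 * BPF_LD_MAP_FD) with the fd of a hash map whose values are 48 bytes.
 */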
{
"invalid range check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 12),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_IMM(BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_MOD, BPF_REG_1, 2),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_ALU32_REG(BPF_AND, BPF_REG_9, BPF_REG_1),
BPF_ALU32_IMM(BPF_ADD, BPF_REG_9, 1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_9, 1),
BPF_MOV32_IMM(BPF_REG_3, 1),
BPF_ALU32_REG(BPF_SUB, BPF_REG_3, BPF_REG_9),
BPF_ALU32_IMM(BPF_MUL, BPF_REG_3, 0x10000000),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_3),
BPF_STX_MEM(BPF_W, BPF_REG_0, BPF_REG_3, 0),
BPF_MOV64_REG(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr = "R0 max value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
},
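/* Many entries carry two expectations: .result/.errstr apply to a privileged
 * loader, while .result_unpriv/.errstr_unpriv apply to an unprivileged one,
 * which is subject to extra pointer-leak checks — hence ACCEPT for root but
 * "R0 leaks addr" REJECT without privileges.
 */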
{
"valid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, 4),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 5),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, MAX_ENTRIES, 3),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"valid map access into an array with a signed variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 9),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_1, 0xffffffff, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.result_unpriv = REJECT,
.result = ACCEPT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a constant",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, (MAX_ENTRIES + 1) << 2,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr = "invalid access to map value, value_size=48 off=48 size=8",
.result = REJECT,
},
{
"invalid map access into an array with a register",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_MOV64_IMM(BPF_REG_1, MAX_ENTRIES + 1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr = "R0 min value is outside of the array range",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a variable",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 4),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr = "R0 unbounded memory access, make sure to bounds check any array access into a map",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with no floor check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES),
BPF_JMP_REG(BPF_JSGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "R0 unbounded memory access",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 7),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV32_IMM(BPF_REG_2, MAX_ENTRIES + 1),
BPF_JMP_REG(BPF_JGT, BPF_REG_2, BPF_REG_1, 1),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_0, 0, offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3 },
.errstr_unpriv = "R0 leaks addr",
.errstr = "invalid access to map value, value_size=48 off=44 size=8",
.result_unpriv = REJECT,
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid map access into an array with a invalid max check",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 10),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_8),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_0,
offsetof(struct test_val, foo)),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 3, 11 },
.errstr = "R0 pointer += pointer",
.result = REJECT,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"empty prog",
.insns = {
},
.errstr = "unknown opcode 00",
.result = REJECT,
},
{
"only exit insn",
.insns = {
BPF_EXIT_INSN(),
},
.errstr = "R0 !read_ok",
.result = REJECT,
},
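/* A bare BPF_EXIT_INSN() fails because R0, the return-value register, is
 * never written: exit reads R0, and reading an uninitialized register is
 * "!read_ok".
 */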
{
"no bpf_exit",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_0, BPF_REG_2),
},
.errstr = "not an exit",
.result = REJECT,
},
{
"invalid call insn1",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL | BPF_X, 0, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode 8d",
.result = REJECT,
},
{
"invalid call insn2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 1, 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_CALL uses reserved",
.result = REJECT,
},
{
"invalid function call",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, 1234567),
BPF_EXIT_INSN(),
},
.errstr = "invalid func unknown#1234567",
.result = REJECT,
},
{
"invalid argument register",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.errstr = "R1 !read_ok",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"non-invalid argument register",
.insns = {
BPF_ALU64_REG(BPF_MOV, BPF_REG_6, BPF_REG_1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_ALU64_REG(BPF_MOV, BPF_REG_1, BPF_REG_6),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_cgroup_classid),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"add+sub+mul",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 2),
BPF_MOV64_IMM(BPF_REG_2, 3),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -1),
BPF_ALU64_IMM(BPF_MUL, BPF_REG_1, 3),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = -3,
},
{
"xor32 zero extend check",
.insns = {
BPF_MOV32_IMM(BPF_REG_2, -1),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 32),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 0xffff),
BPF_ALU32_REG(BPF_XOR, BPF_REG_2, BPF_REG_2),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_JMP_IMM(BPF_JNE, BPF_REG_2, 0, 1),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"arsh32 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 on imm 2",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 0x1122334485667788),
BPF_ALU32_IMM(BPF_ARSH, BPF_REG_0, 7),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = -16069393,
},
{
"arsh32 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 0,
},
{
"arsh32 on reg 2",
.insns = {
BPF_LD_IMM64(BPF_REG_0, 0xffff55667788),
BPF_MOV64_IMM(BPF_REG_1, 15),
BPF_ALU32_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 43724,
},
{
"arsh64 on imm",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_0, 5),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"arsh64 on reg",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_ALU64_REG(BPF_ARSH, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"invalid 64-bit BPF_END",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 0),
{
.code = BPF_ALU64 | BPF_END | BPF_TO_LE,
.dst_reg = BPF_REG_0,
.src_reg = 0,
.off = 0,
.imm = 32,
},
BPF_EXIT_INSN(),
},
.errstr = "unknown opcode d7",
.result = REJECT,
},
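/* The raw struct bpf_insn above hand-encodes an opcode the helper macros
 * refuse to emit: BPF_ALU64 (0x07) | BPF_END (0xd0) gives 0xd7, hence the
 * expected "unknown opcode d7" — byte-swap instructions are only defined on
 * the 32-bit BPF_ALU class.
 */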
{
"mov64 src == dst",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_2),
// Check bounds are OK
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"mov64 src != dst",
.insns = {
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_3),
// Check bounds are OK
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"stack out of bounds",
.insns = {
BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack",
.result = REJECT,
},
{
"uninitialized stack1",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 2 },
.errstr = "invalid indirect read from stack",
.result = REJECT,
},
{
"uninitialized stack2",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -8),
BPF_EXIT_INSN(),
},
.errstr = "invalid read from stack",
.result = REJECT,
},
{
"invalid fp arithmetic",
/* If this gets ever changed, make sure JITs can deal with it. */
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_SUB, BPF_REG_1, 8),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "R1 subtraction from stack pointer",
.result = REJECT,
},
{
"non-invalid fp arithmetic",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_10, BPF_REG_0, -8),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"misaligned read from stack",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, -4),
BPF_EXIT_INSN(),
},
.errstr = "misaligned stack access",
.result = REJECT,
},
{
"invalid src register in STX",
.insns = {
BPF_STX_MEM(BPF_B, BPF_REG_10, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R15 is invalid",
.result = REJECT,
},
{
"invalid dst register in STX",
.insns = {
BPF_STX_MEM(BPF_B, 14, BPF_REG_10, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid dst register in ST",
.insns = {
BPF_ST_MEM(BPF_B, 14, -1, -1),
BPF_EXIT_INSN(),
},
.errstr = "R14 is invalid",
.result = REJECT,
},
{
"invalid src register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, BPF_REG_0, 12, 0),
BPF_EXIT_INSN(),
},
.errstr = "R12 is invalid",
.result = REJECT,
},
{
"invalid dst register in LDX",
.insns = {
BPF_LDX_MEM(BPF_B, 11, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R11 is invalid",
.result = REJECT,
},
{
"check deducing bounds from const, 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 tried to subtract pointer from scalar",
},
{
"check deducing bounds from const, 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 1, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"check deducing bounds from const, 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 tried to subtract pointer from scalar",
},
{
"check deducing bounds from const, 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
},
{
"check deducing bounds from const, 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 1, 1),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 tried to subtract pointer from scalar",
},
{
"check deducing bounds from const, 6",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
BPF_EXIT_INSN(),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 tried to subtract pointer from scalar",
},
{
"check deducing bounds from const, 7",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, ~0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check deducing bounds from const, 8",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, ~0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_0),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"check deducing bounds from const, 9",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "R0 tried to subtract pointer from scalar",
},
{
"check deducing bounds from const, 10",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 0),
/* Marks reg as unknown. */
BPF_ALU64_IMM(BPF_NEG, BPF_REG_0, 0),
BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
},
{
"bpf_get_stack return R0 within range",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)),
BPF_MOV64_IMM(BPF_REG_4, 256),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
BPF_JMP_REG(BPF_JSLT, BPF_REG_1, BPF_REG_8, 16),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
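/* The LSH-32/ARSH-32 pairs above are the classic trick for sign-extending
 * the 32-bit return value of bpf_get_stack() into a 64-bit register, so a
 * signed compare (BPF_JSLT) can prove R8 is a valid length before it is
 * used to re-index the buffer for the second call.
 */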
{
"unreachable",
.insns = {
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"unreachable2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_EXIT_INSN(),
},
.errstr = "unreachable",
.result = REJECT,
},
{
"out of range jump",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"out of range jump2",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -2),
BPF_EXIT_INSN(),
},
.errstr = "jump out of range",
.result = REJECT,
},
{
"loop (back-edge)",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, -1),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"loop2 (back-edge)",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JA, 0, 0, -4),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
{
"conditional loop",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, -3),
BPF_EXIT_INSN(),
},
.errstr = "back-edge",
.result = REJECT,
},
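/* At the time of this split the verifier rejected any back-edge in the
 * control-flow graph, so every loop — unconditional or conditional — fails
 * with "back-edge" (bounded-loop support arrived in later kernels).
 */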
{
"bpf_exit with invalid return code. test1",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_EXIT_INSN(),
},
.errstr = "R0 has value (0x0; 0xffffffff)",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test2",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 3),
BPF_EXIT_INSN(),
},
.errstr = "R0 has value (0x0; 0x3)",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test4",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test5",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.errstr = "R0 has value (0x2; 0x0)",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test6",
.insns = {
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr = "R0 is not a known value (ctx)",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
{
"bpf_exit with invalid return code. test7",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 4),
BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.errstr = "R0 has unknown scalar value",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_CGROUP_SOCK,
},
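/* BPF_PROG_TYPE_CGROUP_SOCK programs must return 0 or 1, and the verifier
 * checks this statically at exit. The "(0x0; 0xffffffff)" and "(0x0; 0x3)"
 * strings are the tracked (value; mask) pair for R0: any mask bit above
 * bit 0 means the return value cannot be proven to be 0 or 1.
 */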
{
"direct packet read test#1 for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, data)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, data_end)),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, len)),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, pkt_type)),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, queue_mapping)),
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
offsetof(struct __sk_buff, protocol)),
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, vlan_present)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "invalid bpf_context access off=76 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"direct packet read test#2 for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, vlan_tci)),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, vlan_proto)),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, priority)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
offsetof(struct __sk_buff, priority)),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, ingress_ifindex)),
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
offsetof(struct __sk_buff, tc_index)),
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, hash)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"direct packet read test#3 for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, cb[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, cb[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, cb[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, cb[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
offsetof(struct __sk_buff, cb[4])),
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, napi_id)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_4,
offsetof(struct __sk_buff, cb[0])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_5,
offsetof(struct __sk_buff, cb[1])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_6,
offsetof(struct __sk_buff, cb[2])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_7,
offsetof(struct __sk_buff, cb[3])),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_8,
offsetof(struct __sk_buff, cb[4])),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"direct packet read test#4 for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct __sk_buff, family)),
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip4)),
BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_1,
offsetof(struct __sk_buff, local_ip4)),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_5, BPF_REG_1,
offsetof(struct __sk_buff, remote_ip6[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_1,
offsetof(struct __sk_buff, local_ip6[3])),
BPF_LDX_MEM(BPF_W, BPF_REG_7, BPF_REG_1,
offsetof(struct __sk_buff, remote_port)),
BPF_LDX_MEM(BPF_W, BPF_REG_8, BPF_REG_1,
offsetof(struct __sk_buff, local_port)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid access of tc_classid for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tc_classid)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid access of data_meta for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, data_meta)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid access of flow_keys for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, flow_keys)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid write access to napi_id for CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_9, BPF_REG_1,
offsetof(struct __sk_buff, napi_id)),
BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_9,
offsetof(struct __sk_buff, napi_id)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "invalid bpf_context access",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"write tstamp from CGROUP_SKB",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0,
offsetof(struct __sk_buff, tstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.result_unpriv = REJECT,
.errstr_unpriv = "invalid bpf_context access off=152 size=8",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"read tstamp from CGROUP_SKB",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, tstamp)),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"valid cgroup storage access",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
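/* bpf_get_local_storage() takes the storage map in R1 and a flags word in
 * R2. The invalid variants below feed it a hash map, a bogus fd, an
 * out-of-bounds offset into the 64-byte value, and non-zero flags, each of
 * which the verifier must reject with the quoted errstr.
 */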
{
"invalid cgroup storage access 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 1 },
.result = REJECT,
.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid cgroup storage access 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "fd 1 is not pointing to valid bpf_map",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid cgroup storage access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=256 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid cgroup storage access 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid cgroup storage access 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 7),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "get_local_storage() doesn't support non-zero flags",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid cgroup storage access 6",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "get_local_storage() doesn't support non-zero flags",
.errstr_unpriv = "R2 leaks addr into helper function",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"valid per-cpu cgroup storage access",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_percpu_cgroup_storage = { 1 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid per-cpu cgroup storage access 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_map_hash_8b = { 1 },
.result = REJECT,
.errstr = "cannot pass map_type 1 into func bpf_get_local_storage",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid per-cpu cgroup storage access 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = REJECT,
.errstr = "fd 1 is not pointing to valid bpf_map",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid per-cpu cgroup storage access 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 256),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.fixup_percpu_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=256 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid per-cpu cgroup storage access 4",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, -2),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 1),
BPF_EXIT_INSN(),
},
.fixup_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "invalid access to map value, value_size=64 off=-2 size=4",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"invalid per-cpu cgroup storage access 5",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 7),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_percpu_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "get_local_storage() doesn't support non-zero flags",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"invalid per-cpu cgroup storage access 6",
.insns = {
BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_local_storage),
BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.fixup_percpu_cgroup_storage = { 1 },
.result = REJECT,
.errstr = "get_local_storage() doesn't support non-zero flags",
.errstr_unpriv = "R2 leaks addr into helper function",
.prog_type = BPF_PROG_TYPE_CGROUP_SKB,
},
{
"constant register |= constant should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 13),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_ALU64_IMM(BPF_OR, BPF_REG_2, 24),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should keep constant type",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 13),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"constant register |= constant register should not bypass stack boundary checks",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -48),
BPF_MOV64_IMM(BPF_REG_2, 34),
BPF_MOV64_IMM(BPF_REG_4, 24),
BPF_ALU64_REG(BPF_OR, BPF_REG_2, BPF_REG_4),
BPF_MOV64_IMM(BPF_REG_3, 0),
BPF_EMIT_CALL(BPF_FUNC_probe_read),
BPF_EXIT_INSN(),
},
.errstr = "invalid stack type R1 off=-48 access_size=58",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
{
"context stores via ST",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_ST_MEM(BPF_DW, BPF_REG_1, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_ST stores into R1 ctx is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"context stores via XADD",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_RAW_INSN(BPF_STX | BPF_XADD | BPF_W, BPF_REG_1,
BPF_REG_0, offsetof(struct __sk_buff, mark), 0),
BPF_EXIT_INSN(),
},
.errstr = "BPF_XADD stores into R1 ctx is not allowed",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"arithmetic ops make PTR_TO_CTX unusable",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1,
offsetof(struct __sk_buff, data) -
offsetof(struct __sk_buff, mark)),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct __sk_buff, mark)),
BPF_EXIT_INSN(),
},
.errstr = "dereference of modified ctx ptr",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
},
{
"pass unmodified ctx pointer to helper",
.insns = {
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_update),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
},
{
"pass modified ctx pointer to helper, 1",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_update),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "dereference of modified ctx ptr",
},
{
"pass modified ctx pointer to helper, 2",
.insns = {
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -612),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_get_socket_cookie),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result_unpriv = REJECT,
.result = REJECT,
.errstr_unpriv = "dereference of modified ctx ptr",
.errstr = "dereference of modified ctx ptr",
},
{
"pass modified ctx pointer to helper, 3",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, 0),
BPF_ALU64_IMM(BPF_AND, BPF_REG_3, 4),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_3),
BPF_MOV64_IMM(BPF_REG_2, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0,
BPF_FUNC_csum_update),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = REJECT,
.errstr = "variable ctx access var_off=(0x0; 0x4)",
},
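/* Helpers that expect a context argument require the register to be the
 * unmodified ctx pointer: both fixed offsets ("dereference of modified ctx
 * ptr") and variable offsets ("variable ctx access") are rejected.
 */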
{
"valid access family in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, family)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"valid access remote_ip4 in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"valid access local_ip4 in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_ip4)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"valid access remote_port in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"valid access local_port in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_port)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"valid access remote_ip6 in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, remote_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access local_ip6 in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_ip6[0])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_ip6[1])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_ip6[2])),
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, local_ip6[3])),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_SKB,
},
{
"valid access size in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
offsetof(struct sk_msg_md, size)),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"invalid 64B read of size in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, size)),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"invalid read past end of SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, size) + 4),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"invalid read offset in SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, family) + 1),
BPF_EXIT_INSN(),
},
.errstr = "invalid bpf_context access",
.result = REJECT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
},
{
"direct packet read for SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_B, BPF_REG_0, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"direct packet write for SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 1),
BPF_STX_MEM(BPF_B, BPF_REG_2, BPF_REG_2, 0),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
{
"overlapping checks for direct packet access SK_MSG",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_2, BPF_REG_1,
offsetof(struct sk_msg_md, data)),
BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_1,
offsetof(struct sk_msg_md, data_end)),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8),
BPF_JMP_REG(BPF_JGT, BPF_REG_0, BPF_REG_3, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_2),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 6),
BPF_JMP_REG(BPF_JGT, BPF_REG_1, BPF_REG_3, 1),
BPF_LDX_MEM(BPF_H, BPF_REG_0, BPF_REG_2, 6),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_SK_MSG,
},
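/* Direct packet access for SK_MSG follows the usual contract: load data and
 * data_end from the context, and prove data + len <= data_end before every
 * access. The overlapping-checks variant confirms the tighter first check
 * (data + 8) already covers the 2-byte load at offset 6.
 */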
{
"dead code: start",
.insns = {
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_LDX_MEM(BPF_B, BPF_REG_8, BPF_REG_9, 0),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, -4),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 0, 1),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: mid 2",
.insns = {
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 4),
BPF_JMP_IMM(BPF_JSET, BPF_REG_0, 1, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 1,
},
{
"dead code: end 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: end 3",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 10, 1),
BPF_JMP_IMM(BPF_JA, 0, 0, 1),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_JMP_IMM(BPF_JA, 0, 0, -5),
},
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + func",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: tail of main + two functions",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_0, 8, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: function in the middle and mid of another func",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 7),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 3),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 12),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 7),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 7, 1),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, -5),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 7,
},
{
"dead code: middle of main before call",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 2, 1),
BPF_MOV64_IMM(BPF_REG_1, 5),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
{
"dead code: start of a function",
.insns = {
BPF_MOV64_IMM(BPF_REG_1, 2),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 1, 0, 1),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JA, 0, 0, 0),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.errstr_unpriv = "function calls to other bpf functions are allowed for root only",
.result_unpriv = REJECT,
.result = ACCEPT,
.retval = 2,
},
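/* The dead-code group checks that unreachable instructions — including whole
 * unused subprograms — affect neither the verdict nor the return value. The
 * uniform errstr_unpriv on the multi-function variants exists because
 * bpf-to-bpf calls were root-only at the time.
 */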
{
"direct stack access with 32-bit wraparound. test1",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x7fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "fp pointer and 2147483647",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test2",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x3fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "fp pointer and 1073741823",
.result = REJECT
},
{
"direct stack access with 32-bit wraparound. test3",
.insns = {
BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0x1fffffff),
BPF_MOV32_IMM(BPF_REG_0, 0),
BPF_STX_MEM(BPF_B, BPF_REG_1, BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.errstr = "fp pointer offset 1073741822",
.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
.result = REJECT
},
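/* The three wraparound tests add two large immediates to the frame pointer;
 * the verifier must track the sums in full 64 bits, notice the resulting
 * offsets (2147483647, 1073741823, 1073741822) lie far outside the stack,
 * and reject rather than let 32-bit arithmetic wrap them back in range.
 */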
{
"DIV32 by 0, zero check 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"DIV32 by 0, zero check 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"DIV64 by 0, zero check",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"MOD32 by 0, zero check 1",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"MOD32 by 0, zero check 2",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"MOD64 by 0, zero check",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_2, 1),
BPF_ALU64_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 42,
},
{
"DIV32 by 0, zero check ok, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 2),
BPF_MOV32_IMM(BPF_REG_2, 16),
BPF_ALU32_REG(BPF_DIV, BPF_REG_2, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 8,
},
{
"DIV32 by 0, zero check 1, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"DIV32 by 0, zero check 2, cls",
.insns = {
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU32_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"DIV64 by 0, zero check, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU64_REG(BPF_DIV, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 0,
},
{
"MOD32 by 0, zero check ok, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_0, 42),
BPF_MOV32_IMM(BPF_REG_1, 3),
BPF_MOV32_IMM(BPF_REG_2, 5),
BPF_ALU32_REG(BPF_MOD, BPF_REG_2, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_0, BPF_REG_2),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
},
{
"MOD32 by 0, zero check 1, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"MOD32 by 0, zero check 2, cls",
.insns = {
BPF_LD_IMM64(BPF_REG_1, 0xffffffff00000000LL),
BPF_MOV32_IMM(BPF_REG_0, 1),
BPF_ALU32_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 1,
},
{
"MOD64 by 0, zero check 1, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_0, 2),
BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = 2,
},
{
"MOD64 by 0, zero check 2, cls",
.insns = {
BPF_MOV32_IMM(BPF_REG_1, 0),
BPF_MOV32_IMM(BPF_REG_0, -1),
BPF_ALU64_REG(BPF_MOD, BPF_REG_0, BPF_REG_1),
BPF_EXIT_INSN(),
},
.prog_type = BPF_PROG_TYPE_SCHED_CLS,
.result = ACCEPT,
.retval = -1,
},
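/* Runtime semantics under test: eBPF defines division by zero to yield 0
 * and modulo by zero to leave the destination unchanged, which is why the
 * "cls" variants are ACCEPTed and return 0 for DIV but the original dst
 * value (1, 2, -1) for MOD instead of faulting.
 */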
{
"jit: lsh, rsh, arsh by 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_MOV64_IMM(BPF_REG_1, 0xff),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 1),
BPF_ALU32_IMM(BPF_LSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x3fc, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 1),
BPF_ALU32_IMM(BPF_RSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0xff, 1),
BPF_EXIT_INSN(),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 1),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0x7f, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: mov32 for ldimm64, 1",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_LD_IMM64(BPF_REG_1, 0xfeffffffffffffffULL),
BPF_ALU64_IMM(BPF_RSH, BPF_REG_1, 32),
BPF_LD_IMM64(BPF_REG_2, 0xfeffffffULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: mov32 for ldimm64, 2",
.insns = {
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_LD_IMM64(BPF_REG_1, 0x1ffffffffULL),
BPF_LD_IMM64(BPF_REG_2, 0xffffffffULL),
BPF_JMP_REG(BPF_JEQ, BPF_REG_1, BPF_REG_2, 1),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
{
"jit: various mul tests",
.insns = {
BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
BPF_LD_IMM64(BPF_REG_1, 0xefefefULL),
BPF_ALU64_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU64_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV32_REG(BPF_REG_2, BPF_REG_2),
BPF_LD_IMM64(BPF_REG_0, 0xfefefeULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_0, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_0, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_3, 0xfefefeULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_3, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_3, BPF_REG_2, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_LD_IMM64(BPF_REG_0, 0x952a7bbcULL),
BPF_LD_IMM64(BPF_REG_1, 0xfefefeULL),
BPF_LD_IMM64(BPF_REG_2, 0xeeff0d413122ULL),
BPF_ALU32_REG(BPF_MUL, BPF_REG_2, BPF_REG_1),
BPF_JMP_REG(BPF_JEQ, BPF_REG_2, BPF_REG_0, 2),
BPF_MOV64_IMM(BPF_REG_0, 1),
BPF_EXIT_INSN(),
BPF_MOV64_IMM(BPF_REG_0, 2),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.retval = 2,
},
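/* The BPF_MOV32_REG(BPF_REG_2, BPF_REG_2) above truncates the expected
 * product to its low 32 bits so the subsequent BPF_ALU32 multiplies can be
 * compared against it — 32-bit MUL must discard the upper half, and a JIT
 * that keeps it would fail this test with retval 1.
 */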