Commit 965a3f91 authored by Eduard Zingerman's avatar Eduard Zingerman Committed by Alexei Starovoitov

selftests/bpf: verifier/bpf_get_stack converted to inline assembly

Test verifier/bpf_get_stack automatically converted to use inline assembly.
Signed-off-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20230421174234.2391278-4-eddyz87@gmail.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent c9233655
......@@ -10,6 +10,7 @@
#include "verifier_bounds_deduction.skel.h"
#include "verifier_bounds_deduction_non_const.skel.h"
#include "verifier_bounds_mix_sign_unsign.skel.h"
#include "verifier_bpf_get_stack.skel.h"
#include "verifier_cfg.skel.h"
#include "verifier_cgroup_inv_retcode.skel.h"
#include "verifier_cgroup_skb.skel.h"
......@@ -87,6 +88,7 @@ void test_verifier_bounds(void) { RUN(verifier_bounds); }
/* One prog_tests entry point per converted verifier skeleton; RUN()
 * presumably opens/loads the skeleton and runs its test programs —
 * TODO confirm against the RUN macro definition earlier in this file.
 * test_verifier_bpf_get_stack is the entry added by this patch. */
void test_verifier_bounds_deduction(void) { RUN(verifier_bounds_deduction); }
void test_verifier_bounds_deduction_non_const(void) { RUN(verifier_bounds_deduction_non_const); }
void test_verifier_bounds_mix_sign_unsign(void) { RUN(verifier_bounds_mix_sign_unsign); }
void test_verifier_bpf_get_stack(void) { RUN(verifier_bpf_get_stack); }
void test_verifier_cfg(void) { RUN(verifier_cfg); }
void test_verifier_cgroup_inv_retcode(void) { RUN(verifier_cgroup_inv_retcode); }
void test_verifier_cgroup_skb(void) { RUN(verifier_cgroup_skb); }
......
// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/bpf_get_stack.c */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
/* Shared value type for both test maps: 4 + 11*4 = 48 bytes,
 * matching the "_48b" suffix in the map names. */
#define MAX_ENTRIES 11
struct test_val {
unsigned int index;
int foo[MAX_ENTRIES];
};
/* Single-entry ARRAY map with 48-byte values (legacy fixup_map_array_48b). */
struct {
__uint(type, BPF_MAP_TYPE_ARRAY);
__uint(max_entries, 1);
__type(key, int);
__type(value, struct test_val);
} map_array_48b SEC(".maps");
/* Single-entry HASH map with 48-byte values (legacy fixup_map_hash_48b). */
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__uint(max_entries, 1);
__type(key, long long);
__type(value, struct test_val);
} map_hash_48b SEC(".maps");
/*
 * Inline-asm conversion of the legacy test
 * "bpf_get_stack return R0 within range".
 *
 * Looks up an element of map_hash_48b, then calls bpf_get_stack() with a
 * buffer of sizeof(struct test_val)/2 bytes.  The (sign-extended, bounds
 * checked) return value r8 is used to derive the buffer pointer and size
 * for a second bpf_get_stack() call; the verifier must accept this
 * because R0 of the first call is known to be within [0, buf size].
 * __success: the program must pass verification.
 *
 * NOTE(review): instruction order is the test itself — do not reorder.
 */
SEC("tracepoint")
__description("bpf_get_stack return R0 within range")
__success
__naked void stack_return_r0_within_range(void)
{
asm volatile (" \
r6 = r1; \
r1 = 0; \
*(u64*)(r10 - 8) = r1; \
r2 = r10; \
r2 += -8; \
r1 = %[map_hash_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 == 0 goto l0_%=; \
r7 = r0; \
r9 = %[__imm_0]; \
r1 = r6; \
r2 = r7; \
r3 = %[__imm_0]; \
r4 = 256; \
call %[bpf_get_stack]; \
r1 = 0; \
r8 = r0; \
r8 <<= 32; \
r8 s>>= 32; \
if r1 s> r8 goto l0_%=; \
r9 -= r8; \
r2 = r7; \
r2 += r8; \
r1 = r9; \
r1 <<= 32; \
r1 s>>= 32; \
r3 = r2; \
r3 += r1; \
r1 = r7; \
r5 = %[__imm_0]; \
r1 += r5; \
if r3 >= r1 goto l0_%=; \
r1 = r6; \
r3 = r9; \
r4 = 0; \
call %[bpf_get_stack]; \
l0_%=: exit; \
" :
: __imm(bpf_get_stack),
__imm(bpf_map_lookup_elem),
__imm_addr(map_hash_48b),
__imm_const(__imm_0, sizeof(struct test_val) / 2)
: __clobber_all);
}
/*
 * Inline-asm conversion of the legacy test
 * "bpf_get_task_stack return R0 range is refined".
 *
 * iter/task program: loads ctx->meta->seq (r6) and ctx->task (r7) from
 * the iterator context, looks up a map_array_48b element to use as a
 * 48-byte buffer, and calls bpf_get_task_stack(task, buf, 48, 0).
 * After the "r0 s> 0" check the verifier must have refined R0's range
 * so that bpf_seq_write(seq, buf, r0) is accepted with r0 as the length.
 * __success: the program must pass verification.
 *
 * NOTE(review): instruction order is the test itself — do not reorder.
 */
SEC("iter/task")
__description("bpf_get_task_stack return R0 range is refined")
__success
__naked void return_r0_range_is_refined(void)
{
asm volatile (" \
r6 = *(u64*)(r1 + 0); \
r6 = *(u64*)(r6 + 0); /* ctx->meta->seq */\
r7 = *(u64*)(r1 + 8); /* ctx->task */\
r1 = %[map_array_48b] ll; /* fixup_map_array_48b */\
r2 = 0; \
*(u64*)(r10 - 8) = r2; \
r2 = r10; \
r2 += -8; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r0 = 0; \
exit; \
l0_%=: if r7 != 0 goto l1_%=; \
r0 = 0; \
exit; \
l1_%=: r1 = r7; \
r2 = r0; \
r9 = r0; /* keep buf for seq_write */\
r3 = 48; \
r4 = 0; \
call %[bpf_get_task_stack]; \
if r0 s> 0 goto l2_%=; \
r0 = 0; \
exit; \
l2_%=: r1 = r6; \
r2 = r9; \
r3 = r0; \
call %[bpf_seq_write]; \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_task_stack),
__imm(bpf_map_lookup_elem),
__imm(bpf_seq_write),
__imm_addr(map_array_48b)
: __clobber_all);
}
char _license[] SEC("license") = "GPL";
/* Legacy macro-encoded form of "bpf_get_stack return R0 within range",
 * removed by this patch in favor of the inline-asm version.  Same logic:
 * hash-map lookup, bpf_get_stack() into a sizeof(test_val)/2 buffer,
 * sign-extend and bounds-check R0, then a second bpf_get_stack() using
 * the derived pointer/size.  Expected verifier result: ACCEPT. */
{
"bpf_get_stack return R0 within range",
.insns = {
BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_LD_MAP_FD(BPF_REG_1, 0),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 28),
BPF_MOV64_REG(BPF_REG_7, BPF_REG_0),
BPF_MOV64_IMM(BPF_REG_9, sizeof(struct test_val)/2),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_3, sizeof(struct test_val)/2),
BPF_MOV64_IMM(BPF_REG_4, 256),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_MOV64_IMM(BPF_REG_1, 0),
BPF_MOV64_REG(BPF_REG_8, BPF_REG_0),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_8, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_8, 32),
BPF_JMP_REG(BPF_JSGT, BPF_REG_1, BPF_REG_8, 16),
BPF_ALU64_REG(BPF_SUB, BPF_REG_9, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_7),
BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_8),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_9),
BPF_ALU64_IMM(BPF_LSH, BPF_REG_1, 32),
BPF_ALU64_IMM(BPF_ARSH, BPF_REG_1, 32),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_1),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_IMM(BPF_REG_5, sizeof(struct test_val)/2),
BPF_ALU64_REG(BPF_ADD, BPF_REG_1, BPF_REG_5),
BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_1, 4),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_9),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_EMIT_CALL(BPF_FUNC_get_stack),
BPF_EXIT_INSN(),
},
.fixup_map_hash_48b = { 4 },
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACEPOINT,
},
/* Legacy macro-encoded form of "bpf_get_task_stack return R0 range is
 * refined", removed by this patch in favor of the inline-asm version.
 * Loads seq/task from the iter ctx, uses an array-map element as a
 * 48-byte buffer for bpf_get_task_stack(), then bpf_seq_write() with
 * R0 as length.  Expected verifier result: ACCEPT; load-only (runs = -1). */
{
"bpf_get_task_stack return R0 range is refined",
.insns = {
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_1, 0),
BPF_LDX_MEM(BPF_DW, BPF_REG_6, BPF_REG_6, 0), // ctx->meta->seq
BPF_LDX_MEM(BPF_DW, BPF_REG_7, BPF_REG_1, 8), // ctx->task
BPF_LD_MAP_FD(BPF_REG_1, 0), // fixup_map_array_48b
BPF_ST_MEM(BPF_DW, BPF_REG_10, -8, 0),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -8),
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JNE, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_JMP_IMM(BPF_JNE, BPF_REG_7, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_7),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_0),
BPF_MOV64_REG(BPF_REG_9, BPF_REG_0), // keep buf for seq_write
BPF_MOV64_IMM(BPF_REG_3, 48),
BPF_MOV64_IMM(BPF_REG_4, 0),
BPF_EMIT_CALL(BPF_FUNC_get_task_stack),
BPF_JMP_IMM(BPF_JSGT, BPF_REG_0, 0, 2),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
BPF_MOV64_REG(BPF_REG_1, BPF_REG_6),
BPF_MOV64_REG(BPF_REG_2, BPF_REG_9),
BPF_MOV64_REG(BPF_REG_3, BPF_REG_0),
BPF_EMIT_CALL(BPF_FUNC_seq_write),
BPF_MOV64_IMM(BPF_REG_0, 0),
BPF_EXIT_INSN(),
},
.result = ACCEPT,
.prog_type = BPF_PROG_TYPE_TRACING,
.expected_attach_type = BPF_TRACE_ITER,
.kfunc = "task",
.runs = -1, // Don't run, just load
.fixup_map_array_48b = { 3 },
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment