Commit e94fac38 authored by Alexei Starovoitov

Merge branch 'bpf: Add helpers to access traced function arguments'

Jiri Olsa says:

====================
Add new helpers to access traced function arguments that
came out of the trampoline batch changes [1].

  Get n-th argument of the traced function:
    long bpf_get_func_arg(void *ctx, u32 n, u64 *value)

  Get return value of the traced function:
    long bpf_get_func_ret(void *ctx, u64 *value)

  Get the argument count of the traced function:
    long bpf_get_func_arg_cnt(void *ctx)

v2 changes:
  - added acks
  - updated stack diagram
  - return -EOPNOTSUPP instead of -EINVAL in bpf_get_func_ret
  - removed gpl_only for all helpers
  - added verifier fix to allow proper arguments checks,
    Andrii asked for checking also 'int *b' argument in
    bpf_modify_return_test programs and it turned out that it's currently
    not supported by verifier - we can't read argument that is int pointer,
    so I had to add verifier change to allow that + adding verifier selftest
  - checking all arguments in bpf_modify_return_test test programs
  - moved helpers proto gets in tracing_prog_func_proto with attach type check

thanks,
jirka

[1] https://lore.kernel.org/bpf/20211118112455.475349-1-jolsa@kernel.org/
====================
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parents 4b443bc1 006004b7
...@@ -1941,7 +1941,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -1941,7 +1941,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
void *orig_call) void *orig_call)
{ {
int ret, i, nr_args = m->nr_args; int ret, i, nr_args = m->nr_args;
int stack_size = nr_args * 8; int regs_off, ip_off, args_off, stack_size = nr_args * 8;
struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY]; struct bpf_tramp_progs *fentry = &tprogs[BPF_TRAMP_FENTRY];
struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT]; struct bpf_tramp_progs *fexit = &tprogs[BPF_TRAMP_FEXIT];
struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN]; struct bpf_tramp_progs *fmod_ret = &tprogs[BPF_TRAMP_MODIFY_RETURN];
...@@ -1956,14 +1956,39 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -1956,14 +1956,39 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (!is_valid_bpf_tramp_flags(flags)) if (!is_valid_bpf_tramp_flags(flags))
return -EINVAL; return -EINVAL;
/* Generated trampoline stack layout:
*
* RBP + 8 [ return address ]
* RBP + 0 [ RBP ]
*
* RBP - 8 [ return value ] BPF_TRAMP_F_CALL_ORIG or
* BPF_TRAMP_F_RET_FENTRY_RET flags
*
* [ reg_argN ] always
* [ ... ]
* RBP - regs_off [ reg_arg1 ] program's ctx pointer
*
* RBP - args_off [ args count ] always
*
* RBP - ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
*/
/* room for return value of orig_call or fentry prog */ /* room for return value of orig_call or fentry prog */
save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET); save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
if (save_ret) if (save_ret)
stack_size += 8; stack_size += 8;
regs_off = stack_size;
/* args count */
stack_size += 8;
args_off = stack_size;
if (flags & BPF_TRAMP_F_IP_ARG) if (flags & BPF_TRAMP_F_IP_ARG)
stack_size += 8; /* room for IP address argument */ stack_size += 8; /* room for IP address argument */
ip_off = stack_size;
if (flags & BPF_TRAMP_F_SKIP_FRAME) if (flags & BPF_TRAMP_F_SKIP_FRAME)
/* skip patched call instruction and point orig_call to actual /* skip patched call instruction and point orig_call to actual
* body of the kernel function. * body of the kernel function.
...@@ -1977,23 +2002,25 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -1977,23 +2002,25 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */ EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
EMIT1(0x53); /* push rbx */ EMIT1(0x53); /* push rbx */
/* Store number of arguments of the traced function:
* mov rax, nr_args
* mov QWORD PTR [rbp - args_off], rax
*/
emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
if (flags & BPF_TRAMP_F_IP_ARG) { if (flags & BPF_TRAMP_F_IP_ARG) {
/* Store IP address of the traced function: /* Store IP address of the traced function:
* mov rax, QWORD PTR [rbp + 8] * mov rax, QWORD PTR [rbp + 8]
* sub rax, X86_PATCH_SIZE * sub rax, X86_PATCH_SIZE
* mov QWORD PTR [rbp - stack_size], rax * mov QWORD PTR [rbp - ip_off], rax
*/ */
emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8); emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE); EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -stack_size); emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
/* Continue with stack_size for regs storage, stack will
* be correctly restored with 'leave' instruction.
*/
stack_size -= 8;
} }
save_regs(m, &prog, nr_args, stack_size); save_regs(m, &prog, nr_args, regs_off);
if (flags & BPF_TRAMP_F_CALL_ORIG) { if (flags & BPF_TRAMP_F_CALL_ORIG) {
/* arg1: mov rdi, im */ /* arg1: mov rdi, im */
...@@ -2005,7 +2032,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -2005,7 +2032,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
} }
if (fentry->nr_progs) if (fentry->nr_progs)
if (invoke_bpf(m, &prog, fentry, stack_size, if (invoke_bpf(m, &prog, fentry, regs_off,
flags & BPF_TRAMP_F_RET_FENTRY_RET)) flags & BPF_TRAMP_F_RET_FENTRY_RET))
return -EINVAL; return -EINVAL;
...@@ -2015,7 +2042,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -2015,7 +2042,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
if (!branches) if (!branches)
return -ENOMEM; return -ENOMEM;
if (invoke_bpf_mod_ret(m, &prog, fmod_ret, stack_size, if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
branches)) { branches)) {
ret = -EINVAL; ret = -EINVAL;
goto cleanup; goto cleanup;
...@@ -2023,7 +2050,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -2023,7 +2050,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
} }
if (flags & BPF_TRAMP_F_CALL_ORIG) { if (flags & BPF_TRAMP_F_CALL_ORIG) {
restore_regs(m, &prog, nr_args, stack_size); restore_regs(m, &prog, nr_args, regs_off);
/* call original function */ /* call original function */
if (emit_call(&prog, orig_call, prog)) { if (emit_call(&prog, orig_call, prog)) {
...@@ -2053,13 +2080,13 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i ...@@ -2053,13 +2080,13 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
} }
if (fexit->nr_progs) if (fexit->nr_progs)
if (invoke_bpf(m, &prog, fexit, stack_size, false)) { if (invoke_bpf(m, &prog, fexit, regs_off, false)) {
ret = -EINVAL; ret = -EINVAL;
goto cleanup; goto cleanup;
} }
if (flags & BPF_TRAMP_F_RESTORE_REGS) if (flags & BPF_TRAMP_F_RESTORE_REGS)
restore_regs(m, &prog, nr_args, stack_size); restore_regs(m, &prog, nr_args, regs_off);
/* This needs to be done regardless. If there were fmod_ret programs, /* This needs to be done regardless. If there were fmod_ret programs,
* the return value is only updated on the stack and still needs to be * the return value is only updated on the stack and still needs to be
......
...@@ -777,6 +777,7 @@ void bpf_ksym_add(struct bpf_ksym *ksym); ...@@ -777,6 +777,7 @@ void bpf_ksym_add(struct bpf_ksym *ksym);
void bpf_ksym_del(struct bpf_ksym *ksym); void bpf_ksym_del(struct bpf_ksym *ksym);
int bpf_jit_charge_modmem(u32 pages); int bpf_jit_charge_modmem(u32 pages);
void bpf_jit_uncharge_modmem(u32 pages); void bpf_jit_uncharge_modmem(u32 pages);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog);
#else #else
static inline int bpf_trampoline_link_prog(struct bpf_prog *prog, static inline int bpf_trampoline_link_prog(struct bpf_prog *prog,
struct bpf_trampoline *tr) struct bpf_trampoline *tr)
...@@ -805,6 +806,10 @@ static inline bool is_bpf_image_address(unsigned long address) ...@@ -805,6 +806,10 @@ static inline bool is_bpf_image_address(unsigned long address)
{ {
return false; return false;
} }
static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
return false;
}
#endif #endif
struct bpf_func_info_aux { struct bpf_func_info_aux {
......
...@@ -4993,6 +4993,31 @@ union bpf_attr { ...@@ -4993,6 +4993,31 @@ union bpf_attr {
* An integer less than, equal to, or greater than zero * An integer less than, equal to, or greater than zero
* if the first **s1_sz** bytes of **s1** is found to be * if the first **s1_sz** bytes of **s1** is found to be
* less than, to match, or be greater than **s2**. * less than, to match, or be greater than **s2**.
*
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
* Description
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
* returned in **value**.
*
* Return
* 0 on success.
* **-EINVAL** if n >= arguments count of traced function.
*
* long bpf_get_func_ret(void *ctx, u64 *value)
* Description
* Get return value of the traced function (for tracing programs)
* in **value**.
*
* Return
* 0 on success.
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
* Get number of arguments of the traced function (for tracing programs).
*
* Return
* The number of arguments of the traced function.
*/ */
#define __BPF_FUNC_MAPPER(FN) \ #define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \ FN(unspec), \
...@@ -5178,6 +5203,9 @@ union bpf_attr { ...@@ -5178,6 +5203,9 @@ union bpf_attr {
FN(find_vma), \ FN(find_vma), \
FN(loop), \ FN(loop), \
FN(strncmp), \ FN(strncmp), \
FN(get_func_arg), \
FN(get_func_ret), \
FN(get_func_arg_cnt), \
/* */ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
...@@ -4826,7 +4826,7 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog) ...@@ -4826,7 +4826,7 @@ struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog)
return prog->aux->attach_btf; return prog->aux->attach_btf;
} }
static bool is_string_ptr(struct btf *btf, const struct btf_type *t) static bool is_int_ptr(struct btf *btf, const struct btf_type *t)
{ {
/* t comes in already as a pointer */ /* t comes in already as a pointer */
t = btf_type_by_id(btf, t->type); t = btf_type_by_id(btf, t->type);
...@@ -4835,8 +4835,7 @@ static bool is_string_ptr(struct btf *btf, const struct btf_type *t) ...@@ -4835,8 +4835,7 @@ static bool is_string_ptr(struct btf *btf, const struct btf_type *t)
if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST) if (BTF_INFO_KIND(t->info) == BTF_KIND_CONST)
t = btf_type_by_id(btf, t->type); t = btf_type_by_id(btf, t->type);
/* char, signed char, unsigned char */ return btf_type_is_int(t);
return btf_type_is_int(t) && t->size == 1;
} }
bool btf_ctx_access(int off, int size, enum bpf_access_type type, bool btf_ctx_access(int off, int size, enum bpf_access_type type,
...@@ -4957,7 +4956,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type, ...@@ -4957,7 +4956,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,
*/ */
return true; return true;
if (is_string_ptr(btf, t)) if (is_int_ptr(btf, t))
return true; return true;
/* this is a pointer to another type */ /* this is a pointer to another type */
......
...@@ -27,6 +27,14 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE]; ...@@ -27,6 +27,14 @@ static struct hlist_head trampoline_table[TRAMPOLINE_TABLE_SIZE];
/* serializes access to trampoline_table */ /* serializes access to trampoline_table */
static DEFINE_MUTEX(trampoline_mutex); static DEFINE_MUTEX(trampoline_mutex);
bool bpf_prog_has_trampoline(const struct bpf_prog *prog)
{
enum bpf_attach_type eatype = prog->expected_attach_type;
return eatype == BPF_TRACE_FENTRY || eatype == BPF_TRACE_FEXIT ||
eatype == BPF_MODIFY_RETURN;
}
void *bpf_jit_alloc_exec_page(void) void *bpf_jit_alloc_exec_page(void)
{ {
void *image; void *image;
......
...@@ -6395,13 +6395,11 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env, ...@@ -6395,13 +6395,11 @@ static int check_bpf_snprintf_call(struct bpf_verifier_env *env,
static int check_get_func_ip(struct bpf_verifier_env *env) static int check_get_func_ip(struct bpf_verifier_env *env)
{ {
enum bpf_attach_type eatype = env->prog->expected_attach_type;
enum bpf_prog_type type = resolve_prog_type(env->prog); enum bpf_prog_type type = resolve_prog_type(env->prog);
int func_id = BPF_FUNC_get_func_ip; int func_id = BPF_FUNC_get_func_ip;
if (type == BPF_PROG_TYPE_TRACING) { if (type == BPF_PROG_TYPE_TRACING) {
if (eatype != BPF_TRACE_FENTRY && eatype != BPF_TRACE_FEXIT && if (!bpf_prog_has_trampoline(env->prog)) {
eatype != BPF_MODIFY_RETURN) {
verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n", verbose(env, "func %s#%d supported only for fentry/fexit/fmod_ret programs\n",
func_id_name(func_id), func_id); func_id_name(func_id), func_id);
return -ENOTSUPP; return -ENOTSUPP;
...@@ -12997,6 +12995,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env, ...@@ -12997,6 +12995,7 @@ static int fixup_kfunc_call(struct bpf_verifier_env *env,
static int do_misc_fixups(struct bpf_verifier_env *env) static int do_misc_fixups(struct bpf_verifier_env *env)
{ {
struct bpf_prog *prog = env->prog; struct bpf_prog *prog = env->prog;
enum bpf_attach_type eatype = prog->expected_attach_type;
bool expect_blinding = bpf_jit_blinding_enabled(prog); bool expect_blinding = bpf_jit_blinding_enabled(prog);
enum bpf_prog_type prog_type = resolve_prog_type(prog); enum bpf_prog_type prog_type = resolve_prog_type(prog);
struct bpf_insn *insn = prog->insnsi; struct bpf_insn *insn = prog->insnsi;
...@@ -13367,11 +13366,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env) ...@@ -13367,11 +13366,79 @@ static int do_misc_fixups(struct bpf_verifier_env *env)
continue; continue;
} }
/* Implement bpf_get_func_arg inline. */
if (prog_type == BPF_PROG_TYPE_TRACING &&
insn->imm == BPF_FUNC_get_func_arg) {
/* Load nr_args from ctx - 8 */
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
insn_buf[1] = BPF_JMP32_REG(BPF_JGE, BPF_REG_2, BPF_REG_0, 6);
insn_buf[2] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 3);
insn_buf[3] = BPF_ALU64_REG(BPF_ADD, BPF_REG_2, BPF_REG_1);
insn_buf[4] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_2, 0);
insn_buf[5] = BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
insn_buf[6] = BPF_MOV64_IMM(BPF_REG_0, 0);
insn_buf[7] = BPF_JMP_A(1);
insn_buf[8] = BPF_MOV64_IMM(BPF_REG_0, -EINVAL);
cnt = 9;
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* Implement bpf_get_func_ret inline. */
if (prog_type == BPF_PROG_TYPE_TRACING &&
insn->imm == BPF_FUNC_get_func_ret) {
if (eatype == BPF_TRACE_FEXIT ||
eatype == BPF_MODIFY_RETURN) {
/* Load nr_args from ctx - 8 */
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
insn_buf[1] = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
insn_buf[2] = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
insn_buf[3] = BPF_LDX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, 0);
insn_buf[4] = BPF_STX_MEM(BPF_DW, BPF_REG_2, BPF_REG_3, 0);
insn_buf[5] = BPF_MOV64_IMM(BPF_REG_0, 0);
cnt = 6;
} else {
insn_buf[0] = BPF_MOV64_IMM(BPF_REG_0, -EOPNOTSUPP);
cnt = 1;
}
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, cnt);
if (!new_prog)
return -ENOMEM;
delta += cnt - 1;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* Implement get_func_arg_cnt inline. */
if (prog_type == BPF_PROG_TYPE_TRACING &&
insn->imm == BPF_FUNC_get_func_arg_cnt) {
/* Load nr_args from ctx - 8 */
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8);
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
if (!new_prog)
return -ENOMEM;
env->prog = prog = new_prog;
insn = new_prog->insnsi + i + delta;
continue;
}
/* Implement bpf_get_func_ip inline. */ /* Implement bpf_get_func_ip inline. */
if (prog_type == BPF_PROG_TYPE_TRACING && if (prog_type == BPF_PROG_TYPE_TRACING &&
insn->imm == BPF_FUNC_get_func_ip) { insn->imm == BPF_FUNC_get_func_ip) {
/* Load IP address from ctx - 8 */ /* Load IP address from ctx - 16 */
insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -8); insn_buf[0] = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_1, -16);
new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1); new_prog = bpf_patch_insn_data(env, i + delta, insn_buf, 1);
if (!new_prog) if (!new_prog)
......
...@@ -1012,7 +1012,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = { ...@@ -1012,7 +1012,7 @@ const struct bpf_func_proto bpf_snprintf_btf_proto = {
BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx) BPF_CALL_1(bpf_get_func_ip_tracing, void *, ctx)
{ {
/* This helper call is inlined by verifier. */ /* This helper call is inlined by verifier. */
return ((u64 *)ctx)[-1]; return ((u64 *)ctx)[-2];
} }
static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = { static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
...@@ -1091,6 +1091,53 @@ static const struct bpf_func_proto bpf_get_branch_snapshot_proto = { ...@@ -1091,6 +1091,53 @@ static const struct bpf_func_proto bpf_get_branch_snapshot_proto = {
.arg2_type = ARG_CONST_SIZE_OR_ZERO, .arg2_type = ARG_CONST_SIZE_OR_ZERO,
}; };
BPF_CALL_3(get_func_arg, void *, ctx, u32, n, u64 *, value)
{
/* This helper call is inlined by verifier. */
u64 nr_args = ((u64 *)ctx)[-1];
if ((u64) n >= nr_args)
return -EINVAL;
*value = ((u64 *)ctx)[n];
return 0;
}
static const struct bpf_func_proto bpf_get_func_arg_proto = {
.func = get_func_arg,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_ANYTHING,
.arg3_type = ARG_PTR_TO_LONG,
};
BPF_CALL_2(get_func_ret, void *, ctx, u64 *, value)
{
/* This helper call is inlined by verifier. */
u64 nr_args = ((u64 *)ctx)[-1];
*value = ((u64 *)ctx)[nr_args];
return 0;
}
static const struct bpf_func_proto bpf_get_func_ret_proto = {
.func = get_func_ret,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
.arg2_type = ARG_PTR_TO_LONG,
};
BPF_CALL_1(get_func_arg_cnt, void *, ctx)
{
/* This helper call is inlined by verifier. */
return ((u64 *)ctx)[-1];
}
static const struct bpf_func_proto bpf_get_func_arg_cnt_proto = {
.func = get_func_arg_cnt,
.ret_type = RET_INTEGER,
.arg1_type = ARG_PTR_TO_CTX,
};
static const struct bpf_func_proto * static const struct bpf_func_proto *
bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) bpf_tracing_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{ {
...@@ -1629,6 +1676,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) ...@@ -1629,6 +1676,12 @@ tracing_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
NULL; NULL;
case BPF_FUNC_d_path: case BPF_FUNC_d_path:
return &bpf_d_path_proto; return &bpf_d_path_proto;
case BPF_FUNC_get_func_arg:
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_proto : NULL;
case BPF_FUNC_get_func_ret:
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_ret_proto : NULL;
case BPF_FUNC_get_func_arg_cnt:
return bpf_prog_has_trampoline(prog) ? &bpf_get_func_arg_cnt_proto : NULL;
default: default:
fn = raw_tp_prog_func_proto(func_id, prog); fn = raw_tp_prog_func_proto(func_id, prog);
if (!fn && prog->expected_attach_type == BPF_TRACE_ITER) if (!fn && prog->expected_attach_type == BPF_TRACE_ITER)
......
...@@ -4993,6 +4993,31 @@ union bpf_attr { ...@@ -4993,6 +4993,31 @@ union bpf_attr {
* An integer less than, equal to, or greater than zero * An integer less than, equal to, or greater than zero
* if the first **s1_sz** bytes of **s1** is found to be * if the first **s1_sz** bytes of **s1** is found to be
* less than, to match, or be greater than **s2**. * less than, to match, or be greater than **s2**.
*
* long bpf_get_func_arg(void *ctx, u32 n, u64 *value)
* Description
* Get **n**-th argument (zero based) of the traced function (for tracing programs)
* returned in **value**.
*
* Return
* 0 on success.
* **-EINVAL** if n >= arguments count of traced function.
*
* long bpf_get_func_ret(void *ctx, u64 *value)
* Description
* Get return value of the traced function (for tracing programs)
* in **value**.
*
* Return
* 0 on success.
* **-EOPNOTSUPP** for tracing programs other than BPF_TRACE_FEXIT or BPF_MODIFY_RETURN.
*
* long bpf_get_func_arg_cnt(void *ctx)
* Description
* Get number of arguments of the traced function (for tracing programs).
*
* Return
* The number of arguments of the traced function.
*/ */
#define __BPF_FUNC_MAPPER(FN) \ #define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \ FN(unspec), \
...@@ -5178,6 +5203,9 @@ union bpf_attr { ...@@ -5178,6 +5203,9 @@ union bpf_attr {
FN(find_vma), \ FN(find_vma), \
FN(loop), \ FN(loop), \
FN(strncmp), \ FN(strncmp), \
FN(get_func_arg), \
FN(get_func_ret), \
FN(get_func_arg_cnt), \
/* */ /* */
/* integer value in 'imm' field of BPF_CALL instruction selects which helper /* integer value in 'imm' field of BPF_CALL instruction selects which helper
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "get_func_args_test.skel.h"
/* End-to-end check of the bpf_get_func_arg/ret/arg_cnt helpers:
 * load and attach the fentry/fexit/fmod_ret programs from the
 * skeleton, trigger the traced kernel test functions, then verify
 * each program recorded success in its result flag.
 */
void test_get_func_args_test(void)
{
	struct get_func_args_test *skel;
	__u32 duration = 0, retval;
	int prog_fd, err;

	skel = get_func_args_test__open_and_load();
	if (!ASSERT_OK_PTR(skel, "get_func_args_test__open_and_load"))
		return;

	err = get_func_args_test__attach(skel);
	if (!ASSERT_OK(err, "get_func_args_test__attach"))
		goto cleanup;

	/* Run the bpf_fentry_test* kernel functions, which fire the
	 * attached fentry/fexit programs.
	 */
	prog_fd = bpf_program__fd(skel->progs.test1);
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(retval, 0, "test_run");

	/* Run bpf_modify_return_test, which fires fmod_ret_test and
	 * fexit_test; fmod_ret_test overrides the return value to 1234.
	 */
	prog_fd = bpf_program__fd(skel->progs.fmod_ret_test);
	err = bpf_prog_test_run(prog_fd, 1, NULL, 0,
				NULL, NULL, &retval, &duration);
	ASSERT_OK(err, "test_run");
	ASSERT_EQ(retval, 1234, "test_run");

	ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
	ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");

cleanup:
	get_func_args_test__destroy(skel);
}
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <errno.h>
char _license[] SEC("license") = "GPL";
__u64 test1_result = 0;
SEC("fentry/bpf_fentry_test1")
int BPF_PROG(test1)
{
	__u64 nargs = bpf_get_func_arg_cnt(ctx);
	__u64 arg0 = 0, bogus = 0, retval = 0;
	__s64 rc;

	/* bpf_fentry_test1 takes exactly one argument */
	test1_result = nargs == 1;

	/* in-range argument index */
	rc = bpf_get_func_arg(ctx, 0, &arg0);
	/* Argument values must be read back through a cast to the traced
	 * function's parameter type: the trampoline saves each argument
	 * with a type-sized store, e.g. a 32-bit mov for 'int a':
	 *
	 *   mov %edi,-0x8(%rbp)
	 *
	 * so the upper 4 bytes of the stack slot are not zeroed.
	 */
	test1_result &= rc == 0 && ((int) arg0 == 1);

	/* out-of-range argument index */
	rc = bpf_get_func_arg(ctx, 1, &bogus);
	test1_result &= rc == -EINVAL;

	/* no return value is available in fentry */
	rc = bpf_get_func_ret(ctx, &retval);
	test1_result &= rc == -EOPNOTSUPP;
	return 0;
}
__u64 test2_result = 0;
SEC("fexit/bpf_fentry_test2")
int BPF_PROG(test2)
{
	__u64 nargs = bpf_get_func_arg_cnt(ctx);
	__u64 arg0 = 0, arg1 = 0, bogus = 0, retval = 0;
	__s64 rc;

	/* bpf_fentry_test2 takes two arguments */
	test2_result = nargs == 2;

	/* in-range argument indexes; first arg is 'int', read via cast */
	rc = bpf_get_func_arg(ctx, 0, &arg0);
	test2_result &= rc == 0 && (int) arg0 == 2;
	rc = bpf_get_func_arg(ctx, 1, &arg1);
	test2_result &= rc == 0 && arg1 == 3;

	/* out-of-range argument index */
	rc = bpf_get_func_arg(ctx, 2, &bogus);
	test2_result &= rc == -EINVAL;

	/* fexit can observe the traced function's return value */
	rc = bpf_get_func_ret(ctx, &retval);
	test2_result &= rc == 0 && retval == 5;
	return 0;
}
__u64 test3_result = 0;
SEC("fmod_ret/bpf_modify_return_test")
int BPF_PROG(fmod_ret_test, int _a, int *_b, int _ret)
{
	__u64 nargs = bpf_get_func_arg_cnt(ctx);
	__u64 arg0 = 0, arg1 = 0, bogus = 0, retval = 0;
	__s64 rc;

	/* bpf_modify_return_test takes two arguments */
	test3_result = nargs == 2;

	/* in-range argument indexes; compare against the typed ctx args */
	rc = bpf_get_func_arg(ctx, 0, &arg0);
	test3_result &= rc == 0 && ((int) arg0 == 1);
	rc = bpf_get_func_arg(ctx, 1, &arg1);
	test3_result &= rc == 0 && ((int *) arg1 == _b);

	/* out-of-range argument index */
	rc = bpf_get_func_arg(ctx, 2, &bogus);
	test3_result &= rc == -EINVAL;

	/* fmod_ret sees the not-yet-overridden return value (0) */
	rc = bpf_get_func_ret(ctx, &retval);
	test3_result &= rc == 0 && retval == 0;

	/* override the return value; the fexit_test program checks it */
	return 1234;
}
__u64 test4_result = 0;
SEC("fexit/bpf_modify_return_test")
int BPF_PROG(fexit_test, int _a, int *_b, int _ret)
{
	__u64 nargs = bpf_get_func_arg_cnt(ctx);
	__u64 arg0 = 0, arg1 = 0, bogus = 0, retval = 0;
	__s64 rc;

	/* bpf_modify_return_test takes two arguments */
	test4_result = nargs == 2;

	/* in-range argument indexes; compare against the typed ctx args */
	rc = bpf_get_func_arg(ctx, 0, &arg0);
	test4_result &= rc == 0 && ((int) arg0 == 1);
	rc = bpf_get_func_arg(ctx, 1, &arg1);
	test4_result &= rc == 0 && ((int *) arg1 == _b);

	/* out-of-range argument index */
	rc = bpf_get_func_arg(ctx, 2, &bogus);
	test4_result &= rc == -EINVAL;

	/* return value as overridden by fmod_ret_test */
	rc = bpf_get_func_ret(ctx, &retval);
	test4_result &= rc == 0 && retval == 1234;
	return 0;
}
/* Verifier test entry: a BPF_TRACE_FENTRY program attached to
 * bpf_modify_return_test(int a, int *b) must be allowed to read its
 * int-pointer argument directly from the ctx — presumably this pins
 * the is_int_ptr() relaxation in btf_ctx_access; confirm against the
 * verifier change in this series.
 */
{
	"btf_ctx_access accept",
	.insns = {
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, 8), /* load 2nd argument value (int pointer) */
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
	},
	.result = ACCEPT,
	.prog_type = BPF_PROG_TYPE_TRACING,
	.expected_attach_type = BPF_TRACE_FENTRY,
	.kfunc = "bpf_modify_return_test",
},
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment