diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index ae89f4143eb411b72de1d5de8fb14b5d2f0d2fcf..d4a6183197e99f4fc2c0fd89279e95c6e7c76886 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -662,7 +662,7 @@ static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
 		 */
 		emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
 	} else {
-		/* movabsq %rax, imm64 */
+		/* movabsq rax, imm64 */
 		EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
 		EMIT(imm32_lo, 4);
 		EMIT(imm32_hi, 4);
@@ -2039,13 +2039,14 @@ static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
-				void *orig_call)
+				void *func_addr)
 {
 	int ret, i, nr_args = m->nr_args, extra_nregs = 0;
 	int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+	void *orig_call = func_addr;
 	u8 **branches = NULL;
 	u8 *prog;
 	bool save_ret;
@@ -2126,12 +2127,10 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *i
 
 	if (flags & BPF_TRAMP_F_IP_ARG) {
 		/* Store IP address of the traced function:
-		 * mov rax, QWORD PTR [rbp + 8]
-		 * sub rax, X86_PATCH_SIZE
+		 * movabsq rax, func_addr
 		 * mov QWORD PTR [rbp - ip_off], rax
 		 */
-		emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
-		EMIT4(0x48, 0x83, 0xe8, X86_PATCH_SIZE);
+		emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
 		emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
 	}
 
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 55041d2f884de12bcb58b2ddd8124609195607cc..a0b92be98984ea58b3608b664d8f45d4af60ac32 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -103,6 +103,7 @@ struct kprobe {
 	 * this flag is only for optimized_kprobe.
 	 */
 #define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */
+#define KPROBE_FLAG_ON_FUNC_ENTRY	16 /* probe is on the function entry */
 
 /* Has this kprobe gone ? */
 static inline bool kprobe_gone(struct kprobe *p)
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index ead35f39f185aa1117816498e87a72b9f798d998..d6bd10759eaf9e88cde316bdf47d3192058ea2e6 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -4951,6 +4951,7 @@ union bpf_attr {
  *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *		0 for kprobes placed within the function (not at the entry).
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
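Note: the BPF_TRAMP_F_IP_ARG hunk above replaces the old load-and-subtract sequence (return address minus X86_PATCH_SIZE) with a direct movabsq of the function address that is now passed in as func_addr, which stays correct when IBT pads the entry. A minimal user-space sketch of the imm32_hi/imm32_lo split handed to emit_mov_imm64(); the address is made up:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t func_addr = 0xffffffff81234567ULL;	/* hypothetical address */
		uint32_t imm32_hi = (uint32_t) (func_addr >> 32);
		uint32_t imm32_lo = (uint32_t) func_addr;

		/* movabsq encodes imm32_lo in the low 4 immediate bytes and
		 * imm32_hi in the high 4, reassembling the full 64-bit address.
		 */
		printf("hi=0x%08x lo=0x%08x\n", imm32_hi, imm32_lo);
		return 0;
	}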
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 08350e35aba240b8042c74aa8993012a8277c344..51adc3c94503944a15a199980fa6186a1bfea0df 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1606,9 +1606,10 @@ int register_kprobe(struct kprobe *p)
 	struct kprobe *old_p;
 	struct module *probed_mod;
 	kprobe_opcode_t *addr;
+	bool on_func_entry;
 
 	/* Adjust probe address from symbol */
-	addr = kprobe_addr(p);
+	addr = _kprobe_addr(p->addr, p->symbol_name, p->offset, &on_func_entry);
 	if (IS_ERR(addr))
 		return PTR_ERR(addr);
 	p->addr = addr;
@@ -1628,6 +1629,9 @@ int register_kprobe(struct kprobe *p)
 
 	mutex_lock(&kprobe_mutex);
 
+	if (on_func_entry)
+		p->flags |= KPROBE_FLAG_ON_FUNC_ENTRY;
+
 	old_p = get_kprobe(p->addr);
 	if (old_p) {
 		/* Since this may unoptimize 'old_p', locking 'text_mutex'. */
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index b05f0310dbd3eb01b18b7562b4711792786f4bf5..688552df95cacaf24817852e7eef3b23dd4f2e78 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1028,11 +1028,30 @@ static const struct bpf_func_proto bpf_get_func_ip_proto_tracing = {
 	.arg1_type	= ARG_PTR_TO_CTX,
 };
 
+#ifdef CONFIG_X86_KERNEL_IBT
+static unsigned long get_entry_ip(unsigned long fentry_ip)
+{
+	u32 instr;
+
+	/* Being extra safe in here in case entry ip is on the page-edge. */
+	if (get_kernel_nofault(instr, (u32 *) fentry_ip - 1))
+		return fentry_ip;
+	if (is_endbr(instr))
+		fentry_ip -= ENDBR_INSN_SIZE;
+	return fentry_ip;
+}
+#else
+#define get_entry_ip(fentry_ip) fentry_ip
+#endif
+
 BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
 {
 	struct kprobe *kp = kprobe_running();
 
-	return kp ? (uintptr_t)kp->addr : 0;
+	if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
+		return 0;
+
+	return get_entry_ip((uintptr_t)kp->addr);
 }
 
 static const struct bpf_func_proto bpf_get_func_ip_proto_kprobe = {
@@ -2600,13 +2619,13 @@ kprobe_multi_link_prog_run(struct bpf_kprobe_multi_link *link,
 }
 
 static void
-kprobe_multi_link_handler(struct fprobe *fp, unsigned long entry_ip,
+kprobe_multi_link_handler(struct fprobe *fp, unsigned long fentry_ip,
 			  struct pt_regs *regs)
 {
 	struct bpf_kprobe_multi_link *link;
 
 	link = container_of(fp, struct bpf_kprobe_multi_link, fp);
-	kprobe_multi_link_prog_run(link, entry_ip, regs);
+	kprobe_multi_link_prog_run(link, get_entry_ip(fentry_ip), regs);
 }
 
 static int symbols_cmp_r(const void *a, const void *b, const void *priv)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 439e2ab6905ee1e05e6f68c508b7e759e87fc86a..447d2e2a8549a3bfd9e876b19e9d87ef1c3f2b94 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -8265,8 +8265,7 @@ static int kallsyms_callback(void *data, const char *name,
 	if (args->addrs[idx])
 		return 0;
 
-	addr = ftrace_location(addr);
-	if (!addr)
+	if (!ftrace_location(addr))
 		return 0;
 
 	args->addrs[idx] = addr;
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index ead35f39f185aa1117816498e87a72b9f798d998..d6bd10759eaf9e88cde316bdf47d3192058ea2e6 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -4951,6 +4951,7 @@ union bpf_attr {
  *		Get address of the traced function (for tracing and kprobe programs).
  *	Return
  *		Address of the traced function.
+ *		0 for kprobes placed within the function (not at the entry).
  *
  * u64 bpf_get_attach_cookie(void *ctx)
  *	Description
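Note: the get_entry_ip() helper above strips the 4-byte endbr64 landing pad that CONFIG_X86_KERNEL_IBT places in front of the fentry call site, so kprobe and kprobe_multi report the symbol address rather than the call-site address. A user-space sketch of the same check, assuming a direct compare against the raw endbr64 encoding (f3 0f 1e fa); the kernel uses is_endbr()/ENDBR_INSN_SIZE and get_kernel_nofault() instead:

	#include <stdint.h>
	#include <string.h>

	#define ENDBR64_LE	0xfa1e0ff3u	/* f3 0f 1e fa read as a little-endian u32 */
	#define ENDBR_SIZE	4

	/* Back the probed offset up over a preceding endbr64, mirroring
	 * get_entry_ip(); 'text' is a local copy of the instruction bytes.
	 */
	static size_t entry_offset(const uint8_t *text, size_t fentry_off)
	{
		uint32_t instr;

		if (fentry_off < ENDBR_SIZE)
			return fentry_off;	/* nothing before the probe to decode */
		memcpy(&instr, text + fentry_off - ENDBR_SIZE, sizeof(instr));
		return instr == ENDBR64_LE ? fentry_off - ENDBR_SIZE : fentry_off;
	}

	int main(void)
	{
		const uint8_t text[] = { 0xf3, 0x0f, 0x1e, 0xfa,	/* endbr64 */
					 0xe8, 0x00, 0x00, 0x00, 0x00 };/* call __fentry__ */

		return entry_offset(text, 4) == 0 ? 0 : 1;	/* entry is at offset 0 */
	}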
diff --git a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
index 938dbd4d7c2ff023a464f8e2cfa9e5b898b850a2..fede8ef58b5b05c52df36c501ac01f78a3107778 100644
--- a/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/prog_tests/get_func_ip_test.c
@@ -2,7 +2,7 @@
 #include <test_progs.h>
 #include "get_func_ip_test.skel.h"
 
-void test_get_func_ip_test(void)
+static void test_function_entry(void)
 {
 	struct get_func_ip_test *skel = NULL;
 	int err, prog_fd;
@@ -12,14 +12,6 @@ void test_get_func_ip_test(void)
 	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
 		return;
 
-	/* test6 is x86_64 specifc because of the instruction
-	 * offset, disabling it for all other archs
-	 */
-#ifndef __x86_64__
-	bpf_program__set_autoload(skel->progs.test6, false);
-	bpf_program__set_autoload(skel->progs.test7, false);
-#endif
-
 	err = get_func_ip_test__load(skel);
 	if (!ASSERT_OK(err, "get_func_ip_test__load"))
 		goto cleanup;
@@ -43,11 +35,56 @@ void test_get_func_ip_test(void)
 	ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
 	ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
 	ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
+
+cleanup:
+	get_func_ip_test__destroy(skel);
+}
+
+/* test6 is x86_64 specific because of the instruction
+ * offset, disabling it for all other archs
+ */
 #ifdef __x86_64__
+static void test_function_body(void)
+{
+	struct get_func_ip_test *skel = NULL;
+	LIBBPF_OPTS(bpf_test_run_opts, topts);
+	LIBBPF_OPTS(bpf_kprobe_opts, kopts);
+	struct bpf_link *link6 = NULL;
+	int err, prog_fd;
+
+	skel = get_func_ip_test__open();
+	if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
+		return;
+
+	bpf_program__set_autoload(skel->progs.test6, true);
+
+	err = get_func_ip_test__load(skel);
+	if (!ASSERT_OK(err, "get_func_ip_test__load"))
+		goto cleanup;
+
+	kopts.offset = skel->kconfig->CONFIG_X86_KERNEL_IBT ? 9 : 5;
+
+	link6 = bpf_program__attach_kprobe_opts(skel->progs.test6, "bpf_fentry_test6", &kopts);
+	if (!ASSERT_OK_PTR(link6, "link6"))
+		goto cleanup;
+
+	prog_fd = bpf_program__fd(skel->progs.test1);
+	err = bpf_prog_test_run_opts(prog_fd, &topts);
+	ASSERT_OK(err, "test_run");
+	ASSERT_EQ(topts.retval, 0, "test_run");
+
 	ASSERT_EQ(skel->bss->test6_result, 1, "test6_result");
-	ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
-#endif
 
 cleanup:
+	bpf_link__destroy(link6);
 	get_func_ip_test__destroy(skel);
 }
+#else
+#define test_function_body()
+#endif
+
+void test_get_func_ip_test(void)
+{
+	test_function_entry();
+	test_function_body();
+}
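Note: the kopts.offset choice in test_function_body() follows from the x86 entry layout: with IBT the function starts with a 4-byte endbr64 followed by the 5-byte fentry call (X86_PATCH_SIZE), so the first body instruction sits at +9; without IBT only the 5-byte call precedes it, hence +5. A sketch of that arithmetic (the constant names mirror the kernel's, the helper itself is illustrative):

	#define ENDBR_INSN_SIZE	4	/* endbr64: f3 0f 1e fa */
	#define X86_PATCH_SIZE	5	/* call __fentry__: e8 xx xx xx xx */

	/* Offset of the first instruction past the entry padding, i.e. the
	 * in-body kprobe target the test attaches to: 9 with IBT, 5 without.
	 */
	static int body_offset(int have_ibt)
	{
		return (have_ibt ? ENDBR_INSN_SIZE : 0) + X86_PATCH_SIZE;
	}

	int main(void)
	{
		return (body_offset(1) == 9 && body_offset(0) == 5) ? 0 : 1;
	}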
diff --git a/tools/testing/selftests/bpf/progs/get_func_ip_test.c b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
index a587aeca5ae0848b759a2cfc95a22a0a564e87e5..8559e698b40d7ce9d103a4ba868286f33782dda8 100644
--- a/tools/testing/selftests/bpf/progs/get_func_ip_test.c
+++ b/tools/testing/selftests/bpf/progs/get_func_ip_test.c
@@ -2,6 +2,7 @@
 #include <linux/bpf.h>
 #include <bpf/bpf_helpers.h>
 #include <bpf/bpf_tracing.h>
+#include <stdbool.h>
 
 char _license[] SEC("license") = "GPL";
 
@@ -13,6 +14,16 @@ extern const void bpf_modify_return_test __ksym;
 extern const void bpf_fentry_test6 __ksym;
 extern const void bpf_fentry_test7 __ksym;
 
+extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
+
+/* This function is here to have CONFIG_X86_KERNEL_IBT
+ * used and added to object BTF.
+ */
+int unused(void)
+{
+	return CONFIG_X86_KERNEL_IBT ? 0 : 1;
+}
+
 __u64 test1_result = 0;
 SEC("fentry/bpf_fentry_test1")
 int BPF_PROG(test1, int a)
@@ -64,21 +75,11 @@ int BPF_PROG(test5, int a, int *b, int ret)
 }
 
 __u64 test6_result = 0;
-SEC("kprobe/bpf_fentry_test6+0x5")
+SEC("?kprobe")
 int test6(struct pt_regs *ctx)
 {
 	__u64 addr = bpf_get_func_ip(ctx);
 
-	test6_result = (const void *) addr == &bpf_fentry_test6 + 5;
-	return 0;
-}
-
-__u64 test7_result = 0;
-SEC("kprobe/bpf_fentry_test7+5")
-int test7(struct pt_regs *ctx)
-{
-	__u64 addr = bpf_get_func_ip(ctx);
-
-	test7_result = (const void *) addr == &bpf_fentry_test7 + 5;
+	test6_result = (const void *) addr == 0;
 	return 0;
 }
diff --git a/tools/testing/selftests/bpf/progs/kprobe_multi.c b/tools/testing/selftests/bpf/progs/kprobe_multi.c
index 08f95a8155d1bdeea9ce2fef2c90ca24af19954c..98c3399e15c03c1b83d09f177a9c8fa8284f2ab2 100644
--- a/tools/testing/selftests/bpf/progs/kprobe_multi.c
+++ b/tools/testing/selftests/bpf/progs/kprobe_multi.c
@@ -36,15 +36,13 @@ __u64 kretprobe_test6_result = 0;
 __u64 kretprobe_test7_result = 0;
 __u64 kretprobe_test8_result = 0;
 
-extern bool CONFIG_X86_KERNEL_IBT __kconfig __weak;
-
 static void kprobe_multi_check(void *ctx, bool is_return)
 {
 	if (bpf_get_current_pid_tgid() >> 32 != pid)
 		return;
 
 	__u64 cookie = test_cookie ? bpf_get_attach_cookie(ctx) : 0;
-	__u64 addr = bpf_get_func_ip(ctx) - (CONFIG_X86_KERNEL_IBT ? 4 : 0);
+	__u64 addr = bpf_get_func_ip(ctx);
 
 #define SET(__var, __addr, __cookie) ({			\
 		if (((const void *) addr == __addr) &&	\
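Note: taken together, the series makes bpf_get_func_ip() return the symbol address for entry kprobes on both IBT and non-IBT kernels (so the selftests no longer need the manual -4 fixup), and 0 for kprobes placed inside a function body. A minimal sketch of a consumer; the probe target and program name are illustrative only:

	#include <linux/bpf.h>
	#include <bpf/bpf_helpers.h>
	#include <bpf/bpf_tracing.h>

	char _license[] SEC("license") = "GPL";

	SEC("kprobe/do_nanosleep")
	int probe_entry(struct pt_regs *ctx)
	{
		/* The probe sits on the function entry, so this is the
		 * (IBT-adjusted) address of do_nanosleep; a probe attached
		 * at an offset inside the body would get 0 here instead.
		 */
		__u64 ip = bpf_get_func_ip(ctx);

		bpf_printk("do_nanosleep entry: 0x%llx", ip);
		return 0;
	}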