Commit eb62e6ae authored by Martin KaFai Lau

Merge branch 'bpf: Support bpf_get_func_ip helper in uprobes'

Jiri Olsa says:

====================
Adding support for the bpf_get_func_ip helper in uprobe programs to return
the probed address for both uprobe and return uprobe, as suggested by Andrii
in [1].

We agreed that uprobes can have a special use of the bpf_get_func_ip helper
that differs from kprobes.

The kprobe bpf_get_func_ip returns:
  - address of the function if the probe is attached on the function entry,
    for both kprobe and return kprobe
  - 0 if the probe is not attached on the function entry

The uprobe bpf_get_func_ip returns:
  - address of the probe for both uprobe and return uprobe

The reason for this semantic change is that the kernel can't really tell
whether the probed user space address is a function entry.
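
For illustration only (not part of this series), here is a minimal
uprobe/uretprobe program sketch using the helper; the binary path, symbol
and variable names below are made up:

  // SPDX-License-Identifier: GPL-2.0
  #include "vmlinux.h"
  #include <bpf/bpf_helpers.h>
  #include <bpf/bpf_tracing.h>

  char _license[] SEC("license") = "GPL";

  __u64 entry_ip, ret_ip;

  SEC("uprobe//usr/bin/true:main")
  int BPF_UPROBE(on_entry)
  {
          /* probed (attach) address of main */
          entry_ip = bpf_get_func_ip(ctx);
          return 0;
  }

  SEC("uretprobe//usr/bin/true:main")
  int BPF_URETPROBE(on_exit)
  {
          /* same probed address, also in the return probe */
          ret_ip = bpf_get_func_ip(ctx);
          return 0;
  }

With both programs attached, entry_ip and ret_ip end up holding the same
probed address; a kprobe program would instead get the function address,
or 0 when the probe is not on the function entry.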

v3 changes:
  - removed bpf_get_func_ip_uprobe helper function [Yonghong]

Also available at:
  https://git.kernel.org/pub/scm/linux/kernel/git/jolsa/perf.git
  uprobe_get_func_ip

[1] https://lore.kernel.org/bpf/CAEf4BzZ=xLVkG5eurEuvLU79wAMtwho7ReR+XJAgwhFF4M-7Cg@mail.gmail.com/
====================
Signed-off-by: Martin KaFai Lau <martin.lau@kernel.org>
parents a5c0a42b 7febf573
@@ -1819,6 +1819,7 @@ struct bpf_cg_run_ctx {
struct bpf_trace_run_ctx {
struct bpf_run_ctx run_ctx;
u64 bpf_cookie;
bool is_uprobe;
};
struct bpf_tramp_run_ctx {
@@ -1867,6 +1868,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
if (unlikely(!array))
return ret;
run_ctx.is_uprobe = false;
migrate_disable();
old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
item = &array->items[0];
@@ -1891,8 +1894,8 @@ bpf_prog_run_array(const struct bpf_prog_array *array,
* rcu-protected dynamically sized maps.
*/
static __always_inline u32
bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
bpf_prog_run_array_uprobe(const struct bpf_prog_array __rcu *array_rcu,
const void *ctx, bpf_prog_run_fn run_prog)
{
const struct bpf_prog_array_item *item;
const struct bpf_prog *prog;
@@ -1906,6 +1909,8 @@ bpf_prog_run_array_sleepable(const struct bpf_prog_array __rcu *array_rcu,
rcu_read_lock_trace();
migrate_disable();
run_ctx.is_uprobe = true;
array = rcu_dereference_check(array_rcu, rcu_read_lock_trace_held());
if (unlikely(!array))
goto out;
......
@@ -5086,9 +5086,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
*
* When called for kprobe program attached as uprobe it returns
* probe address for both entry and return uprobe.
*
* Return
* Address of the traced function.
* Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
* Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
......
@@ -1055,7 +1055,16 @@ static unsigned long get_entry_ip(unsigned long fentry_ip)
BPF_CALL_1(bpf_get_func_ip_kprobe, struct pt_regs *, regs)
{
struct kprobe *kp = kprobe_running();
struct bpf_trace_run_ctx *run_ctx __maybe_unused;
struct kprobe *kp;
#ifdef CONFIG_UPROBES
run_ctx = container_of(current->bpf_ctx, struct bpf_trace_run_ctx, run_ctx);
if (run_ctx->is_uprobe)
return ((struct uprobe_dispatch_data *)current->utask->vaddr)->bp_addr;
#endif
kp = kprobe_running();
if (!kp || !(kp->flags & KPROBE_FLAG_ON_FUNC_ENTRY))
return 0;
......
@@ -519,3 +519,8 @@ void __trace_probe_log_err(int offset, int err);
#define trace_probe_log_err(offs, err) \
__trace_probe_log_err(offs, TP_ERR_##err)
struct uprobe_dispatch_data {
struct trace_uprobe *tu;
unsigned long bp_addr;
};
@@ -88,11 +88,6 @@ static struct trace_uprobe *to_trace_uprobe(struct dyn_event *ev)
static int register_uprobe_event(struct trace_uprobe *tu);
static int unregister_uprobe_event(struct trace_uprobe *tu);
struct uprobe_dispatch_data {
struct trace_uprobe *tu;
unsigned long bp_addr;
};
static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs);
static int uretprobe_dispatcher(struct uprobe_consumer *con,
unsigned long func, struct pt_regs *regs);
@@ -1352,7 +1347,7 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
if (bpf_prog_array_valid(call)) {
u32 ret;
ret = bpf_prog_run_array_sleepable(call->prog_array, regs, bpf_prog_run);
ret = bpf_prog_run_array_uprobe(call->prog_array, regs, bpf_prog_run);
if (!ret)
return;
}
......
@@ -5086,9 +5086,14 @@ union bpf_attr {
* u64 bpf_get_func_ip(void *ctx)
* Description
* Get address of the traced function (for tracing and kprobe programs).
*
* When called for kprobe program attached as uprobe it returns
* probe address for both entry and return uprobe.
*
* Return
* Address of the traced function.
* Address of the traced function for kprobe.
* 0 for kprobes placed within the function (not at the entry).
* Address of the probe for uprobe and return uprobe.
*
* u64 bpf_get_attach_cookie(void *ctx)
* Description
......
// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include "get_func_ip_test.skel.h"
#include "get_func_ip_uprobe_test.skel.h"
static noinline void uprobe_trigger(void)
{
}
static void test_function_entry(void)
{
@@ -20,6 +25,8 @@ static void test_function_entry(void)
if (!ASSERT_OK(err, "get_func_ip_test__attach"))
goto cleanup;
skel->bss->uprobe_trigger = (unsigned long) uprobe_trigger;
prog_fd = bpf_program__fd(skel->progs.test1);
err = bpf_prog_test_run_opts(prog_fd, &topts);
ASSERT_OK(err, "test_run");
@@ -30,21 +37,31 @@ static void test_function_entry(void)
ASSERT_OK(err, "test_run");
uprobe_trigger();
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
ASSERT_EQ(skel->bss->test2_result, 1, "test2_result");
ASSERT_EQ(skel->bss->test3_result, 1, "test3_result");
ASSERT_EQ(skel->bss->test4_result, 1, "test4_result");
ASSERT_EQ(skel->bss->test5_result, 1, "test5_result");
ASSERT_EQ(skel->bss->test7_result, 1, "test7_result");
ASSERT_EQ(skel->bss->test8_result, 1, "test8_result");
cleanup:
get_func_ip_test__destroy(skel);
}
/* test6 is x86_64 specific because of the instruction
* offset, disabling it for all other archs
*/
#ifdef __x86_64__
static void test_function_body(void)
extern void uprobe_trigger_body(void);
asm(
".globl uprobe_trigger_body\n"
".type uprobe_trigger_body, @function\n"
"uprobe_trigger_body:\n"
" nop\n"
" ret\n"
);
static void test_function_body_kprobe(void)
{
struct get_func_ip_test *skel = NULL;
LIBBPF_OPTS(bpf_test_run_opts, topts);
@@ -56,6 +73,9 @@ static void test_function_body(void)
if (!ASSERT_OK_PTR(skel, "get_func_ip_test__open"))
return;
/* test6 is x86_64 specific and is disabled by default,
* enable it for body test.
*/
bpf_program__set_autoload(skel->progs.test6, true);
err = get_func_ip_test__load(skel);
@@ -79,6 +99,35 @@ static void test_function_body(void)
bpf_link__destroy(link6);
get_func_ip_test__destroy(skel);
}
static void test_function_body_uprobe(void)
{
struct get_func_ip_uprobe_test *skel = NULL;
int err;
skel = get_func_ip_uprobe_test__open_and_load();
if (!ASSERT_OK_PTR(skel, "get_func_ip_uprobe_test__open_and_load"))
return;
err = get_func_ip_uprobe_test__attach(skel);
if (!ASSERT_OK(err, "get_func_ip_test__attach"))
goto cleanup;
skel->bss->uprobe_trigger_body = (unsigned long) uprobe_trigger_body;
uprobe_trigger_body();
ASSERT_EQ(skel->bss->test1_result, 1, "test1_result");
cleanup:
get_func_ip_uprobe_test__destroy(skel);
}
static void test_function_body(void)
{
test_function_body_kprobe();
test_function_body_uprobe();
}
#else
#define test_function_body()
#endif
......
// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
#include <stdbool.h>
char _license[] SEC("license") = "GPL";
@@ -83,3 +82,25 @@ int test6(struct pt_regs *ctx)
test6_result = (const void *) addr == 0;
return 0;
}
unsigned long uprobe_trigger;
__u64 test7_result = 0;
SEC("uprobe//proc/self/exe:uprobe_trigger")
int BPF_UPROBE(test7)
{
__u64 addr = bpf_get_func_ip(ctx);
test7_result = (const void *) addr == (const void *) uprobe_trigger;
return 0;
}
__u64 test8_result = 0;
SEC("uretprobe//proc/self/exe:uprobe_trigger")
int BPF_URETPROBE(test8, int ret)
{
__u64 addr = bpf_get_func_ip(ctx);
test8_result = (const void *) addr == (const void *) uprobe_trigger;
return 0;
}
// SPDX-License-Identifier: GPL-2.0
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>
char _license[] SEC("license") = "GPL";
unsigned long uprobe_trigger_body;
__u64 test1_result = 0;
SEC("uprobe//proc/self/exe:uprobe_trigger_body+1")
int BPF_UPROBE(test1)
{
__u64 addr = bpf_get_func_ip(ctx);
test1_result = (const void *) addr == (const void *) uprobe_trigger_body + 1;
return 0;
}
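
Outside the selftest skeletons (which auto-attach based on the SEC() string),
a uprobe program like the ones above can be attached manually. A hedged
loader sketch follows, assuming libbpf 1.x; the object, program, binary and
symbol names are illustrative only:

  // SPDX-License-Identifier: GPL-2.0
  #include <unistd.h>
  #include <bpf/libbpf.h>

  int main(void)
  {
          struct bpf_object *obj;
          struct bpf_program *prog;
          struct bpf_link *link = NULL;
          int err = 1;

          obj = bpf_object__open_file("uprobe_example.bpf.o", NULL);
          if (!obj)
                  return 1;
          if (bpf_object__load(obj))
                  goto out;
          prog = bpf_object__find_program_by_name(obj, "on_entry");
          if (!prog)
                  goto out;

          /* resolve "main" in the binary and attach to all processes (pid -1) */
          LIBBPF_OPTS(bpf_uprobe_opts, uopts, .func_name = "main");
          link = bpf_program__attach_uprobe_opts(prog, -1, "/usr/bin/true",
                                                 0 /* func_offset */, &uopts);
          if (!link)
                  goto out;

          /* bpf_get_func_ip() in the program now returns the resolved probe address */
          pause();
          err = 0;
  out:
          bpf_link__destroy(link);
          bpf_object__close(obj);
          return err;
  }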