Commit 2a9fedc1 authored by Matt Mullins, committed by Greg Kroah-Hartman

bpf: fix nested bpf tracepoints with per-cpu data

commit 9594dc3c upstream.

BPF_PROG_TYPE_RAW_TRACEPOINTs can be executed nested on the same CPU, as
they do not increment bpf_prog_active while executing.

This enables three levels of nesting, to support
  - a kprobe or raw tp or perf event,
  - another one of the above that irq context happens to call, and
  - another one in nmi context
(at most one of which may be a kprobe or perf event).
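
The fix below applies a standard pattern for this: a per-CPU counter tracks the current nesting depth, and each depth indexes a private slot in a fixed 3-deep per-CPU array, so a nested handler cannot clobber the data of the handler it interrupted. As a minimal sketch of that pattern outside the kernel (plain C11; the thread-local variables stand in for per-CPU data, and all names are illustrative, not from the patch):

#include <stdio.h>

#define MAX_NEST 3
#define BUF_SZ   64

/* Stand-ins for DEFINE_PER_CPU data: one instance per thread here,
 * one per CPU in the kernel. */
static _Thread_local int nest_level;
static _Thread_local char scratch[MAX_NEST][BUF_SZ]; /* one buffer per depth */

/* Claim this depth's private buffer, or NULL if nested too deep
 * (the patch returns -EBUSY in that case). */
static char *get_scratch(void)
{
	int level = ++nest_level;   /* models this_cpu_inc_return() */

	if (level > MAX_NEST) {
		--nest_level;       /* drop the claim on the error path */
		return NULL;
	}
	return scratch[level - 1];
}

static void put_scratch(void)
{
	--nest_level;               /* models this_cpu_dec() */
}

/* A handler that re-enters itself, the way an irq or nmi would interrupt
 * a tracepoint already running on this CPU. */
static void handler(int depth)
{
	char *buf = get_scratch();

	if (!buf) {
		printf("depth %d: rejected, nested too deep\n", depth);
		return;
	}
	snprintf(buf, BUF_SZ, "depth %d", depth);
	if (depth < 4)
		handler(depth + 1); /* the nested call gets its own slot */
	printf("%s\n", buf);        /* buf is intact after the nested call */
	put_scratch();
}

int main(void)
{
	handler(1);                 /* depths 1-3 succeed; depth 4 is rejected */
	return 0;
}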

Fixes: 20b9d7ac ("bpf: avoid excessive stack usage for perf_sample_data")
Signed-off-by: Matt Mullins <mmullins@fb.com>
Acked-by: Andrii Nakryiko <andriin@fb.com>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 7cec8976
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -402,8 +402,6 @@ static const struct bpf_func_proto bpf_perf_event_read_value_proto = {
 	.arg4_type	= ARG_CONST_SIZE,
 };
 
-static DEFINE_PER_CPU(struct perf_sample_data, bpf_trace_sd);
-
 static __always_inline u64
 __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 			u64 flags, struct perf_sample_data *sd)
@@ -434,24 +432,50 @@ __bpf_perf_event_output(struct pt_regs *regs, struct bpf_map *map,
 	return perf_event_output(event, sd, regs);
 }
 
+/*
+ * Support executing tracepoints in normal, irq, and nmi context that each call
+ * bpf_perf_event_output
+ */
+struct bpf_trace_sample_data {
+	struct perf_sample_data sds[3];
+};
+
+static DEFINE_PER_CPU(struct bpf_trace_sample_data, bpf_trace_sds);
+static DEFINE_PER_CPU(int, bpf_trace_nest_level);
 BPF_CALL_5(bpf_perf_event_output, struct pt_regs *, regs, struct bpf_map *, map,
 	   u64, flags, void *, data, u64, size)
 {
-	struct perf_sample_data *sd = this_cpu_ptr(&bpf_trace_sd);
+	struct bpf_trace_sample_data *sds = this_cpu_ptr(&bpf_trace_sds);
+	int nest_level = this_cpu_inc_return(bpf_trace_nest_level);
 	struct perf_raw_record raw = {
 		.frag = {
 			.size = size,
 			.data = data,
 		},
 	};
+	struct perf_sample_data *sd;
+	int err;
+
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(sds->sds))) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	sd = &sds->sds[nest_level - 1];
 
-	if (unlikely(flags & ~(BPF_F_INDEX_MASK)))
-		return -EINVAL;
+	if (unlikely(flags & ~(BPF_F_INDEX_MASK))) {
+		err = -EINVAL;
+		goto out;
+	}
 
 	perf_sample_data_init(sd, 0, 0);
 	sd->raw = &raw;
 
-	return __bpf_perf_event_output(regs, map, flags, sd);
+	err = __bpf_perf_event_output(regs, map, flags, sd);
+
+out:
+	this_cpu_dec(bpf_trace_nest_level);
+	return err;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto = {
@@ -808,16 +832,48 @@ pe_prog_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
 /*
  * bpf_raw_tp_regs are separate from bpf_pt_regs used from skb/xdp
  * to avoid potential recursive reuse issue when/if tracepoints are added
- * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack
+ * inside bpf_*_event_output, bpf_get_stackid and/or bpf_get_stack.
+ *
+ * Since raw tracepoints run despite bpf_prog_active, support concurrent usage
+ * in normal, irq, and nmi context.
  */
-static DEFINE_PER_CPU(struct pt_regs, bpf_raw_tp_regs);
+struct bpf_raw_tp_regs {
+	struct pt_regs regs[3];
+};
+static DEFINE_PER_CPU(struct bpf_raw_tp_regs, bpf_raw_tp_regs);
+static DEFINE_PER_CPU(int, bpf_raw_tp_nest_level);
+static struct pt_regs *get_bpf_raw_tp_regs(void)
+{
+	struct bpf_raw_tp_regs *tp_regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	int nest_level = this_cpu_inc_return(bpf_raw_tp_nest_level);
+
+	if (WARN_ON_ONCE(nest_level > ARRAY_SIZE(tp_regs->regs))) {
+		this_cpu_dec(bpf_raw_tp_nest_level);
+		return ERR_PTR(-EBUSY);
+	}
+
+	return &tp_regs->regs[nest_level - 1];
+}
+
+static void put_bpf_raw_tp_regs(void)
+{
+	this_cpu_dec(bpf_raw_tp_nest_level);
+}
+
 BPF_CALL_5(bpf_perf_event_output_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags, void *, data, u64, size)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
-	return ____bpf_perf_event_output(regs, map, flags, data, size);
+	ret = ____bpf_perf_event_output(regs, map, flags, data, size);
+
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
@@ -834,12 +890,18 @@ static const struct bpf_func_proto bpf_perf_event_output_proto_raw_tp = {
 BPF_CALL_3(bpf_get_stackid_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   struct bpf_map *, map, u64, flags)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
 	/* similar to bpf_perf_event_output_tp, but pt_regs fetched differently */
-	return bpf_get_stackid((unsigned long) regs, (unsigned long) map,
-			       flags, 0, 0);
+	ret = bpf_get_stackid((unsigned long) regs, (unsigned long) map,
+			      flags, 0, 0);
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
@@ -854,11 +916,17 @@ static const struct bpf_func_proto bpf_get_stackid_proto_raw_tp = {
 BPF_CALL_4(bpf_get_stack_raw_tp, struct bpf_raw_tracepoint_args *, args,
 	   void *, buf, u32, size, u64, flags)
 {
-	struct pt_regs *regs = this_cpu_ptr(&bpf_raw_tp_regs);
+	struct pt_regs *regs = get_bpf_raw_tp_regs();
+	int ret;
+
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
 
 	perf_fetch_caller_regs(regs);
-	return bpf_get_stack((unsigned long) regs, (unsigned long) buf,
-			     (unsigned long) size, flags, 0);
+	ret = bpf_get_stack((unsigned long) regs, (unsigned long) buf,
+			    (unsigned long) size, flags, 0);
+	put_bpf_raw_tp_regs();
+	return ret;
 }
 
 static const struct bpf_func_proto bpf_get_stack_proto_raw_tp = {
...
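
A note on the error handling above: get_bpf_raw_tp_regs() reports nesting exhaustion through the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() idiom, which encodes a small negative errno into the pointer value itself so a single pointer-sized return carries either a valid pointer or an error code. A user-space replica of that idiom, for illustration only:

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Errno values live in the last page of the address space, a range that
 * can never be a valid pointer, so the two cases are distinguishable. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* what get_bpf_raw_tp_regs() returns when all three slots are busy */
	void *regs = ERR_PTR(-EBUSY);

	if (IS_ERR(regs))
		printf("error: %ld\n", PTR_ERR(regs)); /* prints error: -16 (EBUSY) */
	return 0;
}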