Commit 70ed0706 authored by Alexei Starovoitov

bpf: disable preemption for bpf progs attached to uprobe

trace_call_bpf() no longer disables preemption on its own.
All callers of this function have to do it explicitly.
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
parent 1b7a51a6
@@ -1333,8 +1333,15 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	int size, esize;
 	int rctx;
 
-	if (bpf_prog_array_valid(call) && !trace_call_bpf(call, regs))
-		return;
+	if (bpf_prog_array_valid(call)) {
+		u32 ret;
+
+		preempt_disable();
+		ret = trace_call_bpf(call, regs);
+		preempt_enable();
+		if (!ret)
+			return;
+	}
 
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
...
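
For readability, here is the caller-side pattern as it reads in __uprobe_perf_func() after this patch, reconstructed from the hunk above; the rest of the function is elided and the comments are explanatory, not part of the original source:

	if (bpf_prog_array_valid(call)) {
		u32 ret;

		/* trace_call_bpf() now expects the caller to disable preemption */
		preempt_disable();
		ret = trace_call_bpf(call, regs);
		preempt_enable();

		/* the attached BPF program(s) asked to drop this event */
		if (!ret)
			return;
	}

	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));

Unlike kprobe and tracepoint handlers, the uprobe handler runs in a preemptible task context, so the explicit preempt_disable()/preempt_enable() pair keeps the BPF program run and any per-CPU state it touches on one CPU for the duration of the call.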