Commit b4da3340 authored by Masami Hiramatsu, committed by Alexei Starovoitov

tracing/kprobe: bpf: Check error injectable event is on function entry

Check whether the error injectable event is on function entry or not.
Currently the code checks whether the event is an ftrace-based kprobe,
but that is wrong. It should check whether the event is at the entry
of the target function: since error injection overrides a function to
return immediately with a modified return value, the override must
happen before the target function starts building its stack frame.

As a side effect, bpf error injection no longer needs to depend on the
function tracer. It works with software-breakpoint-based kprobe
events too.
Signed-off-by: Masami Hiramatsu <mhiramat@kernel.org>
Reviewed-by: Josef Bacik <jbacik@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent daaf24c6
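For context, a minimal sketch of how this facility is consumed from the BPF side: a kprobe program placed at a function's entry calls the bpf_override_return() helper to force an early error return. The target function (open_ctree, the btrfs example used elsewhere in this series), the -12 (-ENOMEM) error value, and the samples/bpf-style headers are illustrative assumptions, not part of this commit; the target must also be on the error-injection opt-in list that the attach-time check below enforces.

#include <uapi/linux/bpf.h>
#include <uapi/linux/ptrace.h>
#include "bpf_helpers.h"

/* The probe must sit at offset 0 of the function: after this patch,
 * that is exactly what trace_kprobe_on_func_entry() enforces. */
SEC("kprobe/open_ctree")
int override_open_ctree(struct pt_regs *ctx)
{
        /* Force open_ctree() to return -ENOMEM (-12) without ever
         * executing its body. */
        bpf_override_return(ctx, -12);
        return 0;
}

char _license[] SEC("license") = "GPL";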
arch/x86/include/asm/kprobes.h
@@ -67,9 +67,7 @@ extern const int kretprobe_blacklist_size;
 void arch_remove_kprobe(struct kprobe *p);
 asmlinkage void kretprobe_trampoline(void);
 
-#ifdef CONFIG_KPROBES_ON_FTRACE
-extern void arch_ftrace_kprobe_override_function(struct pt_regs *regs);
-#endif
+extern void arch_kprobe_override_function(struct pt_regs *regs);
 
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
arch/x86/kernel/kprobes/core.c
@@ -1183,3 +1183,17 @@ int arch_trampoline_kprobe(struct kprobe *p)
 {
         return 0;
 }
+
+asmlinkage void override_func(void);
+asm(
+        ".type override_func, @function\n"
+        "override_func:\n"
+        "        ret\n"
+        ".size override_func, .-override_func\n"
+);
+
+void arch_kprobe_override_function(struct pt_regs *regs)
+{
+        regs->ip = (unsigned long)&override_func;
+}
+NOKPROBE_SYMBOL(arch_kprobe_override_function);
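The trick in this hunk: by the time the kprobe handler runs, the trap has saved the probed context in pt_regs, and rewriting regs->ip makes the CPU resume at override_func's lone "ret" instead of at the probed function. Because the probe is required to be on the function entry, the caller's return address is still on top of the stack and no callee frame exists yet, so "ret" goes straight back to the caller with the forced value already in the return register. A rough sketch of the x86-64 register plumbing (illustrative assumption, not this commit's code; the helper name is hypothetical):

/* Sketch only: roughly what the two calls inside bpf_override_return()
 * amount to on x86-64. */
static void force_early_return(struct pt_regs *regs, unsigned long rc)
{
        regs->ax = rc;  /* regs_set_return_value(): %rax carries the return value */
        regs->ip = (unsigned long)&override_func;  /* resume at a bare "ret" */
}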
arch/x86/kernel/kprobes/ftrace.c
@@ -97,17 +97,3 @@ int arch_prepare_kprobe_ftrace(struct kprobe *p)
         p->ainsn.boostable = false;
         return 0;
 }
-
-asmlinkage void override_func(void);
-asm(
-        ".type override_func, @function\n"
-        "override_func:\n"
-        "        ret\n"
-        ".size override_func, .-override_func\n"
-);
-
-void arch_ftrace_kprobe_override_function(struct pt_regs *regs)
-{
-        regs->ip = (unsigned long)&override_func;
-}
-NOKPROBE_SYMBOL(arch_ftrace_kprobe_override_function);
kernel/trace/Kconfig
@@ -533,9 +533,7 @@ config FUNCTION_PROFILER
 config BPF_KPROBE_OVERRIDE
         bool "Enable BPF programs to override a kprobed function"
         depends on BPF_EVENTS
-        depends on KPROBES_ON_FTRACE
         depends on HAVE_KPROBE_OVERRIDE
-        depends on DYNAMIC_FTRACE_WITH_REGS
         default n
         help
          Allows BPF to override the execution of a probed function and
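With the two ftrace dependencies dropped, a kernel using ordinary software-breakpoint kprobes can enable the feature. An illustrative config fragment (option names taken from this hunk; the =y values are an assumption about a typical test build, and HAVE_KPROBE_OVERRIDE is selected by the architecture rather than set by hand):

CONFIG_BPF_EVENTS=y
CONFIG_HAVE_KPROBE_OVERRIDE=y
CONFIG_BPF_KPROBE_OVERRIDE=y
# After this patch, no longer required by BPF_KPROBE_OVERRIDE:
# CONFIG_KPROBES_ON_FTRACE, CONFIG_DYNAMIC_FTRACE_WITH_REGS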
kernel/trace/bpf_trace.c
@@ -85,7 +85,7 @@ BPF_CALL_2(bpf_override_return, struct pt_regs *, regs, unsigned long, rc)
 {
         __this_cpu_write(bpf_kprobe_override, 1);
         regs_set_return_value(regs, rc);
-        arch_ftrace_kprobe_override_function(regs);
+        arch_kprobe_override_function(regs);
         return 0;
 }
@@ -800,11 +800,11 @@ int perf_event_attach_bpf_prog(struct perf_event *event,
         int ret = -EEXIST;
 
         /*
-         * Kprobe override only works for ftrace based kprobes, and only if they
-         * are on the opt-in list.
+         * Kprobe override only works if they are on the function entry,
+         * and only if they are on the opt-in list.
         */
         if (prog->kprobe_override &&
-            (!trace_kprobe_ftrace(event->tp_event) ||
+            (!trace_kprobe_on_func_entry(event->tp_event) ||
              !trace_kprobe_error_injectable(event->tp_event)))
                 return -EINVAL;
kernel/trace/trace_kprobe.c
@@ -88,13 +88,16 @@ static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
         return nhit;
 }
 
-int trace_kprobe_ftrace(struct trace_event_call *call)
+bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
         struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
-        return kprobe_ftrace(&tk->rp.kp);
+
+        return kprobe_on_func_entry(tk->rp.kp.addr,
+                        tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
+                        tk->rp.kp.addr ? 0 : tk->rp.kp.offset);
 }
 
-int trace_kprobe_error_injectable(struct trace_event_call *call)
+bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
         struct trace_kprobe *tk = (struct trace_kprobe *)call->data;
         unsigned long addr;
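The rewritten check delegates to kprobe_on_func_entry(), passing either the raw address or the symbol+offset pair, depending on how the probe was specified. In simplified form, the semantics come down to roughly the following (a sketch under the assumption that kallsyms resolution is the deciding step; the helper name is hypothetical and this is not the kernel's exact implementation):

#include <linux/kallsyms.h>

/* Sketch: a probe is entry-safe iff its resolved address sits at
 * offset 0 of some function's text. */
static bool probe_is_on_func_entry(unsigned long addr)
{
        unsigned long offset;

        if (!kallsyms_lookup_size_offset(addr, NULL, &offset))
                return false;
        return offset == 0;     /* no stack frame has been built yet */
}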
kernel/trace/trace_probe.h
@@ -252,8 +252,8 @@ struct symbol_cache;
 unsigned long update_symbol_cache(struct symbol_cache *sc);
 void free_symbol_cache(struct symbol_cache *sc);
 struct symbol_cache *alloc_symbol_cache(const char *sym, long offset);
-int trace_kprobe_ftrace(struct trace_event_call *call);
-int trace_kprobe_error_injectable(struct trace_event_call *call);
+bool trace_kprobe_on_func_entry(struct trace_event_call *call);
+bool trace_kprobe_error_injectable(struct trace_event_call *call);
 #else
 /* uprobes do not support symbol fetch methods */
 #define fetch_symbol_u8 NULL
@@ -280,14 +280,14 @@ alloc_symbol_cache(const char *sym, long offset)
         return NULL;
 }
 
-static inline int trace_kprobe_ftrace(struct trace_event_call *call)
+static inline bool trace_kprobe_on_func_entry(struct trace_event_call *call)
 {
-        return 0;
+        return false;
 }
 
-static inline int trace_kprobe_error_injectable(struct trace_event_call *call)
+static inline bool trace_kprobe_error_injectable(struct trace_event_call *call)
 {
-        return 0;
+        return false;
 }
 #endif /* CONFIG_KPROBE_EVENTS */