Commit e5253896 authored by Masami Hiramatsu, committed by Steven Rostedt

kprobes/x86: ftrace based optimization for x86

Add function-tracer-based kprobe optimization support on x86.
This allows kprobes to use the function tracer to probe at the
mcount call site instead of planting an int3 breakpoint.
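
For illustration only (not part of this commit): a minimal, hypothetical
probe module like the sketch below exercises the new path. On a kernel
built with -mfentry, the mcount/fentry call sits at the function entry,
so a kprobe registered at symbol+0 is dispatched through the new
kprobe_ftrace_handler() rather than an int3 breakpoint. The probed
symbol vfs_read and all "example_*" names are arbitrary choices.

	#include <linux/module.h>
	#include <linux/kernel.h>
	#include <linux/kprobes.h>

	/* Hypothetical example: probe the entry of vfs_read */
	static int example_pre(struct kprobe *p, struct pt_regs *regs)
	{
		pr_info("example: hit %s, ip=%lx\n", p->symbol_name, regs->ip);
		return 0;
	}

	static struct kprobe example_kp = {
		.symbol_name = "vfs_read",
		.pre_handler = example_pre,
	};

	static int __init example_init(void)
	{
		/* With ARCH_SUPPORTS_KPROBES_ON_FTRACE, a probe sitting on
		 * the ftrace location takes the function-tracer path. */
		return register_kprobe(&example_kp);
	}

	static void __exit example_exit(void)
	{
		unregister_kprobe(&example_kp);
	}

	module_init(example_init);
	module_exit(example_exit);
	MODULE_LICENSE("GPL");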

Link: http://lkml.kernel.org/r/20120605102838.27845.26317.stgit@localhost.localdomain

Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>

[ Updated to new port of ftrace save regs functions ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent ae6aa16f
@@ -27,6 +27,7 @@
 #include <asm/insn.h>
 
 #define __ARCH_WANT_KPROBES_INSN_SLOT
+#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
 
 struct pt_regs;
 struct kprobe;
...
@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
 	return 0;
 }
 
+#ifdef KPROBES_CAN_USE_FTRACE
+/* Ftrace callback handler for kprobes */
+void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+				     struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+	unsigned long flags;
+
+	/* Disable irq for emulating a breakpoint and avoiding preempt */
+	local_irq_save(flags);
+
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (unlikely(!p) || kprobe_disabled(p))
+		goto end;
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		regs->ip += sizeof(kprobe_opcode_t);
+
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (p->pre_handler)
+			p->pre_handler(p, regs);
+
+		if (unlikely(p->post_handler)) {
+			/* Emulate singlestep as if there is a 5byte nop */
+			regs->ip = ip + MCOUNT_INSN_SIZE;
+			kcb->kprobe_status = KPROBE_HIT_SSDONE;
+			p->post_handler(p, regs, 0);
+		}
+		__this_cpu_write(current_kprobe, NULL);
+		regs->ip = ip;	/* Recover for next callback */
+	}
+end:
+	local_irq_restore(flags);
+}
+
+int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.insn = NULL;
+	p->ainsn.boostable = -1;
+	return 0;
+}
+#endif
+
 int __init arch_init_kprobes(void)
 {
 	return arch_init_optprobes();
...
@@ -318,7 +318,7 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
 #endif /* CONFIG_OPTPROBES */
 
 #ifdef KPROBES_CAN_USE_FTRACE
 extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
-				  struct pt_regs *regs);
+				  struct ftrace_ops *ops, struct pt_regs *regs);
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
 #endif
...
@@ -921,7 +921,7 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
 
 #ifdef KPROBES_CAN_USE_FTRACE
 static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
-	.regs_func = kprobe_ftrace_handler,
+	.func = kprobe_ftrace_handler,
 	.flags = FTRACE_OPS_FL_SAVE_REGS,
 };
 static int kprobe_ftrace_enabled;
...
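
For context, a sketch of how kprobe_ftrace_ops gets enabled. This logic
lives in the parent commit ae6aa16f, not in this diff; treat it as an
approximation rather than the verbatim kernel code:

	/* Sketch: arm one ftrace-based kprobe. Restrict kprobe_ftrace_ops
	 * to the probed address, then register the ops when the first such
	 * probe is armed so kprobe_ftrace_handler() starts receiving hits. */
	static void arm_kprobe_ftrace_sketch(struct kprobe *p)
	{
		/* remove=0, reset=0: add p->addr to the existing filter */
		ftrace_set_filter_ip(&kprobe_ftrace_ops,
				     (unsigned long)p->addr, 0, 0);
		if (kprobe_ftrace_enabled++ == 0)
			register_ftrace_function(&kprobe_ftrace_ops);
	}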