Commit 26198c21 authored by Ingo Molnar

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull ftrace updates from Steve Rostedt:

" This patch series extends ftrace function tracing utility to be
  more dynamic for its users. It allows for data passing to the callback
  functions, as well as reading regs as if a breakpoint were to trigger
  at function entry.

  The main goal of this patch series was to allow kprobes to use ftrace
  as an optimized probe point when a probe is placed on an ftrace nop.
  With lots of help from Masami Hiramatsu, and going through lots of
  iterations, we finally came up with a good solution. "
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parents 194f8dcb e5253896
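For context before the diff itself: a minimal, hypothetical sketch of a module using the interface this series introduces. The four-argument ftrace_func_t callback, FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED, register_ftrace_function() and unregister_ftrace_function() all appear in the diff below; the callback body, the variable names and the use of instruction_pointer() are illustrative assumptions, not code from this merge.

#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/ptrace.h>

/* Callback with the new signature: ip, parent ip, the ftrace_ops that
 * registered it, and (optionally) the saved pt_regs. */
static void sample_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs is NULL when the arch cannot save regs and the ops only
	 * asked for SAVE_REGS_IF_SUPPORTED. */
	if (regs)
		pr_info("traced %pS (regs ip=%lx)\n",
			(void *)ip, instruction_pointer(regs));
}

static struct ftrace_ops sample_ops = {
	.func	= sample_func,
	/* Ask for pt_regs, but still register on arches without support. */
	.flags	= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};

static int __init sample_init(void)
{
	/* Without a filter this traces every function; normally one would
	 * call ftrace_set_filter() or the new ftrace_set_filter_ip() first. */
	return register_ftrace_function(&sample_ops);
}

static void __exit sample_exit(void)
{
	unregister_ftrace_function(&sample_ops);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");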
@@ -3,27 +3,33 @@
 #ifdef __ASSEMBLY__

-	.macro MCOUNT_SAVE_FRAME
-	/* taken from glibc */
-	subq $0x38, %rsp
-	movq %rax, (%rsp)
-	movq %rcx, 8(%rsp)
-	movq %rdx, 16(%rsp)
-	movq %rsi, 24(%rsp)
-	movq %rdi, 32(%rsp)
-	movq %r8, 40(%rsp)
-	movq %r9, 48(%rsp)
+	/* skip is set if the stack was already partially adjusted */
+	.macro MCOUNT_SAVE_FRAME skip=0
+	/*
+	 * We add enough stack to save all regs.
+	 */
+	subq $(SS+8-\skip), %rsp
+	movq %rax, RAX(%rsp)
+	movq %rcx, RCX(%rsp)
+	movq %rdx, RDX(%rsp)
+	movq %rsi, RSI(%rsp)
+	movq %rdi, RDI(%rsp)
+	movq %r8, R8(%rsp)
+	movq %r9, R9(%rsp)
+	/* Move RIP to its proper location */
+	movq SS+8(%rsp), %rdx
+	movq %rdx, RIP(%rsp)
 	.endm

-	.macro MCOUNT_RESTORE_FRAME
-	movq 48(%rsp), %r9
-	movq 40(%rsp), %r8
-	movq 32(%rsp), %rdi
-	movq 24(%rsp), %rsi
-	movq 16(%rsp), %rdx
-	movq 8(%rsp), %rcx
-	movq (%rsp), %rax
-	addq $0x38, %rsp
+	.macro MCOUNT_RESTORE_FRAME skip=0
+	movq R9(%rsp), %r9
+	movq R8(%rsp), %r8
+	movq RDI(%rsp), %rdi
+	movq RSI(%rsp), %rsi
+	movq RDX(%rsp), %rdx
+	movq RCX(%rsp), %rcx
+	movq RAX(%rsp), %rax
+	addq $(SS+8-\skip), %rsp
 	.endm

 #endif
@@ -32,6 +38,11 @@
 #define MCOUNT_ADDR		((long)(mcount))
 #define MCOUNT_INSN_SIZE	5 /* sizeof mcount call */

+#ifdef CONFIG_DYNAMIC_FTRACE
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
+#endif
+
 #ifndef __ASSEMBLY__
 extern void mcount(void);
 extern atomic_t modifying_ftrace_code;
......
@@ -27,6 +27,7 @@
 #include <asm/insn.h>

 #define  __ARCH_WANT_KPROBES_INSN_SLOT
+#define  ARCH_SUPPORTS_KPROBES_ON_FTRACE

 struct pt_regs;
 struct kprobe;
......
@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller)
 	pushl %eax
 	pushl %ecx
 	pushl %edx
-	movl 0xc(%esp), %eax
+	pushl $0	/* Pass NULL as regs pointer */
+	movl 4*4(%esp), %eax
 	movl 0x4(%ebp), %edx
+	leal function_trace_op, %ecx
 	subl $MCOUNT_INSN_SIZE, %eax

 .globl ftrace_call
 ftrace_call:
 	call ftrace_stub

+	addl $4,%esp	/* skip NULL pointer */
 	popl %edx
 	popl %ecx
 	popl %eax
+ftrace_ret:
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 .globl ftrace_graph_call
 ftrace_graph_call:
@@ -1131,6 +1135,72 @@ ftrace_stub:
	ret
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/*
* i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must be at the same location
* as the current return ip is. We move the return ip into the
* ip location, and move flags into the return ip location.
*/
pushl 4(%esp) /* save return ip into ip slot */
subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */
movl $__KERNEL_CS,13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
call ftrace_stub
addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */
addl $MCOUNT_INSN_SIZE, %eax
movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
popl %ds
popl %es
popl %fs
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
jmp ftrace_ret
ftrace_restore_flags:
popf
jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
@@ -1171,9 +1241,6 @@ END(mcount)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
 	pushl %eax
 	pushl %ecx
 	pushl %edx
......
@@ -73,20 +73,34 @@ ENTRY(mcount)
	retq
END(mcount)
/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
leaq function_trace_op, %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
movq 8(%rbp), %rsi
.endm
ENTRY(ftrace_caller)
+	/* Check if tracing was disabled (quick check) */
 	cmpl $0, function_trace_stop
 	jne  ftrace_stub

-	MCOUNT_SAVE_FRAME
-
-	movq 0x38(%rsp), %rdi
-	movq 8(%rbp), %rsi
-	subq $MCOUNT_INSN_SIZE, %rdi
+	ftrace_caller_setup
+	/* regs go into 4th parameter (but make it NULL) */
+	movq $0, %rcx

 GLOBAL(ftrace_call)
 	call ftrace_stub

 	MCOUNT_RESTORE_FRAME
+ftrace_return:

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 GLOBAL(ftrace_graph_call)
@@ -97,6 +111,71 @@ GLOBAL(ftrace_stub)
	retq
END(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/
pushfq
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/* skip=8 to skip flags saved in SS */
ftrace_caller_setup 8
/* Save the rest of pt_regs */
movq %r15, R15(%rsp)
movq %r14, R14(%rsp)
movq %r13, R13(%rsp)
movq %r12, R12(%rsp)
movq %r11, R11(%rsp)
movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp)
/* Copy saved flags */
movq SS(%rsp), %rcx
movq %rcx, EFLAGS(%rsp)
/* Kernel segments */
movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp)
/* Stack - skipping return address */
leaq SS+16(%rsp), %rcx
movq %rcx, RSP(%rsp)
/* regs go into 4th parameter */
leaq (%rsp), %rcx
GLOBAL(ftrace_regs_call)
call ftrace_stub
/* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)
/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx
/* skip=8 to skip flags saved in SS */
MCOUNT_RESTORE_FRAME 8
/* Restore flags */
popfq
jmp ftrace_return
ftrace_restore_flags:
popfq
jmp ftrace_stub
END(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */

ENTRY(mcount)
	cmpl $0, function_trace_stop
@@ -119,7 +198,7 @@ GLOBAL(ftrace_stub)
 trace:
 	MCOUNT_SAVE_FRAME

-	movq 0x38(%rsp), %rdi
+	movq RIP(%rsp), %rdi
 	movq 8(%rbp), %rsi
 	subq $MCOUNT_INSN_SIZE, %rdi
@@ -134,13 +213,10 @@ END(mcount)

 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ENTRY(ftrace_graph_caller)
-	cmpl $0, function_trace_stop
-	jne ftrace_stub
-
 	MCOUNT_SAVE_FRAME

 	leaq 8(%rbp), %rdi
-	movq 0x38(%rsp), %rsi
+	movq RIP(%rsp), %rsi
 	movq (%rbp), %rdx
 	subq $MCOUNT_INSN_SIZE, %rsi
......
@@ -206,6 +206,21 @@ static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
		   unsigned const char *new_code);
/*
* Should never be called:
* As it is only called by __ftrace_replace_code() which is called by
* ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
* which is called to turn mcount into nops or nops into function calls
* but not to convert a function from not using regs to one that uses
* regs, which ftrace_modify_call() is for.
*/
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
WARN_ON(1);
return -EINVAL;
}
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
@@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
	ret = ftrace_modify_code(ip, old, new);
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
ret = ftrace_modify_code(ip, old, new);
}
	atomic_dec(&modifying_ftrace_code);

	return ret;
@@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
	return add_break(rec->ip, old);
}
/*
* If the record has the FTRACE_FL_REGS set, that means that it
* wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
* is not not set, then it wants to convert to the normal callback.
*/
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
/*
* The FTRACE_FL_REGS_EN is set when the record already points to
* a function that saves all the regs. Basically the '_EN' version
* represents the current state of the function.
*/
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS_EN)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{
	unsigned long ftrace_addr;
@@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 	ret = ftrace_test_record(rec, enable);

-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);

 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
@@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
 		/* converting nop to call */
 		return add_brk_on_nop(rec);

+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
+		ftrace_addr = get_ftrace_old_addr(rec);
+		/* fall through */
 	case FTRACE_UPDATE_MAKE_NOP:
 		/* converting a call to a nop */
 		return add_brk_on_call(rec, ftrace_addr);
@@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
 		 * If not, don't touch the breakpoint, we make just create
 		 * a disaster.
 		 */
-		ftrace_addr = (unsigned long)FTRACE_ADDR;
+		ftrace_addr = get_ftrace_addr(rec);
+		nop = ftrace_call_replace(ip, ftrace_addr);
+
+		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
+			goto update;
+
+		/* Check both ftrace_addr and ftrace_old_addr */
+		ftrace_addr = get_ftrace_old_addr(rec);
 		nop = ftrace_call_replace(ip, ftrace_addr);

 		if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
 			return -EINVAL;
 	}

+ update:
 	return probe_kernel_write((void *)ip, &nop[0], 1);
 }
@@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable)
 	ret = ftrace_test_record(rec, enable);

-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);

 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;

+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
 		return add_update_call(rec, ftrace_addr);
@@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
 	ret = ftrace_update_record(rec, enable);

-	ftrace_addr = (unsigned long)FTRACE_ADDR;
+	ftrace_addr = get_ftrace_addr(rec);

 	switch (ret) {
 	case FTRACE_UPDATE_IGNORE:
 		return 0;

+	case FTRACE_UPDATE_MODIFY_CALL_REGS:
+	case FTRACE_UPDATE_MODIFY_CALL:
 	case FTRACE_UPDATE_MAKE_CALL:
 		/* converting nop to call */
 		return finish_update_call(rec, ftrace_addr);
......
@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
	return 0;
}
#ifdef KPROBES_CAN_USE_FTRACE
/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
unsigned long flags;
/* Disable irq for emulating a breakpoint and avoiding preempt */
local_irq_save(flags);
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto end;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
regs->ip += sizeof(kprobe_opcode_t);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler)
p->pre_handler(p, regs);
if (unlikely(p->post_handler)) {
/* Emulate singlestep as if there is a 5byte nop */
regs->ip = ip + MCOUNT_INSN_SIZE;
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
__this_cpu_write(current_kprobe, NULL);
regs->ip = ip; /* Recover for next callback */
}
end:
local_irq_restore(flags);
}
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
p->ainsn.boostable = -1;
return 0;
}
#endif
int __init arch_init_kprobes(void)
{
	return arch_init_optprobes();
......
@@ -10,6 +10,7 @@
 #include <linux/kallsyms.h>
 #include <linux/linkage.h>
 #include <linux/bitops.h>
+#include <linux/ptrace.h>
 #include <linux/ktime.h>
 #include <linux/sched.h>
 #include <linux/types.h>
@@ -18,6 +19,28 @@

#include <asm/ftrace.h>
/*
* If the arch supports passing the variable contents of
* function_trace_op as the third parameter back from the
* mcount call, then the arch should define this as 1.
*/
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
* does. Or at least does enough to prevent any unwelcomed side effects.
*/
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
!ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
struct module;
struct ftrace_hash;
@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
 		     void __user *buffer, size_t *lenp,
 		     loff_t *ppos);

-typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
+struct ftrace_ops;
+
+typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
+			      struct ftrace_ops *op, struct pt_regs *regs);

 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
 *            could be controled by following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
* and passed to the callback. If this flag is set, but the
* architecture does not support passing regs
* (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
* ftrace_ops will fail to register, unless the next flag
* is set.
* SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
* handler can handle an arch that does not save regs
* (the handler tests if regs == NULL), then it can set
* this flag instead. It will not fail registering the ftrace_ops
* but, the regs field will be NULL if the arch does not support
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
* RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
* that the call back has its own recursion protection. If it does
* not set this, then the ftrace infrastructure will add recursion
* protection for the caller.
 */
enum {
	FTRACE_OPS_FL_ENABLED		= 1 << 0,
	FTRACE_OPS_FL_GLOBAL		= 1 << 1,
	FTRACE_OPS_FL_DYNAMIC		= 1 << 2,
	FTRACE_OPS_FL_CONTROL		= 1 << 3,
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
};

struct ftrace_ops {
@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
 	return *this_cpu_ptr(ops->disabled);
 }

-extern void ftrace_stub(unsigned long a0, unsigned long a1);
+extern void ftrace_stub(unsigned long a0, unsigned long a1,
+			struct ftrace_ops *op, struct pt_regs *regs);

 #else /* !CONFIG_FUNCTION_TRACER */

 /*
@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { }
@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob);

 extern int ftrace_text_reserved(void *start, void *end);

+extern int ftrace_nr_registered_ops(void);
+
/*
* The dyn_ftrace record's flags field is split into two parts.
* the first part which is '0-FTRACE_REF_MAX' is a counter of
* the number of callbacks that have registered the function that
* the dyn_ftrace descriptor represents.
*
* The second part is a mask:
* ENABLED - the function is being traced
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
* set up to save regs, the REG_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
*/
 enum {
-	FTRACE_FL_ENABLED	= (1 << 30),
+	FTRACE_FL_ENABLED	= (1UL << 29),
+	FTRACE_FL_REGS		= (1UL << 30),
+	FTRACE_FL_REGS_EN	= (1UL << 31)
 };

-#define FTRACE_FL_MASK		(0x3UL << 30)
-#define FTRACE_REF_MAX		((1 << 30) - 1)
+#define FTRACE_FL_MASK		(0x7UL << 29)
+#define FTRACE_REF_MAX		((1UL << 29) - 1)

 struct dyn_ftrace {
 	union {
@@ -244,6 +317,8 @@ struct dyn_ftrace {
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
@@ -263,9 +338,23 @@ enum {
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};
/*
* The FTRACE_UPDATE_* enum is used to pass information back
* from the ftrace_update_record() and ftrace_test_record()
* functions. These are called by the code update routines
* to find out what is to be done for a given function.
*
* IGNORE - The function is already what we want it to be
* MAKE_CALL - Start tracing the function
* MODIFY_CALL - Stop saving regs for the function
* MODIFY_CALL_REGS - Start saving regs for the function
* MAKE_NOP - Stop tracing the function
*/
 enum {
 	FTRACE_UPDATE_IGNORE,
 	FTRACE_UPDATE_MAKE_CALL,
+	FTRACE_UPDATE_MODIFY_CALL,
+	FTRACE_UPDATE_MODIFY_CALL_REGS,
 	FTRACE_UPDATE_MAKE_NOP,
 };
@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data);
 extern void ftrace_replace_code(int enable);
 extern int ftrace_update_ftrace_func(ftrace_func_t func);
 extern void ftrace_caller(void);
+extern void ftrace_regs_caller(void);
 extern void ftrace_call(void);
+extern void ftrace_regs_call(void);
 extern void mcount_call(void);

 void ftrace_modify_all_code(int command);
@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command);

#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif
#ifndef FTRACE_REGS_ADDR
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod,
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/**
* ftrace_modify_call - convert from one addr to another (no nop)
* @rec: the mcount call site record
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
* read the location, check to see if what is read is indeed
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
* 0 on success
* -EFAULT on error reading the location
* -EINVAL on a failed compare of the contents
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return -EINVAL;
}
#endif
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);
@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip);
 extern void ftrace_disable_daemon(void);
 extern void ftrace_enable_daemon(void);

-#else
+#else /* CONFIG_DYNAMIC_FTRACE */
 static inline int skip_trace(unsigned long ip) { return 0; }
 static inline int ftrace_force_update(void) { return 0; }
 static inline void ftrace_disable_daemon(void) { }
@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
return 0;
}
/*
 * Again users of functions that have ftrace_ops may not
@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end)
  */
 #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
 #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
+#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
 #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
 #define ftrace_free_filter(ops) do { } while (0)
......
@@ -38,6 +38,7 @@
 #include <linux/spinlock.h>
 #include <linux/rcupdate.h>
 #include <linux/mutex.h>
+#include <linux/ftrace.h>

 #ifdef CONFIG_KPROBES
 #include <asm/kprobes.h>
@@ -48,14 +49,26 @@
#define KPROBE_REENTER		0x00000004
#define KPROBE_HIT_SSDONE	0x00000008
/*
* If function tracer is enabled and the arch supports full
* passing of pt_regs to function tracing, then kprobes can
* optimize on top of function tracing.
*/
#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
# define KPROBES_CAN_USE_FTRACE
#endif
/* Attach to insert probes on any functions which should be ignored*/
#define __kprobes	__attribute__((__section__(".kprobes.text")))

#else /* CONFIG_KPROBES */

typedef int kprobe_opcode_t;
struct arch_specific_insn {
	int dummy;
};

#define __kprobes

#endif /* CONFIG_KPROBES */

struct kprobe;
@@ -128,6 +141,7 @@ struct kprobe {
  * NOTE:
  * this flag is only for optimized_kprobe.
  */
+#define KPROBE_FLAG_FTRACE	8 /* probe is using ftrace */

 /* Has this kprobe gone ? */
 static inline int kprobe_gone(struct kprobe *p)
@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
{
	return p->flags & KPROBE_FLAG_OPTIMIZED;
}
/* Is this kprobe uses ftrace ? */
static inline int kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
/*
 * Special probe type that uses setjmp-longjmp type tricks to resume
 * execution at a specified entry with a matching prototype corresponding
@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
#endif
#endif /* CONFIG_OPTPROBES */
#ifdef KPROBES_CAN_USE_FTRACE
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif
/* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr);
......
@@ -561,9 +561,9 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 {
 	LIST_HEAD(free_list);

+	mutex_lock(&kprobe_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
-	mutex_lock(&kprobe_mutex);

 	/*
 	 * Step 1: Unoptimize kprobes and collect cleaned (unused and disarmed)
@@ -586,8 +586,8 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
 	/* Step 4: Free cleaned kprobes after quiesence period */
 	do_free_cleaned_kprobes(&free_list);

-	mutex_unlock(&kprobe_mutex);
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&kprobe_mutex);

 	/* Step 5: Kick optimizer again if needed */
 	if (!list_empty(&optimizing_list) || !list_empty(&unoptimizing_list))
@@ -759,20 +759,32 @@ static __kprobes void try_to_optimize_kprobe(struct kprobe *p)
 	struct kprobe *ap;
 	struct optimized_kprobe *op;

+	/* Impossible to optimize ftrace-based kprobe */
+	if (kprobe_ftrace(p))
+		return;
+
+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	mutex_lock(&text_mutex);
+
 	ap = alloc_aggr_kprobe(p);
 	if (!ap)
-		return;
+		goto out;

 	op = container_of(ap, struct optimized_kprobe, kp);
 	if (!arch_prepared_optinsn(&op->optinsn)) {
 		/* If failed to setup optimizing, fallback to kprobe */
 		arch_remove_optimized_kprobe(op);
 		kfree(op);
-		return;
+		goto out;
 	}

 	init_aggr_kprobe(ap, p);
-	optimize_kprobe(ap);
+	optimize_kprobe(ap);	/* This just kicks optimizer thread */
+
+out:
+	mutex_unlock(&text_mutex);
+	jump_label_unlock();
 }
#ifdef CONFIG_SYSCTL
@@ -907,9 +919,64 @@ static __kprobes struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
}
#endif /* CONFIG_OPTPROBES */
#ifdef KPROBES_CAN_USE_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
.func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS,
};
static int kprobe_ftrace_enabled;
/* Must ensure p->addr is really on ftrace */
static int __kprobes prepare_kprobe(struct kprobe *p)
{
if (!kprobe_ftrace(p))
return arch_prepare_kprobe(p);
return arch_prepare_kprobe_ftrace(p);
}
/* Caller must lock kprobe_mutex */
static void __kprobes arm_kprobe_ftrace(struct kprobe *p)
{
int ret;
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 0, 0);
WARN(ret < 0, "Failed to arm kprobe-ftrace at %p (%d)\n", p->addr, ret);
kprobe_ftrace_enabled++;
if (kprobe_ftrace_enabled == 1) {
ret = register_ftrace_function(&kprobe_ftrace_ops);
WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
}
}
/* Caller must lock kprobe_mutex */
static void __kprobes disarm_kprobe_ftrace(struct kprobe *p)
{
int ret;
kprobe_ftrace_enabled--;
if (kprobe_ftrace_enabled == 0) {
ret = unregister_ftrace_function(&kprobe_ftrace_ops);
WARN(ret < 0, "Failed to init kprobe-ftrace (%d)\n", ret);
}
ret = ftrace_set_filter_ip(&kprobe_ftrace_ops,
(unsigned long)p->addr, 1, 0);
WARN(ret < 0, "Failed to disarm kprobe-ftrace at %p (%d)\n", p->addr, ret);
}
#else /* !KPROBES_CAN_USE_FTRACE */
#define prepare_kprobe(p) arch_prepare_kprobe(p)
#define arm_kprobe_ftrace(p) do {} while (0)
#define disarm_kprobe_ftrace(p) do {} while (0)
#endif
 /* Arm a kprobe with text_mutex */
 static void __kprobes arm_kprobe(struct kprobe *kp)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		arm_kprobe_ftrace(kp);
+		return;
+	}
 	/*
 	 * Here, since __arm_kprobe() doesn't use stop_machine(),
 	 * this doesn't cause deadlock on text_mutex. So, we don't
@@ -921,11 +988,15 @@ static void __kprobes arm_kprobe(struct kprobe *kp)
 }

 /* Disarm a kprobe with text_mutex */
-static void __kprobes disarm_kprobe(struct kprobe *kp)
+static void __kprobes disarm_kprobe(struct kprobe *kp, bool reopt)
 {
+	if (unlikely(kprobe_ftrace(kp))) {
+		disarm_kprobe_ftrace(kp);
+		return;
+	}
 	/* Ditto */
 	mutex_lock(&text_mutex);
-	__disarm_kprobe(kp, true);
+	__disarm_kprobe(kp, reopt);
 	mutex_unlock(&text_mutex);
 }
@@ -1144,12 +1215,6 @@ static int __kprobes add_new_kprobe(struct kprobe *ap, struct kprobe *p)
 	if (p->post_handler && !ap->post_handler)
 		ap->post_handler = aggr_post_handler;

-	if (kprobe_disabled(ap) && !kprobe_disabled(p)) {
-		ap->flags &= ~KPROBE_FLAG_DISABLED;
-		if (!kprobes_all_disarmed)
-			/* Arm the breakpoint again. */
-			__arm_kprobe(ap);
-	}
 	return 0;
 }
@@ -1189,11 +1254,22 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	int ret = 0;
 	struct kprobe *ap = orig_p;

+	/* For preparing optimization, jump_label_text_reserved() is called */
+	jump_label_lock();
+	/*
+	 * Get online CPUs to avoid text_mutex deadlock.with stop machine,
+	 * which is invoked by unoptimize_kprobe() in add_new_kprobe()
+	 */
+	get_online_cpus();
+	mutex_lock(&text_mutex);
+
 	if (!kprobe_aggrprobe(orig_p)) {
 		/* If orig_p is not an aggr_kprobe, create new aggr_kprobe. */
 		ap = alloc_aggr_kprobe(orig_p);
-		if (!ap)
-			return -ENOMEM;
+		if (!ap) {
+			ret = -ENOMEM;
+			goto out;
+		}
 		init_aggr_kprobe(ap, orig_p);
 	} else if (kprobe_unused(ap))
 		/* This probe is going to die. Rescue it */
@@ -1213,7 +1289,7 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 			 * free aggr_probe. It will be used next time, or
 			 * freed by unregister_kprobe.
 			 */
-			return ret;
+			goto out;

 		/* Prepare optimized instructions if possible. */
 		prepare_optimized_kprobe(ap);
@@ -1228,7 +1304,20 @@ static int __kprobes register_aggr_kprobe(struct kprobe *orig_p,
 	/* Copy ap's insn slot to p */
 	copy_kprobe(ap, p);
-	return add_new_kprobe(ap, p);
+	ret = add_new_kprobe(ap, p);
+
+out:
+	mutex_unlock(&text_mutex);
+	put_online_cpus();
+	jump_label_unlock();
+
+	if (ret == 0 && kprobe_disabled(ap) && !kprobe_disabled(p)) {
+		ap->flags &= ~KPROBE_FLAG_DISABLED;
+		if (!kprobes_all_disarmed)
+			/* Arm the breakpoint again. */
+			arm_kprobe(ap);
+	}
+
+	return ret;
 }
 static int __kprobes in_kprobes_functions(unsigned long addr)
@@ -1313,71 +1402,99 @@ static inline int check_kprobe_rereg(struct kprobe *p)
 	return ret;
 }

-int __kprobes register_kprobe(struct kprobe *p)
+static __kprobes int check_kprobe_address_safe(struct kprobe *p,
+					       struct module **probed_mod)
 {
 	int ret = 0;
-	struct kprobe *old_p;
+	unsigned long ftrace_addr;
struct module *probed_mod;
kprobe_opcode_t *addr;
addr = kprobe_addr(p);
if (IS_ERR(addr))
return PTR_ERR(addr);
p->addr = addr;
-	ret = check_kprobe_rereg(p);
-	if (ret)
-		return ret;
+	/*
+	 * If the address is located on a ftrace nop, set the
+	 * breakpoint to the following instruction.
*/
ftrace_addr = ftrace_location((unsigned long)p->addr);
if (ftrace_addr) {
#ifdef KPROBES_CAN_USE_FTRACE
/* Given address is not on the instruction boundary */
if ((unsigned long)p->addr != ftrace_addr)
return -EILSEQ;
/* break_handler (jprobe) can not work with ftrace */
if (p->break_handler)
return -EINVAL;
p->flags |= KPROBE_FLAG_FTRACE;
#else /* !KPROBES_CAN_USE_FTRACE */
return -EINVAL;
#endif
}
	jump_label_lock();
	preempt_disable();
/* Ensure it is not in reserved area nor out of text */
	if (!kernel_text_address((unsigned long) p->addr) ||
	    in_kprobes_functions((unsigned long) p->addr) ||
ftrace_text_reserved(p->addr, p->addr) ||
 	    jump_label_text_reserved(p->addr, p->addr)) {
 		ret = -EINVAL;
-		goto cannot_probe;
+		goto out;
 	}

-	/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
-	p->flags &= KPROBE_FLAG_DISABLED;
+	/* Check if are we probing a module */
+	*probed_mod = __module_text_address((unsigned long) p->addr);
if (*probed_mod) {
/*
* Check if are we probing a module.
*/
probed_mod = __module_text_address((unsigned long) p->addr);
if (probed_mod) {
/* Return -ENOENT if fail. */
ret = -ENOENT;
 		/*
 		 * We must hold a refcount of the probed module while updating
 		 * its code to prohibit unexpected unloading.
 		 */
-		if (unlikely(!try_module_get(probed_mod)))
-			goto cannot_probe;
+		if (unlikely(!try_module_get(*probed_mod))) {
+			ret = -ENOENT;
goto out;
}
 		/*
 		 * If the module freed .init.text, we couldn't insert
 		 * kprobes in there.
 		 */
-		if (within_module_init((unsigned long)p->addr, probed_mod) &&
-		    probed_mod->state != MODULE_STATE_COMING) {
-			module_put(probed_mod);
-			goto cannot_probe;
+		if (within_module_init((unsigned long)p->addr, *probed_mod) &&
+		    (*probed_mod)->state != MODULE_STATE_COMING) {
+			module_put(*probed_mod);
+			*probed_mod = NULL;
ret = -ENOENT;
	}
/* ret will be updated by following code */
} }
out:
	preempt_enable();
	jump_label_unlock();
return ret;
}
int __kprobes register_kprobe(struct kprobe *p)
{
int ret;
struct kprobe *old_p;
struct module *probed_mod;
kprobe_opcode_t *addr;
/* Adjust probe address from symbol */
addr = kprobe_addr(p);
if (IS_ERR(addr))
return PTR_ERR(addr);
p->addr = addr;
ret = check_kprobe_rereg(p);
if (ret)
return ret;
/* User can pass only KPROBE_FLAG_DISABLED to register_kprobe */
p->flags &= KPROBE_FLAG_DISABLED;
	p->nmissed = 0;
	INIT_LIST_HEAD(&p->list);
mutex_lock(&kprobe_mutex);
-	jump_label_lock(); /* needed to call jump_label_text_reserved() */
+	ret = check_kprobe_address_safe(p, &probed_mod);
if (ret)
return ret;
-	get_online_cpus();	/* For avoiding text_mutex deadlock. */
+	mutex_lock(&kprobe_mutex);
mutex_lock(&text_mutex);
	old_p = get_kprobe(p->addr);
	if (old_p) {
@@ -1386,7 +1503,9 @@ int __kprobes register_kprobe(struct kprobe *p)
 		goto out;
 	}

-	ret = arch_prepare_kprobe(p);
+	mutex_lock(&text_mutex);	/* Avoiding text modification */
+	ret = prepare_kprobe(p);
+	mutex_unlock(&text_mutex);
 	if (ret)
 		goto out;
@@ -1395,26 +1514,18 @@ int __kprobes register_kprobe(struct kprobe *p)
 		   &kprobe_table[hash_ptr(p->addr, KPROBE_HASH_BITS)]);

 	if (!kprobes_all_disarmed && !kprobe_disabled(p))
-		__arm_kprobe(p);
+		arm_kprobe(p);

 	/* Try to optimize kprobe */
 	try_to_optimize_kprobe(p);

 out:
-	mutex_unlock(&text_mutex);
-	put_online_cpus();
-	jump_label_unlock();
 	mutex_unlock(&kprobe_mutex);

 	if (probed_mod)
 		module_put(probed_mod);

 	return ret;
-
-cannot_probe:
-	preempt_enable();
-	jump_label_unlock();
-	return ret;
 }
 EXPORT_SYMBOL_GPL(register_kprobe);
@@ -1451,7 +1562,7 @@ static struct kprobe *__kprobes __disable_kprobe(struct kprobe *p)
 		/* Try to disarm and disable this/parent probe */
 		if (p == orig_p || aggr_kprobe_disabled(orig_p)) {
-			disarm_kprobe(orig_p);
+			disarm_kprobe(orig_p, true);
 			orig_p->flags |= KPROBE_FLAG_DISABLED;
 		}
 	}
@@ -2049,10 +2160,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
 	if (!pp)
 		pp = p;
-	seq_printf(pi, "%s%s%s\n",
+	seq_printf(pi, "%s%s%s%s\n",
 		   (kprobe_gone(p) ? "[GONE]" : ""),
 		   ((kprobe_disabled(p) && !kprobe_gone(p)) ?  "[DISABLED]" : ""),
-		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""));
+		   (kprobe_optimized(pp) ? "[OPTIMIZED]" : ""),
+		   (kprobe_ftrace(pp) ? "[FTRACE]" : ""));
 }

 static void __kprobes *kprobe_seq_start(struct seq_file *f, loff_t *pos)
@@ -2131,14 +2243,12 @@ static void __kprobes arm_all_kprobes(void)
 		goto already_enabled;

 	/* Arming kprobes doesn't optimize kprobe itself */
-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist)
 			if (!kprobe_disabled(p))
-				__arm_kprobe(p);
+				arm_kprobe(p);
 	}
-	mutex_unlock(&text_mutex);

 	kprobes_all_disarmed = false;
 	printk(KERN_INFO "Kprobes globally enabled\n");
@@ -2166,15 +2276,13 @@ static void __kprobes disarm_all_kprobes(void)
 	kprobes_all_disarmed = true;
 	printk(KERN_INFO "Kprobes globally disabled\n");

-	mutex_lock(&text_mutex);
 	for (i = 0; i < KPROBE_TABLE_SIZE; i++) {
 		head = &kprobe_table[i];
 		hlist_for_each_entry_rcu(p, node, head, hlist) {
 			if (!arch_trampoline_kprobe(p) && !kprobe_disabled(p))
-				__disarm_kprobe(p, false);
+				disarm_kprobe(p, false);
 		}
 	}
-	mutex_unlock(&text_mutex);

 	mutex_unlock(&kprobe_mutex);

 	/* Wait for disarming all kprobes by optimizer */
......
@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER
 ORIG_CFLAGS := $(KBUILD_CFLAGS)
 KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))

+ifdef CONFIG_FTRACE_SELFTEST
 # selftest needs instrumentation
 CFLAGS_trace_selftest_dynamic.o = -pg
 obj-y += trace_selftest_dynamic.o
 endif
+endif

 # If unlikely tracing is enabled, do not trace these files
 ifdef CONFIG_TRACING_BRANCHES
......
@@ -64,12 +64,20 @@
 #define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)

+static struct ftrace_ops ftrace_list_end __read_mostly = {
+	.func		= ftrace_stub,
+	.flags		= FTRACE_OPS_FL_RECURSION_SAFE,
+};
+
 /* ftrace_enabled is a method to turn ftrace on or off */
 int ftrace_enabled __read_mostly;
 static int last_ftrace_enabled;

 /* Quick disabling of function tracer. */
-int function_trace_stop;
+int function_trace_stop __read_mostly;
+
+/* Current function tracing op */
+struct ftrace_ops *function_trace_op __read_mostly = &ftrace_list_end;

 /* List for set_ftrace_pid's pids. */
 LIST_HEAD(ftrace_pids);
@@ -86,22 +94,43 @@ static int ftrace_disabled __read_mostly;

 static DEFINE_MUTEX(ftrace_lock);

-static struct ftrace_ops ftrace_list_end __read_mostly = {
-	.func		= ftrace_stub,
-};
-
 static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
 static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
-static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
-ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;

-static void
-ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
+#if ARCH_SUPPORTS_FTRACE_OPS
+static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
+				 struct ftrace_ops *op, struct pt_regs *regs);
+#else
+/* See comment below, where ftrace_ops_list_func is defined */
+static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip);
+#define ftrace_ops_list_func ((ftrace_func_t)ftrace_ops_no_ops)
+#endif
/**
* ftrace_nr_registered_ops - return number of ops registered
*
* Returns the number of ftrace_ops registered and tracing functions
*/
int ftrace_nr_registered_ops(void)
{
struct ftrace_ops *ops;
int cnt = 0;
mutex_lock(&ftrace_lock);
for (ops = ftrace_ops_list;
ops != &ftrace_list_end; ops = ops->next)
cnt++;
mutex_unlock(&ftrace_lock);
return cnt;
}
/*
 * Traverse the ftrace_global_list, invoking all entries. The reason that we
@@ -112,29 +141,29 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
  *
  * Silly Alpha and silly pointer-speculation compiler optimizations!
  */
-static void ftrace_global_list_func(unsigned long ip,
-				    unsigned long parent_ip)
+static void
+ftrace_global_list_func(unsigned long ip, unsigned long parent_ip,
+			struct ftrace_ops *op, struct pt_regs *regs)
 {
-	struct ftrace_ops *op;
-
 	if (unlikely(trace_recursion_test(TRACE_GLOBAL_BIT)))
 		return;

 	trace_recursion_set(TRACE_GLOBAL_BIT);
 	op = rcu_dereference_raw(ftrace_global_list); /*see above*/
 	while (op != &ftrace_list_end) {
-		op->func(ip, parent_ip);
+		op->func(ip, parent_ip, op, regs);
 		op = rcu_dereference_raw(op->next); /*see above*/
 	};
 	trace_recursion_clear(TRACE_GLOBAL_BIT);
 }

-static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
+static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
+			    struct ftrace_ops *op, struct pt_regs *regs)
 {
 	if (!test_tsk_trace_trace(current))
 		return;

-	ftrace_pid_function(ip, parent_ip);
+	ftrace_pid_function(ip, parent_ip, op, regs);
 }

 static void set_ftrace_pid_function(ftrace_func_t func)
@@ -153,25 +182,9 @@ static void set_ftrace_pid_function(ftrace_func_t func)
 void clear_ftrace_function(void)
 {
 	ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function = ftrace_stub;
-	__ftrace_trace_function_delay = ftrace_stub;
 	ftrace_pid_function = ftrace_stub;
 }

-#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
-/*
- * For those archs that do not test ftrace_trace_stop in their
- * mcount call site, we need to do it from C.
- */
-static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
-{
-	if (function_trace_stop)
-		return;
-
-	__ftrace_trace_function(ip, parent_ip);
-}
-#endif
-
 static void control_ops_disable_all(struct ftrace_ops *ops)
 {
 	int cpu;
@@ -230,28 +243,27 @@ static void update_ftrace_function(void)

 	/*
 	 * If we are at the end of the list and this ops is
-	 * not dynamic, then have the mcount trampoline call
-	 * the function directly
+	 * recursion safe and not dynamic and the arch supports passing ops,
+	 * then have the mcount trampoline call the function directly.
 	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
 	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
+	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
+	     !FTRACE_FORCE_LIST_FUNC)) {
+		/* Set the ftrace_ops that the arch callback uses */
+		if (ftrace_ops_list == &global_ops)
+			function_trace_op = ftrace_global_list;
+		else
+			function_trace_op = ftrace_ops_list;
 		func = ftrace_ops_list->func;
-	else
+	} else {
+		/* Just use the default ftrace_ops */
+		function_trace_op = &ftrace_list_end;
 		func = ftrace_ops_list_func;
+	}

-#ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
 	ftrace_trace_function = func;
-#else
-#ifdef CONFIG_DYNAMIC_FTRACE
-	/* do not update till all functions have been modified */
-	__ftrace_trace_function_delay = func;
-#else
-	__ftrace_trace_function = func;
-#endif
-	ftrace_trace_function =
-		(func == ftrace_stub) ? func : ftrace_test_stop_func;
-#endif
 }

 static void add_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
@@ -325,6 +337,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
	if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
		return -EINVAL;
#ifndef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/*
* If the ftrace_ops specifies SAVE_REGS, then it only can be used
* if the arch supports it, or SAVE_REGS_IF_SUPPORTED is also set.
* Setting SAVE_REGS_IF_SUPPORTED makes SAVE_REGS irrelevant.
*/
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS &&
!(ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED))
return -EINVAL;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED)
ops->flags |= FTRACE_OPS_FL_SAVE_REGS;
#endif
if (!core_kernel_data((unsigned long)ops)) if (!core_kernel_data((unsigned long)ops))
ops->flags |= FTRACE_OPS_FL_DYNAMIC; ops->flags |= FTRACE_OPS_FL_DYNAMIC;
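To make the flag check above concrete, here is a minimal sketch of the two ways a caller might ask for saved regs; sample_func, strict_ops and relaxed_ops are made-up names for illustration and are not part of this series.

#include <linux/ftrace.h>
#include <linux/printk.h>

/* Placeholder callback used only for this sketch */
static void sample_func(unsigned long ip, unsigned long parent_ip,
			struct ftrace_ops *op, struct pt_regs *regs)
{
	pr_info("traced %pS\n", (void *)ip);
}

/*
 * register_ftrace_function() rejects this ops with -EINVAL on an
 * arch that does not define ARCH_SUPPORTS_FTRACE_SAVE_REGS.
 */
static struct ftrace_ops strict_ops = {
	.func	= sample_func,
	.flags	= FTRACE_OPS_FL_SAVE_REGS,
};

/*
 * Registers everywhere: regs are saved where the arch supports it,
 * and the callback simply sees regs == NULL otherwise.
 */
static struct ftrace_ops relaxed_ops = {
	.func	= sample_func,
	.flags	= FTRACE_OPS_FL_SAVE_REGS |
		  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};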
...@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip) ...@@ -773,7 +799,8 @@ ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
} }
static void static void
function_profile_call(unsigned long ip, unsigned long parent_ip) function_profile_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{ {
struct ftrace_profile_stat *stat; struct ftrace_profile_stat *stat;
struct ftrace_profile *rec; struct ftrace_profile *rec;
...@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip) ...@@ -803,7 +830,7 @@ function_profile_call(unsigned long ip, unsigned long parent_ip)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
static int profile_graph_entry(struct ftrace_graph_ent *trace) static int profile_graph_entry(struct ftrace_graph_ent *trace)
{ {
function_profile_call(trace->func, 0); function_profile_call(trace->func, 0, NULL, NULL);
return 1; return 1;
} }
...@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void) ...@@ -863,6 +890,7 @@ static void unregister_ftrace_profiler(void)
#else #else
static struct ftrace_ops ftrace_profile_ops __read_mostly = { static struct ftrace_ops ftrace_profile_ops __read_mostly = {
.func = function_profile_call, .func = function_profile_call,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static int register_ftrace_profiler(void) static int register_ftrace_profiler(void)
...@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = { ...@@ -1045,6 +1073,7 @@ static struct ftrace_ops global_ops = {
.func = ftrace_stub, .func = ftrace_stub,
.notrace_hash = EMPTY_HASH, .notrace_hash = EMPTY_HASH,
.filter_hash = EMPTY_HASH, .filter_hash = EMPTY_HASH,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static DEFINE_MUTEX(ftrace_regex_lock); static DEFINE_MUTEX(ftrace_regex_lock);
...@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops, ...@@ -1525,6 +1554,12 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
rec->flags++; rec->flags++;
if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX)) if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
return; return;
/*
* If any ops wants regs saved for this function
* then all ops will get saved regs.
*/
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
rec->flags |= FTRACE_FL_REGS;
} else { } else {
if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0)) if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
return; return;
...@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update) ...@@ -1616,18 +1651,59 @@ static int ftrace_check_record(struct dyn_ftrace *rec, int enable, int update)
if (enable && (rec->flags & ~FTRACE_FL_MASK)) if (enable && (rec->flags & ~FTRACE_FL_MASK))
flag = FTRACE_FL_ENABLED; flag = FTRACE_FL_ENABLED;
/*
* If enabling and the REGS flag does not match the REGS_EN, then
* do not ignore this record. Set flags to fail the compare against
* ENABLED.
*/
if (flag &&
(!(rec->flags & FTRACE_FL_REGS) != !(rec->flags & FTRACE_FL_REGS_EN)))
flag |= FTRACE_FL_REGS;
/* If the state of this record hasn't changed, then do nothing */ /* If the state of this record hasn't changed, then do nothing */
if ((rec->flags & FTRACE_FL_ENABLED) == flag) if ((rec->flags & FTRACE_FL_ENABLED) == flag)
return FTRACE_UPDATE_IGNORE; return FTRACE_UPDATE_IGNORE;
if (flag) { if (flag) {
if (update) /* Save off if rec is being enabled (for return value) */
flag ^= rec->flags & FTRACE_FL_ENABLED;
if (update) {
rec->flags |= FTRACE_FL_ENABLED; rec->flags |= FTRACE_FL_ENABLED;
return FTRACE_UPDATE_MAKE_CALL; if (flag & FTRACE_FL_REGS) {
if (rec->flags & FTRACE_FL_REGS)
rec->flags |= FTRACE_FL_REGS_EN;
else
rec->flags &= ~FTRACE_FL_REGS_EN;
}
}
/*
* If this record is being updated from a nop, then
* return UPDATE_MAKE_CALL.
* Otherwise, if the EN flag is set, then return
* UPDATE_MODIFY_CALL_REGS to tell the caller to convert
* from the non-save regs, to a save regs function.
* Otherwise,
* return UPDATE_MODIFY_CALL to tell the caller to convert
* from the save regs, to a non-save regs function.
*/
if (flag & FTRACE_FL_ENABLED)
return FTRACE_UPDATE_MAKE_CALL;
else if (rec->flags & FTRACE_FL_REGS_EN)
return FTRACE_UPDATE_MODIFY_CALL_REGS;
else
return FTRACE_UPDATE_MODIFY_CALL;
} }
if (update) if (update) {
rec->flags &= ~FTRACE_FL_ENABLED; /* If there are no more users, clear all flags */
if (!(rec->flags & ~FTRACE_FL_MASK))
rec->flags = 0;
else
/* Just disable the record (keep REGS state) */
rec->flags &= ~FTRACE_FL_ENABLED;
}
return FTRACE_UPDATE_MAKE_NOP; return FTRACE_UPDATE_MAKE_NOP;
} }
...@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable) ...@@ -1662,13 +1738,17 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
static int static int
__ftrace_replace_code(struct dyn_ftrace *rec, int enable) __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{ {
unsigned long ftrace_old_addr;
unsigned long ftrace_addr; unsigned long ftrace_addr;
int ret; int ret;
ftrace_addr = (unsigned long)FTRACE_ADDR;
ret = ftrace_update_record(rec, enable); ret = ftrace_update_record(rec, enable);
if (rec->flags & FTRACE_FL_REGS)
ftrace_addr = (unsigned long)FTRACE_REGS_ADDR;
else
ftrace_addr = (unsigned long)FTRACE_ADDR;
switch (ret) { switch (ret) {
case FTRACE_UPDATE_IGNORE: case FTRACE_UPDATE_IGNORE:
return 0; return 0;
...@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable) ...@@ -1678,6 +1758,15 @@ __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
case FTRACE_UPDATE_MAKE_NOP: case FTRACE_UPDATE_MAKE_NOP:
return ftrace_make_nop(NULL, rec, ftrace_addr); return ftrace_make_nop(NULL, rec, ftrace_addr);
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
if (rec->flags & FTRACE_FL_REGS)
ftrace_old_addr = (unsigned long)FTRACE_ADDR;
else
ftrace_old_addr = (unsigned long)FTRACE_REGS_ADDR;
return ftrace_modify_call(rec, ftrace_old_addr, ftrace_addr);
} }
return -1; /* unknown ftrace bug */ return -1; /* unknown ftrace bug */
...@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command) ...@@ -1882,16 +1971,6 @@ static void ftrace_run_update_code(int command)
*/ */
arch_ftrace_update_code(command); arch_ftrace_update_code(command);
#ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
/*
* For archs that call ftrace_test_stop_func(), we must
* wait till after we update all the function callers
* before we update the callback. This keeps different
* ops that record different functions from corrupting
* each other.
*/
__ftrace_trace_function = __ftrace_trace_function_delay;
#endif
function_trace_stop--; function_trace_stop--;
ret = ftrace_arch_code_modify_post_process(); ret = ftrace_arch_code_modify_post_process();
...@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v) ...@@ -2441,8 +2520,9 @@ static int t_show(struct seq_file *m, void *v)
seq_printf(m, "%ps", (void *)rec->ip); seq_printf(m, "%ps", (void *)rec->ip);
if (iter->flags & FTRACE_ITER_ENABLED) if (iter->flags & FTRACE_ITER_ENABLED)
seq_printf(m, " (%ld)", seq_printf(m, " (%ld)%s",
rec->flags & ~FTRACE_FL_MASK); rec->flags & ~FTRACE_FL_MASK,
rec->flags & FTRACE_FL_REGS ? " R" : "");
seq_printf(m, "\n"); seq_printf(m, "\n");
return 0; return 0;
...@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void) ...@@ -2790,8 +2870,8 @@ static int __init ftrace_mod_cmd_init(void)
} }
device_initcall(ftrace_mod_cmd_init); device_initcall(ftrace_mod_cmd_init);
static void static void function_trace_probe_call(unsigned long ip, unsigned long parent_ip,
function_trace_probe_call(unsigned long ip, unsigned long parent_ip) struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct ftrace_func_probe *entry; struct ftrace_func_probe *entry;
struct hlist_head *hhd; struct hlist_head *hhd;
...@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf, ...@@ -3162,8 +3242,27 @@ ftrace_notrace_write(struct file *file, const char __user *ubuf,
} }
static int static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
int reset, int enable) {
struct ftrace_func_entry *entry;
if (!ftrace_location(ip))
return -EINVAL;
if (remove) {
entry = ftrace_lookup_ip(hash, ip);
if (!entry)
return -ENOENT;
free_hash_entry(hash, entry);
return 0;
}
return add_hash_entry(hash, ip);
}
static int
ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
unsigned long ip, int remove, int reset, int enable)
{ {
struct ftrace_hash **orig_hash; struct ftrace_hash **orig_hash;
struct ftrace_hash *hash; struct ftrace_hash *hash;
...@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, ...@@ -3192,6 +3291,11 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
ret = -EINVAL; ret = -EINVAL;
goto out_regex_unlock; goto out_regex_unlock;
} }
if (ip) {
ret = ftrace_match_addr(hash, ip, remove);
if (ret < 0)
goto out_regex_unlock;
}
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
ret = ftrace_hash_move(ops, enable, orig_hash, hash); ret = ftrace_hash_move(ops, enable, orig_hash, hash);
...@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len, ...@@ -3208,6 +3312,37 @@ ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
return ret; return ret;
} }
static int
ftrace_set_addr(struct ftrace_ops *ops, unsigned long ip, int remove,
int reset, int enable)
{
return ftrace_set_hash(ops, 0, 0, ip, remove, reset, enable);
}
/**
* ftrace_set_filter_ip - set a function to filter on in ftrace by address
* @ops - the ops to set the filter with
* @ip - the address to add to or remove from the filter.
* @remove - non-zero to remove the ip from the filter
* @reset - non-zero to reset all filters before applying this filter.
*
* Filters denote which functions should be enabled when tracing is enabled.
* If @ip is NULL, it fails to update the filter.
*/
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset)
{
return ftrace_set_addr(ops, ip, remove, reset, 1);
}
EXPORT_SYMBOL_GPL(ftrace_set_filter_ip);
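A hedged sketch of how a client might use the new ftrace_set_filter_ip() interface to trace one function by address; the module-style init, the example_* names, and the "do_fork" target are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/kallsyms.h>

static void example_func(unsigned long ip, unsigned long parent_ip,
			 struct ftrace_ops *op, struct pt_regs *regs)
{
	/* regs can still be NULL on an arch without ARCH_SUPPORTS_FTRACE_SAVE_REGS */
	pr_info("traced %pS called from %pS\n", (void *)ip, (void *)parent_ip);
}

static struct ftrace_ops example_ops = {
	.func	= example_func,
	.flags	= FTRACE_OPS_FL_SAVE_REGS |
		  FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
};

static int __init example_init(void)
{
	unsigned long addr = kallsyms_lookup_name("do_fork"); /* example target */
	int ret;

	if (!addr)
		return -ENOENT;

	/* remove = 0: add this address; reset = 1: clear any previous filter */
	ret = ftrace_set_filter_ip(&example_ops, addr, 0, 1);
	if (ret)
		return ret;

	return register_ftrace_function(&example_ops);
}

Passing reset = 0 instead would add the address to whatever filter is already installed on the ops, per the kernel-doc above.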
static int
ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
int reset, int enable)
{
return ftrace_set_hash(ops, buf, len, 0, 0, reset, enable);
}
/** /**
* ftrace_set_filter - set a function to filter on in ftrace * ftrace_set_filter - set a function to filter on in ftrace
* @ops - the ops to set the filter with * @ops - the ops to set the filter with
...@@ -3912,6 +4047,7 @@ void __init ftrace_init(void) ...@@ -3912,6 +4047,7 @@ void __init ftrace_init(void)
static struct ftrace_ops global_ops = { static struct ftrace_ops global_ops = {
.func = ftrace_stub, .func = ftrace_stub,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static int __init ftrace_nodyn_init(void) static int __init ftrace_nodyn_init(void)
...@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip) ...@@ -3942,10 +4078,9 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
static void static void
ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs)
{ {
struct ftrace_ops *op;
if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT))) if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
return; return;
...@@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) ...@@ -3959,7 +4094,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
while (op != &ftrace_list_end) { while (op != &ftrace_list_end) {
if (!ftrace_function_local_disabled(op) && if (!ftrace_function_local_disabled(op) &&
ftrace_ops_test(op, ip)) ftrace_ops_test(op, ip))
op->func(ip, parent_ip); op->func(ip, parent_ip, op, regs);
op = rcu_dereference_raw(op->next); op = rcu_dereference_raw(op->next);
}; };
...@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip) ...@@ -3969,13 +4104,18 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops control_ops = { static struct ftrace_ops control_ops = {
.func = ftrace_ops_control_func, .func = ftrace_ops_control_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static void static inline void
ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) __ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ignored, struct pt_regs *regs)
{ {
struct ftrace_ops *op; struct ftrace_ops *op;
if (function_trace_stop)
return;
if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT))) if (unlikely(trace_recursion_test(TRACE_INTERNAL_BIT)))
return; return;
...@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip) ...@@ -3988,13 +4128,39 @@ ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
op = rcu_dereference_raw(ftrace_ops_list); op = rcu_dereference_raw(ftrace_ops_list);
while (op != &ftrace_list_end) { while (op != &ftrace_list_end) {
if (ftrace_ops_test(op, ip)) if (ftrace_ops_test(op, ip))
op->func(ip, parent_ip); op->func(ip, parent_ip, op, regs);
op = rcu_dereference_raw(op->next); op = rcu_dereference_raw(op->next);
}; };
preempt_enable_notrace(); preempt_enable_notrace();
trace_recursion_clear(TRACE_INTERNAL_BIT); trace_recursion_clear(TRACE_INTERNAL_BIT);
} }
/*
* Some archs only support passing ip and parent_ip. Even though
* the list function ignores the op parameter, we do not want any
* C side effects, where a function is called without the caller
* sending a third parameter.
* Archs are expected to support both the regs and ftrace_ops parameters
* at the same time. If an arch supports ftrace_ops, it is assumed it
* also supports regs.
* If callbacks want to use regs, they must either check for regs
* being NULL, or test for ARCH_SUPPORTS_FTRACE_SAVE_REGS.
* Note, ARCH_SUPPORTS_FTRACE_SAVE_REGS expects a full pt_regs to be saved.
* An architecture can pass partial regs with ftrace_ops and still
* set ARCH_SUPPORTS_FTRACE_OPS.
*/
#if ARCH_SUPPORTS_FTRACE_OPS
static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, regs);
}
#else
static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
{
__ftrace_ops_list_func(ip, parent_ip, NULL, NULL);
}
#endif
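Per the comment above, a portable callback just tests regs before using it; a small sketch under that assumption (the function name is illustrative):

#include <linux/ftrace.h>
#include <linux/ptrace.h>
#include <linux/printk.h>

static void regs_aware_func(unsigned long ip, unsigned long parent_ip,
			    struct ftrace_ops *op, struct pt_regs *regs)
{
	if (regs)
		/* the arch saved (at least partial) regs at the call site */
		pr_info("func %pS, pc %lx\n", (void *)ip, instruction_pointer(regs));
	else
		/* the arch passed no regs; only ip/parent_ip are available */
		pr_info("func %pS\n", (void *)ip);
}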
static void clear_ftrace_swapper(void) static void clear_ftrace_swapper(void)
{ {
struct task_struct *p; struct task_struct *p;
......
...@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]); ...@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]);
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt; extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void); extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void); extern int DYN_FTRACE_TEST_NAME2(void);
#endif
extern int ring_buffer_expanded; extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled; extern bool tracing_selftest_disabled;
......
...@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); ...@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
static void static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *pt_regs)
{ {
struct ftrace_entry *entry; struct ftrace_entry *entry;
struct hlist_head *head; struct hlist_head *head;
......
...@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void) ...@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void)
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void static void
function_test_events_call(unsigned long ip, unsigned long parent_ip) function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer; struct ring_buffer *buffer;
...@@ -1720,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) ...@@ -1720,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __initdata = static struct ftrace_ops trace_ops __initdata =
{ {
.func = function_test_events_call, .func = function_test_events_call,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static __init void event_trace_self_test_with_function(void) static __init void event_trace_self_test_with_function(void)
......
...@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr) ...@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
} }
static void static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) ...@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
} }
static void static void
function_trace_call(unsigned long ip, unsigned long parent_ip) function_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
} }
static void static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip) function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = function_trace_call, .func = function_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops trace_stack_ops __read_mostly = static struct ftrace_ops trace_stack_ops __read_mostly =
{ {
.func = function_stack_trace_call, .func = function_stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
/* Our two options */ /* Our two options */
......
...@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr, ...@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
* irqsoff uses its own tracer function to keep the overhead down: * irqsoff uses its own tracer function to keep the overhead down:
*/ */
static void static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) ...@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = irqsoff_tracer_call, .func = irqsoff_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
......
...@@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr, ...@@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr,
* wakeup uses its own tracer function to keep the overhead down: * wakeup uses its own tracer function to keep the overhead down:
*/ */
static void static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = wakeup_trace; struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) ...@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = wakeup_tracer_call, .func = wakeup_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
......
...@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) ...@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
static int trace_selftest_test_probe1_cnt; static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip, static void trace_selftest_test_probe1_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe1_cnt++; trace_selftest_test_probe1_cnt++;
} }
static int trace_selftest_test_probe2_cnt; static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip, static void trace_selftest_test_probe2_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe2_cnt++; trace_selftest_test_probe2_cnt++;
} }
static int trace_selftest_test_probe3_cnt; static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip, static void trace_selftest_test_probe3_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe3_cnt++; trace_selftest_test_probe3_cnt++;
} }
static int trace_selftest_test_global_cnt; static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip, static void trace_selftest_test_global_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_global_cnt++; trace_selftest_test_global_cnt++;
} }
static int trace_selftest_test_dyn_cnt; static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip, static void trace_selftest_test_dyn_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_dyn_cnt++; trace_selftest_test_dyn_cnt++;
} }
static struct ftrace_ops test_probe1 = { static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func, .func = trace_selftest_test_probe1_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_probe2 = { static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func, .func = trace_selftest_test_probe2_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_probe3 = { static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func, .func = trace_selftest_test_probe3_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_global = { static struct ftrace_ops test_global = {
.func = trace_selftest_test_global_func, .func = trace_selftest_test_global_func,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static void print_counts(void) static void print_counts(void)
...@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, ...@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
return ret; return ret;
} }
static int trace_selftest_recursion_cnt;
static void trace_selftest_test_recursion_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{
/*
* This function is registered without the recursion safe flag.
* The ftrace infrastructure should provide the recursion
* protection. If not, this will crash the kernel!
*/
trace_selftest_recursion_cnt++;
DYN_FTRACE_TEST_NAME();
}
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{
/*
* We said we would provide our own recursion protection. By calling
* this function again, we should recurse back into this function
* and count again. But this only happens if the arch supports
* all of ftrace features and nothing else is using the function
* tracing utility.
*/
if (trace_selftest_recursion_cnt++)
return;
DYN_FTRACE_TEST_NAME();
}
static struct ftrace_ops test_rec_probe = {
.func = trace_selftest_test_recursion_func,
};
static struct ftrace_ops test_recsafe_probe = {
.func = trace_selftest_test_recursion_safe_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
static int
trace_selftest_function_recursion(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
int cnt;
/* The previous test PASSED */
pr_cont("PASSED\n");
pr_info("Testing ftrace recursion: ");
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
len = strlen(func_name);
ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
if (ret) {
pr_cont("*Could not set filter* ");
goto out;
}
ret = register_ftrace_function(&test_rec_probe);
if (ret) {
pr_cont("*could not register callback* ");
goto out;
}
DYN_FTRACE_TEST_NAME();
unregister_ftrace_function(&test_rec_probe);
ret = -1;
if (trace_selftest_recursion_cnt != 1) {
pr_cont("*callback not called once (%d)* ",
trace_selftest_recursion_cnt);
goto out;
}
trace_selftest_recursion_cnt = 1;
pr_cont("PASSED\n");
pr_info("Testing ftrace recursion safe: ");
ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
if (ret) {
pr_cont("*Could not set filter* ");
goto out;
}
ret = register_ftrace_function(&test_recsafe_probe);
if (ret) {
pr_cont("*could not register callback* ");
goto out;
}
DYN_FTRACE_TEST_NAME();
unregister_ftrace_function(&test_recsafe_probe);
/*
* If arch supports all ftrace features, and no other task
* was on the list, we should be fine.
*/
if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
cnt = 2; /* Should have recursed */
else
cnt = 1;
ret = -1;
if (trace_selftest_recursion_cnt != cnt) {
pr_cont("*callback not called expected %d times (%d)* ",
cnt, trace_selftest_recursion_cnt);
goto out;
}
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
#else #else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
static enum {
TRACE_SELFTEST_REGS_START,
TRACE_SELFTEST_REGS_FOUND,
TRACE_SELFTEST_REGS_NOT_FOUND,
} trace_selftest_regs_stat;
static void trace_selftest_test_regs_func(unsigned long ip,
unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{
if (pt_regs)
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_FOUND;
else
trace_selftest_regs_stat = TRACE_SELFTEST_REGS_NOT_FOUND;
}
static struct ftrace_ops test_regs_probe = {
.func = trace_selftest_test_regs_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
static int
trace_selftest_function_regs(void)
{
int save_ftrace_enabled = ftrace_enabled;
int save_tracer_enabled = tracer_enabled;
char *func_name;
int len;
int ret;
int supported = 0;
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
supported = 1;
#endif
/* The previous test PASSED */
pr_cont("PASSED\n");
pr_info("Testing ftrace regs%s: ",
!supported ? "(no arch support)" : "");
/* enable tracing, and record the filter function */
ftrace_enabled = 1;
tracer_enabled = 1;
/* Handle PPC64 '.' name */
func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
len = strlen(func_name);
ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
/*
* If DYNAMIC_FTRACE is not set, then we just trace all functions.
* This test really doesn't care.
*/
if (ret && ret != -ENODEV) {
pr_cont("*Could not set filter* ");
goto out;
}
ret = register_ftrace_function(&test_regs_probe);
/*
* Now if the arch does not support passing regs, then this should
* have failed.
*/
if (!supported) {
if (!ret) {
pr_cont("*registered save-regs without arch support* ");
goto out;
}
test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
ret = register_ftrace_function(&test_regs_probe);
}
if (ret) {
pr_cont("*could not register callback* ");
goto out;
}
DYN_FTRACE_TEST_NAME();
unregister_ftrace_function(&test_regs_probe);
ret = -1;
switch (trace_selftest_regs_stat) {
case TRACE_SELFTEST_REGS_START:
pr_cont("*callback never called* ");
goto out;
case TRACE_SELFTEST_REGS_FOUND:
if (supported)
break;
pr_cont("*callback received regs without arch support* ");
goto out;
case TRACE_SELFTEST_REGS_NOT_FOUND:
if (!supported)
break;
pr_cont("*callback received NULL regs* ");
goto out;
}
ret = 0;
out:
ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled;
return ret;
}
/* /*
* Simple verification test of ftrace function tracer. * Simple verification test of ftrace function tracer.
* Enable ftrace, sleep 1/10 second, and then read the trace * Enable ftrace, sleep 1/10 second, and then read the trace
...@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) ...@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_startup_dynamic_tracing(trace, tr, ret = trace_selftest_startup_dynamic_tracing(trace, tr,
DYN_FTRACE_TEST_NAME); DYN_FTRACE_TEST_NAME);
if (ret)
goto out;
ret = trace_selftest_function_recursion();
if (ret)
goto out;
ret = trace_selftest_function_regs();
out: out:
ftrace_enabled = save_ftrace_enabled; ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled; tracer_enabled = save_tracer_enabled;
......
...@@ -111,7 +111,8 @@ static inline void check_stack(void) ...@@ -111,7 +111,8 @@ static inline void check_stack(void)
} }
static void static void
stack_trace_call(unsigned long ip, unsigned long parent_ip) stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
int cpu; int cpu;
...@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = stack_trace_call, .func = stack_trace_call,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static ssize_t static ssize_t
......