Commit 26198c21 authored by Ingo Molnar's avatar Ingo Molnar

Merge branch 'tip/perf/core' of...

Merge branch 'tip/perf/core' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace into perf/core

Pull ftrace updates from Steve Rostedt:

" This patch series extends ftrace function tracing utility to be
  more dynamic for its users. It allows for data passing to the callback
  functions, as well as reading regs as if a breakpoint were to trigger
  at function entry.

  The main goal of this patch series was to allow kprobes to use ftrace
  as an optimized probe point when a probe is placed on an ftrace nop.
  With lots of help from Masami Hiramatsu, and going through lots of
  iterations, we finally came up with a good solution. "
Signed-off-by: default avatarIngo Molnar <mingo@kernel.org>
parents 194f8dcb e5253896
...@@ -3,27 +3,33 @@ ...@@ -3,27 +3,33 @@
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
.macro MCOUNT_SAVE_FRAME /* skip is set if the stack was already partially adjusted */
/* taken from glibc */ .macro MCOUNT_SAVE_FRAME skip=0
subq $0x38, %rsp /*
movq %rax, (%rsp) * We add enough stack to save all regs.
movq %rcx, 8(%rsp) */
movq %rdx, 16(%rsp) subq $(SS+8-\skip), %rsp
movq %rsi, 24(%rsp) movq %rax, RAX(%rsp)
movq %rdi, 32(%rsp) movq %rcx, RCX(%rsp)
movq %r8, 40(%rsp) movq %rdx, RDX(%rsp)
movq %r9, 48(%rsp) movq %rsi, RSI(%rsp)
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
/* Move RIP to its proper location */
movq SS+8(%rsp), %rdx
movq %rdx, RIP(%rsp)
.endm .endm
.macro MCOUNT_RESTORE_FRAME .macro MCOUNT_RESTORE_FRAME skip=0
movq 48(%rsp), %r9 movq R9(%rsp), %r9
movq 40(%rsp), %r8 movq R8(%rsp), %r8
movq 32(%rsp), %rdi movq RDI(%rsp), %rdi
movq 24(%rsp), %rsi movq RSI(%rsp), %rsi
movq 16(%rsp), %rdx movq RDX(%rsp), %rdx
movq 8(%rsp), %rcx movq RCX(%rsp), %rcx
movq (%rsp), %rax movq RAX(%rsp), %rax
addq $0x38, %rsp addq $(SS+8-\skip), %rsp
.endm .endm
#endif #endif
...@@ -32,6 +38,11 @@ ...@@ -32,6 +38,11 @@
#define MCOUNT_ADDR ((long)(mcount)) #define MCOUNT_ADDR ((long)(mcount))
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */ #define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#define ARCH_SUPPORTS_FTRACE_SAVE_REGS
#endif
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern void mcount(void); extern void mcount(void);
extern atomic_t modifying_ftrace_code; extern atomic_t modifying_ftrace_code;
......
...@@ -27,6 +27,7 @@ ...@@ -27,6 +27,7 @@
#include <asm/insn.h> #include <asm/insn.h>
#define __ARCH_WANT_KPROBES_INSN_SLOT #define __ARCH_WANT_KPROBES_INSN_SLOT
#define ARCH_SUPPORTS_KPROBES_ON_FTRACE
struct pt_regs; struct pt_regs;
struct kprobe; struct kprobe;
......
...@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller) ...@@ -1109,17 +1109,21 @@ ENTRY(ftrace_caller)
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
movl 0xc(%esp), %eax pushl $0 /* Pass NULL as regs pointer */
movl 4*4(%esp), %eax
movl 0x4(%ebp), %edx movl 0x4(%ebp), %edx
leal function_trace_op, %ecx
subl $MCOUNT_INSN_SIZE, %eax subl $MCOUNT_INSN_SIZE, %eax
.globl ftrace_call .globl ftrace_call
ftrace_call: ftrace_call:
call ftrace_stub call ftrace_stub
addl $4,%esp /* skip NULL pointer */
popl %edx popl %edx
popl %ecx popl %ecx
popl %eax popl %eax
ftrace_ret:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call .globl ftrace_graph_call
ftrace_graph_call: ftrace_graph_call:
...@@ -1131,6 +1135,72 @@ ftrace_stub: ...@@ -1131,6 +1135,72 @@ ftrace_stub:
ret ret
END(ftrace_caller) END(ftrace_caller)
ENTRY(ftrace_regs_caller)
pushf /* push flags before compare (in cs location) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/*
* i386 does not save SS and ESP when coming from kernel.
* Instead, to get sp, &regs->sp is used (see ptrace.h).
* Unfortunately, that means eflags must be at the same location
* as the current return ip is. We move the return ip into the
* ip location, and move flags into the return ip location.
*/
pushl 4(%esp) /* save return ip into ip slot */
subl $MCOUNT_INSN_SIZE, (%esp) /* Adjust ip */
pushl $0 /* Load 0 into orig_ax */
pushl %gs
pushl %fs
pushl %es
pushl %ds
pushl %eax
pushl %ebp
pushl %edi
pushl %esi
pushl %edx
pushl %ecx
pushl %ebx
movl 13*4(%esp), %eax /* Get the saved flags */
movl %eax, 14*4(%esp) /* Move saved flags into regs->flags location */
/* clobbering return ip */
movl $__KERNEL_CS,13*4(%esp)
movl 12*4(%esp), %eax /* Load ip (1st parameter) */
movl 0x4(%ebp), %edx /* Load parent ip (2nd parameter) */
leal function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
pushl %esp /* Save pt_regs as 4th parameter */
GLOBAL(ftrace_regs_call)
call ftrace_stub
addl $4, %esp /* Skip pt_regs */
movl 14*4(%esp), %eax /* Move flags back into cs */
movl %eax, 13*4(%esp) /* Needed to keep addl from modifying flags */
movl 12*4(%esp), %eax /* Get return ip from regs->ip */
addl $MCOUNT_INSN_SIZE, %eax
movl %eax, 14*4(%esp) /* Put return ip back for ret */
popl %ebx
popl %ecx
popl %edx
popl %esi
popl %edi
popl %ebp
popl %eax
popl %ds
popl %es
popl %fs
popl %gs
addl $8, %esp /* Skip orig_ax and ip */
popf /* Pop flags at end (no addl to corrupt flags) */
jmp ftrace_ret
ftrace_restore_flags:
popf
jmp ftrace_stub
#else /* ! CONFIG_DYNAMIC_FTRACE */ #else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount) ENTRY(mcount)
...@@ -1171,9 +1241,6 @@ END(mcount) ...@@ -1171,9 +1241,6 @@ END(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
cmpl $0, function_trace_stop
jne ftrace_stub
pushl %eax pushl %eax
pushl %ecx pushl %ecx
pushl %edx pushl %edx
......
...@@ -73,20 +73,34 @@ ENTRY(mcount) ...@@ -73,20 +73,34 @@ ENTRY(mcount)
retq retq
END(mcount) END(mcount)
/* skip is set if stack has been adjusted */
.macro ftrace_caller_setup skip=0
MCOUNT_SAVE_FRAME \skip
/* Load the ftrace_ops into the 3rd parameter */
leaq function_trace_op, %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
movq 8(%rbp), %rsi
.endm
ENTRY(ftrace_caller) ENTRY(ftrace_caller)
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop cmpl $0, function_trace_stop
jne ftrace_stub jne ftrace_stub
MCOUNT_SAVE_FRAME ftrace_caller_setup
/* regs go into 4th parameter (but make it NULL) */
movq 0x38(%rsp), %rdi movq $0, %rcx
movq 8(%rbp), %rsi
subq $MCOUNT_INSN_SIZE, %rdi
GLOBAL(ftrace_call) GLOBAL(ftrace_call)
call ftrace_stub call ftrace_stub
MCOUNT_RESTORE_FRAME MCOUNT_RESTORE_FRAME
ftrace_return:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call) GLOBAL(ftrace_graph_call)
...@@ -97,6 +111,71 @@ GLOBAL(ftrace_stub) ...@@ -97,6 +111,71 @@ GLOBAL(ftrace_stub)
retq retq
END(ftrace_caller) END(ftrace_caller)
ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/
pushfq
/* Check if tracing was disabled (quick check) */
cmpl $0, function_trace_stop
jne ftrace_restore_flags
/* skip=8 to skip flags saved in SS */
ftrace_caller_setup 8
/* Save the rest of pt_regs */
movq %r15, R15(%rsp)
movq %r14, R14(%rsp)
movq %r13, R13(%rsp)
movq %r12, R12(%rsp)
movq %r11, R11(%rsp)
movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp)
/* Copy saved flags */
movq SS(%rsp), %rcx
movq %rcx, EFLAGS(%rsp)
/* Kernel segments */
movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp)
/* Stack - skipping return address */
leaq SS+16(%rsp), %rcx
movq %rcx, RSP(%rsp)
/* regs go into 4th parameter */
leaq (%rsp), %rcx
GLOBAL(ftrace_regs_call)
call ftrace_stub
/* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp)
/* restore the rest of pt_regs */
movq R15(%rsp), %r15
movq R14(%rsp), %r14
movq R13(%rsp), %r13
movq R12(%rsp), %r12
movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx
/* skip=8 to skip flags saved in SS */
MCOUNT_RESTORE_FRAME 8
/* Restore flags */
popfq
jmp ftrace_return
ftrace_restore_flags:
popfq
jmp ftrace_stub
END(ftrace_regs_caller)
#else /* ! CONFIG_DYNAMIC_FTRACE */ #else /* ! CONFIG_DYNAMIC_FTRACE */
ENTRY(mcount) ENTRY(mcount)
cmpl $0, function_trace_stop cmpl $0, function_trace_stop
...@@ -119,7 +198,7 @@ GLOBAL(ftrace_stub) ...@@ -119,7 +198,7 @@ GLOBAL(ftrace_stub)
trace: trace:
MCOUNT_SAVE_FRAME MCOUNT_SAVE_FRAME
movq 0x38(%rsp), %rdi movq RIP(%rsp), %rdi
movq 8(%rbp), %rsi movq 8(%rbp), %rsi
subq $MCOUNT_INSN_SIZE, %rdi subq $MCOUNT_INSN_SIZE, %rdi
...@@ -134,13 +213,10 @@ END(mcount) ...@@ -134,13 +213,10 @@ END(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
cmpl $0, function_trace_stop
jne ftrace_stub
MCOUNT_SAVE_FRAME MCOUNT_SAVE_FRAME
leaq 8(%rbp), %rdi leaq 8(%rbp), %rdi
movq 0x38(%rsp), %rsi movq RIP(%rsp), %rsi
movq (%rbp), %rdx movq (%rbp), %rdx
subq $MCOUNT_INSN_SIZE, %rsi subq $MCOUNT_INSN_SIZE, %rsi
......
...@@ -206,6 +206,21 @@ static int ...@@ -206,6 +206,21 @@ static int
ftrace_modify_code(unsigned long ip, unsigned const char *old_code, ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
unsigned const char *new_code); unsigned const char *new_code);
/*
* Should never be called:
* As it is only called by __ftrace_replace_code() which is called by
* ftrace_replace_code() that x86 overrides, and by ftrace_update_code()
* which is called to turn mcount into nops or nops into function calls
* but not to convert a function from not using regs to one that uses
* regs, which ftrace_modify_call() is for.
*/
int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
WARN_ON(1);
return -EINVAL;
}
int ftrace_update_ftrace_func(ftrace_func_t func) int ftrace_update_ftrace_func(ftrace_func_t func)
{ {
unsigned long ip = (unsigned long)(&ftrace_call); unsigned long ip = (unsigned long)(&ftrace_call);
...@@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func) ...@@ -220,6 +235,14 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
ret = ftrace_modify_code(ip, old, new); ret = ftrace_modify_code(ip, old, new);
/* Also update the regs callback function */
if (!ret) {
ip = (unsigned long)(&ftrace_regs_call);
memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
new = ftrace_call_replace(ip, (unsigned long)func);
ret = ftrace_modify_code(ip, old, new);
}
atomic_dec(&modifying_ftrace_code); atomic_dec(&modifying_ftrace_code);
return ret; return ret;
...@@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec) ...@@ -299,6 +322,32 @@ static int add_brk_on_nop(struct dyn_ftrace *rec)
return add_break(rec->ip, old); return add_break(rec->ip, old);
} }
/*
* If the record has the FTRACE_FL_REGS set, that means that it
* wants to convert to a callback that saves all regs. If FTRACE_FL_REGS
* is not not set, then it wants to convert to the normal callback.
*/
static unsigned long get_ftrace_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
/*
* The FTRACE_FL_REGS_EN is set when the record already points to
* a function that saves all the regs. Basically the '_EN' version
* represents the current state of the function.
*/
static unsigned long get_ftrace_old_addr(struct dyn_ftrace *rec)
{
if (rec->flags & FTRACE_FL_REGS_EN)
return (unsigned long)FTRACE_REGS_ADDR;
else
return (unsigned long)FTRACE_ADDR;
}
static int add_breakpoints(struct dyn_ftrace *rec, int enable) static int add_breakpoints(struct dyn_ftrace *rec, int enable)
{ {
unsigned long ftrace_addr; unsigned long ftrace_addr;
...@@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable) ...@@ -306,7 +355,7 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
ret = ftrace_test_record(rec, enable); ret = ftrace_test_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR; ftrace_addr = get_ftrace_addr(rec);
switch (ret) { switch (ret) {
case FTRACE_UPDATE_IGNORE: case FTRACE_UPDATE_IGNORE:
...@@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable) ...@@ -316,6 +365,10 @@ static int add_breakpoints(struct dyn_ftrace *rec, int enable)
/* converting nop to call */ /* converting nop to call */
return add_brk_on_nop(rec); return add_brk_on_nop(rec);
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
ftrace_addr = get_ftrace_old_addr(rec);
/* fall through */
case FTRACE_UPDATE_MAKE_NOP: case FTRACE_UPDATE_MAKE_NOP:
/* converting a call to a nop */ /* converting a call to a nop */
return add_brk_on_call(rec, ftrace_addr); return add_brk_on_call(rec, ftrace_addr);
...@@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec) ...@@ -360,13 +413,21 @@ static int remove_breakpoint(struct dyn_ftrace *rec)
* If not, don't touch the breakpoint, we make just create * If not, don't touch the breakpoint, we make just create
* a disaster. * a disaster.
*/ */
ftrace_addr = (unsigned long)FTRACE_ADDR; ftrace_addr = get_ftrace_addr(rec);
nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) == 0)
goto update;
/* Check both ftrace_addr and ftrace_old_addr */
ftrace_addr = get_ftrace_old_addr(rec);
nop = ftrace_call_replace(ip, ftrace_addr); nop = ftrace_call_replace(ip, ftrace_addr);
if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0) if (memcmp(&ins[1], &nop[1], MCOUNT_INSN_SIZE - 1) != 0)
return -EINVAL; return -EINVAL;
} }
update:
return probe_kernel_write((void *)ip, &nop[0], 1); return probe_kernel_write((void *)ip, &nop[0], 1);
} }
...@@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable) ...@@ -405,12 +466,14 @@ static int add_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_test_record(rec, enable); ret = ftrace_test_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR; ftrace_addr = get_ftrace_addr(rec);
switch (ret) { switch (ret) {
case FTRACE_UPDATE_IGNORE: case FTRACE_UPDATE_IGNORE:
return 0; return 0;
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL: case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */ /* converting nop to call */
return add_update_call(rec, ftrace_addr); return add_update_call(rec, ftrace_addr);
...@@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable) ...@@ -455,12 +518,14 @@ static int finish_update(struct dyn_ftrace *rec, int enable)
ret = ftrace_update_record(rec, enable); ret = ftrace_update_record(rec, enable);
ftrace_addr = (unsigned long)FTRACE_ADDR; ftrace_addr = get_ftrace_addr(rec);
switch (ret) { switch (ret) {
case FTRACE_UPDATE_IGNORE: case FTRACE_UPDATE_IGNORE:
return 0; return 0;
case FTRACE_UPDATE_MODIFY_CALL_REGS:
case FTRACE_UPDATE_MODIFY_CALL:
case FTRACE_UPDATE_MAKE_CALL: case FTRACE_UPDATE_MAKE_CALL:
/* converting nop to call */ /* converting nop to call */
return finish_update_call(rec, ftrace_addr); return finish_update_call(rec, ftrace_addr);
......
...@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) ...@@ -1052,6 +1052,54 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
return 0; return 0;
} }
#ifdef KPROBES_CAN_USE_FTRACE
/* Ftrace callback handler for kprobes */
void __kprobes kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *regs)
{
struct kprobe *p;
struct kprobe_ctlblk *kcb;
unsigned long flags;
/* Disable irq for emulating a breakpoint and avoiding preempt */
local_irq_save(flags);
p = get_kprobe((kprobe_opcode_t *)ip);
if (unlikely(!p) || kprobe_disabled(p))
goto end;
kcb = get_kprobe_ctlblk();
if (kprobe_running()) {
kprobes_inc_nmissed_count(p);
} else {
regs->ip += sizeof(kprobe_opcode_t);
__this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (p->pre_handler)
p->pre_handler(p, regs);
if (unlikely(p->post_handler)) {
/* Emulate singlestep as if there is a 5byte nop */
regs->ip = ip + MCOUNT_INSN_SIZE;
kcb->kprobe_status = KPROBE_HIT_SSDONE;
p->post_handler(p, regs, 0);
}
__this_cpu_write(current_kprobe, NULL);
regs->ip = ip; /* Recover for next callback */
}
end:
local_irq_restore(flags);
}
int __kprobes arch_prepare_kprobe_ftrace(struct kprobe *p)
{
p->ainsn.insn = NULL;
p->ainsn.boostable = -1;
return 0;
}
#endif
int __init arch_init_kprobes(void) int __init arch_init_kprobes(void)
{ {
return arch_init_optprobes(); return arch_init_optprobes();
......
...@@ -10,6 +10,7 @@ ...@@ -10,6 +10,7 @@
#include <linux/kallsyms.h> #include <linux/kallsyms.h>
#include <linux/linkage.h> #include <linux/linkage.h>
#include <linux/bitops.h> #include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h> #include <linux/ktime.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -18,6 +19,28 @@ ...@@ -18,6 +19,28 @@
#include <asm/ftrace.h> #include <asm/ftrace.h>
/*
* If the arch supports passing the variable contents of
* function_trace_op as the third parameter back from the
* mcount call, then the arch should define this as 1.
*/
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif
/*
* If the arch's mcount caller does not support all of ftrace's
* features, then it must call an indirect function that
* does. Or at least does enough to prevent any unwelcomed side effects.
*/
#if !defined(CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST) || \
!ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
struct module; struct module;
struct ftrace_hash; struct ftrace_hash;
...@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ...@@ -29,7 +52,10 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, void __user *buffer, size_t *lenp,
loff_t *ppos); loff_t *ppos);
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); struct ftrace_ops;
typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *regs);
/* /*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
...@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip); ...@@ -45,12 +71,33 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip);
* could be controled by following calls: * could be controled by following calls:
* ftrace_function_local_enable * ftrace_function_local_enable
* ftrace_function_local_disable * ftrace_function_local_disable
* SAVE_REGS - The ftrace_ops wants regs saved at each function called
* and passed to the callback. If this flag is set, but the
* architecture does not support passing regs
* (ARCH_SUPPORTS_FTRACE_SAVE_REGS is not defined), then the
* ftrace_ops will fail to register, unless the next flag
* is set.
* SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
* handler can handle an arch that does not save regs
* (the handler tests if regs == NULL), then it can set
* this flag instead. It will not fail registering the ftrace_ops
* but, the regs field will be NULL if the arch does not support
* passing regs to the handler.
* Note, if this flag is set, the SAVE_REGS flag will automatically
* get set upon registering the ftrace_ops, if the arch supports it.
* RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
* that the call back has its own recursion protection. If it does
* not set this, then the ftrace infrastructure will add recursion
* protection for the caller.
*/ */
enum { enum {
FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_ENABLED = 1 << 0,
FTRACE_OPS_FL_GLOBAL = 1 << 1, FTRACE_OPS_FL_GLOBAL = 1 << 1,
FTRACE_OPS_FL_DYNAMIC = 1 << 2, FTRACE_OPS_FL_DYNAMIC = 1 << 2,
FTRACE_OPS_FL_CONTROL = 1 << 3, FTRACE_OPS_FL_CONTROL = 1 << 3,
FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
}; };
struct ftrace_ops { struct ftrace_ops {
...@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops) ...@@ -163,7 +210,8 @@ static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
return *this_cpu_ptr(ops->disabled); return *this_cpu_ptr(ops->disabled);
} }
extern void ftrace_stub(unsigned long a0, unsigned long a1); extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs);
#else /* !CONFIG_FUNCTION_TRACER */ #else /* !CONFIG_FUNCTION_TRACER */
/* /*
...@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1); ...@@ -172,6 +220,10 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1);
*/ */
#define register_ftrace_function(ops) ({ 0; }) #define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; }) #define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
return 0;
}
static inline void clear_ftrace_function(void) { } static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { } static inline void ftrace_kill(void) { }
static inline void ftrace_stop(void) { } static inline void ftrace_stop(void) { }
...@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob); ...@@ -227,12 +279,33 @@ extern void unregister_ftrace_function_probe_all(char *glob);
extern int ftrace_text_reserved(void *start, void *end); extern int ftrace_text_reserved(void *start, void *end);
extern int ftrace_nr_registered_ops(void);
/*
* The dyn_ftrace record's flags field is split into two parts.
* the first part which is '0-FTRACE_REF_MAX' is a counter of
* the number of callbacks that have registered the function that
* the dyn_ftrace descriptor represents.
*
* The second part is a mask:
* ENABLED - the function is being traced
* REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs.
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
* set up to save regs, the REG_EN flag is set. Once a function
* starts saving regs it will do so until all ftrace_ops are removed
* from tracing that function.
*/
enum { enum {
FTRACE_FL_ENABLED = (1 << 30), FTRACE_FL_ENABLED = (1UL << 29),
FTRACE_FL_REGS = (1UL << 30),
FTRACE_FL_REGS_EN = (1UL << 31)
}; };
#define FTRACE_FL_MASK (0x3UL << 30) #define FTRACE_FL_MASK (0x7UL << 29)
#define FTRACE_REF_MAX ((1 << 30) - 1) #define FTRACE_REF_MAX ((1UL << 29) - 1)
struct dyn_ftrace { struct dyn_ftrace {
union { union {
...@@ -244,6 +317,8 @@ struct dyn_ftrace { ...@@ -244,6 +317,8 @@ struct dyn_ftrace {
}; };
int ftrace_force_update(void); int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf, int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
int len, int reset); int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf, int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
...@@ -263,9 +338,23 @@ enum { ...@@ -263,9 +338,23 @@ enum {
FTRACE_STOP_FUNC_RET = (1 << 4), FTRACE_STOP_FUNC_RET = (1 << 4),
}; };
/*
* The FTRACE_UPDATE_* enum is used to pass information back
* from the ftrace_update_record() and ftrace_test_record()
* functions. These are called by the code update routines
* to find out what is to be done for a given function.
*
* IGNORE - The function is already what we want it to be
* MAKE_CALL - Start tracing the function
* MODIFY_CALL - Stop saving regs for the function
* MODIFY_CALL_REGS - Start saving regs for the function
* MAKE_NOP - Stop tracing the function
*/
enum { enum {
FTRACE_UPDATE_IGNORE, FTRACE_UPDATE_IGNORE,
FTRACE_UPDATE_MAKE_CALL, FTRACE_UPDATE_MAKE_CALL,
FTRACE_UPDATE_MODIFY_CALL,
FTRACE_UPDATE_MODIFY_CALL_REGS,
FTRACE_UPDATE_MAKE_NOP, FTRACE_UPDATE_MAKE_NOP,
}; };
...@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data); ...@@ -317,7 +406,9 @@ extern int ftrace_dyn_arch_init(void *data);
extern void ftrace_replace_code(int enable); extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func); extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void); extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void); extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void); extern void mcount_call(void);
void ftrace_modify_all_code(int command); void ftrace_modify_all_code(int command);
...@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command); ...@@ -325,6 +416,15 @@ void ftrace_modify_all_code(int command);
#ifndef FTRACE_ADDR #ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller) #define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif #endif
#ifndef FTRACE_REGS_ADDR
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void); extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void); extern int ftrace_enable_ftrace_graph_caller(void);
...@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod, ...@@ -380,6 +480,39 @@ extern int ftrace_make_nop(struct module *mod,
*/ */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr); extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
/**
* ftrace_modify_call - convert from one addr to another (no nop)
* @rec: the mcount call site record
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
* This is a very sensitive operation and great care needs
* to be taken by the arch. The operation should carefully
* read the location, check to see if what is read is indeed
* what we expect it to be, and then on success of the compare,
* it should write to the location.
*
* The code segment at @rec->ip should be a caller to @old_addr
*
* Return must be:
* 0 on success
* -EFAULT on error reading the location
* -EINVAL on a failed compare of the contents
* -EPERM on error writing to the location
* Any other value will be considered a failure.
*/
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
unsigned long addr)
{
return -EINVAL;
}
#endif
/* May be defined in arch */ /* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size); extern int ftrace_arch_read_dyn_info(char *buf, int size);
...@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip); ...@@ -387,7 +520,7 @@ extern int skip_trace(unsigned long ip);
extern void ftrace_disable_daemon(void); extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void); extern void ftrace_enable_daemon(void);
#else #else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; } static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; } static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { } static inline void ftrace_disable_daemon(void) { }
...@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end) ...@@ -405,6 +538,10 @@ static inline int ftrace_text_reserved(void *start, void *end)
{ {
return 0; return 0;
} }
static inline unsigned long ftrace_location(unsigned long ip)
{
return 0;
}
/* /*
* Again users of functions that have ftrace_ops may not * Again users of functions that have ftrace_ops may not
...@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end) ...@@ -413,6 +550,7 @@ static inline int ftrace_text_reserved(void *start, void *end)
*/ */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; }) #define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0) #define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; }) #define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0) #define ftrace_free_filter(ops) do { } while (0)
......
...@@ -38,6 +38,7 @@ ...@@ -38,6 +38,7 @@
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/ftrace.h>
#ifdef CONFIG_KPROBES #ifdef CONFIG_KPROBES
#include <asm/kprobes.h> #include <asm/kprobes.h>
...@@ -48,14 +49,26 @@ ...@@ -48,14 +49,26 @@
#define KPROBE_REENTER 0x00000004 #define KPROBE_REENTER 0x00000004
#define KPROBE_HIT_SSDONE 0x00000008 #define KPROBE_HIT_SSDONE 0x00000008
/*
* If function tracer is enabled and the arch supports full
* passing of pt_regs to function tracing, then kprobes can
* optimize on top of function tracing.
*/
#if defined(CONFIG_FUNCTION_TRACER) && defined(ARCH_SUPPORTS_FTRACE_SAVE_REGS) \
&& defined(ARCH_SUPPORTS_KPROBES_ON_FTRACE)
# define KPROBES_CAN_USE_FTRACE
#endif
/* Attach to insert probes on any functions which should be ignored*/ /* Attach to insert probes on any functions which should be ignored*/
#define __kprobes __attribute__((__section__(".kprobes.text"))) #define __kprobes __attribute__((__section__(".kprobes.text")))
#else /* CONFIG_KPROBES */ #else /* CONFIG_KPROBES */
typedef int kprobe_opcode_t; typedef int kprobe_opcode_t;
struct arch_specific_insn { struct arch_specific_insn {
int dummy; int dummy;
}; };
#define __kprobes #define __kprobes
#endif /* CONFIG_KPROBES */ #endif /* CONFIG_KPROBES */
struct kprobe; struct kprobe;
...@@ -128,6 +141,7 @@ struct kprobe { ...@@ -128,6 +141,7 @@ struct kprobe {
* NOTE: * NOTE:
* this flag is only for optimized_kprobe. * this flag is only for optimized_kprobe.
*/ */
#define KPROBE_FLAG_FTRACE 8 /* probe is using ftrace */
/* Has this kprobe gone ? */ /* Has this kprobe gone ? */
static inline int kprobe_gone(struct kprobe *p) static inline int kprobe_gone(struct kprobe *p)
...@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p) ...@@ -146,6 +160,13 @@ static inline int kprobe_optimized(struct kprobe *p)
{ {
return p->flags & KPROBE_FLAG_OPTIMIZED; return p->flags & KPROBE_FLAG_OPTIMIZED;
} }
/* Is this kprobe uses ftrace ? */
static inline int kprobe_ftrace(struct kprobe *p)
{
return p->flags & KPROBE_FLAG_FTRACE;
}
/* /*
* Special probe type that uses setjmp-longjmp type tricks to resume * Special probe type that uses setjmp-longjmp type tricks to resume
* execution at a specified entry with a matching prototype corresponding * execution at a specified entry with a matching prototype corresponding
...@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table, ...@@ -295,6 +316,12 @@ extern int proc_kprobes_optimization_handler(struct ctl_table *table,
#endif #endif
#endif /* CONFIG_OPTPROBES */ #endif /* CONFIG_OPTPROBES */
#ifdef KPROBES_CAN_USE_FTRACE
/*
 * Ftrace-based kprobe support: the callback invoked from the ftrace
 * trampoline in place of a breakpoint, and the arch hook that prepares
 * a kprobe to be driven by ftrace.
 */
extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *ops, struct pt_regs *regs);
extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
#endif
/* Get the kprobe at this addr (if any) - called with preemption disabled */ /* Get the kprobe at this addr (if any) - called with preemption disabled */
struct kprobe *get_kprobe(void *addr); struct kprobe *get_kprobe(void *addr);
......
This diff is collapsed.
...@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER ...@@ -5,10 +5,12 @@ ifdef CONFIG_FUNCTION_TRACER
ORIG_CFLAGS := $(KBUILD_CFLAGS) ORIG_CFLAGS := $(KBUILD_CFLAGS)
KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS)) KBUILD_CFLAGS = $(subst -pg,,$(ORIG_CFLAGS))
ifdef CONFIG_FTRACE_SELFTEST
# selftest needs instrumentation # selftest needs instrumentation
CFLAGS_trace_selftest_dynamic.o = -pg CFLAGS_trace_selftest_dynamic.o = -pg
obj-y += trace_selftest_dynamic.o obj-y += trace_selftest_dynamic.o
endif endif
endif
# If unlikely tracing is enabled, do not trace these files # If unlikely tracing is enabled, do not trace these files
ifdef CONFIG_TRACING_BRANCHES ifdef CONFIG_TRACING_BRANCHES
......
This diff is collapsed.
...@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]); ...@@ -472,11 +472,11 @@ extern void trace_find_cmdline(int pid, char comm[]);
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_update_tot_cnt; extern unsigned long ftrace_update_tot_cnt;
#endif
#define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func #define DYN_FTRACE_TEST_NAME trace_selftest_dynamic_test_func
extern int DYN_FTRACE_TEST_NAME(void); extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void); extern int DYN_FTRACE_TEST_NAME2(void);
#endif
extern int ring_buffer_expanded; extern int ring_buffer_expanded;
extern bool tracing_selftest_disabled; extern bool tracing_selftest_disabled;
......
...@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare); ...@@ -258,7 +258,8 @@ EXPORT_SYMBOL_GPL(perf_trace_buf_prepare);
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
static void static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip) perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *ops, struct pt_regs *pt_regs)
{ {
struct ftrace_entry *entry; struct ftrace_entry *entry;
struct hlist_head *head; struct hlist_head *head;
......
...@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void) ...@@ -1681,7 +1681,8 @@ static __init void event_trace_self_tests(void)
static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable); static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
static void static void
function_test_events_call(unsigned long ip, unsigned long parent_ip) function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer; struct ring_buffer *buffer;
...@@ -1720,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip) ...@@ -1720,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __initdata = static struct ftrace_ops trace_ops __initdata =
{ {
.func = function_test_events_call, .func = function_test_events_call,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static __init void event_trace_self_test_with_function(void) static __init void event_trace_self_test_with_function(void)
......
...@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr) ...@@ -48,7 +48,8 @@ static void function_trace_start(struct trace_array *tr)
} }
static void static void
function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip) ...@@ -75,7 +76,9 @@ function_trace_call_preempt_only(unsigned long ip, unsigned long parent_ip)
} }
static void static void
function_trace_call(unsigned long ip, unsigned long parent_ip) function_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -106,7 +109,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
} }
static void static void
function_stack_trace_call(unsigned long ip, unsigned long parent_ip) function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = func_trace; struct trace_array *tr = func_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -149,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = function_trace_call, .func = function_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops trace_stack_ops __read_mostly = static struct ftrace_ops trace_stack_ops __read_mostly =
{ {
.func = function_stack_trace_call, .func = function_stack_trace_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
/* Our two options */ /* Our two options */
......
...@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr, ...@@ -136,7 +136,8 @@ static int func_prolog_dec(struct trace_array *tr,
* irqsoff uses its own tracer function to keep the overhead down: * irqsoff uses its own tracer function to keep the overhead down:
*/ */
static void static void
irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = irqsoff_trace; struct trace_array *tr = irqsoff_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip) ...@@ -153,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = irqsoff_tracer_call, .func = irqsoff_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
......
...@@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr, ...@@ -108,7 +108,8 @@ func_prolog_preempt_disable(struct trace_array *tr,
* wakeup uses its own tracer function to keep the overhead down: * wakeup uses its own tracer function to keep the overhead down:
*/ */
static void static void
wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_array *tr = wakeup_trace; struct trace_array *tr = wakeup_trace;
struct trace_array_cpu *data; struct trace_array_cpu *data;
...@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip) ...@@ -129,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = wakeup_tracer_call, .func = wakeup_tracer_call,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
......
...@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret) ...@@ -103,54 +103,67 @@ static inline void warn_failed_init_tracer(struct tracer *trace, int init_ret)
static int trace_selftest_test_probe1_cnt; static int trace_selftest_test_probe1_cnt;
static void trace_selftest_test_probe1_func(unsigned long ip, static void trace_selftest_test_probe1_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe1_cnt++; trace_selftest_test_probe1_cnt++;
} }
static int trace_selftest_test_probe2_cnt; static int trace_selftest_test_probe2_cnt;
static void trace_selftest_test_probe2_func(unsigned long ip, static void trace_selftest_test_probe2_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe2_cnt++; trace_selftest_test_probe2_cnt++;
} }
static int trace_selftest_test_probe3_cnt; static int trace_selftest_test_probe3_cnt;
static void trace_selftest_test_probe3_func(unsigned long ip, static void trace_selftest_test_probe3_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_probe3_cnt++; trace_selftest_test_probe3_cnt++;
} }
static int trace_selftest_test_global_cnt; static int trace_selftest_test_global_cnt;
static void trace_selftest_test_global_func(unsigned long ip, static void trace_selftest_test_global_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_global_cnt++; trace_selftest_test_global_cnt++;
} }
static int trace_selftest_test_dyn_cnt; static int trace_selftest_test_dyn_cnt;
static void trace_selftest_test_dyn_func(unsigned long ip, static void trace_selftest_test_dyn_func(unsigned long ip,
unsigned long pip) unsigned long pip,
struct ftrace_ops *op,
struct pt_regs *pt_regs)
{ {
trace_selftest_test_dyn_cnt++; trace_selftest_test_dyn_cnt++;
} }
static struct ftrace_ops test_probe1 = { static struct ftrace_ops test_probe1 = {
.func = trace_selftest_test_probe1_func, .func = trace_selftest_test_probe1_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_probe2 = { static struct ftrace_ops test_probe2 = {
.func = trace_selftest_test_probe2_func, .func = trace_selftest_test_probe2_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_probe3 = { static struct ftrace_ops test_probe3 = {
.func = trace_selftest_test_probe3_func, .func = trace_selftest_test_probe3_func,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static struct ftrace_ops test_global = { static struct ftrace_ops test_global = {
.func = trace_selftest_test_global_func, .func = trace_selftest_test_global_func,
.flags = FTRACE_OPS_FL_GLOBAL, .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static void print_counts(void) static void print_counts(void)
...@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace, ...@@ -393,10 +406,253 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
return ret; return ret;
} }
/* Shared counter for both recursion selftest callbacks. */
static int trace_selftest_recursion_cnt;

/*
 * Callback registered WITHOUT FTRACE_OPS_FL_RECURSION_SAFE.
 * It recurses unconditionally by calling a traced function.
 */
static void trace_selftest_test_recursion_func(unsigned long ip,
					       unsigned long pip,
					       struct ftrace_ops *op,
					       struct pt_regs *pt_regs)
{
	/*
	 * This function is registered without the recursion safe flag.
	 * The ftrace infrastructure should provide the recursion
	 * protection. If not, this will crash the kernel!
	 */
	trace_selftest_recursion_cnt++;
	DYN_FTRACE_TEST_NAME();	/* traced call: would re-enter this callback */
}
/*
 * Callback registered WITH FTRACE_OPS_FL_RECURSION_SAFE: it implements
 * its own recursion guard (bail out once the counter is non-zero).
 */
static void trace_selftest_test_recursion_safe_func(unsigned long ip,
						    unsigned long pip,
						    struct ftrace_ops *op,
						    struct pt_regs *pt_regs)
{
	/*
	 * We said we would provide our own recursion. By calling
	 * this function again, we should recurse back into this function
	 * and count again. But this only happens if the arch supports
	 * all of ftrace features and nothing else is using the function
	 * tracing utility.
	 */
	if (trace_selftest_recursion_cnt++)
		return;	/* second (recursive) entry: stop here */
	DYN_FTRACE_TEST_NAME();	/* traced call: may re-enter this callback */
}
/* No RECURSION_SAFE flag: the ftrace core must block recursion for us. */
static struct ftrace_ops test_rec_probe = {
	.func = trace_selftest_test_recursion_func,
};

/* Declares it handles recursion itself (see the _safe_func guard). */
static struct ftrace_ops test_recsafe_probe = {
	.func = trace_selftest_test_recursion_safe_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE,
};
/*
 * Verify ftrace's recursion handling.
 *
 * Pass 1: register a callback that is NOT marked RECURSION_SAFE and
 * trigger the traced function.  The ftrace core must suppress the
 * recursive call the callback makes, so it runs exactly once.
 *
 * Pass 2: register a RECURSION_SAFE callback that guards itself.  If
 * the arch supports calling callbacks directly (no list function and
 * no other ops registered), the recursive call reaches the callback a
 * second time; otherwise it runs once.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
trace_selftest_function_recursion(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
	int cnt;

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion: ");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_rec_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_rec_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_rec_probe);

	ret = -1;
	if (trace_selftest_recursion_cnt != 1) {
		pr_cont("*callback not called once (%d)* ",
			trace_selftest_recursion_cnt);
		goto out;
	}

	/*
	 * Reset the counter for the recursion-safe pass.  It must start
	 * at 0: the safe callback does "if (cnt++) return;", so starting
	 * at 1 would make it bail on its very first invocation and the
	 * recursion path would never actually be exercised (and the
	 * non-recursing case below would then see 2 instead of 1).
	 */
	trace_selftest_recursion_cnt = 0;

	pr_cont("PASSED\n");
	pr_info("Testing ftrace recursion safe: ");

	ret = ftrace_set_filter(&test_recsafe_probe, func_name, len, 1);
	if (ret) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_recsafe_probe);
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_recsafe_probe);

	/*
	 * If arch supports all ftrace features, and no other task
	 * was on the list, we should be fine.
	 */
	if (!ftrace_nr_registered_ops() && !FTRACE_FORCE_LIST_FUNC)
		cnt = 2; /* Should have recursed */
	else
		cnt = 1;

	ret = -1;
	if (trace_selftest_recursion_cnt != cnt) {
		pr_cont("*callback not called expected %d times (%d)* ",
			cnt, trace_selftest_recursion_cnt);
		goto out;
	}

	ret = 0;
out:
	/* Restore the tracer state we saved on entry. */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
#else #else
# define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; }) # define trace_selftest_startup_dynamic_tracing(trace, tr, func) ({ 0; })
# define trace_selftest_function_recursion() ({ 0; })
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
/* Outcome of the regs-passing selftest callback. */
static enum {
	TRACE_SELFTEST_REGS_START,	/* callback has not run yet */
	TRACE_SELFTEST_REGS_FOUND,	/* callback ran and received pt_regs */
	TRACE_SELFTEST_REGS_NOT_FOUND,	/* callback ran with NULL pt_regs */
} trace_selftest_regs_stat;
/* Record whether the ftrace core handed this callback a pt_regs pointer. */
static void trace_selftest_test_regs_func(unsigned long ip,
					  unsigned long pip,
					  struct ftrace_ops *op,
					  struct pt_regs *pt_regs)
{
	trace_selftest_regs_stat = pt_regs ? TRACE_SELFTEST_REGS_FOUND
					   : TRACE_SELFTEST_REGS_NOT_FOUND;
}
/* Requests pt_regs at every hit; registration fails if arch lacks support. */
static struct ftrace_ops test_regs_probe = {
	.func = trace_selftest_test_regs_func,
	.flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_SAVE_REGS,
};
/*
 * Verify FTRACE_OPS_FL_SAVE_REGS behavior.
 *
 * On arches with ARCH_SUPPORTS_FTRACE_SAVE_REGS the callback must be
 * handed a non-NULL pt_regs.  On other arches, registering an ops with
 * the mandatory SAVE_REGS flag must FAIL, and only succeed after the
 * _IF_SUPPORTED fallback flag is added — in which case the callback
 * must see NULL regs.
 *
 * Returns 0 on success, -1 on failure.
 */
static int
trace_selftest_function_regs(void)
{
	int save_ftrace_enabled = ftrace_enabled;
	int save_tracer_enabled = tracer_enabled;
	char *func_name;
	int len;
	int ret;
	int supported = 0;

#ifdef ARCH_SUPPORTS_FTRACE_SAVE_REGS
	supported = 1;
#endif

	/* The previous test PASSED */
	pr_cont("PASSED\n");
	pr_info("Testing ftrace regs%s: ",
		!supported ? "(no arch support)" : "");

	/* enable tracing, and record the filter function */
	ftrace_enabled = 1;
	tracer_enabled = 1;

	/* Handle PPC64 '.' name */
	func_name = "*" __stringify(DYN_FTRACE_TEST_NAME);
	len = strlen(func_name);

	ret = ftrace_set_filter(&test_regs_probe, func_name, len, 1);
	/*
	 * If DYNAMIC_FTRACE is not set, then we just trace all functions.
	 * This test really doesn't care.
	 */
	if (ret && ret != -ENODEV) {
		pr_cont("*Could not set filter* ");
		goto out;
	}

	ret = register_ftrace_function(&test_regs_probe);
	/*
	 * Now if the arch does not support passing regs, then this should
	 * have failed.
	 */
	if (!supported) {
		if (!ret) {
			pr_cont("*registered save-regs without arch support* ");
			goto out;
		}
		/* Retry with the "best effort" flag; this must succeed. */
		test_regs_probe.flags |= FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED;
		ret = register_ftrace_function(&test_regs_probe);
	}
	if (ret) {
		pr_cont("*could not register callback* ");
		goto out;
	}

	DYN_FTRACE_TEST_NAME();

	unregister_ftrace_function(&test_regs_probe);

	ret = -1;
	/* Check what the callback observed against what the arch promises. */
	switch (trace_selftest_regs_stat) {
	case TRACE_SELFTEST_REGS_START:
		pr_cont("*callback never called* ");
		goto out;

	case TRACE_SELFTEST_REGS_FOUND:
		if (supported)
			break;
		pr_cont("*callback received regs without arch support* ");
		goto out;

	case TRACE_SELFTEST_REGS_NOT_FOUND:
		if (!supported)
			break;
		pr_cont("*callback received NULL regs* ");
		goto out;
	}

	ret = 0;
out:
	/* Restore the tracer state we saved on entry. */
	ftrace_enabled = save_ftrace_enabled;
	tracer_enabled = save_tracer_enabled;

	return ret;
}
/* /*
* Simple verification test of ftrace function tracer. * Simple verification test of ftrace function tracer.
* Enable ftrace, sleep 1/10 second, and then read the trace * Enable ftrace, sleep 1/10 second, and then read the trace
...@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) ...@@ -442,7 +698,14 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
ret = trace_selftest_startup_dynamic_tracing(trace, tr, ret = trace_selftest_startup_dynamic_tracing(trace, tr,
DYN_FTRACE_TEST_NAME); DYN_FTRACE_TEST_NAME);
if (ret)
goto out;
ret = trace_selftest_function_recursion();
if (ret)
goto out;
ret = trace_selftest_function_regs();
out: out:
ftrace_enabled = save_ftrace_enabled; ftrace_enabled = save_ftrace_enabled;
tracer_enabled = save_tracer_enabled; tracer_enabled = save_tracer_enabled;
......
...@@ -111,7 +111,8 @@ static inline void check_stack(void) ...@@ -111,7 +111,8 @@ static inline void check_stack(void)
} }
static void static void
stack_trace_call(unsigned long ip, unsigned long parent_ip) stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
int cpu; int cpu;
...@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) ...@@ -136,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip)
static struct ftrace_ops trace_ops __read_mostly = static struct ftrace_ops trace_ops __read_mostly =
{ {
.func = stack_trace_call, .func = stack_trace_call,
.flags = FTRACE_OPS_FL_RECURSION_SAFE,
}; };
static ssize_t static ssize_t
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment