Commit 1dd7dcb6 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "There was a lot of clean ups and minor fixes.  One of those clean ups
  was to the trace_seq code.  It also removed the return values to the
  trace_seq_*() functions and use trace_seq_has_overflowed() to see if
  the buffer filled up or not.  This is similar to work being done to
  the seq_file code as well in another tree.

  Some of the other goodies include:

   - Added some "!" (NOT) logic to the tracing filter.

   - Fixed the frame pointer logic to the x86_64 mcount trampolines

   - Added the logic for dynamic trampolines on !CONFIG_PREEMPT systems.
     That is, the ftrace trampoline can be dynamically allocated and be
     called directly by functions that only have a single hook to them"

* tag 'trace-3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (55 commits)
  tracing: Truncated output is better than nothing
  tracing: Add additional marks to signal very large time deltas
  Documentation: describe trace_buf_size parameter more accurately
  tracing: Allow NOT to filter AND and OR clauses
  tracing: Add NOT to filtering logic
  ftrace/fgraph/x86: Have prepare_ftrace_return() take ip as first parameter
  ftrace/x86: Get rid of ftrace_caller_setup
  ftrace/x86: Have save_mcount_regs macro also save stack frames if needed
  ftrace/x86: Add macro MCOUNT_REG_SIZE for amount of stack used to save mcount regs
  ftrace/x86: Simplify save_mcount_regs on getting RIP
  ftrace/x86: Have save_mcount_regs store RIP in %rdi for first parameter
  ftrace/x86: Rename MCOUNT_SAVE_FRAME and add more detailed comments
  ftrace/x86: Move MCOUNT_SAVE_FRAME out of header file
  ftrace/x86: Have static tracing also use ftrace_caller_setup
  ftrace/x86: Have static function tracing always test for function graph
  kprobes: Add IPMODIFY flag to kprobe_ftrace_ops
  ftrace, kprobes: Support IPMODIFY flag to find IP modify conflict
  kprobes/ftrace: Recover original IP if pre_handler doesn't change it
  tracing/trivial: Fix typos and make an int into a bool
  tracing: Deletion of an unnecessary check before iput()
  ...
parents b6da0076 3558a5ac
...@@ -3520,7 +3520,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted. ...@@ -3520,7 +3520,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
are saved. are saved.
trace_buf_size=nn[KMG] trace_buf_size=nn[KMG]
[FTRACE] will set tracing buffer size. [FTRACE] will set tracing buffer size on each cpu.
trace_event=[event-list] trace_event=[event-list]
[FTRACE] Set and start specified trace events in order [FTRACE] Set and start specified trace events in order
......
...@@ -234,6 +234,11 @@ of ftrace. Here is a list of some of the key files: ...@@ -234,6 +234,11 @@ of ftrace. Here is a list of some of the key files:
will be displayed on the same line as the function that will be displayed on the same line as the function that
is returning registers. is returning registers.
If the callback registered to be traced by a function with
the "ip modify" attribute (thus the regs->ip can be changed),
an 'I' will be displayed on the same line as the function that
can be overridden.
function_profile_enabled: function_profile_enabled:
When set it will enable all functions with either the function When set it will enable all functions with either the function
...@@ -680,9 +685,11 @@ The above is mostly meaningful for kernel developers. ...@@ -680,9 +685,11 @@ The above is mostly meaningful for kernel developers.
needs to be fixed to be only relative to the same CPU. needs to be fixed to be only relative to the same CPU.
The marks are determined by the difference between this The marks are determined by the difference between this
current trace and the next trace. current trace and the next trace.
'!' - greater than preempt_mark_thresh (default 100) '$' - greater than 1 second
'+' - greater than 1 microsecond '#' - greater than 1000 microsecond
' ' - less than or equal to 1 microsecond. '!' - greater than 100 microsecond
'+' - greater than 10 microsecond
' ' - less than or equal to 10 microsecond.
The rest is the same as the 'trace' file. The rest is the same as the 'trace' file.
...@@ -1951,6 +1958,8 @@ want, depending on your needs. ...@@ -1951,6 +1958,8 @@ want, depending on your needs.
+ means that the function exceeded 10 usecs. + means that the function exceeded 10 usecs.
! means that the function exceeded 100 usecs. ! means that the function exceeded 100 usecs.
# means that the function exceeded 1000 usecs.
$ means that the function exceeded 1 sec.
- The task/pid field displays the thread cmdline and pid which - The task/pid field displays the thread cmdline and pid which
......
...@@ -449,7 +449,7 @@ void ftrace_replace_code(int enable) ...@@ -449,7 +449,7 @@ void ftrace_replace_code(int enable)
rec = ftrace_rec_iter_record(iter); rec = ftrace_rec_iter_record(iter);
ret = __ftrace_replace_code(rec, enable); ret = __ftrace_replace_code(rec, enable);
if (ret) { if (ret) {
ftrace_bug(ret, rec->ip); ftrace_bug(ret, rec);
return; return;
} }
} }
......
#ifndef _ASM_X86_FTRACE_H #ifndef _ASM_X86_FTRACE_H
#define _ASM_X86_FTRACE_H #define _ASM_X86_FTRACE_H
#ifdef __ASSEMBLY__
/* skip is set if the stack was already partially adjusted */
.macro MCOUNT_SAVE_FRAME skip=0
/*
* We add enough stack to save all regs.
*/
subq $(SS+8-\skip), %rsp
movq %rax, RAX(%rsp)
movq %rcx, RCX(%rsp)
movq %rdx, RDX(%rsp)
movq %rsi, RSI(%rsp)
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
/* Move RIP to its proper location */
movq SS+8(%rsp), %rdx
movq %rdx, RIP(%rsp)
.endm
.macro MCOUNT_RESTORE_FRAME skip=0
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDI(%rsp), %rdi
movq RSI(%rsp), %rsi
movq RDX(%rsp), %rdx
movq RCX(%rsp), %rcx
movq RAX(%rsp), %rax
addq $(SS+8-\skip), %rsp
.endm
#endif
#ifdef CONFIG_FUNCTION_TRACER #ifdef CONFIG_FUNCTION_TRACER
#ifdef CC_USING_FENTRY #ifdef CC_USING_FENTRY
# define MCOUNT_ADDR ((long)(__fentry__)) # define MCOUNT_ADDR ((long)(__fentry__))
......
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/percpu.h> #include <linux/percpu.h>
#include <linux/sched.h> #include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/list.h> #include <linux/list.h>
#include <linux/module.h> #include <linux/module.h>
...@@ -47,7 +48,7 @@ int ftrace_arch_code_modify_post_process(void) ...@@ -47,7 +48,7 @@ int ftrace_arch_code_modify_post_process(void)
union ftrace_code_union { union ftrace_code_union {
char code[MCOUNT_INSN_SIZE]; char code[MCOUNT_INSN_SIZE];
struct { struct {
char e8; unsigned char e8;
int offset; int offset;
} __attribute__((packed)); } __attribute__((packed));
}; };
...@@ -582,7 +583,7 @@ void ftrace_replace_code(int enable) ...@@ -582,7 +583,7 @@ void ftrace_replace_code(int enable)
remove_breakpoints: remove_breakpoints:
pr_warn("Failed on %s (%d):\n", report, count); pr_warn("Failed on %s (%d):\n", report, count);
ftrace_bug(ret, rec ? rec->ip : 0); ftrace_bug(ret, rec);
for_ftrace_rec_iter(iter) { for_ftrace_rec_iter(iter) {
rec = ftrace_rec_iter_record(iter); rec = ftrace_rec_iter_record(iter);
/* /*
...@@ -644,13 +645,8 @@ int __init ftrace_dyn_arch_init(void) ...@@ -644,13 +645,8 @@ int __init ftrace_dyn_arch_init(void)
{ {
return 0; return 0;
} }
#endif
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
#if defined(CONFIG_X86_64) || defined(CONFIG_FUNCTION_GRAPH_TRACER)
static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
{ {
static union ftrace_code_union calc; static union ftrace_code_union calc;
...@@ -664,6 +660,280 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr) ...@@ -664,6 +660,280 @@ static unsigned char *ftrace_jmp_replace(unsigned long ip, unsigned long addr)
*/ */
return calc.code; return calc.code;
} }
#endif
/* Currently only x86_64 supports dynamic trampolines */
#ifdef CONFIG_X86_64
#ifdef CONFIG_MODULES
#include <linux/moduleloader.h>
/* Module allocation simplifies allocating memory for code */
static inline void *alloc_tramp(unsigned long size)
{
return module_alloc(size);
}
static inline void tramp_free(void *tramp)
{
module_free(NULL, tramp);
}
#else
/* Trampolines can only be created if modules are supported */
static inline void *alloc_tramp(unsigned long size)
{
return NULL;
}
static inline void tramp_free(void *tramp) { }
#endif
/* Defined as markers to the end of the ftrace default trampolines */
extern void ftrace_caller_end(void);
extern void ftrace_regs_caller_end(void);
extern void ftrace_return(void);
extern void ftrace_caller_op_ptr(void);
extern void ftrace_regs_caller_op_ptr(void);
/* movq function_trace_op(%rip), %rdx */
/* 0x48 0x8b 0x15 <offset-to-ftrace_trace_op (4 bytes)> */
#define OP_REF_SIZE 7
/*
* The ftrace_ops is passed to the function callback. Since the
* trampoline only services a single ftrace_ops, we can pass in
* that ops directly.
*
* The ftrace_op_code_union is used to create a pointer to the
* ftrace_ops that will be passed to the callback function.
*/
union ftrace_op_code_union {
char code[OP_REF_SIZE];
struct {
char op[3];
int offset;
} __attribute__((packed));
};
static unsigned long
create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
{
unsigned const char *jmp;
unsigned long start_offset;
unsigned long end_offset;
unsigned long op_offset;
unsigned long offset;
unsigned long size;
unsigned long ip;
unsigned long *ptr;
void *trampoline;
/* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
union ftrace_op_code_union op_ptr;
int ret;
if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
start_offset = (unsigned long)ftrace_regs_caller;
end_offset = (unsigned long)ftrace_regs_caller_end;
op_offset = (unsigned long)ftrace_regs_caller_op_ptr;
} else {
start_offset = (unsigned long)ftrace_caller;
end_offset = (unsigned long)ftrace_caller_end;
op_offset = (unsigned long)ftrace_caller_op_ptr;
}
size = end_offset - start_offset;
/*
* Allocate enough size to store the ftrace_caller code,
* the jmp to ftrace_return, as well as the address of
* the ftrace_ops this trampoline is used for.
*/
trampoline = alloc_tramp(size + MCOUNT_INSN_SIZE + sizeof(void *));
if (!trampoline)
return 0;
*tramp_size = size + MCOUNT_INSN_SIZE + sizeof(void *);
/* Copy ftrace_caller onto the trampoline memory */
ret = probe_kernel_read(trampoline, (void *)start_offset, size);
if (WARN_ON(ret < 0)) {
tramp_free(trampoline);
return 0;
}
ip = (unsigned long)trampoline + size;
/* The trampoline ends with a jmp to ftrace_return */
jmp = ftrace_jmp_replace(ip, (unsigned long)ftrace_return);
memcpy(trampoline + size, jmp, MCOUNT_INSN_SIZE);
/*
* The address of the ftrace_ops that is used for this trampoline
* is stored at the end of the trampoline. This will be used to
* load the third parameter for the callback. Basically, that
* location at the end of the trampoline takes the place of
* the global function_trace_op variable.
*/
ptr = (unsigned long *)(trampoline + size + MCOUNT_INSN_SIZE);
*ptr = (unsigned long)ops;
op_offset -= start_offset;
memcpy(&op_ptr, trampoline + op_offset, OP_REF_SIZE);
/* Are we pointing to the reference? */
if (WARN_ON(memcmp(op_ptr.op, op_ref, 3) != 0)) {
tramp_free(trampoline);
return 0;
}
/* Load the contents of ptr into the callback parameter */
offset = (unsigned long)ptr;
offset -= (unsigned long)trampoline + op_offset + OP_REF_SIZE;
op_ptr.offset = offset;
/* put in the new offset to the ftrace_ops */
memcpy(trampoline + op_offset, &op_ptr, OP_REF_SIZE);
/* ALLOC_TRAMP flags lets us know we created it */
ops->flags |= FTRACE_OPS_FL_ALLOC_TRAMP;
return (unsigned long)trampoline;
}
static unsigned long calc_trampoline_call_offset(bool save_regs)
{
unsigned long start_offset;
unsigned long call_offset;
if (save_regs) {
start_offset = (unsigned long)ftrace_regs_caller;
call_offset = (unsigned long)ftrace_regs_call;
} else {
start_offset = (unsigned long)ftrace_caller;
call_offset = (unsigned long)ftrace_call;
}
return call_offset - start_offset;
}
void arch_ftrace_update_trampoline(struct ftrace_ops *ops)
{
ftrace_func_t func;
unsigned char *new;
unsigned long offset;
unsigned long ip;
unsigned int size;
int ret;
if (ops->trampoline) {
/*
* The ftrace_ops caller may set up its own trampoline.
* In such a case, this code must not modify it.
*/
if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
} else {
ops->trampoline = create_trampoline(ops, &size);
if (!ops->trampoline)
return;
ops->trampoline_size = size;
}
offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
ip = ops->trampoline + offset;
func = ftrace_ops_get_func(ops);
/* Do a safe modify in case the trampoline is executing */
new = ftrace_call_replace(ip, (unsigned long)func);
ret = update_ftrace_func(ip, new);
/* The update should never fail */
WARN_ON(ret);
}
/* Return the address of the function the trampoline calls */
static void *addr_from_call(void *ptr)
{
union ftrace_code_union calc;
int ret;
ret = probe_kernel_read(&calc, ptr, MCOUNT_INSN_SIZE);
if (WARN_ON_ONCE(ret < 0))
return NULL;
/* Make sure this is a call */
if (WARN_ON_ONCE(calc.e8 != 0xe8)) {
pr_warn("Expected e8, got %x\n", calc.e8);
return NULL;
}
return ptr + MCOUNT_INSN_SIZE + calc.offset;
}
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer);
/*
* If the ops->trampoline was not allocated, then it probably
* has a static trampoline func, or is the ftrace caller itself.
*/
static void *static_tramp_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
unsigned long offset;
bool save_regs = rec->flags & FTRACE_FL_REGS_EN;
void *ptr;
if (ops && ops->trampoline) {
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
* We only know about function graph tracer setting as static
* trampoline.
*/
if (ops->trampoline == FTRACE_GRAPH_ADDR)
return (void *)prepare_ftrace_return;
#endif
return NULL;
}
offset = calc_trampoline_call_offset(save_regs);
if (save_regs)
ptr = (void *)FTRACE_REGS_ADDR + offset;
else
ptr = (void *)FTRACE_ADDR + offset;
return addr_from_call(ptr);
}
void *arch_ftrace_trampoline_func(struct ftrace_ops *ops, struct dyn_ftrace *rec)
{
unsigned long offset;
/* If we didn't allocate this trampoline, consider it static */
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return static_tramp_func(ops, rec);
offset = calc_trampoline_call_offset(ops->flags & FTRACE_OPS_FL_SAVE_REGS);
return addr_from_call((void *)ops->trampoline + offset);
}
void arch_ftrace_trampoline_free(struct ftrace_ops *ops)
{
if (!ops || !(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
return;
tramp_free((void *)ops->trampoline);
ops->trampoline = 0;
}
#endif /* CONFIG_X86_64 */
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
static int ftrace_mod_jmp(unsigned long ip, void *func) static int ftrace_mod_jmp(unsigned long ip, void *func)
{ {
...@@ -694,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void) ...@@ -694,7 +964,7 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs * Hook the return address and push it in the stack of return addrs
* in current thread info. * in current thread info.
*/ */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer) unsigned long frame_pointer)
{ {
unsigned long old; unsigned long old;
......
...@@ -27,7 +27,7 @@ ...@@ -27,7 +27,7 @@
static nokprobe_inline static nokprobe_inline
int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb) struct kprobe_ctlblk *kcb, unsigned long orig_ip)
{ {
/* /*
* Emulate singlestep (and also recover regs->ip) * Emulate singlestep (and also recover regs->ip)
...@@ -39,6 +39,8 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs, ...@@ -39,6 +39,8 @@ int __skip_singlestep(struct kprobe *p, struct pt_regs *regs,
p->post_handler(p, regs, 0); p->post_handler(p, regs, 0);
} }
__this_cpu_write(current_kprobe, NULL); __this_cpu_write(current_kprobe, NULL);
if (orig_ip)
regs->ip = orig_ip;
return 1; return 1;
} }
...@@ -46,7 +48,7 @@ int skip_singlestep(struct kprobe *p, struct pt_regs *regs, ...@@ -46,7 +48,7 @@ int skip_singlestep(struct kprobe *p, struct pt_regs *regs,
struct kprobe_ctlblk *kcb) struct kprobe_ctlblk *kcb)
{ {
if (kprobe_ftrace(p)) if (kprobe_ftrace(p))
return __skip_singlestep(p, regs, kcb); return __skip_singlestep(p, regs, kcb, 0);
else else
return 0; return 0;
} }
...@@ -71,13 +73,14 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip, ...@@ -71,13 +73,14 @@ void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
if (kprobe_running()) { if (kprobe_running()) {
kprobes_inc_nmissed_count(p); kprobes_inc_nmissed_count(p);
} else { } else {
unsigned long orig_ip = regs->ip;
/* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */ /* Kprobe handler expects regs->ip = ip + 1 as breakpoint hit */
regs->ip = ip + sizeof(kprobe_opcode_t); regs->ip = ip + sizeof(kprobe_opcode_t);
__this_cpu_write(current_kprobe, p); __this_cpu_write(current_kprobe, p);
kcb->kprobe_status = KPROBE_HIT_ACTIVE; kcb->kprobe_status = KPROBE_HIT_ACTIVE;
if (!p->pre_handler || !p->pre_handler(p, regs)) if (!p->pre_handler || !p->pre_handler(p, regs))
__skip_singlestep(p, regs, kcb); __skip_singlestep(p, regs, kcb, orig_ip);
/* /*
* If pre_handler returns !0, it sets regs->ip and * If pre_handler returns !0, it sets regs->ip and
* resets current kprobe. * resets current kprobe.
......
...@@ -21,40 +21,159 @@ ...@@ -21,40 +21,159 @@
# define function_hook mcount # define function_hook mcount
#endif #endif
/* All cases save the original rbp (8 bytes) */
#ifdef CONFIG_FRAME_POINTER
# ifdef CC_USING_FENTRY
/* Save parent and function stack frames (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16*2)
# else
/* Save just function stack frame (rip and rbp) */
# define MCOUNT_FRAME_SIZE (8+16)
# endif
#else
/* No need to save a stack frame */
# define MCOUNT_FRAME_SIZE 8
#endif /* CONFIG_FRAME_POINTER */
/* Size of stack used to save mcount regs in save_mcount_regs */
#define MCOUNT_REG_SIZE (SS+8 + MCOUNT_FRAME_SIZE)
/*
* gcc -pg option adds a call to 'mcount' in most functions.
* When -mfentry is used, the call is to 'fentry' and not 'mcount'
* and is done before the function's stack frame is set up.
* They both require a set of regs to be saved before calling
* any C code and restored before returning back to the function.
*
* On boot up, all these calls are converted into nops. When tracing
* is enabled, the call can jump to either ftrace_caller or
* ftrace_regs_caller. Callbacks (tracing functions) that require
* ftrace_regs_caller (like kprobes) need to have pt_regs passed to
* it. For this reason, the size of the pt_regs structure will be
* allocated on the stack and the required mcount registers will
* be saved in the locations that pt_regs has them in.
*/
/*
* @added: the amount of stack added before calling this
*
* After this is called, the following registers contain:
*
* %rdi - holds the address that called the trampoline
* %rsi - holds the parent function (traced function's return address)
* %rdx - holds the original %rbp
*/
.macro save_mcount_regs added=0
/* Always save the original rbp */
pushq %rbp
#ifdef CONFIG_FRAME_POINTER
/*
* Stack traces will stop at the ftrace trampoline if the frame pointer
* is not set up properly. If fentry is used, we need to save a frame
* pointer for the parent as well as the function traced, because the
* fentry is called before the stack frame is set up, where as mcount
* is called afterward.
*/
#ifdef CC_USING_FENTRY
/* Save the parent pointer (skip orig rbp and our return address) */
pushq \added+8*2(%rsp)
pushq %rbp
movq %rsp, %rbp
/* Save the return address (now skip orig rbp, rbp and parent) */
pushq \added+8*3(%rsp)
#else
/* Can't assume that rip is before this (unless added was zero) */
pushq \added+8(%rsp)
#endif
pushq %rbp
movq %rsp, %rbp
#endif /* CONFIG_FRAME_POINTER */
/*
* We add enough stack to save all regs.
*/
subq $(MCOUNT_REG_SIZE - MCOUNT_FRAME_SIZE), %rsp
movq %rax, RAX(%rsp)
movq %rcx, RCX(%rsp)
movq %rdx, RDX(%rsp)
movq %rsi, RSI(%rsp)
movq %rdi, RDI(%rsp)
movq %r8, R8(%rsp)
movq %r9, R9(%rsp)
/*
* Save the original RBP. Even though the mcount ABI does not
* require this, it helps out callers.
*/
movq MCOUNT_REG_SIZE-8(%rsp), %rdx
movq %rdx, RBP(%rsp)
/* Copy the parent address into %rsi (second parameter) */
#ifdef CC_USING_FENTRY
movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
#else
/* %rdx contains original %rbp */
movq 8(%rdx), %rsi
#endif
/* Move RIP to its proper location */
movq MCOUNT_REG_SIZE+\added(%rsp), %rdi
movq %rdi, RIP(%rsp)
/*
* Now %rdi (the first parameter) has the return address of
* where ftrace_call returns. But the callbacks expect the
* address of the call itself.
*/
subq $MCOUNT_INSN_SIZE, %rdi
.endm
.macro restore_mcount_regs
movq R9(%rsp), %r9
movq R8(%rsp), %r8
movq RDI(%rsp), %rdi
movq RSI(%rsp), %rsi
movq RDX(%rsp), %rdx
movq RCX(%rsp), %rcx
movq RAX(%rsp), %rax
/* ftrace_regs_caller can modify %rbp */
movq RBP(%rsp), %rbp
addq $MCOUNT_REG_SIZE, %rsp
.endm
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(function_hook) ENTRY(function_hook)
retq retq
END(function_hook) END(function_hook)
/* skip is set if stack has been adjusted */ ENTRY(ftrace_caller)
.macro ftrace_caller_setup skip=0 /* save_mcount_regs fills in first two parameters */
MCOUNT_SAVE_FRAME \skip save_mcount_regs
GLOBAL(ftrace_caller_op_ptr)
/* Load the ftrace_ops into the 3rd parameter */ /* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx movq function_trace_op(%rip), %rdx
/* Load ip into the first parameter */
movq RIP(%rsp), %rdi
subq $MCOUNT_INSN_SIZE, %rdi
/* Load the parent_ip into the second parameter */
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
.endm
ENTRY(ftrace_caller)
ftrace_caller_setup
/* regs go into 4th parameter (but make it NULL) */ /* regs go into 4th parameter (but make it NULL) */
movq $0, %rcx movq $0, %rcx
GLOBAL(ftrace_call) GLOBAL(ftrace_call)
call ftrace_stub call ftrace_stub
MCOUNT_RESTORE_FRAME restore_mcount_regs
ftrace_return:
/*
* The copied trampoline must call ftrace_return as it
* still may need to call the function graph tracer.
*/
GLOBAL(ftrace_caller_end)
GLOBAL(ftrace_return)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
GLOBAL(ftrace_graph_call) GLOBAL(ftrace_graph_call)
...@@ -66,11 +185,16 @@ GLOBAL(ftrace_stub) ...@@ -66,11 +185,16 @@ GLOBAL(ftrace_stub)
END(ftrace_caller) END(ftrace_caller)
ENTRY(ftrace_regs_caller) ENTRY(ftrace_regs_caller)
/* Save the current flags before compare (in SS location)*/ /* Save the current flags before any operations that can change them */
pushfq pushfq
/* skip=8 to skip flags saved in SS */ /* added 8 bytes to save flags */
ftrace_caller_setup 8 save_mcount_regs 8
/* save_mcount_regs fills in first two parameters */
GLOBAL(ftrace_regs_caller_op_ptr)
/* Load the ftrace_ops into the 3rd parameter */
movq function_trace_op(%rip), %rdx
/* Save the rest of pt_regs */ /* Save the rest of pt_regs */
movq %r15, R15(%rsp) movq %r15, R15(%rsp)
...@@ -79,18 +203,17 @@ ENTRY(ftrace_regs_caller) ...@@ -79,18 +203,17 @@ ENTRY(ftrace_regs_caller)
movq %r12, R12(%rsp) movq %r12, R12(%rsp)
movq %r11, R11(%rsp) movq %r11, R11(%rsp)
movq %r10, R10(%rsp) movq %r10, R10(%rsp)
movq %rbp, RBP(%rsp)
movq %rbx, RBX(%rsp) movq %rbx, RBX(%rsp)
/* Copy saved flags */ /* Copy saved flags */
movq SS(%rsp), %rcx movq MCOUNT_REG_SIZE(%rsp), %rcx
movq %rcx, EFLAGS(%rsp) movq %rcx, EFLAGS(%rsp)
/* Kernel segments */ /* Kernel segments */
movq $__KERNEL_DS, %rcx movq $__KERNEL_DS, %rcx
movq %rcx, SS(%rsp) movq %rcx, SS(%rsp)
movq $__KERNEL_CS, %rcx movq $__KERNEL_CS, %rcx
movq %rcx, CS(%rsp) movq %rcx, CS(%rsp)
/* Stack - skipping return address */ /* Stack - skipping return address and flags */
leaq SS+16(%rsp), %rcx leaq MCOUNT_REG_SIZE+8*2(%rsp), %rcx
movq %rcx, RSP(%rsp) movq %rcx, RSP(%rsp)
/* regs go into 4th parameter */ /* regs go into 4th parameter */
...@@ -101,11 +224,11 @@ GLOBAL(ftrace_regs_call) ...@@ -101,11 +224,11 @@ GLOBAL(ftrace_regs_call)
/* Copy flags back to SS, to restore them */ /* Copy flags back to SS, to restore them */
movq EFLAGS(%rsp), %rax movq EFLAGS(%rsp), %rax
movq %rax, SS(%rsp) movq %rax, MCOUNT_REG_SIZE(%rsp)
/* Handlers can change the RIP */ /* Handlers can change the RIP */
movq RIP(%rsp), %rax movq RIP(%rsp), %rax
movq %rax, SS+8(%rsp) movq %rax, MCOUNT_REG_SIZE+8(%rsp)
/* restore the rest of pt_regs */ /* restore the rest of pt_regs */
movq R15(%rsp), %r15 movq R15(%rsp), %r15
...@@ -113,19 +236,22 @@ GLOBAL(ftrace_regs_call) ...@@ -113,19 +236,22 @@ GLOBAL(ftrace_regs_call)
movq R13(%rsp), %r13 movq R13(%rsp), %r13
movq R12(%rsp), %r12 movq R12(%rsp), %r12
movq R10(%rsp), %r10 movq R10(%rsp), %r10
movq RBP(%rsp), %rbp
movq RBX(%rsp), %rbx movq RBX(%rsp), %rbx
/* skip=8 to skip flags saved in SS */ restore_mcount_regs
MCOUNT_RESTORE_FRAME 8
/* Restore flags */ /* Restore flags */
popfq popfq
jmp ftrace_return /*
* As this jmp to ftrace_return can be a short jump
* it must not be copied into the trampoline.
* The trampoline will add the code to jump
* to the return.
*/
GLOBAL(ftrace_regs_caller_end)
popfq jmp ftrace_return
jmp ftrace_stub
END(ftrace_regs_caller) END(ftrace_regs_caller)
...@@ -136,6 +262,7 @@ ENTRY(function_hook) ...@@ -136,6 +262,7 @@ ENTRY(function_hook)
cmpq $ftrace_stub, ftrace_trace_function cmpq $ftrace_stub, ftrace_trace_function
jnz trace jnz trace
fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
cmpq $ftrace_stub, ftrace_graph_return cmpq $ftrace_stub, ftrace_graph_return
jnz ftrace_graph_caller jnz ftrace_graph_caller
...@@ -148,42 +275,35 @@ GLOBAL(ftrace_stub) ...@@ -148,42 +275,35 @@ GLOBAL(ftrace_stub)
retq retq
trace: trace:
MCOUNT_SAVE_FRAME /* save_mcount_regs fills in first two parameters */
save_mcount_regs
movq RIP(%rsp), %rdi
#ifdef CC_USING_FENTRY
movq SS+16(%rsp), %rsi
#else
movq 8(%rbp), %rsi
#endif
subq $MCOUNT_INSN_SIZE, %rdi
call *ftrace_trace_function call *ftrace_trace_function
MCOUNT_RESTORE_FRAME restore_mcount_regs
jmp ftrace_stub jmp fgraph_trace
END(function_hook) END(function_hook)
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller) ENTRY(ftrace_graph_caller)
MCOUNT_SAVE_FRAME /* Saves rbp into %rdx and fills first parameter */
save_mcount_regs
#ifdef CC_USING_FENTRY #ifdef CC_USING_FENTRY
leaq SS+16(%rsp), %rdi leaq MCOUNT_REG_SIZE+8(%rsp), %rsi
movq $0, %rdx /* No framepointers needed */ movq $0, %rdx /* No framepointers needed */
#else #else
leaq 8(%rbp), %rdi /* Save address of the return address of traced function */
movq (%rbp), %rdx leaq 8(%rdx), %rsi
/* ftrace does sanity checks against frame pointers */
movq (%rdx), %rdx
#endif #endif
movq RIP(%rsp), %rsi
subq $MCOUNT_INSN_SIZE, %rsi
call prepare_ftrace_return call prepare_ftrace_return
MCOUNT_RESTORE_FRAME restore_mcount_regs
retq retq
END(ftrace_graph_caller) END(ftrace_graph_caller)
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
__entry->unsync = sp->unsync; __entry->unsync = sp->unsync;
#define KVM_MMU_PAGE_PRINTK() ({ \ #define KVM_MMU_PAGE_PRINTK() ({ \
const u32 saved_len = p->len; \ const char *saved_ptr = trace_seq_buffer_ptr(p); \
static const char *access_str[] = { \ static const char *access_str[] = { \
"---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \ "---", "--x", "w--", "w-x", "-u-", "-ux", "wu-", "wux" \
}; \ }; \
...@@ -41,7 +41,7 @@ ...@@ -41,7 +41,7 @@
role.nxe ? "" : "!", \ role.nxe ? "" : "!", \
__entry->root_count, \ __entry->root_count, \
__entry->unsync ? "unsync" : "sync", 0); \ __entry->unsync ? "unsync" : "sync", 0); \
p->buffer + saved_len; \ saved_ptr; \
}) })
#define kvm_mmu_trace_pferr_flags \ #define kvm_mmu_trace_pferr_flags \
......
...@@ -294,7 +294,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *mem, ...@@ -294,7 +294,7 @@ void cper_mem_err_pack(const struct cper_sec_mem_err *mem,
const char *cper_mem_err_unpack(struct trace_seq *p, const char *cper_mem_err_unpack(struct trace_seq *p,
struct cper_mem_err_compact *cmem) struct cper_mem_err_compact *cmem)
{ {
const char *ret = p->buffer + p->len; const char *ret = trace_seq_buffer_ptr(p);
if (cper_mem_err_location(cmem, rcd_decode_str)) if (cper_mem_err_location(cmem, rcd_decode_str))
trace_seq_printf(p, "%s", rcd_decode_str); trace_seq_printf(p, "%s", rcd_decode_str);
......
...@@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); ...@@ -61,6 +61,11 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/* /*
* FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
* set in the flags member. * set in the flags member.
* CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
* Changing those attribute flags after regsitering ftrace_ops will
* cause unexpected results.
* *
* ENABLED - set/unset when ftrace_ops is registered/unregistered * ENABLED - set/unset when ftrace_ops is registered/unregistered
* DYNAMIC - set when ftrace_ops is registered to denote dynamically * DYNAMIC - set when ftrace_ops is registered to denote dynamically
...@@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops); ...@@ -94,6 +99,17 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* ADDING - The ops is in the process of being added. * ADDING - The ops is in the process of being added.
* REMOVING - The ops is in the process of being removed. * REMOVING - The ops is in the process of being removed.
* MODIFYING - The ops is in the process of changing its filter functions. * MODIFYING - The ops is in the process of changing its filter functions.
* ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
* The arch specific code sets this flag when it allocated a
* trampoline. This lets the arch know that it can update the
* trampoline in case the callback function changes.
* The ftrace_ops trampoline can be set by the ftrace users, and
* in such cases the arch must not modify it. Only the arch ftrace
* core code should set this flag.
* IPMODIFY - The ops can modify the IP register. This can only be set with
* SAVE_REGS. If another ops with this flag set is already registered
* for any of the functions that this ops will be registered for, then
* this ops will fail to register or set_filter_ip.
*/ */
enum { enum {
FTRACE_OPS_FL_ENABLED = 1 << 0, FTRACE_OPS_FL_ENABLED = 1 << 0,
...@@ -108,6 +124,8 @@ enum { ...@@ -108,6 +124,8 @@ enum {
FTRACE_OPS_FL_ADDING = 1 << 9, FTRACE_OPS_FL_ADDING = 1 << 9,
FTRACE_OPS_FL_REMOVING = 1 << 10, FTRACE_OPS_FL_REMOVING = 1 << 10,
FTRACE_OPS_FL_MODIFYING = 1 << 11, FTRACE_OPS_FL_MODIFYING = 1 << 11,
FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
FTRACE_OPS_FL_IPMODIFY = 1 << 13,
}; };
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
...@@ -142,6 +160,7 @@ struct ftrace_ops { ...@@ -142,6 +160,7 @@ struct ftrace_ops {
struct ftrace_ops_hash *func_hash; struct ftrace_ops_hash *func_hash;
struct ftrace_ops_hash old_hash; struct ftrace_ops_hash old_hash;
unsigned long trampoline; unsigned long trampoline;
unsigned long trampoline_size;
#endif #endif
}; };
...@@ -255,7 +274,9 @@ struct ftrace_func_command { ...@@ -255,7 +274,9 @@ struct ftrace_func_command {
int ftrace_arch_code_modify_prepare(void); int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void); int ftrace_arch_code_modify_post_process(void);
void ftrace_bug(int err, unsigned long ip); struct dyn_ftrace;
void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file; struct seq_file;
...@@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end); ...@@ -287,6 +308,8 @@ extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void); extern int ftrace_nr_registered_ops(void);
bool is_ftrace_trampoline(unsigned long addr);
/* /*
* The dyn_ftrace record's flags field is split into two parts. * The dyn_ftrace record's flags field is split into two parts.
* the first part which is '0-FTRACE_REF_MAX' is a counter of * the first part which is '0-FTRACE_REF_MAX' is a counter of
...@@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void); ...@@ -297,6 +320,7 @@ extern int ftrace_nr_registered_ops(void);
* ENABLED - the function is being traced * ENABLED - the function is being traced
* REGS - the record wants the function to save regs * REGS - the record wants the function to save regs
* REGS_EN - the function is set up to save regs. * REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* *
* When a new ftrace_ops is registered and wants a function to save * When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been * pt_regs, the rec->flag REGS is set. When the function has been
...@@ -310,10 +334,11 @@ enum { ...@@ -310,10 +334,11 @@ enum {
FTRACE_FL_REGS_EN = (1UL << 29), FTRACE_FL_REGS_EN = (1UL << 29),
FTRACE_FL_TRAMP = (1UL << 28), FTRACE_FL_TRAMP = (1UL << 28),
FTRACE_FL_TRAMP_EN = (1UL << 27), FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
}; };
#define FTRACE_REF_MAX_SHIFT 27 #define FTRACE_REF_MAX_SHIFT 26
#define FTRACE_FL_BITS 5 #define FTRACE_FL_BITS 6
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1) #define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT) #define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1) #define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
...@@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user ...@@ -586,6 +611,11 @@ static inline ssize_t ftrace_notrace_write(struct file *file, const char __user
size_t cnt, loff_t *ppos) { return -ENODEV; } size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; } ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
static inline bool is_ftrace_trampoline(unsigned long addr)
{
return false;
}
#endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */ /* totally disable ftrace - can not re-enable after this */
......
...@@ -138,6 +138,17 @@ enum print_line_t { ...@@ -138,6 +138,17 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */ TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
}; };
/*
* Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
* overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
* simplifies those functions and keeps them in sync.
*/
static inline enum print_line_t trace_handle_return(struct trace_seq *s)
{
return trace_seq_has_overflowed(s) ?
TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
}
void tracing_generic_entry_update(struct trace_entry *entry, void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags, unsigned long flags,
int pc); int pc);
......
...@@ -40,45 +40,54 @@ trace_seq_buffer_ptr(struct trace_seq *s) ...@@ -40,45 +40,54 @@ trace_seq_buffer_ptr(struct trace_seq *s)
return s->buffer + s->len; return s->buffer + s->len;
} }
/**
* trace_seq_has_overflowed - return true if the trace_seq took too much
* @s: trace sequence descriptor
*
* Returns true if too much data was added to the trace_seq and it is
* now full and will not take anymore.
*/
static inline bool trace_seq_has_overflowed(struct trace_seq *s)
{
return s->full || s->len > PAGE_SIZE - 1;
}
/* /*
* Currently only defined when tracing is enabled. * Currently only defined when tracing is enabled.
*/ */
#ifdef CONFIG_TRACING #ifdef CONFIG_TRACING
extern __printf(2, 3) extern __printf(2, 3)
int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); void trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
extern __printf(2, 0) extern __printf(2, 0)
int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args); void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args);
extern int extern void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary); trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary);
extern int trace_print_seq(struct seq_file *m, struct trace_seq *s); extern int trace_print_seq(struct seq_file *m, struct trace_seq *s);
extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, extern int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
int cnt); int cnt);
extern int trace_seq_puts(struct trace_seq *s, const char *str); extern void trace_seq_puts(struct trace_seq *s, const char *str);
extern int trace_seq_putc(struct trace_seq *s, unsigned char c); extern void trace_seq_putc(struct trace_seq *s, unsigned char c);
extern int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len); extern void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len);
extern int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, extern void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len); unsigned int len);
extern int trace_seq_path(struct trace_seq *s, const struct path *path); extern int trace_seq_path(struct trace_seq *s, const struct path *path);
extern int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits); int nmaskbits);
#else /* CONFIG_TRACING */ #else /* CONFIG_TRACING */
static inline int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{ {
return 0;
} }
static inline int static inline void
trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{ {
return 0;
} }
static inline int static inline void
trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits) int nmaskbits)
{ {
return 0;
} }
static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s) static inline int trace_print_seq(struct seq_file *m, struct trace_seq *s)
...@@ -90,23 +99,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf, ...@@ -90,23 +99,19 @@ static inline int trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
{ {
return 0; return 0;
} }
static inline int trace_seq_puts(struct trace_seq *s, const char *str) static inline void trace_seq_puts(struct trace_seq *s, const char *str)
{ {
return 0;
} }
static inline int trace_seq_putc(struct trace_seq *s, unsigned char c) static inline void trace_seq_putc(struct trace_seq *s, unsigned char c)
{ {
return 0;
} }
static inline int static inline void
trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
{ {
return 0;
} }
static inline int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, static inline void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len) unsigned int len)
{ {
return 0;
} }
static inline int trace_seq_path(struct trace_seq *s, const struct path *path) static inline int trace_seq_path(struct trace_seq *s, const struct path *path)
{ {
......
...@@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ ...@@ -277,14 +277,12 @@ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
field = (typeof(field))iter->ent; \ field = (typeof(field))iter->ent; \
\ \
ret = ftrace_raw_output_prep(iter, trace_event); \ ret = ftrace_raw_output_prep(iter, trace_event); \
if (ret) \ if (ret != TRACE_TYPE_HANDLED) \
return ret; \ return ret; \
\ \
ret = trace_seq_printf(s, print); \ trace_seq_printf(s, print); \
if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \
\ \
return TRACE_TYPE_HANDLED; \ return trace_handle_return(s); \
} \ } \
static struct trace_event_functions ftrace_event_type_funcs_##call = { \ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
.trace = ftrace_raw_output_##call, \ .trace = ftrace_raw_output_##call, \
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/ftrace.h> #include <linux/ftrace.h>
#include <linux/memory.h> #include <linux/memory.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/init.h> #include <linux/init.h>
...@@ -102,6 +103,8 @@ int __kernel_text_address(unsigned long addr) ...@@ -102,6 +103,8 @@ int __kernel_text_address(unsigned long addr)
return 1; return 1;
if (is_module_text_address(addr)) if (is_module_text_address(addr))
return 1; return 1;
if (is_ftrace_trampoline(addr))
return 1;
/* /*
* There might be init symbols in saved stacktraces. * There might be init symbols in saved stacktraces.
* Give those symbols a chance to be printed in * Give those symbols a chance to be printed in
...@@ -119,7 +122,9 @@ int kernel_text_address(unsigned long addr) ...@@ -119,7 +122,9 @@ int kernel_text_address(unsigned long addr)
{ {
if (core_kernel_text(addr)) if (core_kernel_text(addr))
return 1; return 1;
return is_module_text_address(addr); if (is_module_text_address(addr))
return 1;
return is_ftrace_trampoline(addr);
} }
/* /*
......
...@@ -915,7 +915,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p) ...@@ -915,7 +915,7 @@ static struct kprobe *alloc_aggr_kprobe(struct kprobe *p)
#ifdef CONFIG_KPROBES_ON_FTRACE #ifdef CONFIG_KPROBES_ON_FTRACE
static struct ftrace_ops kprobe_ftrace_ops __read_mostly = { static struct ftrace_ops kprobe_ftrace_ops __read_mostly = {
.func = kprobe_ftrace_handler, .func = kprobe_ftrace_handler,
.flags = FTRACE_OPS_FL_SAVE_REGS, .flags = FTRACE_OPS_FL_SAVE_REGS | FTRACE_OPS_FL_IPMODIFY,
}; };
static int kprobe_ftrace_enabled; static int kprobe_ftrace_enabled;
......
...@@ -1142,9 +1142,9 @@ static void get_pdu_remap(const struct trace_entry *ent, ...@@ -1142,9 +1142,9 @@ static void get_pdu_remap(const struct trace_entry *ent,
r->sector_from = be64_to_cpu(sector_from); r->sector_from = be64_to_cpu(sector_from);
} }
typedef int (blk_log_action_t) (struct trace_iterator *iter, const char *act); typedef void (blk_log_action_t) (struct trace_iterator *iter, const char *act);
static int blk_log_action_classic(struct trace_iterator *iter, const char *act) static void blk_log_action_classic(struct trace_iterator *iter, const char *act)
{ {
char rwbs[RWBS_LEN]; char rwbs[RWBS_LEN];
unsigned long long ts = iter->ts; unsigned long long ts = iter->ts;
...@@ -1154,33 +1154,33 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act) ...@@ -1154,33 +1154,33 @@ static int blk_log_action_classic(struct trace_iterator *iter, const char *act)
fill_rwbs(rwbs, t); fill_rwbs(rwbs, t);
return trace_seq_printf(&iter->seq, trace_seq_printf(&iter->seq,
"%3d,%-3d %2d %5d.%09lu %5u %2s %3s ", "%3d,%-3d %2d %5d.%09lu %5u %2s %3s ",
MAJOR(t->device), MINOR(t->device), iter->cpu, MAJOR(t->device), MINOR(t->device), iter->cpu,
secs, nsec_rem, iter->ent->pid, act, rwbs); secs, nsec_rem, iter->ent->pid, act, rwbs);
} }
static int blk_log_action(struct trace_iterator *iter, const char *act) static void blk_log_action(struct trace_iterator *iter, const char *act)
{ {
char rwbs[RWBS_LEN]; char rwbs[RWBS_LEN];
const struct blk_io_trace *t = te_blk_io_trace(iter->ent); const struct blk_io_trace *t = te_blk_io_trace(iter->ent);
fill_rwbs(rwbs, t); fill_rwbs(rwbs, t);
return trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ", trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
MAJOR(t->device), MINOR(t->device), act, rwbs); MAJOR(t->device), MINOR(t->device), act, rwbs);
} }
static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
{ {
const unsigned char *pdu_buf; const unsigned char *pdu_buf;
int pdu_len; int pdu_len;
int i, end, ret; int i, end;
pdu_buf = pdu_start(ent); pdu_buf = pdu_start(ent);
pdu_len = te_blk_io_trace(ent)->pdu_len; pdu_len = te_blk_io_trace(ent)->pdu_len;
if (!pdu_len) if (!pdu_len)
return 1; return;
/* find the last zero that needs to be printed */ /* find the last zero that needs to be printed */
for (end = pdu_len - 1; end >= 0; end--) for (end = pdu_len - 1; end >= 0; end--)
...@@ -1188,119 +1188,107 @@ static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent) ...@@ -1188,119 +1188,107 @@ static int blk_log_dump_pdu(struct trace_seq *s, const struct trace_entry *ent)
break; break;
end++; end++;
if (!trace_seq_putc(s, '(')) trace_seq_putc(s, '(');
return 0;
for (i = 0; i < pdu_len; i++) { for (i = 0; i < pdu_len; i++) {
ret = trace_seq_printf(s, "%s%02x", trace_seq_printf(s, "%s%02x",
i == 0 ? "" : " ", pdu_buf[i]); i == 0 ? "" : " ", pdu_buf[i]);
if (!ret)
return ret;
/* /*
* stop when the rest is just zeroes and indicate so * stop when the rest is just zeroes and indicate so
* with a ".." appended * with a ".." appended
*/ */
if (i == end && end != pdu_len - 1) if (i == end && end != pdu_len - 1) {
return trace_seq_puts(s, " ..) "); trace_seq_puts(s, " ..) ");
return;
}
} }
return trace_seq_puts(s, ") "); trace_seq_puts(s, ") ");
} }
static int blk_log_generic(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_generic(struct trace_seq *s, const struct trace_entry *ent)
{ {
char cmd[TASK_COMM_LEN]; char cmd[TASK_COMM_LEN];
trace_find_cmdline(ent->pid, cmd); trace_find_cmdline(ent->pid, cmd);
if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
int ret; trace_seq_printf(s, "%u ", t_bytes(ent));
blk_log_dump_pdu(s, ent);
ret = trace_seq_printf(s, "%u ", t_bytes(ent)); trace_seq_printf(s, "[%s]\n", cmd);
if (!ret)
return 0;
ret = blk_log_dump_pdu(s, ent);
if (!ret)
return 0;
return trace_seq_printf(s, "[%s]\n", cmd);
} else { } else {
if (t_sec(ent)) if (t_sec(ent))
return trace_seq_printf(s, "%llu + %u [%s]\n", trace_seq_printf(s, "%llu + %u [%s]\n",
t_sector(ent), t_sec(ent), cmd); t_sector(ent), t_sec(ent), cmd);
return trace_seq_printf(s, "[%s]\n", cmd); else
trace_seq_printf(s, "[%s]\n", cmd);
} }
} }
static int blk_log_with_error(struct trace_seq *s, static void blk_log_with_error(struct trace_seq *s,
const struct trace_entry *ent) const struct trace_entry *ent)
{ {
if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) { if (t_action(ent) & BLK_TC_ACT(BLK_TC_PC)) {
int ret; blk_log_dump_pdu(s, ent);
trace_seq_printf(s, "[%d]\n", t_error(ent));
ret = blk_log_dump_pdu(s, ent);
if (ret)
return trace_seq_printf(s, "[%d]\n", t_error(ent));
return 0;
} else { } else {
if (t_sec(ent)) if (t_sec(ent))
return trace_seq_printf(s, "%llu + %u [%d]\n", trace_seq_printf(s, "%llu + %u [%d]\n",
t_sector(ent), t_sector(ent),
t_sec(ent), t_error(ent)); t_sec(ent), t_error(ent));
return trace_seq_printf(s, "%llu [%d]\n", else
t_sector(ent), t_error(ent)); trace_seq_printf(s, "%llu [%d]\n",
t_sector(ent), t_error(ent));
} }
} }
static int blk_log_remap(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_remap(struct trace_seq *s, const struct trace_entry *ent)
{ {
struct blk_io_trace_remap r = { .device_from = 0, }; struct blk_io_trace_remap r = { .device_from = 0, };
get_pdu_remap(ent, &r); get_pdu_remap(ent, &r);
return trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n", trace_seq_printf(s, "%llu + %u <- (%d,%d) %llu\n",
t_sector(ent), t_sec(ent), t_sector(ent), t_sec(ent),
MAJOR(r.device_from), MINOR(r.device_from), MAJOR(r.device_from), MINOR(r.device_from),
(unsigned long long)r.sector_from); (unsigned long long)r.sector_from);
} }
static int blk_log_plug(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_plug(struct trace_seq *s, const struct trace_entry *ent)
{ {
char cmd[TASK_COMM_LEN]; char cmd[TASK_COMM_LEN];
trace_find_cmdline(ent->pid, cmd); trace_find_cmdline(ent->pid, cmd);
return trace_seq_printf(s, "[%s]\n", cmd); trace_seq_printf(s, "[%s]\n", cmd);
} }
static int blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_unplug(struct trace_seq *s, const struct trace_entry *ent)
{ {
char cmd[TASK_COMM_LEN]; char cmd[TASK_COMM_LEN];
trace_find_cmdline(ent->pid, cmd); trace_find_cmdline(ent->pid, cmd);
return trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent)); trace_seq_printf(s, "[%s] %llu\n", cmd, get_pdu_int(ent));
} }
static int blk_log_split(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_split(struct trace_seq *s, const struct trace_entry *ent)
{ {
char cmd[TASK_COMM_LEN]; char cmd[TASK_COMM_LEN];
trace_find_cmdline(ent->pid, cmd); trace_find_cmdline(ent->pid, cmd);
return trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent), trace_seq_printf(s, "%llu / %llu [%s]\n", t_sector(ent),
get_pdu_int(ent), cmd); get_pdu_int(ent), cmd);
} }
static int blk_log_msg(struct trace_seq *s, const struct trace_entry *ent) static void blk_log_msg(struct trace_seq *s, const struct trace_entry *ent)
{ {
int ret;
const struct blk_io_trace *t = te_blk_io_trace(ent); const struct blk_io_trace *t = te_blk_io_trace(ent);
ret = trace_seq_putmem(s, t + 1, t->pdu_len); trace_seq_putmem(s, t + 1, t->pdu_len);
if (ret) trace_seq_putc(s, '\n');
return trace_seq_putc(s, '\n');
return ret;
} }
/* /*
...@@ -1339,7 +1327,7 @@ static void blk_tracer_reset(struct trace_array *tr) ...@@ -1339,7 +1327,7 @@ static void blk_tracer_reset(struct trace_array *tr)
static const struct { static const struct {
const char *act[2]; const char *act[2];
int (*print)(struct trace_seq *s, const struct trace_entry *ent); void (*print)(struct trace_seq *s, const struct trace_entry *ent);
} what2act[] = { } what2act[] = {
[__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic }, [__BLK_TA_QUEUE] = {{ "Q", "queue" }, blk_log_generic },
[__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic }, [__BLK_TA_BACKMERGE] = {{ "M", "backmerge" }, blk_log_generic },
...@@ -1364,7 +1352,6 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, ...@@ -1364,7 +1352,6 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
const struct blk_io_trace *t; const struct blk_io_trace *t;
u16 what; u16 what;
int ret;
bool long_act; bool long_act;
blk_log_action_t *log_action; blk_log_action_t *log_action;
...@@ -1374,21 +1361,18 @@ static enum print_line_t print_one_line(struct trace_iterator *iter, ...@@ -1374,21 +1361,18 @@ static enum print_line_t print_one_line(struct trace_iterator *iter,
log_action = classic ? &blk_log_action_classic : &blk_log_action; log_action = classic ? &blk_log_action_classic : &blk_log_action;
if (t->action == BLK_TN_MESSAGE) { if (t->action == BLK_TN_MESSAGE) {
ret = log_action(iter, long_act ? "message" : "m"); log_action(iter, long_act ? "message" : "m");
if (ret) blk_log_msg(s, iter->ent);
ret = blk_log_msg(s, iter->ent);
goto out;
} }
if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act))) if (unlikely(what == 0 || what >= ARRAY_SIZE(what2act)))
ret = trace_seq_printf(s, "Unknown action %x\n", what); trace_seq_printf(s, "Unknown action %x\n", what);
else { else {
ret = log_action(iter, what2act[what].act[long_act]); log_action(iter, what2act[what].act[long_act]);
if (ret) what2act[what].print(s, iter->ent);
ret = what2act[what].print(s, iter->ent);
} }
out:
return ret ? TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE; return trace_handle_return(s);
} }
static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
...@@ -1397,7 +1381,7 @@ static enum print_line_t blk_trace_event_print(struct trace_iterator *iter, ...@@ -1397,7 +1381,7 @@ static enum print_line_t blk_trace_event_print(struct trace_iterator *iter,
return print_one_line(iter, false); return print_one_line(iter, false);
} }
static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) static void blk_trace_synthesize_old_trace(struct trace_iterator *iter)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct blk_io_trace *t = (struct blk_io_trace *)iter->ent; struct blk_io_trace *t = (struct blk_io_trace *)iter->ent;
...@@ -1407,18 +1391,18 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter) ...@@ -1407,18 +1391,18 @@ static int blk_trace_synthesize_old_trace(struct trace_iterator *iter)
.time = iter->ts, .time = iter->ts,
}; };
if (!trace_seq_putmem(s, &old, offset)) trace_seq_putmem(s, &old, offset);
return 0; trace_seq_putmem(s, &t->sector,
return trace_seq_putmem(s, &t->sector, sizeof(old) - offset + t->pdu_len);
sizeof(old) - offset + t->pdu_len);
} }
static enum print_line_t static enum print_line_t
blk_trace_event_print_binary(struct trace_iterator *iter, int flags, blk_trace_event_print_binary(struct trace_iterator *iter, int flags,
struct trace_event *event) struct trace_event *event)
{ {
return blk_trace_synthesize_old_trace(iter) ? blk_trace_synthesize_old_trace(iter);
TRACE_TYPE_HANDLED : TRACE_TYPE_PARTIAL_LINE;
return trace_handle_return(&iter->seq);
} }
static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter) static enum print_line_t blk_tracer_print_line(struct trace_iterator *iter)
......
This diff is collapsed.
...@@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work); ...@@ -34,21 +34,19 @@ static void update_pages_handler(struct work_struct *work);
*/ */
int ring_buffer_print_entry_header(struct trace_seq *s) int ring_buffer_print_entry_header(struct trace_seq *s)
{ {
int ret; trace_seq_puts(s, "# compressed entry header\n");
trace_seq_puts(s, "\ttype_len : 5 bits\n");
ret = trace_seq_puts(s, "# compressed entry header\n"); trace_seq_puts(s, "\ttime_delta : 27 bits\n");
ret = trace_seq_puts(s, "\ttype_len : 5 bits\n"); trace_seq_puts(s, "\tarray : 32 bits\n");
ret = trace_seq_puts(s, "\ttime_delta : 27 bits\n"); trace_seq_putc(s, '\n');
ret = trace_seq_puts(s, "\tarray : 32 bits\n"); trace_seq_printf(s, "\tpadding : type == %d\n",
ret = trace_seq_putc(s, '\n'); RINGBUF_TYPE_PADDING);
ret = trace_seq_printf(s, "\tpadding : type == %d\n", trace_seq_printf(s, "\ttime_extend : type == %d\n",
RINGBUF_TYPE_PADDING); RINGBUF_TYPE_TIME_EXTEND);
ret = trace_seq_printf(s, "\ttime_extend : type == %d\n", trace_seq_printf(s, "\tdata max type_len == %d\n",
RINGBUF_TYPE_TIME_EXTEND); RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
RINGBUF_TYPE_DATA_TYPE_LEN_MAX);
return ret; return !trace_seq_has_overflowed(s);
} }
/* /*
...@@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta) ...@@ -419,32 +417,31 @@ static inline int test_time_stamp(u64 delta)
int ring_buffer_print_page_header(struct trace_seq *s) int ring_buffer_print_page_header(struct trace_seq *s)
{ {
struct buffer_data_page field; struct buffer_data_page field;
int ret;
ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
"offset:0;\tsize:%u;\tsigned:%u;\n",
(unsigned int)sizeof(field.time_stamp),
(unsigned int)is_signed_type(u64));
ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));
ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
1,
(unsigned int)is_signed_type(long));
ret = trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)BUF_PAGE_SIZE,
(unsigned int)is_signed_type(char));
return ret; trace_seq_printf(s, "\tfield: u64 timestamp;\t"
"offset:0;\tsize:%u;\tsigned:%u;\n",
(unsigned int)sizeof(field.time_stamp),
(unsigned int)is_signed_type(u64));
trace_seq_printf(s, "\tfield: local_t commit;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
(unsigned int)sizeof(field.commit),
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: int overwrite;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), commit),
1,
(unsigned int)is_signed_type(long));
trace_seq_printf(s, "\tfield: char data;\t"
"offset:%u;\tsize:%u;\tsigned:%u;\n",
(unsigned int)offsetof(typeof(field), data),
(unsigned int)BUF_PAGE_SIZE,
(unsigned int)is_signed_type(char));
return !trace_seq_has_overflowed(s);
} }
struct rb_irq_work { struct rb_irq_work {
......
This diff is collapsed.
...@@ -14,6 +14,7 @@ ...@@ -14,6 +14,7 @@
#include <linux/trace_seq.h> #include <linux/trace_seq.h>
#include <linux/ftrace_event.h> #include <linux/ftrace_event.h>
#include <linux/compiler.h> #include <linux/compiler.h>
#include <linux/trace_seq.h>
#ifdef CONFIG_FTRACE_SYSCALLS #ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h> /* For NR_SYSCALLS */ #include <asm/unistd.h> /* For NR_SYSCALLS */
...@@ -569,15 +570,6 @@ void trace_init_global_iter(struct trace_iterator *iter); ...@@ -569,15 +570,6 @@ void trace_init_global_iter(struct trace_iterator *iter);
void tracing_iter_reset(struct trace_iterator *iter, int cpu); void tracing_iter_reset(struct trace_iterator *iter, int cpu);
void tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc);
void tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *cur,
unsigned long flags, int pc);
void trace_function(struct trace_array *tr, void trace_function(struct trace_array *tr,
unsigned long ip, unsigned long ip,
unsigned long parent_ip, unsigned long parent_ip,
...@@ -597,9 +589,6 @@ void set_graph_array(struct trace_array *tr); ...@@ -597,9 +589,6 @@ void set_graph_array(struct trace_array *tr);
void tracing_start_cmdline_record(void); void tracing_start_cmdline_record(void);
void tracing_stop_cmdline_record(void); void tracing_stop_cmdline_record(void);
void tracing_sched_switch_assign_trace(struct trace_array *tr);
void tracing_stop_sched_switch_record(void);
void tracing_start_sched_switch_record(void);
int register_tracer(struct tracer *type); int register_tracer(struct tracer *type);
int is_tracing_stopped(void); int is_tracing_stopped(void);
...@@ -719,6 +708,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter); ...@@ -719,6 +708,8 @@ enum print_line_t print_trace_line(struct trace_iterator *iter);
extern unsigned long trace_flags; extern unsigned long trace_flags;
extern char trace_find_mark(unsigned long long duration);
/* Standard output formatting function used for function return traces */ /* Standard output formatting function used for function return traces */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
...@@ -737,7 +728,7 @@ extern unsigned long trace_flags; ...@@ -737,7 +728,7 @@ extern unsigned long trace_flags;
extern enum print_line_t extern enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags); print_graph_function_flags(struct trace_iterator *iter, u32 flags);
extern void print_graph_headers_flags(struct seq_file *s, u32 flags); extern void print_graph_headers_flags(struct seq_file *s, u32 flags);
extern enum print_line_t extern void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s); trace_print_graph_duration(unsigned long long duration, struct trace_seq *s);
extern void graph_trace_open(struct trace_iterator *iter); extern void graph_trace_open(struct trace_iterator *iter);
extern void graph_trace_close(struct trace_iterator *iter); extern void graph_trace_close(struct trace_iterator *iter);
......
...@@ -151,22 +151,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter, ...@@ -151,22 +151,21 @@ static enum print_line_t trace_branch_print(struct trace_iterator *iter,
trace_assign_type(field, iter->ent); trace_assign_type(field, iter->ent);
if (trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n", trace_seq_printf(&iter->seq, "[%s] %s:%s:%d\n",
field->correct ? " ok " : " MISS ", field->correct ? " ok " : " MISS ",
field->func, field->func,
field->file, field->file,
field->line)) field->line);
return TRACE_TYPE_PARTIAL_LINE;
return trace_handle_return(&iter->seq);
return TRACE_TYPE_HANDLED;
} }
static void branch_print_header(struct seq_file *s) static void branch_print_header(struct seq_file *s)
{ {
seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT" seq_puts(s, "# TASK-PID CPU# TIMESTAMP CORRECT"
" FUNC:FILE:LINE\n"); " FUNC:FILE:LINE\n"
seq_puts(s, "# | | | | | " "# | | | | | "
" |\n"); " |\n");
} }
static struct trace_event_functions trace_branch_funcs = { static struct trace_event_functions trace_branch_funcs = {
...@@ -233,12 +232,12 @@ extern unsigned long __stop_annotated_branch_profile[]; ...@@ -233,12 +232,12 @@ extern unsigned long __stop_annotated_branch_profile[];
static int annotated_branch_stat_headers(struct seq_file *m) static int annotated_branch_stat_headers(struct seq_file *m)
{ {
seq_printf(m, " correct incorrect %% "); seq_puts(m, " correct incorrect % "
seq_printf(m, " Function " " Function "
" File Line\n" " File Line\n"
" ------- --------- - " " ------- --------- - "
" -------- " " -------- "
" ---- ----\n"); " ---- ----\n");
return 0; return 0;
} }
...@@ -274,7 +273,7 @@ static int branch_stat_show(struct seq_file *m, void *v) ...@@ -274,7 +273,7 @@ static int branch_stat_show(struct seq_file *m, void *v)
seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect); seq_printf(m, "%8lu %8lu ", p->correct, p->incorrect);
if (percent < 0) if (percent < 0)
seq_printf(m, " X "); seq_puts(m, " X ");
else else
seq_printf(m, "%3ld ", percent); seq_printf(m, "%3ld ", percent);
seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line); seq_printf(m, "%-30.30s %-20.20s %d\n", p->func, f, p->line);
...@@ -362,12 +361,12 @@ extern unsigned long __stop_branch_profile[]; ...@@ -362,12 +361,12 @@ extern unsigned long __stop_branch_profile[];
static int all_branch_stat_headers(struct seq_file *m) static int all_branch_stat_headers(struct seq_file *m)
{ {
seq_printf(m, " miss hit %% "); seq_puts(m, " miss hit % "
seq_printf(m, " Function " " Function "
" File Line\n" " File Line\n"
" ------- --------- - " " ------- --------- - "
" -------- " " -------- "
" ---- ----\n"); " ---- ----\n");
return 0; return 0;
} }
......
...@@ -918,7 +918,7 @@ static int f_show(struct seq_file *m, void *v) ...@@ -918,7 +918,7 @@ static int f_show(struct seq_file *m, void *v)
case FORMAT_HEADER: case FORMAT_HEADER:
seq_printf(m, "name: %s\n", ftrace_event_name(call)); seq_printf(m, "name: %s\n", ftrace_event_name(call));
seq_printf(m, "ID: %d\n", call->event.type); seq_printf(m, "ID: %d\n", call->event.type);
seq_printf(m, "format:\n"); seq_puts(m, "format:\n");
return 0; return 0;
case FORMAT_FIELD_SEPERATOR: case FORMAT_FIELD_SEPERATOR:
...@@ -1988,7 +1988,7 @@ event_enable_print(struct seq_file *m, unsigned long ip, ...@@ -1988,7 +1988,7 @@ event_enable_print(struct seq_file *m, unsigned long ip,
ftrace_event_name(data->file->event_call)); ftrace_event_name(data->file->event_call));
if (data->count == -1) if (data->count == -1)
seq_printf(m, ":unlimited\n"); seq_puts(m, ":unlimited\n");
else else
seq_printf(m, ":count=%ld\n", data->count); seq_printf(m, ":count=%ld\n", data->count);
......
...@@ -45,6 +45,7 @@ enum filter_op_ids ...@@ -45,6 +45,7 @@ enum filter_op_ids
OP_GT, OP_GT,
OP_GE, OP_GE,
OP_BAND, OP_BAND,
OP_NOT,
OP_NONE, OP_NONE,
OP_OPEN_PAREN, OP_OPEN_PAREN,
}; };
...@@ -67,6 +68,7 @@ static struct filter_op filter_ops[] = { ...@@ -67,6 +68,7 @@ static struct filter_op filter_ops[] = {
{ OP_GT, ">", 5 }, { OP_GT, ">", 5 },
{ OP_GE, ">=", 5 }, { OP_GE, ">=", 5 },
{ OP_BAND, "&", 6 }, { OP_BAND, "&", 6 },
{ OP_NOT, "!", 6 },
{ OP_NONE, "OP_NONE", 0 }, { OP_NONE, "OP_NONE", 0 },
{ OP_OPEN_PAREN, "(", 0 }, { OP_OPEN_PAREN, "(", 0 },
}; };
...@@ -85,6 +87,7 @@ enum { ...@@ -85,6 +87,7 @@ enum {
FILT_ERR_MISSING_FIELD, FILT_ERR_MISSING_FIELD,
FILT_ERR_INVALID_FILTER, FILT_ERR_INVALID_FILTER,
FILT_ERR_IP_FIELD_ONLY, FILT_ERR_IP_FIELD_ONLY,
FILT_ERR_ILLEGAL_NOT_OP,
}; };
static char *err_text[] = { static char *err_text[] = {
...@@ -101,6 +104,7 @@ static char *err_text[] = { ...@@ -101,6 +104,7 @@ static char *err_text[] = {
"Missing field name and/or value", "Missing field name and/or value",
"Meaningless filter expression", "Meaningless filter expression",
"Only 'ip' field is supported for function trace", "Only 'ip' field is supported for function trace",
"Illegal use of '!'",
}; };
struct opstack_op { struct opstack_op {
...@@ -139,6 +143,7 @@ struct pred_stack { ...@@ -139,6 +143,7 @@ struct pred_stack {
int index; int index;
}; };
/* If not of not match is equal to not of not, then it is a match */
#define DEFINE_COMPARISON_PRED(type) \ #define DEFINE_COMPARISON_PRED(type) \
static int filter_pred_##type(struct filter_pred *pred, void *event) \ static int filter_pred_##type(struct filter_pred *pred, void *event) \
{ \ { \
...@@ -166,7 +171,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \ ...@@ -166,7 +171,7 @@ static int filter_pred_##type(struct filter_pred *pred, void *event) \
break; \ break; \
} \ } \
\ \
return match; \ return !!match == !pred->not; \
} }
#define DEFINE_EQUALITY_PRED(size) \ #define DEFINE_EQUALITY_PRED(size) \
...@@ -484,9 +489,10 @@ static int process_ops(struct filter_pred *preds, ...@@ -484,9 +489,10 @@ static int process_ops(struct filter_pred *preds,
if (!WARN_ON_ONCE(!pred->fn)) if (!WARN_ON_ONCE(!pred->fn))
match = pred->fn(pred, rec); match = pred->fn(pred, rec);
if (!!match == type) if (!!match == type)
return match; break;
} }
return match; /* If not of not match is equal to not of not, then it is a match */
return !!match == !op->not;
} }
struct filter_match_preds_data { struct filter_match_preds_data {
...@@ -735,10 +741,10 @@ static int filter_set_pred(struct event_filter *filter, ...@@ -735,10 +741,10 @@ static int filter_set_pred(struct event_filter *filter,
* then this op can be folded. * then this op can be folded.
*/ */
if (left->index & FILTER_PRED_FOLD && if (left->index & FILTER_PRED_FOLD &&
(left->op == dest->op || ((left->op == dest->op && !left->not) ||
left->left == FILTER_PRED_INVALID) && left->left == FILTER_PRED_INVALID) &&
right->index & FILTER_PRED_FOLD && right->index & FILTER_PRED_FOLD &&
(right->op == dest->op || ((right->op == dest->op && !right->not) ||
right->left == FILTER_PRED_INVALID)) right->left == FILTER_PRED_INVALID))
dest->index |= FILTER_PRED_FOLD; dest->index |= FILTER_PRED_FOLD;
...@@ -1028,7 +1034,7 @@ static int init_pred(struct filter_parse_state *ps, ...@@ -1028,7 +1034,7 @@ static int init_pred(struct filter_parse_state *ps,
} }
if (pred->op == OP_NE) if (pred->op == OP_NE)
pred->not = 1; pred->not ^= 1;
pred->fn = fn; pred->fn = fn;
return 0; return 0;
...@@ -1590,6 +1596,17 @@ static int replace_preds(struct ftrace_event_call *call, ...@@ -1590,6 +1596,17 @@ static int replace_preds(struct ftrace_event_call *call,
continue; continue;
} }
if (elt->op == OP_NOT) {
if (!n_preds || operand1 || operand2) {
parse_error(ps, FILT_ERR_ILLEGAL_NOT_OP, 0);
err = -EINVAL;
goto fail;
}
if (!dry_run)
filter->preds[n_preds - 1].not ^= 1;
continue;
}
if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) { if (WARN_ON(n_preds++ == MAX_FILTER_PRED)) {
parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0); parse_error(ps, FILT_ERR_TOO_MANY_PREDS, 0);
err = -ENOSPC; err = -ENOSPC;
......
...@@ -373,7 +373,7 @@ event_trigger_print(const char *name, struct seq_file *m, ...@@ -373,7 +373,7 @@ event_trigger_print(const char *name, struct seq_file *m,
{ {
long count = (long)data; long count = (long)data;
seq_printf(m, "%s", name); seq_puts(m, name);
if (count == -1) if (count == -1)
seq_puts(m, ":unlimited"); seq_puts(m, ":unlimited");
...@@ -383,7 +383,7 @@ event_trigger_print(const char *name, struct seq_file *m, ...@@ -383,7 +383,7 @@ event_trigger_print(const char *name, struct seq_file *m,
if (filter_str) if (filter_str)
seq_printf(m, " if %s\n", filter_str); seq_printf(m, " if %s\n", filter_str);
else else
seq_puts(m, "\n"); seq_putc(m, '\n');
return 0; return 0;
} }
...@@ -1105,7 +1105,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops, ...@@ -1105,7 +1105,7 @@ event_enable_trigger_print(struct seq_file *m, struct event_trigger_ops *ops,
if (data->filter_str) if (data->filter_str)
seq_printf(m, " if %s\n", data->filter_str); seq_printf(m, " if %s\n", data->filter_str);
else else
seq_puts(m, "\n"); seq_putc(m, '\n');
return 0; return 0;
} }
......
...@@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data = ...@@ -261,37 +261,74 @@ static struct tracer function_trace __tracer_data =
}; };
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
static int update_count(void **data) static void update_traceon_count(void **data, bool on)
{ {
unsigned long *count = (long *)data; long *count = (long *)data;
long old_count = *count;
if (!*count) /*
return 0; * Tracing gets disabled (or enabled) once per count.
* This function can be called at the same time on multiple CPUs.
* It is fine if both disable (or enable) tracing, as disabling
* (or enabling) the second time doesn't do anything as the
* state of the tracer is already disabled (or enabled).
* What needs to be synchronized in this case is that the count
* only gets decremented once, even if the tracer is disabled
* (or enabled) twice, as the second one is really a nop.
*
* The memory barriers guarantee that we only decrement the
* counter once. First the count is read to a local variable
* and a read barrier is used to make sure that it is loaded
* before checking if the tracer is in the state we want.
* If the tracer is not in the state we want, then the count
* is guaranteed to be the old count.
*
* Next the tracer is set to the state we want (disabled or enabled)
* then a write memory barrier is used to make sure that
* the new state is visible before changing the counter by
* one minus the old counter. This guarantees that another CPU
* executing this code will see the new state before seeing
* the new counter value, and would not do anything if the new
* counter is seen.
*
* Note, there is no synchronization between this and a user
* setting the tracing_on file. But we currently don't care
* about that.
*/
if (!old_count)
return;
if (*count != -1) /* Make sure we see count before checking tracing state */
(*count)--; smp_rmb();
return 1; if (on == !!tracing_is_on())
return;
if (on)
tracing_on();
else
tracing_off();
/* unlimited? */
if (old_count == -1)
return;
/* Make sure tracing state is visible before updating count */
smp_wmb();
*count = old_count - 1;
} }
static void static void
ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{ {
if (tracing_is_on()) update_traceon_count(data, 1);
return;
if (update_count(data))
tracing_on();
} }
static void static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{ {
if (!tracing_is_on()) update_traceon_count(data, 0);
return;
if (update_count(data))
tracing_off();
} }
static void static void
...@@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data) ...@@ -330,11 +367,49 @@ ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
static void static void
ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data) ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
{ {
if (!tracing_is_on()) long *count = (long *)data;
return; long old_count;
long new_count;
if (update_count(data)) /*
trace_dump_stack(STACK_SKIP); * Stack traces should only execute the number of times the
* user specified in the counter.
*/
do {
if (!tracing_is_on())
return;
old_count = *count;
if (!old_count)
return;
/* unlimited? */
if (old_count == -1) {
trace_dump_stack(STACK_SKIP);
return;
}
new_count = old_count - 1;
new_count = cmpxchg(count, old_count, new_count);
if (new_count == old_count)
trace_dump_stack(STACK_SKIP);
} while (new_count != old_count);
}
static int update_count(void **data)
{
unsigned long *count = (long *)data;
if (!*count)
return 0;
if (*count != -1)
(*count)--;
return 1;
} }
static void static void
...@@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m, ...@@ -361,7 +436,7 @@ ftrace_probe_print(const char *name, struct seq_file *m,
seq_printf(m, "%ps:%s", (void *)ip, name); seq_printf(m, "%ps:%s", (void *)ip, name);
if (count == -1) if (count == -1)
seq_printf(m, ":unlimited\n"); seq_puts(m, ":unlimited\n");
else else
seq_printf(m, ":count=%ld\n", count); seq_printf(m, ":count=%ld\n", count);
......
This diff is collapsed.
...@@ -20,10 +20,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) ...@@ -20,10 +20,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
{ {
/* use static because iter can be a bit big for the stack */ /* use static because iter can be a bit big for the stack */
static struct trace_iterator iter; static struct trace_iterator iter;
static struct ring_buffer_iter *buffer_iter[CONFIG_NR_CPUS];
unsigned int old_userobj; unsigned int old_userobj;
int cnt = 0, cpu; int cnt = 0, cpu;
trace_init_global_iter(&iter); trace_init_global_iter(&iter);
iter.buffer_iter = buffer_iter;
for_each_tracing_cpu(cpu) { for_each_tracing_cpu(cpu) {
atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
...@@ -57,19 +59,19 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) ...@@ -57,19 +59,19 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
ring_buffer_read_start(iter.buffer_iter[cpu_file]); ring_buffer_read_start(iter.buffer_iter[cpu_file]);
tracing_iter_reset(&iter, cpu_file); tracing_iter_reset(&iter, cpu_file);
} }
if (!trace_empty(&iter))
trace_find_next_entry_inc(&iter); while (trace_find_next_entry_inc(&iter)) {
while (!trace_empty(&iter)) {
if (!cnt) if (!cnt)
kdb_printf("---------------------------------\n"); kdb_printf("---------------------------------\n");
cnt++; cnt++;
if (trace_find_next_entry_inc(&iter) != NULL && !skip_lines) if (!skip_lines) {
print_trace_line(&iter); print_trace_line(&iter);
if (!skip_lines)
trace_printk_seq(&iter.seq); trace_printk_seq(&iter.seq);
else } else {
skip_lines--; skip_lines--;
}
if (KDB_FLAG(CMD_INTERRUPT)) if (KDB_FLAG(CMD_INTERRUPT))
goto out; goto out;
} }
...@@ -86,9 +88,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file) ...@@ -86,9 +88,12 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled); atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
} }
for_each_tracing_cpu(cpu) for_each_tracing_cpu(cpu) {
if (iter.buffer_iter[cpu]) if (iter.buffer_iter[cpu]) {
ring_buffer_read_finish(iter.buffer_iter[cpu]); ring_buffer_read_finish(iter.buffer_iter[cpu]);
iter.buffer_iter[cpu] = NULL;
}
}
} }
/* /*
......
...@@ -826,7 +826,7 @@ static int probes_seq_show(struct seq_file *m, void *v) ...@@ -826,7 +826,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
struct trace_kprobe *tk = v; struct trace_kprobe *tk = v;
int i; int i;
seq_printf(m, "%c", trace_kprobe_is_return(tk) ? 'r' : 'p'); seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tk->tp.call.class->system, seq_printf(m, ":%s/%s", tk->tp.call.class->system,
ftrace_event_name(&tk->tp.call)); ftrace_event_name(&tk->tp.call));
...@@ -840,7 +840,7 @@ static int probes_seq_show(struct seq_file *m, void *v) ...@@ -840,7 +840,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
for (i = 0; i < tk->tp.nr_args; i++) for (i = 0; i < tk->tp.nr_args; i++)
seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm); seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
seq_printf(m, "\n"); seq_putc(m, '\n');
return 0; return 0;
} }
...@@ -1024,27 +1024,22 @@ print_kprobe_event(struct trace_iterator *iter, int flags, ...@@ -1024,27 +1024,22 @@ print_kprobe_event(struct trace_iterator *iter, int flags,
field = (struct kprobe_trace_entry_head *)iter->ent; field = (struct kprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event); tp = container_of(event, struct trace_probe, call.event);
if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
goto partial;
if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET)) if (!seq_print_ip_sym(s, field->ip, flags | TRACE_ITER_SYM_OFFSET))
goto partial; goto out;
if (!trace_seq_puts(s, ")")) trace_seq_putc(s, ')');
goto partial;
data = (u8 *)&field[1]; data = (u8 *)&field[1];
for (i = 0; i < tp->nr_args; i++) for (i = 0; i < tp->nr_args; i++)
if (!tp->args[i].type->print(s, tp->args[i].name, if (!tp->args[i].type->print(s, tp->args[i].name,
data + tp->args[i].offset, field)) data + tp->args[i].offset, field))
goto partial; goto out;
if (!trace_seq_puts(s, "\n"))
goto partial;
return TRACE_TYPE_HANDLED; trace_seq_putc(s, '\n');
partial: out:
return TRACE_TYPE_PARTIAL_LINE; return trace_handle_return(s);
} }
static enum print_line_t static enum print_line_t
...@@ -1060,33 +1055,28 @@ print_kretprobe_event(struct trace_iterator *iter, int flags, ...@@ -1060,33 +1055,28 @@ print_kretprobe_event(struct trace_iterator *iter, int flags,
field = (struct kretprobe_trace_entry_head *)iter->ent; field = (struct kretprobe_trace_entry_head *)iter->ent;
tp = container_of(event, struct trace_probe, call.event); tp = container_of(event, struct trace_probe, call.event);
if (!trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call))) trace_seq_printf(s, "%s: (", ftrace_event_name(&tp->call));
goto partial;
if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET)) if (!seq_print_ip_sym(s, field->ret_ip, flags | TRACE_ITER_SYM_OFFSET))
goto partial; goto out;
if (!trace_seq_puts(s, " <- ")) trace_seq_puts(s, " <- ");
goto partial;
if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET)) if (!seq_print_ip_sym(s, field->func, flags & ~TRACE_ITER_SYM_OFFSET))
goto partial; goto out;
if (!trace_seq_puts(s, ")")) trace_seq_putc(s, ')');
goto partial;
data = (u8 *)&field[1]; data = (u8 *)&field[1];
for (i = 0; i < tp->nr_args; i++) for (i = 0; i < tp->nr_args; i++)
if (!tp->args[i].type->print(s, tp->args[i].name, if (!tp->args[i].type->print(s, tp->args[i].name,
data + tp->args[i].offset, field)) data + tp->args[i].offset, field))
goto partial; goto out;
if (!trace_seq_puts(s, "\n")) trace_seq_putc(s, '\n');
goto partial;
return TRACE_TYPE_HANDLED; out:
partial: return trace_handle_return(s);
return TRACE_TYPE_PARTIAL_LINE;
} }
......
...@@ -59,17 +59,15 @@ static void mmio_trace_start(struct trace_array *tr) ...@@ -59,17 +59,15 @@ static void mmio_trace_start(struct trace_array *tr)
mmio_reset_data(tr); mmio_reset_data(tr);
} }
static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{ {
int ret = 0;
int i; int i;
resource_size_t start, end; resource_size_t start, end;
const struct pci_driver *drv = pci_dev_driver(dev); const struct pci_driver *drv = pci_dev_driver(dev);
/* XXX: incomplete checks for trace_seq_printf() return value */ trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x",
ret += trace_seq_printf(s, "PCIDEV %02x%02x %04x%04x %x", dev->bus->number, dev->devfn,
dev->bus->number, dev->devfn, dev->vendor, dev->device, dev->irq);
dev->vendor, dev->device, dev->irq);
/* /*
* XXX: is pci_resource_to_user() appropriate, since we are * XXX: is pci_resource_to_user() appropriate, since we are
* supposed to interpret the __ioremap() phys_addr argument based on * supposed to interpret the __ioremap() phys_addr argument based on
...@@ -77,21 +75,20 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) ...@@ -77,21 +75,20 @@ static int mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
*/ */
for (i = 0; i < 7; i++) { for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
ret += trace_seq_printf(s, " %llx", trace_seq_printf(s, " %llx",
(unsigned long long)(start | (unsigned long long)(start |
(dev->resource[i].flags & PCI_REGION_FLAG_MASK))); (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
} }
for (i = 0; i < 7; i++) { for (i = 0; i < 7; i++) {
pci_resource_to_user(dev, i, &dev->resource[i], &start, &end); pci_resource_to_user(dev, i, &dev->resource[i], &start, &end);
ret += trace_seq_printf(s, " %llx", trace_seq_printf(s, " %llx",
dev->resource[i].start < dev->resource[i].end ? dev->resource[i].start < dev->resource[i].end ?
(unsigned long long)(end - start) + 1 : 0); (unsigned long long)(end - start) + 1 : 0);
} }
if (drv) if (drv)
ret += trace_seq_printf(s, " %s\n", drv->name); trace_seq_printf(s, " %s\n", drv->name);
else else
ret += trace_seq_puts(s, " \n"); trace_seq_puts(s, " \n");
return ret;
} }
static void destroy_header_iter(struct header_iter *hiter) static void destroy_header_iter(struct header_iter *hiter)
...@@ -179,28 +176,27 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) ...@@ -179,28 +176,27 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
unsigned long long t = ns2usecs(iter->ts); unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t; unsigned secs = (unsigned long)t;
int ret = 1;
trace_assign_type(field, entry); trace_assign_type(field, entry);
rw = &field->rw; rw = &field->rw;
switch (rw->opcode) { switch (rw->opcode) {
case MMIO_READ: case MMIO_READ:
ret = trace_seq_printf(s, trace_seq_printf(s,
"R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", "R %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id, rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys, (unsigned long long)rw->phys,
rw->value, rw->pc, 0); rw->value, rw->pc, 0);
break; break;
case MMIO_WRITE: case MMIO_WRITE:
ret = trace_seq_printf(s, trace_seq_printf(s,
"W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", "W %d %u.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
rw->width, secs, usec_rem, rw->map_id, rw->width, secs, usec_rem, rw->map_id,
(unsigned long long)rw->phys, (unsigned long long)rw->phys,
rw->value, rw->pc, 0); rw->value, rw->pc, 0);
break; break;
case MMIO_UNKNOWN_OP: case MMIO_UNKNOWN_OP:
ret = trace_seq_printf(s, trace_seq_printf(s,
"UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx," "UNKNOWN %u.%06lu %d 0x%llx %02lx,%02lx,"
"%02lx 0x%lx %d\n", "%02lx 0x%lx %d\n",
secs, usec_rem, rw->map_id, secs, usec_rem, rw->map_id,
...@@ -209,12 +205,11 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter) ...@@ -209,12 +205,11 @@ static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
(rw->value >> 0) & 0xff, rw->pc, 0); (rw->value >> 0) & 0xff, rw->pc, 0);
break; break;
default: default:
ret = trace_seq_puts(s, "rw what?\n"); trace_seq_puts(s, "rw what?\n");
break; break;
} }
if (ret)
return TRACE_TYPE_HANDLED; return trace_handle_return(s);
return TRACE_TYPE_PARTIAL_LINE;
} }
static enum print_line_t mmio_print_map(struct trace_iterator *iter) static enum print_line_t mmio_print_map(struct trace_iterator *iter)
...@@ -226,31 +221,29 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter) ...@@ -226,31 +221,29 @@ static enum print_line_t mmio_print_map(struct trace_iterator *iter)
unsigned long long t = ns2usecs(iter->ts); unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t; unsigned secs = (unsigned long)t;
int ret;
trace_assign_type(field, entry); trace_assign_type(field, entry);
m = &field->map; m = &field->map;
switch (m->opcode) { switch (m->opcode) {
case MMIO_PROBE: case MMIO_PROBE:
ret = trace_seq_printf(s, trace_seq_printf(s,
"MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", "MAP %u.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
secs, usec_rem, m->map_id, secs, usec_rem, m->map_id,
(unsigned long long)m->phys, m->virt, m->len, (unsigned long long)m->phys, m->virt, m->len,
0UL, 0); 0UL, 0);
break; break;
case MMIO_UNPROBE: case MMIO_UNPROBE:
ret = trace_seq_printf(s, trace_seq_printf(s,
"UNMAP %u.%06lu %d 0x%lx %d\n", "UNMAP %u.%06lu %d 0x%lx %d\n",
secs, usec_rem, m->map_id, 0UL, 0); secs, usec_rem, m->map_id, 0UL, 0);
break; break;
default: default:
ret = trace_seq_puts(s, "map what?\n"); trace_seq_puts(s, "map what?\n");
break; break;
} }
if (ret)
return TRACE_TYPE_HANDLED; return trace_handle_return(s);
return TRACE_TYPE_PARTIAL_LINE;
} }
static enum print_line_t mmio_print_mark(struct trace_iterator *iter) static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
...@@ -262,14 +255,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter) ...@@ -262,14 +255,11 @@ static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
unsigned long long t = ns2usecs(iter->ts); unsigned long long t = ns2usecs(iter->ts);
unsigned long usec_rem = do_div(t, USEC_PER_SEC); unsigned long usec_rem = do_div(t, USEC_PER_SEC);
unsigned secs = (unsigned long)t; unsigned secs = (unsigned long)t;
int ret;
/* The trailing newline must be in the message. */ /* The trailing newline must be in the message. */
ret = trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg); trace_seq_printf(s, "MARK %u.%06lu %s", secs, usec_rem, msg);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED; return trace_handle_return(s);
} }
static enum print_line_t mmio_print_line(struct trace_iterator *iter) static enum print_line_t mmio_print_line(struct trace_iterator *iter)
......
This diff is collapsed.
...@@ -35,17 +35,11 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry); ...@@ -35,17 +35,11 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
extern int __unregister_ftrace_event(struct trace_event *event); extern int __unregister_ftrace_event(struct trace_event *event);
extern struct rw_semaphore trace_event_sem; extern struct rw_semaphore trace_event_sem;
#define SEQ_PUT_FIELD_RET(s, x) \ #define SEQ_PUT_FIELD(s, x) \
do { \ trace_seq_putmem(s, &(x), sizeof(x))
if (!trace_seq_putmem(s, &(x), sizeof(x))) \
return TRACE_TYPE_PARTIAL_LINE; \ #define SEQ_PUT_HEX_FIELD(s, x) \
} while (0) trace_seq_putmem_hex(s, &(x), sizeof(x))
#define SEQ_PUT_HEX_FIELD_RET(s, x) \
do { \
if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
return TRACE_TYPE_PARTIAL_LINE; \
} while (0)
#endif #endif
...@@ -305,7 +305,7 @@ static int t_show(struct seq_file *m, void *v) ...@@ -305,7 +305,7 @@ static int t_show(struct seq_file *m, void *v)
seq_puts(m, "\\t"); seq_puts(m, "\\t");
break; break;
case '\\': case '\\':
seq_puts(m, "\\"); seq_putc(m, '\\');
break; break;
case '"': case '"':
seq_puts(m, "\\\""); seq_puts(m, "\\\"");
......
...@@ -40,7 +40,8 @@ const char *reserved_field_names[] = { ...@@ -40,7 +40,8 @@ const char *reserved_field_names[] = {
int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \ int PRINT_TYPE_FUNC_NAME(type)(struct trace_seq *s, const char *name, \
void *data, void *ent) \ void *data, void *ent) \
{ \ { \
return trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \ trace_seq_printf(s, " %s=" fmt, name, *(type *)data); \
return !trace_seq_has_overflowed(s); \
} \ } \
const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \ const char PRINT_TYPE_FMT_NAME(type)[] = fmt; \
NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type)); NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(type));
...@@ -61,10 +62,11 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name, ...@@ -61,10 +62,11 @@ int PRINT_TYPE_FUNC_NAME(string)(struct trace_seq *s, const char *name,
int len = *(u32 *)data >> 16; int len = *(u32 *)data >> 16;
if (!len) if (!len)
return trace_seq_printf(s, " %s=(fault)", name); trace_seq_printf(s, " %s=(fault)", name);
else else
return trace_seq_printf(s, " %s=\"%s\"", name, trace_seq_printf(s, " %s=\"%s\"", name,
(const char *)get_loc_data(data, ent)); (const char *)get_loc_data(data, ent));
return !trace_seq_has_overflowed(s);
} }
NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string)); NOKPROBE_SYMBOL(PRINT_TYPE_FUNC_NAME(string));
......
...@@ -14,122 +14,26 @@ ...@@ -14,122 +14,26 @@
#include "trace.h" #include "trace.h"
static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static int sched_ref; static int sched_ref;
static DEFINE_MUTEX(sched_register_mutex); static DEFINE_MUTEX(sched_register_mutex);
static int sched_stopped;
void
tracing_sched_switch_trace(struct trace_array *tr,
struct task_struct *prev,
struct task_struct *next,
unsigned long flags, int pc)
{
struct ftrace_event_call *call = &event_context_switch;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_CTX,
sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->prev_pid = prev->pid;
entry->prev_prio = prev->prio;
entry->prev_state = prev->state;
entry->next_pid = next->pid;
entry->next_prio = next->prio;
entry->next_state = next->state;
entry->next_cpu = task_cpu(next);
if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, flags, pc);
}
static void static void
probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next) probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{ {
struct trace_array_cpu *data;
unsigned long flags;
int cpu;
int pc;
if (unlikely(!sched_ref)) if (unlikely(!sched_ref))
return; return;
tracing_record_cmdline(prev); tracing_record_cmdline(prev);
tracing_record_cmdline(next); tracing_record_cmdline(next);
if (!tracer_enabled || sched_stopped)
return;
pc = preempt_count();
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
if (likely(!atomic_read(&data->disabled)))
tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
local_irq_restore(flags);
}
void
tracing_sched_wakeup_trace(struct trace_array *tr,
struct task_struct *wakee,
struct task_struct *curr,
unsigned long flags, int pc)
{
struct ftrace_event_call *call = &event_wakeup;
struct ring_buffer_event *event;
struct ctx_switch_entry *entry;
struct ring_buffer *buffer = tr->trace_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc);
if (!event)
return;
entry = ring_buffer_event_data(event);
entry->prev_pid = curr->pid;
entry->prev_prio = curr->prio;
entry->prev_state = curr->state;
entry->next_pid = wakee->pid;
entry->next_prio = wakee->prio;
entry->next_state = wakee->state;
entry->next_cpu = task_cpu(wakee);
if (!call_filter_check_discard(call, entry, buffer, event))
trace_buffer_unlock_commit(buffer, event, flags, pc);
} }
static void static void
probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success) probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{ {
struct trace_array_cpu *data;
unsigned long flags;
int cpu, pc;
if (unlikely(!sched_ref)) if (unlikely(!sched_ref))
return; return;
tracing_record_cmdline(current); tracing_record_cmdline(current);
if (!tracer_enabled || sched_stopped)
return;
pc = preempt_count();
local_irq_save(flags);
cpu = raw_smp_processor_id();
data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);
if (likely(!atomic_read(&data->disabled)))
tracing_sched_wakeup_trace(ctx_trace, wakee, current,
flags, pc);
local_irq_restore(flags);
} }
static int tracing_sched_register(void) static int tracing_sched_register(void)
...@@ -197,51 +101,3 @@ void tracing_stop_cmdline_record(void) ...@@ -197,51 +101,3 @@ void tracing_stop_cmdline_record(void)
{ {
tracing_stop_sched_switch(); tracing_stop_sched_switch();
} }
/**
* tracing_start_sched_switch_record - start tracing context switches
*
* Turns on context switch tracing for a tracer.
*/
void tracing_start_sched_switch_record(void)
{
if (unlikely(!ctx_trace)) {
WARN_ON(1);
return;
}
tracing_start_sched_switch();
mutex_lock(&sched_register_mutex);
tracer_enabled++;
mutex_unlock(&sched_register_mutex);
}
/**
* tracing_stop_sched_switch_record - start tracing context switches
*
* Turns off context switch tracing for a tracer.
*/
void tracing_stop_sched_switch_record(void)
{
mutex_lock(&sched_register_mutex);
tracer_enabled--;
WARN_ON(tracer_enabled < 0);
mutex_unlock(&sched_register_mutex);
tracing_stop_sched_switch();
}
/**
* tracing_sched_switch_assign_trace - assign a trace array for ctx switch
* @tr: trace array pointer to assign
*
* Some tracers might want to record the context switches in their
* trace. This function lets those tracers assign the trace array
* to use.
*/
void tracing_sched_switch_assign_trace(struct trace_array *tr)
{
ctx_trace = tr;
}
...@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu) ...@@ -365,6 +365,62 @@ probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
wakeup_current_cpu = cpu; wakeup_current_cpu = cpu;
} }
/*
 * Record a TRACE_CTX (context switch) event into @tr's ring buffer,
 * capturing pid/prio/state of both the outgoing and incoming task.
 * The event is discarded if the per-event filter rejects it.
 */
static void
tracing_sched_switch_trace(struct trace_array *tr,
			   struct task_struct *prev,
			   struct task_struct *next,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *event_call = &event_context_switch;
	struct ring_buffer *rb = tr->trace_buffer.buffer;
	struct ring_buffer_event *rb_event;
	struct ctx_switch_entry *field;

	rb_event = trace_buffer_lock_reserve(rb, TRACE_CTX,
					     sizeof(*field), flags, pc);
	if (!rb_event)
		return;

	field = ring_buffer_event_data(rb_event);

	/* Task being switched out. */
	field->prev_pid   = prev->pid;
	field->prev_prio  = prev->prio;
	field->prev_state = prev->state;

	/* Task being switched in. */
	field->next_pid   = next->pid;
	field->next_prio  = next->prio;
	field->next_state = next->state;
	field->next_cpu   = task_cpu(next);

	/* Commit only if the event filter did not discard the entry. */
	if (!call_filter_check_discard(event_call, field, rb, rb_event))
		trace_buffer_unlock_commit(rb, rb_event, flags, pc);
}
/*
 * Record a TRACE_WAKE event into @tr's ring buffer. Reuses the
 * ctx_switch_entry layout: "prev" fields describe the currently
 * running task (@curr), "next" fields describe the task being
 * woken (@wakee). Discarded if the per-event filter rejects it.
 */
static void
tracing_sched_wakeup_trace(struct trace_array *tr,
			   struct task_struct *wakee,
			   struct task_struct *curr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *event_call = &event_wakeup;
	struct ring_buffer *rb = tr->trace_buffer.buffer;
	struct ring_buffer_event *rb_event;
	struct ctx_switch_entry *field;

	rb_event = trace_buffer_lock_reserve(rb, TRACE_WAKE,
					     sizeof(*field), flags, pc);
	if (!rb_event)
		return;

	field = ring_buffer_event_data(rb_event);

	/* The waker (currently running task). */
	field->prev_pid   = curr->pid;
	field->prev_prio  = curr->prio;
	field->prev_state = curr->state;

	/* The task being woken up. */
	field->next_pid   = wakee->pid;
	field->next_prio  = wakee->prio;
	field->next_state = wakee->state;
	field->next_cpu   = task_cpu(wakee);

	/* Commit only if the event filter did not discard the entry. */
	if (!call_filter_check_discard(event_call, field, rb, rb_event))
		trace_buffer_unlock_commit(rb, rb_event, flags, pc);
}
static void notrace static void notrace
probe_wakeup_sched_switch(void *ignore, probe_wakeup_sched_switch(void *ignore,
struct task_struct *prev, struct task_struct *next) struct task_struct *prev, struct task_struct *next)
......
...@@ -69,20 +69,15 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) ...@@ -69,20 +69,15 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
* trace_seq_printf() is used to store strings into a special * trace_seq_printf() is used to store strings into a special
* buffer (@s). Then the output may be either used by * buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer. * the sequencer or pulled into another buffer.
*
* Returns 1 if we successfully written all the contents to
* the buffer.
* Returns 0 if we the length to write is bigger than the
* reserved buffer space. In this case, nothing gets written.
*/ */
int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{ {
unsigned int len = TRACE_SEQ_BUF_LEFT(s); unsigned int len = TRACE_SEQ_BUF_LEFT(s);
va_list ap; va_list ap;
int ret; int ret;
if (s->full || !len) if (s->full || !len)
return 0; return;
va_start(ap, fmt); va_start(ap, fmt);
ret = vsnprintf(s->buffer + s->len, len, fmt, ap); ret = vsnprintf(s->buffer + s->len, len, fmt, ap);
...@@ -91,12 +86,10 @@ int trace_seq_printf(struct trace_seq *s, const char *fmt, ...) ...@@ -91,12 +86,10 @@ int trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
/* If we can't write it all, don't bother writing anything */ /* If we can't write it all, don't bother writing anything */
if (ret >= len) { if (ret >= len) {
s->full = 1; s->full = 1;
return 0; return;
} }
s->len += ret; s->len += ret;
return 1;
} }
EXPORT_SYMBOL_GPL(trace_seq_printf); EXPORT_SYMBOL_GPL(trace_seq_printf);
...@@ -107,25 +100,18 @@ EXPORT_SYMBOL_GPL(trace_seq_printf); ...@@ -107,25 +100,18 @@ EXPORT_SYMBOL_GPL(trace_seq_printf);
* @nmaskbits: The number of bits that are valid in @maskp * @nmaskbits: The number of bits that are valid in @maskp
* *
* Writes a ASCII representation of a bitmask string into @s. * Writes a ASCII representation of a bitmask string into @s.
*
* Returns 1 if we successfully written all the contents to
* the buffer.
* Returns 0 if we the length to write is bigger than the
* reserved buffer space. In this case, nothing gets written.
*/ */
int trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp, void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits) int nmaskbits)
{ {
unsigned int len = TRACE_SEQ_BUF_LEFT(s); unsigned int len = TRACE_SEQ_BUF_LEFT(s);
int ret; int ret;
if (s->full || !len) if (s->full || !len)
return 0; return;
ret = bitmap_scnprintf(s->buffer, len, maskp, nmaskbits); ret = bitmap_scnprintf(s->buffer + s->len, len, maskp, nmaskbits);
s->len += ret; s->len += ret;
return 1;
} }
EXPORT_SYMBOL_GPL(trace_seq_bitmask); EXPORT_SYMBOL_GPL(trace_seq_bitmask);
...@@ -139,28 +125,24 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask); ...@@ -139,28 +125,24 @@ EXPORT_SYMBOL_GPL(trace_seq_bitmask);
* trace_seq_printf is used to store strings into a special * trace_seq_printf is used to store strings into a special
* buffer (@s). Then the output may be either used by * buffer (@s). Then the output may be either used by
* the sequencer or pulled into another buffer. * the sequencer or pulled into another buffer.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args) void trace_seq_vprintf(struct trace_seq *s, const char *fmt, va_list args)
{ {
unsigned int len = TRACE_SEQ_BUF_LEFT(s); unsigned int len = TRACE_SEQ_BUF_LEFT(s);
int ret; int ret;
if (s->full || !len) if (s->full || !len)
return 0; return;
ret = vsnprintf(s->buffer + s->len, len, fmt, args); ret = vsnprintf(s->buffer + s->len, len, fmt, args);
/* If we can't write it all, don't bother writing anything */ /* If we can't write it all, don't bother writing anything */
if (ret >= len) { if (ret >= len) {
s->full = 1; s->full = 1;
return 0; return;
} }
s->len += ret; s->len += ret;
return len;
} }
EXPORT_SYMBOL_GPL(trace_seq_vprintf); EXPORT_SYMBOL_GPL(trace_seq_vprintf);
...@@ -178,28 +160,24 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf); ...@@ -178,28 +160,24 @@ EXPORT_SYMBOL_GPL(trace_seq_vprintf);
* *
* This function will take the format and the binary array and finish * This function will take the format and the binary array and finish
* the conversion into the ASCII string within the buffer. * the conversion into the ASCII string within the buffer.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary) void trace_seq_bprintf(struct trace_seq *s, const char *fmt, const u32 *binary)
{ {
unsigned int len = TRACE_SEQ_BUF_LEFT(s); unsigned int len = TRACE_SEQ_BUF_LEFT(s);
int ret; int ret;
if (s->full || !len) if (s->full || !len)
return 0; return;
ret = bstr_printf(s->buffer + s->len, len, fmt, binary); ret = bstr_printf(s->buffer + s->len, len, fmt, binary);
/* If we can't write it all, don't bother writing anything */ /* If we can't write it all, don't bother writing anything */
if (ret >= len) { if (ret >= len) {
s->full = 1; s->full = 1;
return 0; return;
} }
s->len += ret; s->len += ret;
return len;
} }
EXPORT_SYMBOL_GPL(trace_seq_bprintf); EXPORT_SYMBOL_GPL(trace_seq_bprintf);
...@@ -212,25 +190,21 @@ EXPORT_SYMBOL_GPL(trace_seq_bprintf); ...@@ -212,25 +190,21 @@ EXPORT_SYMBOL_GPL(trace_seq_bprintf);
* copy to user routines. This function records a simple string * copy to user routines. This function records a simple string
* into a special buffer (@s) for later retrieval by a sequencer * into a special buffer (@s) for later retrieval by a sequencer
* or other mechanism. * or other mechanism.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_puts(struct trace_seq *s, const char *str) void trace_seq_puts(struct trace_seq *s, const char *str)
{ {
unsigned int len = strlen(str); unsigned int len = strlen(str);
if (s->full) if (s->full)
return 0; return;
if (len > TRACE_SEQ_BUF_LEFT(s)) { if (len > TRACE_SEQ_BUF_LEFT(s)) {
s->full = 1; s->full = 1;
return 0; return;
} }
memcpy(s->buffer + s->len, str, len); memcpy(s->buffer + s->len, str, len);
s->len += len; s->len += len;
return len;
} }
EXPORT_SYMBOL_GPL(trace_seq_puts); EXPORT_SYMBOL_GPL(trace_seq_puts);
...@@ -243,22 +217,18 @@ EXPORT_SYMBOL_GPL(trace_seq_puts); ...@@ -243,22 +217,18 @@ EXPORT_SYMBOL_GPL(trace_seq_puts);
* copy to user routines. This function records a simple charater * copy to user routines. This function records a simple charater
* into a special buffer (@s) for later retrieval by a sequencer * into a special buffer (@s) for later retrieval by a sequencer
* or other mechanism. * or other mechanism.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_putc(struct trace_seq *s, unsigned char c) void trace_seq_putc(struct trace_seq *s, unsigned char c)
{ {
if (s->full) if (s->full)
return 0; return;
if (TRACE_SEQ_BUF_LEFT(s) < 1) { if (TRACE_SEQ_BUF_LEFT(s) < 1) {
s->full = 1; s->full = 1;
return 0; return;
} }
s->buffer[s->len++] = c; s->buffer[s->len++] = c;
return 1;
} }
EXPORT_SYMBOL_GPL(trace_seq_putc); EXPORT_SYMBOL_GPL(trace_seq_putc);
...@@ -271,23 +241,19 @@ EXPORT_SYMBOL_GPL(trace_seq_putc); ...@@ -271,23 +241,19 @@ EXPORT_SYMBOL_GPL(trace_seq_putc);
* There may be cases where raw memory needs to be written into the * There may be cases where raw memory needs to be written into the
* buffer and a strcpy() would not work. Using this function allows * buffer and a strcpy() would not work. Using this function allows
* for such cases. * for such cases.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len) void trace_seq_putmem(struct trace_seq *s, const void *mem, unsigned int len)
{ {
if (s->full) if (s->full)
return 0; return;
if (len > TRACE_SEQ_BUF_LEFT(s)) { if (len > TRACE_SEQ_BUF_LEFT(s)) {
s->full = 1; s->full = 1;
return 0; return;
} }
memcpy(s->buffer + s->len, mem, len); memcpy(s->buffer + s->len, mem, len);
s->len += len; s->len += len;
return len;
} }
EXPORT_SYMBOL_GPL(trace_seq_putmem); EXPORT_SYMBOL_GPL(trace_seq_putmem);
...@@ -303,20 +269,17 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem); ...@@ -303,20 +269,17 @@ EXPORT_SYMBOL_GPL(trace_seq_putmem);
* This is similar to trace_seq_putmem() except instead of just copying the * This is similar to trace_seq_putmem() except instead of just copying the
* raw memory into the buffer it writes its ASCII representation of it * raw memory into the buffer it writes its ASCII representation of it
* in hex characters. * in hex characters.
*
* Returns how much it wrote to the buffer.
*/ */
int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, void trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
unsigned int len) unsigned int len)
{ {
unsigned char hex[HEX_CHARS]; unsigned char hex[HEX_CHARS];
const unsigned char *data = mem; const unsigned char *data = mem;
unsigned int start_len; unsigned int start_len;
int i, j; int i, j;
int cnt = 0;
if (s->full) if (s->full)
return 0; return;
while (len) { while (len) {
start_len = min(len, HEX_CHARS - 1); start_len = min(len, HEX_CHARS - 1);
...@@ -335,9 +298,8 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem, ...@@ -335,9 +298,8 @@ int trace_seq_putmem_hex(struct trace_seq *s, const void *mem,
len -= j / 2; len -= j / 2;
hex[j++] = ' '; hex[j++] = ' ';
cnt += trace_seq_putmem(s, hex, j); trace_seq_putmem(s, hex, j);
} }
return cnt;
} }
EXPORT_SYMBOL_GPL(trace_seq_putmem_hex); EXPORT_SYMBOL_GPL(trace_seq_putmem_hex);
......
This diff is collapsed.
...@@ -552,8 +552,7 @@ static int create_trace_uprobe(int argc, char **argv) ...@@ -552,8 +552,7 @@ static int create_trace_uprobe(int argc, char **argv)
return ret; return ret;
fail_address_parse: fail_address_parse:
if (inode) iput(inode);
iput(inode);
pr_info("Failed to parse address or file.\n"); pr_info("Failed to parse address or file.\n");
...@@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v) ...@@ -606,7 +605,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
for (i = 0; i < tu->tp.nr_args; i++) for (i = 0; i < tu->tp.nr_args; i++)
seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm); seq_printf(m, " %s=%s", tu->tp.args[i].name, tu->tp.args[i].comm);
seq_printf(m, "\n"); seq_putc(m, '\n');
return 0; return 0;
} }
...@@ -852,16 +851,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e ...@@ -852,16 +851,14 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
tu = container_of(event, struct trace_uprobe, tp.call.event); tu = container_of(event, struct trace_uprobe, tp.call.event);
if (is_ret_probe(tu)) { if (is_ret_probe(tu)) {
if (!trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)", trace_seq_printf(s, "%s: (0x%lx <- 0x%lx)",
ftrace_event_name(&tu->tp.call), ftrace_event_name(&tu->tp.call),
entry->vaddr[1], entry->vaddr[0])) entry->vaddr[1], entry->vaddr[0]);
goto partial;
data = DATAOF_TRACE_ENTRY(entry, true); data = DATAOF_TRACE_ENTRY(entry, true);
} else { } else {
if (!trace_seq_printf(s, "%s: (0x%lx)", trace_seq_printf(s, "%s: (0x%lx)",
ftrace_event_name(&tu->tp.call), ftrace_event_name(&tu->tp.call),
entry->vaddr[0])) entry->vaddr[0]);
goto partial;
data = DATAOF_TRACE_ENTRY(entry, false); data = DATAOF_TRACE_ENTRY(entry, false);
} }
...@@ -869,14 +866,13 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e ...@@ -869,14 +866,13 @@ print_uprobe_event(struct trace_iterator *iter, int flags, struct trace_event *e
struct probe_arg *parg = &tu->tp.args[i]; struct probe_arg *parg = &tu->tp.args[i];
if (!parg->type->print(s, parg->name, data + parg->offset, entry)) if (!parg->type->print(s, parg->name, data + parg->offset, entry))
goto partial; goto out;
} }
if (trace_seq_puts(s, "\n")) trace_seq_putc(s, '\n');
return TRACE_TYPE_HANDLED;
partial: out:
return TRACE_TYPE_PARTIAL_LINE; return trace_handle_return(s);
} }
typedef bool (*filter_func_t)(struct uprobe_consumer *self, typedef bool (*filter_func_t)(struct uprobe_consumer *self,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment