Commit 9e8529af authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "Along with the usual minor fixes and clean ups there are a few major
  changes with this pull request.

   1) Multiple buffers for the ftrace facility

  This feature has been requested by many people over the last few
  years.  I even heard that Google was about to implement it themselves.
  I finally had time and cleaned up the code such that you can now
  create multiple instances of the ftrace buffer and have different
  events go to different buffers.  This way, a low-frequency event will
  not be lost in the noise of a high-frequency event.

  Note: currently only events can go to different buffers; the tracers
  (i.e. function, function_graph and the latency tracers) can still
  write only to the main buffer.
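
  For example (a sketch of the new debugfs interface, assuming debugfs
  is mounted at /sys/kernel/debug; Documentation/trace/ftrace.txt,
  which this series also updates, has the exact details):

      # create a new buffer instance and direct one event into it
      mkdir /sys/kernel/debug/tracing/instances/foo
      echo 1 > /sys/kernel/debug/tracing/instances/foo/events/sched/sched_switch/enable
      cat /sys/kernel/debug/tracing/instances/foo/trace

      # remove the instance again when done
      rmdir /sys/kernel/debug/tracing/instances/foo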

   2) The function tracer triggers have now been extended.

  The function tracer previously had two triggers: one to enable tracing
  when a function is hit, and one to disable it.  Now you can also record
  a stack trace on a single (or many) function(s), take a snapshot of the
  buffer (copy it to the snapshot buffer), and enable or disable an event
  to be traced when a function is hit.
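
  For example, the new triggers are written into set_ftrace_filter (a
  sketch; kfree, kmalloc and schedule are only sample target functions,
  and the exact syntax is described in Documentation/trace/ftrace.txt):

      cd /sys/kernel/debug/tracing
      # record a stack trace every time kfree() is hit
      echo 'kfree:stacktrace' > set_ftrace_filter
      # take one snapshot of the buffer the first time kmalloc() is hit
      echo 'kmalloc:snapshot:1' > set_ftrace_filter
      # start tracing the sched_switch event when schedule() is hit
      echo 'schedule:enable_event:sched:sched_switch' > set_ftrace_filter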

   3) A perf clock has been added.

  A "perf" clock can be chosen to be used when tracing.  This will cause
  ftrace to use the same clock as perf uses, and hopefully this will
  make it easier to interleave the perf and ftrace data for analysis."
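
The clock is selected at run time through the trace_clock file, for
example (a sketch, assuming debugfs is mounted at /sys/kernel/debug):

    cat /sys/kernel/debug/tracing/trace_clock
    echo perf > /sys/kernel/debug/tracing/trace_clock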

* tag 'trace-3.10' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace: (82 commits)
  tracepoints: Prevent null probe from being added
  tracing: Compare to 1 instead of zero for is_signed_type()
  tracing: Remove obsolete macro guard _TRACE_PROFILE_INIT
  ftrace: Get rid of ftrace_profile_bits
  tracing: Check return value of tracing_init_dentry()
  tracing: Get rid of unneeded key calculation in ftrace_hash_move()
  tracing: Reset ftrace_graph_filter_enabled if count is zero
  tracing: Fix off-by-one on allocating stat->pages
  kernel: tracing: Use strlcpy instead of strncpy
  tracing: Update debugfs README file
  tracing: Fix ftrace_dump()
  tracing: Rename trace_event_mutex to trace_event_sem
  tracing: Fix comment about prefix in arch_syscall_match_sym_name()
  tracing: Convert trace_destroy_fields() to static
  tracing: Move find_event_field() into trace_events.c
  tracing: Use TRACE_MAX_PRINT instead of constant
  tracing: Use pr_warn_once instead of open coded implementation
  ring-buffer: Add ring buffer startup selftest
  tracing: Bring Documentation/trace/ftrace.txt up to date
  tracing: Add "perf" trace_clock
  ...

Conflicts:
	kernel/trace/ftrace.c
	kernel/trace/trace.c
parents ec25e246 4c69e6ea
@@ -320,6 +320,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			on: enable for both 32- and 64-bit processes
 			off: disable for both 32- and 64-bit processes
 
+	alloc_snapshot	[FTRACE]
+			Allocate the ftrace snapshot buffer on boot up when the
+			main buffer is allocated. This is handy if debugging
+			and you need to use tracing_snapshot() on boot up, and
+			do not want to use tracing_snapshot_alloc() as it needs
+			to be done where GFP_KERNEL allocations are allowed.
+
 	amd_iommu=	[HW,X86-64]
 			Pass parameters to the AMD IOMMU driver in the system.
 			Possible values are:
...
This source diff could not be displayed because it is too large.
@@ -261,8 +261,10 @@ struct ftrace_probe_ops {
 	void			(*func)(unsigned long ip,
 					unsigned long parent_ip,
 					void **data);
-	int			(*callback)(unsigned long ip, void **data);
-	void			(*free)(void **data);
+	int			(*init)(struct ftrace_probe_ops *ops,
+					unsigned long ip, void **data);
+	void			(*free)(struct ftrace_probe_ops *ops,
+					unsigned long ip, void **data);
 	int			(*print)(struct seq_file *m,
 					unsigned long ip,
 					struct ftrace_probe_ops *ops,
...
...@@ -8,6 +8,7 @@ ...@@ -8,6 +8,7 @@
#include <linux/perf_event.h> #include <linux/perf_event.h>
struct trace_array; struct trace_array;
struct trace_buffer;
struct tracer; struct tracer;
struct dentry; struct dentry;
...@@ -38,6 +39,12 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p, ...@@ -38,6 +39,12 @@ const char *ftrace_print_symbols_seq_u64(struct trace_seq *p,
const char *ftrace_print_hex_seq(struct trace_seq *p, const char *ftrace_print_hex_seq(struct trace_seq *p,
const unsigned char *buf, int len); const unsigned char *buf, int len);
struct trace_iterator;
struct trace_event;
int ftrace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *event);
/* /*
* The trace entry - the most basic unit of tracing. This is what * The trace entry - the most basic unit of tracing. This is what
* is printed in the end as a single line in the trace output, such as: * is printed in the end as a single line in the trace output, such as:
...@@ -61,6 +68,7 @@ struct trace_entry { ...@@ -61,6 +68,7 @@ struct trace_entry {
struct trace_iterator { struct trace_iterator {
struct trace_array *tr; struct trace_array *tr;
struct tracer *trace; struct tracer *trace;
struct trace_buffer *trace_buffer;
void *private; void *private;
int cpu_file; int cpu_file;
struct mutex mutex; struct mutex mutex;
...@@ -95,8 +103,6 @@ enum trace_iter_flags { ...@@ -95,8 +103,6 @@ enum trace_iter_flags {
}; };
struct trace_event;
typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter, typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
int flags, struct trace_event *event); int flags, struct trace_event *event);
...@@ -128,6 +134,13 @@ enum print_line_t { ...@@ -128,6 +134,13 @@ enum print_line_t {
void tracing_generic_entry_update(struct trace_entry *entry, void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags, unsigned long flags,
int pc); int pc);
struct ftrace_event_file;
struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
struct ftrace_event_file *ftrace_file,
int type, unsigned long len,
unsigned long flags, int pc);
struct ring_buffer_event * struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer, trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
int type, unsigned long len, int type, unsigned long len,
...@@ -182,53 +195,49 @@ extern int ftrace_event_reg(struct ftrace_event_call *event, ...@@ -182,53 +195,49 @@ extern int ftrace_event_reg(struct ftrace_event_call *event,
enum trace_reg type, void *data); enum trace_reg type, void *data);
enum { enum {
TRACE_EVENT_FL_ENABLED_BIT,
TRACE_EVENT_FL_FILTERED_BIT, TRACE_EVENT_FL_FILTERED_BIT,
TRACE_EVENT_FL_RECORDED_CMD_BIT,
TRACE_EVENT_FL_CAP_ANY_BIT, TRACE_EVENT_FL_CAP_ANY_BIT,
TRACE_EVENT_FL_NO_SET_FILTER_BIT, TRACE_EVENT_FL_NO_SET_FILTER_BIT,
TRACE_EVENT_FL_IGNORE_ENABLE_BIT, TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
TRACE_EVENT_FL_WAS_ENABLED_BIT,
}; };
/*
* Event flags:
* FILTERED - The event has a filter attached
* CAP_ANY - Any user can enable for perf
* NO_SET_FILTER - Set when filter has error and is to be ignored
* IGNORE_ENABLE - For ftrace internal events, do not enable with debugfs file
* WAS_ENABLED - Set and stays set when an event was ever enabled
* (used for module unloading, if a module event is enabled,
* it is best to clear the buffers that used it).
*/
enum { enum {
TRACE_EVENT_FL_ENABLED = (1 << TRACE_EVENT_FL_ENABLED_BIT),
TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT), TRACE_EVENT_FL_FILTERED = (1 << TRACE_EVENT_FL_FILTERED_BIT),
TRACE_EVENT_FL_RECORDED_CMD = (1 << TRACE_EVENT_FL_RECORDED_CMD_BIT),
TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT), TRACE_EVENT_FL_CAP_ANY = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT), TRACE_EVENT_FL_NO_SET_FILTER = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT), TRACE_EVENT_FL_IGNORE_ENABLE = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
TRACE_EVENT_FL_WAS_ENABLED = (1 << TRACE_EVENT_FL_WAS_ENABLED_BIT),
}; };
struct ftrace_event_call { struct ftrace_event_call {
struct list_head list; struct list_head list;
struct ftrace_event_class *class; struct ftrace_event_class *class;
char *name; char *name;
struct dentry *dir;
struct trace_event event; struct trace_event event;
const char *print_fmt; const char *print_fmt;
struct event_filter *filter; struct event_filter *filter;
struct list_head *files;
void *mod; void *mod;
void *data; void *data;
/* /*
* 32 bit flags: * bit 0: filter_active
* bit 1: enabled * bit 1: allow trace by non root (cap any)
* bit 2: filter_active * bit 2: failed to apply filter
* bit 3: enabled cmd record * bit 3: ftrace internal event (do not enable)
* bit 4: allow trace by non root (cap any) * bit 4: Event was enabled by module
* bit 5: failed to apply filter
* bit 6: ftrace internal event (do not enable)
*
* Changes to flags must hold the event_mutex.
*
* Note: Reads of flags do not hold the event_mutex since
* they occur in critical sections. But the way flags
* is currently used, these changes do no affect the code
* except that when a change is made, it may have a slight
* delay in propagating the changes to other CPUs due to
* caching and such.
*/ */
unsigned int flags; int flags; /* static flags of different events */
#ifdef CONFIG_PERF_EVENTS #ifdef CONFIG_PERF_EVENTS
int perf_refcount; int perf_refcount;
...@@ -236,6 +245,56 @@ struct ftrace_event_call { ...@@ -236,6 +245,56 @@ struct ftrace_event_call {
#endif #endif
}; };
struct trace_array;
struct ftrace_subsystem_dir;
enum {
FTRACE_EVENT_FL_ENABLED_BIT,
FTRACE_EVENT_FL_RECORDED_CMD_BIT,
FTRACE_EVENT_FL_SOFT_MODE_BIT,
FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
};
/*
* Ftrace event file flags:
* ENABLED - The event is enabled
* RECORDED_CMD - The comms should be recorded at sched_switch
* SOFT_MODE - The event is enabled/disabled by SOFT_DISABLED
* SOFT_DISABLED - When set, do not trace the event (even though its
* tracepoint may be enabled)
*/
enum {
FTRACE_EVENT_FL_ENABLED = (1 << FTRACE_EVENT_FL_ENABLED_BIT),
FTRACE_EVENT_FL_RECORDED_CMD = (1 << FTRACE_EVENT_FL_RECORDED_CMD_BIT),
FTRACE_EVENT_FL_SOFT_MODE = (1 << FTRACE_EVENT_FL_SOFT_MODE_BIT),
FTRACE_EVENT_FL_SOFT_DISABLED = (1 << FTRACE_EVENT_FL_SOFT_DISABLED_BIT),
};
struct ftrace_event_file {
struct list_head list;
struct ftrace_event_call *event_call;
struct dentry *dir;
struct trace_array *tr;
struct ftrace_subsystem_dir *system;
/*
* 32 bit flags:
* bit 0: enabled
* bit 1: enabled cmd record
* bit 2: enable/disable with the soft disable bit
* bit 3: soft disabled
*
* Note: The bits must be set atomically to prevent races
* from other writers. Reads of flags do not need to be in
* sync as they occur in critical sections. But the way flags
* is currently used, these changes do not affect the code
* except that when a change is made, it may have a slight
* delay in propagating the changes to other CPUs due to
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
};
#define __TRACE_EVENT_FLAGS(name, value) \ #define __TRACE_EVENT_FLAGS(name, value) \
static int __init trace_init_flags_##name(void) \ static int __init trace_init_flags_##name(void) \
{ \ { \
...@@ -274,7 +333,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type, ...@@ -274,7 +333,7 @@ extern int trace_define_field(struct ftrace_event_call *call, const char *type,
extern int trace_add_event_call(struct ftrace_event_call *call); extern int trace_add_event_call(struct ftrace_event_call *call);
extern void trace_remove_event_call(struct ftrace_event_call *call); extern void trace_remove_event_call(struct ftrace_event_call *call);
#define is_signed_type(type) (((type)(-1)) < (type)0) #define is_signed_type(type) (((type)(-1)) < (type)1)
int trace_set_clr_event(const char *system, const char *event, int set); int trace_set_clr_event(const char *system, const char *event, int set);
......
...@@ -486,6 +486,8 @@ enum ftrace_dump_mode { ...@@ -486,6 +486,8 @@ enum ftrace_dump_mode {
void tracing_on(void); void tracing_on(void);
void tracing_off(void); void tracing_off(void);
int tracing_is_on(void); int tracing_is_on(void);
void tracing_snapshot(void);
void tracing_snapshot_alloc(void);
extern void tracing_start(void); extern void tracing_start(void);
extern void tracing_stop(void); extern void tracing_stop(void);
...@@ -515,10 +517,32 @@ do { \ ...@@ -515,10 +517,32 @@ do { \
* *
* This is intended as a debugging tool for the developer only. * This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_printks scattered around in * Please refrain from leaving trace_printks scattered around in
* your code. * your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used)
*
* A little optization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
* By stringifying the args and checking the size we can tell
* whether or not there are args. __stringify((__VA_ARGS__)) will
* turn into "()\0" with a size of 3 when there are no args, anything
* else will be bigger. All we need to do is define a string to this,
* and then take its size and compare to 3. If it's bigger, use
* do_trace_printk() otherwise, optimize it to trace_puts(). Then just
* let gcc optimize the rest.
*/ */
#define trace_printk(fmt, args...) \ #define trace_printk(fmt, ...) \
do { \
char _______STR[] = __stringify((__VA_ARGS__)); \
if (sizeof(_______STR) > 3) \
do_trace_printk(fmt, ##__VA_ARGS__); \
else \
trace_puts(fmt); \
} while (0)
#define do_trace_printk(fmt, args...) \
do { \ do { \
static const char *trace_printk_fmt \ static const char *trace_printk_fmt \
__attribute__((section("__trace_printk_fmt"))) = \ __attribute__((section("__trace_printk_fmt"))) = \
...@@ -538,7 +562,45 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...); ...@@ -538,7 +562,45 @@ int __trace_bprintk(unsigned long ip, const char *fmt, ...);
extern __printf(2, 3) extern __printf(2, 3)
int __trace_printk(unsigned long ip, const char *fmt, ...); int __trace_printk(unsigned long ip, const char *fmt, ...);
extern void trace_dump_stack(void); /**
* trace_puts - write a string into the ftrace buffer
* @str: the string to record
*
* Note: __trace_bputs is an internal function for trace_puts and
* the @ip is passed in via the trace_puts macro.
*
* This is similar to trace_printk() but is made for those really fast
* paths that a developer wants the least amount of "Heisenbug" affects,
* where the processing of the print format is still too much.
*
* This function allows a kernel developer to debug fast path sections
* that printk is not appropriate for. By scattering in various
* printk like tracing in the code, a developer can quickly see
* where problems are occurring.
*
* This is intended as a debugging tool for the developer only.
* Please refrain from leaving trace_puts scattered around in
* your code. (Extra memory is used for special buffers that are
* allocated when trace_puts() is used)
*
* Returns: 0 if nothing was written, positive # if string was.
* (1 when __trace_bputs is used, strlen(str) when __trace_puts is used)
*/
extern int __trace_bputs(unsigned long ip, const char *str);
extern int __trace_puts(unsigned long ip, const char *str, int size);
#define trace_puts(str) ({ \
static const char *trace_printk_fmt \
__attribute__((section("__trace_printk_fmt"))) = \
__builtin_constant_p(str) ? str : NULL; \
\
if (__builtin_constant_p(str)) \
__trace_bputs(_THIS_IP_, trace_printk_fmt); \
else \
__trace_puts(_THIS_IP_, str, strlen(str)); \
})
extern void trace_dump_stack(int skip);
/* /*
* The double __builtin_constant_p is because gcc will give us an error * The double __builtin_constant_p is because gcc will give us an error
...@@ -573,6 +635,8 @@ static inline void trace_dump_stack(void) { } ...@@ -573,6 +635,8 @@ static inline void trace_dump_stack(void) { }
static inline void tracing_on(void) { } static inline void tracing_on(void) { }
static inline void tracing_off(void) { } static inline void tracing_off(void) { }
static inline int tracing_is_on(void) { return 0; } static inline int tracing_is_on(void) { return 0; }
static inline void tracing_snapshot(void) { }
static inline void tracing_snapshot_alloc(void) { }
static inline __printf(1, 2) static inline __printf(1, 2)
int trace_printk(const char *fmt, ...) int trace_printk(const char *fmt, ...)
......
@@ -4,6 +4,7 @@
 #include <linux/kmemcheck.h>
 #include <linux/mm.h>
 #include <linux/seq_file.h>
+#include <linux/poll.h>
 
 struct ring_buffer;
 struct ring_buffer_iter;
@@ -96,6 +97,11 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 	__ring_buffer_alloc((size), (flags), &__key);	\
 })
 
+void ring_buffer_wait(struct ring_buffer *buffer, int cpu);
+int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+			  struct file *filp, poll_table *poll_table);
+
 #define RING_BUFFER_ALL_CPUS -1
 
 void ring_buffer_free(struct ring_buffer *buffer);
...
@@ -16,6 +16,7 @@
 
 extern u64 notrace trace_clock_local(void);
 extern u64 notrace trace_clock(void);
+extern u64 notrace trace_clock_jiffies(void);
 extern u64 notrace trace_clock_global(void);
 
 extern u64 notrace trace_clock_counter(void);
...
...@@ -227,29 +227,18 @@ static notrace enum print_line_t \ ...@@ -227,29 +227,18 @@ static notrace enum print_line_t \
ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \ ftrace_raw_output_##call(struct trace_iterator *iter, int flags, \
struct trace_event *trace_event) \ struct trace_event *trace_event) \
{ \ { \
struct ftrace_event_call *event; \
struct trace_seq *s = &iter->seq; \ struct trace_seq *s = &iter->seq; \
struct trace_seq __maybe_unused *p = &iter->tmp_seq; \
struct ftrace_raw_##call *field; \ struct ftrace_raw_##call *field; \
struct trace_entry *entry; \
struct trace_seq *p = &iter->tmp_seq; \
int ret; \ int ret; \
\ \
event = container_of(trace_event, struct ftrace_event_call, \ field = (typeof(field))iter->ent; \
event); \
\
entry = iter->ent; \
\
if (entry->type != event->event.type) { \
WARN_ON_ONCE(1); \
return TRACE_TYPE_UNHANDLED; \
} \
\
field = (typeof(field))entry; \
\ \
trace_seq_init(p); \ ret = ftrace_raw_output_prep(iter, trace_event); \
ret = trace_seq_printf(s, "%s: ", event->name); \
if (ret) \ if (ret) \
ret = trace_seq_printf(s, print); \ return ret; \
\
ret = trace_seq_printf(s, print); \
if (!ret) \ if (!ret) \
return TRACE_TYPE_PARTIAL_LINE; \ return TRACE_TYPE_PARTIAL_LINE; \
\ \
...@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \ ...@@ -335,7 +324,7 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS #undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
static int notrace \ static int notrace __init \
ftrace_define_fields_##call(struct ftrace_event_call *event_call) \ ftrace_define_fields_##call(struct ftrace_event_call *event_call) \
{ \ { \
struct ftrace_raw_##call field; \ struct ftrace_raw_##call field; \
...@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \ ...@@ -414,7 +403,8 @@ static inline notrace int ftrace_get_offsets_##call( \
* *
* static void ftrace_raw_event_<call>(void *__data, proto) * static void ftrace_raw_event_<call>(void *__data, proto)
* { * {
* struct ftrace_event_call *event_call = __data; * struct ftrace_event_file *ftrace_file = __data;
* struct ftrace_event_call *event_call = ftrace_file->event_call;
* struct ftrace_data_offsets_<call> __maybe_unused __data_offsets; * struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
* struct ring_buffer_event *event; * struct ring_buffer_event *event;
* struct ftrace_raw_<call> *entry; <-- defined in stage 1 * struct ftrace_raw_<call> *entry; <-- defined in stage 1
...@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \ ...@@ -423,12 +413,16 @@ static inline notrace int ftrace_get_offsets_##call( \
* int __data_size; * int __data_size;
* int pc; * int pc;
* *
* if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
* &ftrace_file->flags))
* return;
*
* local_save_flags(irq_flags); * local_save_flags(irq_flags);
* pc = preempt_count(); * pc = preempt_count();
* *
* __data_size = ftrace_get_offsets_<call>(&__data_offsets, args); * __data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
* *
* event = trace_current_buffer_lock_reserve(&buffer, * event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
* event_<call>->event.type, * event_<call>->event.type,
* sizeof(*entry) + __data_size, * sizeof(*entry) + __data_size,
* irq_flags, pc); * irq_flags, pc);
...@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \ ...@@ -440,7 +434,7 @@ static inline notrace int ftrace_get_offsets_##call( \
* __array macros. * __array macros.
* *
* if (!filter_current_check_discard(buffer, event_call, entry, event)) * if (!filter_current_check_discard(buffer, event_call, entry, event))
* trace_current_buffer_unlock_commit(buffer, * trace_nowake_buffer_unlock_commit(buffer,
* event, irq_flags, pc); * event, irq_flags, pc);
* } * }
* *
...@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \ ...@@ -518,7 +512,8 @@ static inline notrace int ftrace_get_offsets_##call( \
static notrace void \ static notrace void \
ftrace_raw_event_##call(void *__data, proto) \ ftrace_raw_event_##call(void *__data, proto) \
{ \ { \
struct ftrace_event_call *event_call = __data; \ struct ftrace_event_file *ftrace_file = __data; \
struct ftrace_event_call *event_call = ftrace_file->event_call; \
struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\ struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
struct ring_buffer_event *event; \ struct ring_buffer_event *event; \
struct ftrace_raw_##call *entry; \ struct ftrace_raw_##call *entry; \
...@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \ ...@@ -527,12 +522,16 @@ ftrace_raw_event_##call(void *__data, proto) \
int __data_size; \ int __data_size; \
int pc; \ int pc; \
\ \
if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT, \
&ftrace_file->flags)) \
return; \
\
local_save_flags(irq_flags); \ local_save_flags(irq_flags); \
pc = preempt_count(); \ pc = preempt_count(); \
\ \
__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \ __data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
\ \
event = trace_current_buffer_lock_reserve(&buffer, \ event = trace_event_buffer_lock_reserve(&buffer, ftrace_file, \
event_call->event.type, \ event_call->event.type, \
sizeof(*entry) + __data_size, \ sizeof(*entry) + __data_size, \
irq_flags, pc); \ irq_flags, pc); \
...@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \ ...@@ -581,7 +580,7 @@ static inline void ftrace_test_probe_##call(void) \
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \ #define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
_TRACE_PERF_PROTO(call, PARAMS(proto)); \ _TRACE_PERF_PROTO(call, PARAMS(proto)); \
static const char print_fmt_##call[] = print; \ static const char print_fmt_##call[] = print; \
static struct ftrace_event_class __used event_class_##call = { \ static struct ftrace_event_class __used __refdata event_class_##call = { \
.system = __stringify(TRACE_SYSTEM), \ .system = __stringify(TRACE_SYSTEM), \
.define_fields = ftrace_define_fields_##call, \ .define_fields = ftrace_define_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\ .fields = LIST_HEAD_INIT(event_class_##call.fields),\
...@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \ ...@@ -705,5 +704,3 @@ static inline void perf_test_probe_##call(void) \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE) #include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */
#undef _TRACE_PROFILE_INIT
@@ -176,6 +176,8 @@ config IRQSOFF_TRACER
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
 	select RING_BUFFER_ALLOW_SWAP
+	select TRACER_SNAPSHOT
+	select TRACER_SNAPSHOT_PER_CPU_SWAP
 	help
 	  This option measures the time spent in irqs-off critical
 	  sections, with microsecond accuracy.
@@ -198,6 +200,8 @@ config PREEMPT_TRACER
 	select GENERIC_TRACER
 	select TRACER_MAX_TRACE
 	select RING_BUFFER_ALLOW_SWAP
+	select TRACER_SNAPSHOT
+	select TRACER_SNAPSHOT_PER_CPU_SWAP
 	help
 	  This option measures the time spent in preemption-off critical
 	  sections, with microsecond accuracy.
@@ -217,6 +221,7 @@ config SCHED_TRACER
 	select GENERIC_TRACER
 	select CONTEXT_SWITCH_TRACER
 	select TRACER_MAX_TRACE
+	select TRACER_SNAPSHOT
 	help
 	  This tracer tracks the latency of the highest priority task
 	  to be scheduled in, starting from the point it has woken up.
@@ -248,6 +253,27 @@ config TRACER_SNAPSHOT
 	      echo 1 > /sys/kernel/debug/tracing/snapshot
 	      cat snapshot
 
+config TRACER_SNAPSHOT_PER_CPU_SWAP
+	bool "Allow snapshot to swap per CPU"
+	depends on TRACER_SNAPSHOT
+	select RING_BUFFER_ALLOW_SWAP
+	help
+	  Allow doing a snapshot of a single CPU buffer instead of a
+	  full swap (all buffers). If this is set, then the following is
+	  allowed:
+
+	  echo 1 > /sys/kernel/debug/tracing/per_cpu/cpu2/snapshot
+
+	  After which, only the tracing buffer for CPU 2 was swapped with
+	  the main tracing buffer, and the other CPU buffers remain the same.
+
+	  When this is enabled, this adds a little more overhead to the
+	  trace recording, as it needs to add some checks to synchronize
+	  recording with swaps. But this does not affect the performance
+	  of the overall system. This is enabled by default when the preempt
+	  or irq latency tracers are enabled, as those need to swap as well
+	  and already adds the overhead (plus a lot more).
+
 config TRACE_BRANCH_PROFILING
 	bool
 	select GENERIC_TRACER
@@ -524,6 +550,29 @@ config RING_BUFFER_BENCHMARK
 
 	  If unsure, say N.
 
+config RING_BUFFER_STARTUP_TEST
+	bool "Ring buffer startup self test"
+	depends on RING_BUFFER
+	help
+	  Run a simple self test on the ring buffer on boot up. Late in the
+	  kernel boot sequence, the test will start that kicks off
+	  a thread per cpu. Each thread will write various size events
+	  into the ring buffer. Another thread is created to send IPIs
+	  to each of the threads, where the IPI handler will also write
+	  to the ring buffer, to test/stress the nesting ability.
+	  If any anomalies are discovered, a warning will be displayed
+	  and all ring buffers will be disabled.
+
+	  The test runs for 10 seconds. This will slow your boot time
+	  by at least 10 more seconds.
+
+	  At the end of the test, statics and more checks are done.
+	  It will output the stats of each per cpu buffer. What
+	  was written, the sizes, what was read, what was lost, and
+	  other similar details.
+
+	  If unsure, say N
+
 endif # FTRACE
 
 endif # TRACING_SUPPORT
...
@@ -72,7 +72,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 	bool blk_tracer = blk_tracer_enabled;
 
 	if (blk_tracer) {
-		buffer = blk_tr->buffer;
+		buffer = blk_tr->trace_buffer.buffer;
 		pc = preempt_count();
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + len,
@@ -218,7 +218,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 	if (blk_tracer) {
 		tracing_record_cmdline(current);
 
-		buffer = blk_tr->buffer;
+		buffer = blk_tr->trace_buffer.buffer;
 		pc = preempt_count();
 		event = trace_buffer_lock_reserve(buffer, TRACE_BLK,
 						  sizeof(*t) + pdu_len,
...
...@@ -486,7 +486,6 @@ struct ftrace_profile_stat { ...@@ -486,7 +486,6 @@ struct ftrace_profile_stat {
#define PROFILES_PER_PAGE \ #define PROFILES_PER_PAGE \
(PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile)) (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
static int ftrace_profile_bits __read_mostly;
static int ftrace_profile_enabled __read_mostly; static int ftrace_profile_enabled __read_mostly;
/* ftrace_profile_lock - synchronize the enable and disable of the profiler */ /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
...@@ -494,7 +493,8 @@ static DEFINE_MUTEX(ftrace_profile_lock); ...@@ -494,7 +493,8 @@ static DEFINE_MUTEX(ftrace_profile_lock);
static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats); static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
#define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */ #define FTRACE_PROFILE_HASH_BITS 10
#define FTRACE_PROFILE_HASH_SIZE (1 << FTRACE_PROFILE_HASH_BITS)
static void * static void *
function_stat_next(void *v, int idx) function_stat_next(void *v, int idx)
...@@ -676,7 +676,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) ...@@ -676,7 +676,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
for (i = 0; i < pages; i++) { for (i = 1; i < pages; i++) {
pg->next = (void *)get_zeroed_page(GFP_KERNEL); pg->next = (void *)get_zeroed_page(GFP_KERNEL);
if (!pg->next) if (!pg->next)
goto out_free; goto out_free;
...@@ -724,13 +724,6 @@ static int ftrace_profile_init_cpu(int cpu) ...@@ -724,13 +724,6 @@ static int ftrace_profile_init_cpu(int cpu)
if (!stat->hash) if (!stat->hash)
return -ENOMEM; return -ENOMEM;
if (!ftrace_profile_bits) {
size--;
for (; size; size >>= 1)
ftrace_profile_bits++;
}
/* Preallocate the function profiling pages */ /* Preallocate the function profiling pages */
if (ftrace_profile_pages_init(stat) < 0) { if (ftrace_profile_pages_init(stat) < 0) {
kfree(stat->hash); kfree(stat->hash);
...@@ -763,7 +756,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip) ...@@ -763,7 +756,7 @@ ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
struct hlist_head *hhd; struct hlist_head *hhd;
unsigned long key; unsigned long key;
key = hash_long(ip, ftrace_profile_bits); key = hash_long(ip, FTRACE_PROFILE_HASH_BITS);
hhd = &stat->hash[key]; hhd = &stat->hash[key];
if (hlist_empty(hhd)) if (hlist_empty(hhd))
...@@ -782,7 +775,7 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat, ...@@ -782,7 +775,7 @@ static void ftrace_add_profile(struct ftrace_profile_stat *stat,
{ {
unsigned long key; unsigned long key;
key = hash_long(rec->ip, ftrace_profile_bits); key = hash_long(rec->ip, FTRACE_PROFILE_HASH_BITS);
hlist_add_head_rcu(&rec->node, &stat->hash[key]); hlist_add_head_rcu(&rec->node, &stat->hash[key]);
} }
...@@ -1079,7 +1072,7 @@ struct ftrace_func_probe { ...@@ -1079,7 +1072,7 @@ struct ftrace_func_probe {
unsigned long flags; unsigned long flags;
unsigned long ip; unsigned long ip;
void *data; void *data;
struct rcu_head rcu; struct list_head free_list;
}; };
struct ftrace_func_entry { struct ftrace_func_entry {
...@@ -1329,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, ...@@ -1329,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
struct hlist_head *hhd; struct hlist_head *hhd;
struct ftrace_hash *old_hash; struct ftrace_hash *old_hash;
struct ftrace_hash *new_hash; struct ftrace_hash *new_hash;
unsigned long key;
int size = src->count; int size = src->count;
int bits = 0; int bits = 0;
int ret; int ret;
...@@ -1372,10 +1364,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable, ...@@ -1372,10 +1364,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
for (i = 0; i < size; i++) { for (i = 0; i < size; i++) {
hhd = &src->buckets[i]; hhd = &src->buckets[i];
hlist_for_each_entry_safe(entry, tn, hhd, hlist) { hlist_for_each_entry_safe(entry, tn, hhd, hlist) {
if (bits > 0)
key = hash_long(entry->ip, bits);
else
key = 0;
remove_hash_entry(src, entry); remove_hash_entry(src, entry);
__add_hash_entry(new_hash, entry); __add_hash_entry(new_hash, entry);
} }
...@@ -2973,28 +2961,27 @@ static void __disable_ftrace_function_probe(void) ...@@ -2973,28 +2961,27 @@ static void __disable_ftrace_function_probe(void)
} }
static void ftrace_free_entry_rcu(struct rcu_head *rhp) static void ftrace_free_entry(struct ftrace_func_probe *entry)
{ {
struct ftrace_func_probe *entry =
container_of(rhp, struct ftrace_func_probe, rcu);
if (entry->ops->free) if (entry->ops->free)
entry->ops->free(&entry->data); entry->ops->free(entry->ops, entry->ip, &entry->data);
kfree(entry); kfree(entry);
} }
int int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data) void *data)
{ {
struct ftrace_func_probe *entry; struct ftrace_func_probe *entry;
struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
struct ftrace_hash *hash;
struct ftrace_page *pg; struct ftrace_page *pg;
struct dyn_ftrace *rec; struct dyn_ftrace *rec;
int type, len, not; int type, len, not;
unsigned long key; unsigned long key;
int count = 0; int count = 0;
char *search; char *search;
int ret;
type = filter_parse_regex(glob, strlen(glob), &search, &not); type = filter_parse_regex(glob, strlen(glob), &search, &not);
len = strlen(search); len = strlen(search);
...@@ -3005,8 +2992,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3005,8 +2992,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
if (unlikely(ftrace_disabled)) hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash) {
count = -ENOMEM;
goto out_unlock; goto out_unlock;
}
if (unlikely(ftrace_disabled)) {
count = -ENODEV;
goto out_unlock;
}
do_for_each_ftrace_rec(pg, rec) { do_for_each_ftrace_rec(pg, rec) {
...@@ -3030,14 +3025,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3030,14 +3025,21 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
* for each function we find. We call the callback * for each function we find. We call the callback
* to give the caller an opportunity to do so. * to give the caller an opportunity to do so.
*/ */
if (ops->callback) { if (ops->init) {
if (ops->callback(rec->ip, &entry->data) < 0) { if (ops->init(ops, rec->ip, &entry->data) < 0) {
/* caller does not like this func */ /* caller does not like this func */
kfree(entry); kfree(entry);
continue; continue;
} }
} }
ret = enter_record(hash, rec, 0);
if (ret < 0) {
kfree(entry);
count = ret;
goto out_unlock;
}
entry->ops = ops; entry->ops = ops;
entry->ip = rec->ip; entry->ip = rec->ip;
...@@ -3045,10 +3047,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3045,10 +3047,16 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]); hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
} while_for_each_ftrace_rec(); } while_for_each_ftrace_rec();
ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
if (ret < 0)
count = ret;
__enable_ftrace_function_probe(); __enable_ftrace_function_probe();
out_unlock: out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
free_ftrace_hash(hash);
return count; return count;
} }
...@@ -3062,7 +3070,12 @@ static void ...@@ -3062,7 +3070,12 @@ static void
__unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
void *data, int flags) void *data, int flags)
{ {
struct ftrace_func_entry *rec_entry;
struct ftrace_func_probe *entry; struct ftrace_func_probe *entry;
struct ftrace_func_probe *p;
struct ftrace_hash **orig_hash = &trace_probe_ops.filter_hash;
struct list_head free_list;
struct ftrace_hash *hash;
struct hlist_node *tmp; struct hlist_node *tmp;
char str[KSYM_SYMBOL_LEN]; char str[KSYM_SYMBOL_LEN];
int type = MATCH_FULL; int type = MATCH_FULL;
...@@ -3083,6 +3096,14 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3083,6 +3096,14 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
} }
mutex_lock(&ftrace_lock); mutex_lock(&ftrace_lock);
hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
if (!hash)
/* Hmm, should report this somehow */
goto out_unlock;
INIT_LIST_HEAD(&free_list);
for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) { for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
struct hlist_head *hhd = &ftrace_func_hash[i]; struct hlist_head *hhd = &ftrace_func_hash[i];
...@@ -3103,12 +3124,30 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops, ...@@ -3103,12 +3124,30 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
continue; continue;
} }
rec_entry = ftrace_lookup_ip(hash, entry->ip);
/* It is possible more than one entry had this ip */
if (rec_entry)
free_hash_entry(hash, rec_entry);
hlist_del_rcu(&entry->node); hlist_del_rcu(&entry->node);
call_rcu_sched(&entry->rcu, ftrace_free_entry_rcu); list_add(&entry->free_list, &free_list);
} }
} }
__disable_ftrace_function_probe(); __disable_ftrace_function_probe();
/*
* Remove after the disable is called. Otherwise, if the last
* probe is removed, a null hash means *all enabled*.
*/
ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
synchronize_sched();
list_for_each_entry_safe(entry, p, &free_list, free_list) {
list_del(&entry->free_list);
ftrace_free_entry(entry);
}
out_unlock:
mutex_unlock(&ftrace_lock); mutex_unlock(&ftrace_lock);
free_ftrace_hash(hash);
} }
void void
...@@ -3736,7 +3775,8 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer) ...@@ -3736,7 +3775,8 @@ ftrace_set_func(unsigned long *array, int *idx, char *buffer)
if (fail) if (fail)
return -EINVAL; return -EINVAL;
ftrace_graph_filter_enabled = 1; ftrace_graph_filter_enabled = !!(*idx);
return 0; return 0;
} }
......
This diff is collapsed.
This diff is collapsed.
...@@ -13,6 +13,11 @@ ...@@ -13,6 +13,11 @@
#include <linux/trace_seq.h> #include <linux/trace_seq.h>
#include <linux/ftrace_event.h> #include <linux/ftrace_event.h>
#ifdef CONFIG_FTRACE_SYSCALLS
#include <asm/unistd.h> /* For NR_SYSCALLS */
#include <asm/syscall.h> /* some archs define it here */
#endif
enum trace_type { enum trace_type {
__TRACE_FIRST_TYPE = 0, __TRACE_FIRST_TYPE = 0,
...@@ -29,6 +34,7 @@ enum trace_type { ...@@ -29,6 +34,7 @@ enum trace_type {
TRACE_GRAPH_ENT, TRACE_GRAPH_ENT,
TRACE_USER_STACK, TRACE_USER_STACK,
TRACE_BLK, TRACE_BLK,
TRACE_BPUTS,
__TRACE_LAST_TYPE, __TRACE_LAST_TYPE,
}; };
...@@ -127,12 +133,21 @@ enum trace_flag_type { ...@@ -127,12 +133,21 @@ enum trace_flag_type {
#define TRACE_BUF_SIZE 1024 #define TRACE_BUF_SIZE 1024
struct trace_array;
struct trace_cpu {
struct trace_array *tr;
struct dentry *dir;
int cpu;
};
/* /*
* The CPU trace array - it consists of thousands of trace entries * The CPU trace array - it consists of thousands of trace entries
* plus some other descriptor data: (for example which task started * plus some other descriptor data: (for example which task started
* the trace, etc.) * the trace, etc.)
*/ */
struct trace_array_cpu { struct trace_array_cpu {
struct trace_cpu trace_cpu;
atomic_t disabled; atomic_t disabled;
void *buffer_page; /* ring buffer spare */ void *buffer_page; /* ring buffer spare */
...@@ -151,20 +166,83 @@ struct trace_array_cpu { ...@@ -151,20 +166,83 @@ struct trace_array_cpu {
char comm[TASK_COMM_LEN]; char comm[TASK_COMM_LEN];
}; };
struct tracer;
struct trace_buffer {
struct trace_array *tr;
struct ring_buffer *buffer;
struct trace_array_cpu __percpu *data;
cycle_t time_start;
int cpu;
};
/* /*
* The trace array - an array of per-CPU trace arrays. This is the * The trace array - an array of per-CPU trace arrays. This is the
* highest level data structure that individual tracers deal with. * highest level data structure that individual tracers deal with.
* They have on/off state as well: * They have on/off state as well:
*/ */
struct trace_array { struct trace_array {
struct ring_buffer *buffer; struct list_head list;
int cpu; char *name;
struct trace_buffer trace_buffer;
#ifdef CONFIG_TRACER_MAX_TRACE
/*
* The max_buffer is used to snapshot the trace when a maximum
* latency is reached, or when the user initiates a snapshot.
* Some tracers will use this to store a maximum trace while
* it continues examining live traces.
*
* The buffers for the max_buffer are set up the same as the trace_buffer
* When a snapshot is taken, the buffer of the max_buffer is swapped
* with the buffer of the trace_buffer and the buffers are reset for
* the trace_buffer so the tracing can continue.
*/
struct trace_buffer max_buffer;
bool allocated_snapshot;
#endif
int buffer_disabled; int buffer_disabled;
cycle_t time_start; struct trace_cpu trace_cpu; /* place holder */
#ifdef CONFIG_FTRACE_SYSCALLS
int sys_refcount_enter;
int sys_refcount_exit;
DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
#endif
int stop_count;
int clock_id;
struct tracer *current_trace;
unsigned int flags;
raw_spinlock_t start_lock;
struct dentry *dir;
struct dentry *options;
struct dentry *percpu_dir;
struct dentry *event_dir;
struct list_head systems;
struct list_head events;
struct task_struct *waiter; struct task_struct *waiter;
struct trace_array_cpu *data[NR_CPUS]; int ref;
}; };
enum {
TRACE_ARRAY_FL_GLOBAL = (1 << 0)
};
extern struct list_head ftrace_trace_arrays;
/*
* The global tracer (top) should be the first trace array added,
* but we check the flag anyway.
*/
static inline struct trace_array *top_trace_array(void)
{
struct trace_array *tr;
tr = list_entry(ftrace_trace_arrays.prev,
typeof(*tr), list);
WARN_ON(!(tr->flags & TRACE_ARRAY_FL_GLOBAL));
return tr;
}
#define FTRACE_CMP_TYPE(var, type) \ #define FTRACE_CMP_TYPE(var, type) \
__builtin_types_compatible_p(typeof(var), type *) __builtin_types_compatible_p(typeof(var), type *)
...@@ -200,6 +278,7 @@ extern void __ftrace_bad_type(void); ...@@ -200,6 +278,7 @@ extern void __ftrace_bad_type(void);
IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\ IF_ASSIGN(var, ent, struct userstack_entry, TRACE_USER_STACK);\
IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \ IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \ IF_ASSIGN(var, ent, struct bprint_entry, TRACE_BPRINT); \
IF_ASSIGN(var, ent, struct bputs_entry, TRACE_BPUTS); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \ IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
TRACE_MMIO_RW); \ TRACE_MMIO_RW); \
IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \ IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
...@@ -289,9 +368,10 @@ struct tracer { ...@@ -289,9 +368,10 @@ struct tracer {
struct tracer *next; struct tracer *next;
struct tracer_flags *flags; struct tracer_flags *flags;
bool print_max; bool print_max;
bool use_max_tr;
bool allocated_snapshot;
bool enabled; bool enabled;
#ifdef CONFIG_TRACER_MAX_TRACE
bool use_max_tr;
#endif
}; };
...@@ -427,8 +507,6 @@ static __always_inline void trace_clear_recursion(int bit) ...@@ -427,8 +507,6 @@ static __always_inline void trace_clear_recursion(int bit)
current->trace_recursion = val; current->trace_recursion = val;
} }
#define TRACE_PIPE_ALL_CPU -1
static inline struct ring_buffer_iter * static inline struct ring_buffer_iter *
trace_buffer_iter(struct trace_iterator *iter, int cpu) trace_buffer_iter(struct trace_iterator *iter, int cpu)
{ {
...@@ -439,10 +517,10 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu) ...@@ -439,10 +517,10 @@ trace_buffer_iter(struct trace_iterator *iter, int cpu)
int tracer_init(struct tracer *t, struct trace_array *tr); int tracer_init(struct tracer *t, struct trace_array *tr);
int tracing_is_enabled(void); int tracing_is_enabled(void);
void tracing_reset(struct trace_array *tr, int cpu); void tracing_reset(struct trace_buffer *buf, int cpu);
void tracing_reset_online_cpus(struct trace_array *tr); void tracing_reset_online_cpus(struct trace_buffer *buf);
void tracing_reset_current(int cpu); void tracing_reset_current(int cpu);
void tracing_reset_current_online_cpus(void); void tracing_reset_all_online_cpus(void);
int tracing_open_generic(struct inode *inode, struct file *filp); int tracing_open_generic(struct inode *inode, struct file *filp);
struct dentry *trace_create_file(const char *name, struct dentry *trace_create_file(const char *name,
umode_t mode, umode_t mode,
...@@ -450,6 +528,7 @@ struct dentry *trace_create_file(const char *name, ...@@ -450,6 +528,7 @@ struct dentry *trace_create_file(const char *name,
void *data, void *data,
const struct file_operations *fops); const struct file_operations *fops);
struct dentry *tracing_init_dentry_tr(struct trace_array *tr);
struct dentry *tracing_init_dentry(void); struct dentry *tracing_init_dentry(void);
struct ring_buffer_event; struct ring_buffer_event;
...@@ -583,7 +662,7 @@ extern int DYN_FTRACE_TEST_NAME(void); ...@@ -583,7 +662,7 @@ extern int DYN_FTRACE_TEST_NAME(void);
#define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2 #define DYN_FTRACE_TEST_NAME2 trace_selftest_dynamic_test_func2
extern int DYN_FTRACE_TEST_NAME2(void); extern int DYN_FTRACE_TEST_NAME2(void);
extern int ring_buffer_expanded; extern bool ring_buffer_expanded;
extern bool tracing_selftest_disabled; extern bool tracing_selftest_disabled;
DECLARE_PER_CPU(int, ftrace_cpu_disabled); DECLARE_PER_CPU(int, ftrace_cpu_disabled);
...@@ -619,6 +698,8 @@ trace_array_vprintk(struct trace_array *tr, ...@@ -619,6 +698,8 @@ trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args); unsigned long ip, const char *fmt, va_list args);
int trace_array_printk(struct trace_array *tr, int trace_array_printk(struct trace_array *tr,
unsigned long ip, const char *fmt, ...); unsigned long ip, const char *fmt, ...);
int trace_array_printk_buf(struct ring_buffer *buffer,
unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s); void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter); enum print_line_t print_trace_line(struct trace_iterator *iter);
...@@ -786,6 +867,7 @@ enum trace_iterator_flags { ...@@ -786,6 +867,7 @@ enum trace_iterator_flags {
TRACE_ITER_STOP_ON_FREE = 0x400000, TRACE_ITER_STOP_ON_FREE = 0x400000,
TRACE_ITER_IRQ_INFO = 0x800000, TRACE_ITER_IRQ_INFO = 0x800000,
TRACE_ITER_MARKERS = 0x1000000, TRACE_ITER_MARKERS = 0x1000000,
TRACE_ITER_FUNCTION = 0x2000000,
}; };
/* /*
...@@ -832,8 +914,8 @@ enum { ...@@ -832,8 +914,8 @@ enum {
struct ftrace_event_field { struct ftrace_event_field {
struct list_head link; struct list_head link;
char *name; const char *name;
char *type; const char *type;
int filter_type; int filter_type;
int offset; int offset;
int size; int size;
...@@ -851,12 +933,19 @@ struct event_filter { ...@@ -851,12 +933,19 @@ struct event_filter {
struct event_subsystem { struct event_subsystem {
struct list_head list; struct list_head list;
const char *name; const char *name;
struct dentry *entry;
struct event_filter *filter; struct event_filter *filter;
int nr_events;
int ref_count; int ref_count;
}; };
struct ftrace_subsystem_dir {
struct list_head list;
struct event_subsystem *subsystem;
struct trace_array *tr;
struct dentry *entry;
int ref_count;
int nr_events;
};
#define FILTER_PRED_INVALID ((unsigned short)-1) #define FILTER_PRED_INVALID ((unsigned short)-1)
#define FILTER_PRED_IS_RIGHT (1 << 15) #define FILTER_PRED_IS_RIGHT (1 << 15)
#define FILTER_PRED_FOLD (1 << 15) #define FILTER_PRED_FOLD (1 << 15)
...@@ -906,22 +995,20 @@ struct filter_pred { ...@@ -906,22 +995,20 @@ struct filter_pred {
unsigned short right; unsigned short right;
}; };
extern struct list_head ftrace_common_fields;
extern enum regex_type extern enum regex_type
filter_parse_regex(char *buff, int len, char **search, int *not); filter_parse_regex(char *buff, int len, char **search, int *not);
extern void print_event_filter(struct ftrace_event_call *call, extern void print_event_filter(struct ftrace_event_call *call,
struct trace_seq *s); struct trace_seq *s);
extern int apply_event_filter(struct ftrace_event_call *call, extern int apply_event_filter(struct ftrace_event_call *call,
char *filter_string); char *filter_string);
extern int apply_subsystem_event_filter(struct event_subsystem *system, extern int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
char *filter_string); char *filter_string);
extern void print_subsystem_event_filter(struct event_subsystem *system, extern void print_subsystem_event_filter(struct event_subsystem *system,
struct trace_seq *s); struct trace_seq *s);
extern int filter_assign_type(const char *type); extern int filter_assign_type(const char *type);
struct list_head * struct ftrace_event_field *
trace_get_fields(struct ftrace_event_call *event_call); trace_find_event_field(struct ftrace_event_call *call, char *name);
static inline int static inline int
filter_check_discard(struct ftrace_event_call *call, void *rec, filter_check_discard(struct ftrace_event_call *call, void *rec,
...@@ -938,6 +1025,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec, ...@@ -938,6 +1025,8 @@ filter_check_discard(struct ftrace_event_call *call, void *rec,
} }
extern void trace_event_enable_cmd_record(bool enable); extern void trace_event_enable_cmd_record(bool enable);
extern int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr);
extern int event_trace_del_tracer(struct trace_array *tr);
extern struct mutex event_mutex; extern struct mutex event_mutex;
extern struct list_head ftrace_events; extern struct list_head ftrace_events;
...@@ -948,7 +1037,18 @@ extern const char *__stop___trace_bprintk_fmt[]; ...@@ -948,7 +1037,18 @@ extern const char *__stop___trace_bprintk_fmt[];
void trace_printk_init_buffers(void); void trace_printk_init_buffers(void);
void trace_printk_start_comm(void); void trace_printk_start_comm(void);
int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set); int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
int set_tracer_flag(unsigned int mask, int enabled); int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
/*
* Normal trace_printk() and friends allocates special buffers
* to do the manipulation, as well as saves the print formats
* into sections to display. But the trace infrastructure wants
* to use these without the added overhead at the price of being
* a bit slower (used mainly for warnings, where we don't care
* about performance). The internal_trace_puts() is for such
* a purpose.
*/
#define internal_trace_puts(str) __trace_puts(_THIS_IP_, str, strlen(str))
#undef FTRACE_ENTRY #undef FTRACE_ENTRY
#define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \ #define FTRACE_ENTRY(call, struct_name, id, tstruct, print, filter) \
......
@@ -32,6 +32,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	struct ftrace_event_call *call = &event_branch;
 	struct trace_array *tr = branch_tracer;
+	struct trace_array_cpu *data;
 	struct ring_buffer_event *event;
 	struct trace_branch *entry;
 	struct ring_buffer *buffer;
@@ -51,11 +52,12 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	local_irq_save(flags);
 	cpu = raw_smp_processor_id();
-	if (atomic_inc_return(&tr->data[cpu]->disabled) != 1)
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
+	if (atomic_inc_return(&data->disabled) != 1)
 		goto out;
 
 	pc = preempt_count();
-	buffer = tr->buffer;
+	buffer = tr->trace_buffer.buffer;
 	event = trace_buffer_lock_reserve(buffer, TRACE_BRANCH,
 					  sizeof(*entry), flags, pc);
 	if (!event)
@@ -80,7 +82,7 @@ probe_likely_condition(struct ftrace_branch_data *f, int val, int expect)
 	__buffer_unlock_commit(buffer, event);
 
  out:
-	atomic_dec(&tr->data[cpu]->disabled);
+	atomic_dec(&data->disabled);
 	local_irq_restore(flags);
 }
...
@@ -57,6 +57,16 @@ u64 notrace trace_clock(void)
 	return local_clock();
 }
 
+/*
+ * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ */
+u64 notrace trace_clock_jiffies(void)
+{
+	u64 jiffy = jiffies - INITIAL_JIFFIES;
+
+	/* Return nsecs */
+	return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+}
 
 /*
  * trace_clock_global(): special globally coherent trace clock
...
@@ -223,8 +223,8 @@ FTRACE_ENTRY(bprint, bprint_entry,
 		__dynamic_array(	u32,	buf	)
 	),
 
-	F_printk("%08lx fmt:%p",
-		 __entry->ip, __entry->fmt),
+	F_printk("%pf: %s",
+		 (void *)__entry->ip, __entry->fmt),
 
 	FILTER_OTHER
 );
@@ -238,8 +238,23 @@ FTRACE_ENTRY(print, print_entry,
 		__dynamic_array(	char,	buf	)
 	),
 
-	F_printk("%08lx %s",
-		 __entry->ip, __entry->buf),
+	F_printk("%pf: %s",
+		 (void *)__entry->ip, __entry->buf),
+
+	FILTER_OTHER
+);
+
+FTRACE_ENTRY(bputs, bputs_entry,
+
+	TRACE_BPUTS,
+
+	F_STRUCT(
+		__field(	unsigned long,	ip	)
+		__field(	const char *,	str	)
+	),
+
+	F_printk("%pf: %s",
+		 (void *)__entry->ip, __entry->str),
 
 	FILTER_OTHER
 );
...
This diff is collapsed.
@@ -658,33 +658,6 @@ void print_subsystem_event_filter(struct event_subsystem *system,
 	mutex_unlock(&event_mutex);
 }
 
-static struct ftrace_event_field *
-__find_event_field(struct list_head *head, char *name)
-{
-	struct ftrace_event_field *field;
-
-	list_for_each_entry(field, head, link) {
-		if (!strcmp(field->name, name))
-			return field;
-	}
-
-	return NULL;
-}
-
-static struct ftrace_event_field *
-find_event_field(struct ftrace_event_call *call, char *name)
-{
-	struct ftrace_event_field *field;
-	struct list_head *head;
-
-	field = __find_event_field(&ftrace_common_fields, name);
-	if (field)
-		return field;
-
-	head = trace_get_fields(call);
-	return __find_event_field(head, name);
-}
-
 static int __alloc_pred_stack(struct pred_stack *stack, int n_preds)
 {
 	stack->preds = kcalloc(n_preds + 1, sizeof(*stack->preds), GFP_KERNEL);
@@ -1337,7 +1310,7 @@ static struct filter_pred *create_pred(struct filter_parse_state *ps,
 		return NULL;
 	}
 
-	field = find_event_field(call, operand1);
+	field = trace_find_event_field(call, operand1);
 	if (!field) {
 		parse_error(ps, FILT_ERR_FIELD_NOT_FOUND, 0);
 		return NULL;
@@ -1907,16 +1880,17 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
 	return err;
 }
 
-int apply_subsystem_event_filter(struct event_subsystem *system,
+int apply_subsystem_event_filter(struct ftrace_subsystem_dir *dir,
 				 char *filter_string)
 {
+	struct event_subsystem *system = dir->subsystem;
 	struct event_filter *filter;
 	int err = 0;
 
 	mutex_lock(&event_mutex);
 
 	/* Make sure the system still has events */
-	if (!system->nr_events) {
+	if (!dir->nr_events) {
 		err = -ENODEV;
 		goto out_unlock;
 	}
...
...@@ -129,7 +129,7 @@ static void __always_unused ____ftrace_check_##name(void)	\
#undef FTRACE_ENTRY
#define FTRACE_ENTRY(name, struct_name, id, tstruct, print, filter)	\
-int									\
+static int __init							\
ftrace_define_fields_##name(struct ftrace_event_call *event_call)	\
{									\
	struct struct_name field;					\
...@@ -168,7 +168,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call)	\
#define FTRACE_ENTRY_REG(call, struct_name, etype, tstruct, print, filter,\
			 regfn)						\
									\
-struct ftrace_event_class event_class_ftrace_##call = {		\
+struct ftrace_event_class __refdata event_class_ftrace_##call = {	\
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_ftrace_##call.fields),\
......
...@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void); ...@@ -28,7 +28,7 @@ static void tracing_stop_function_trace(void);
static int function_trace_init(struct trace_array *tr) static int function_trace_init(struct trace_array *tr)
{ {
func_trace = tr; func_trace = tr;
tr->cpu = get_cpu(); tr->trace_buffer.cpu = get_cpu();
put_cpu(); put_cpu();
tracing_start_cmdline_record(); tracing_start_cmdline_record();
...@@ -44,7 +44,7 @@ static void function_trace_reset(struct trace_array *tr) ...@@ -44,7 +44,7 @@ static void function_trace_reset(struct trace_array *tr)
static void function_trace_start(struct trace_array *tr) static void function_trace_start(struct trace_array *tr)
{ {
tracing_reset_online_cpus(tr); tracing_reset_online_cpus(&tr->trace_buffer);
} }
/* Our option */ /* Our option */
...@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -76,7 +76,7 @@ function_trace_call(unsigned long ip, unsigned long parent_ip,
goto out; goto out;
cpu = smp_processor_id(); cpu = smp_processor_id();
data = tr->data[cpu]; data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (!atomic_read(&data->disabled)) { if (!atomic_read(&data->disabled)) {
local_save_flags(flags); local_save_flags(flags);
trace_function(tr, ip, parent_ip, flags, pc); trace_function(tr, ip, parent_ip, flags, pc);
...@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -107,7 +107,7 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
*/ */
local_irq_save(flags); local_irq_save(flags);
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
data = tr->data[cpu]; data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&data->disabled); disabled = atomic_inc_return(&data->disabled);
if (likely(disabled == 1)) { if (likely(disabled == 1)) {
...@@ -214,66 +214,89 @@ static struct tracer function_trace __read_mostly = ...@@ -214,66 +214,89 @@ static struct tracer function_trace __read_mostly =
}; };
#ifdef CONFIG_DYNAMIC_FTRACE #ifdef CONFIG_DYNAMIC_FTRACE
static void static int update_count(void **data)
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{ {
long *count = (long *)data; unsigned long *count = (long *)data;
if (tracing_is_on())
return;
if (!*count) if (!*count)
return; return 0;
if (*count != -1) if (*count != -1)
(*count)--; (*count)--;
tracing_on(); return 1;
} }
static void static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data) ftrace_traceon_count(unsigned long ip, unsigned long parent_ip, void **data)
{ {
long *count = (long *)data; if (tracing_is_on())
return;
if (update_count(data))
tracing_on();
}
static void
ftrace_traceoff_count(unsigned long ip, unsigned long parent_ip, void **data)
{
if (!tracing_is_on()) if (!tracing_is_on())
return; return;
if (!*count) if (update_count(data))
tracing_off();
}
static void
ftrace_traceon(unsigned long ip, unsigned long parent_ip, void **data)
{
if (tracing_is_on())
return; return;
if (*count != -1) tracing_on();
(*count)--; }
static void
ftrace_traceoff(unsigned long ip, unsigned long parent_ip, void **data)
{
if (!tracing_is_on())
return;
tracing_off(); tracing_off();
} }
static int /*
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, * Skip 4:
struct ftrace_probe_ops *ops, void *data); * ftrace_stacktrace()
* function_trace_probe_call()
* ftrace_ops_list_func()
* ftrace_call()
*/
#define STACK_SKIP 4
static struct ftrace_probe_ops traceon_probe_ops = { static void
.func = ftrace_traceon, ftrace_stacktrace(unsigned long ip, unsigned long parent_ip, void **data)
.print = ftrace_trace_onoff_print, {
}; trace_dump_stack(STACK_SKIP);
}
static struct ftrace_probe_ops traceoff_probe_ops = { static void
.func = ftrace_traceoff, ftrace_stacktrace_count(unsigned long ip, unsigned long parent_ip, void **data)
.print = ftrace_trace_onoff_print, {
}; if (!tracing_is_on())
return;
if (update_count(data))
trace_dump_stack(STACK_SKIP);
}
static int static int
ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, ftrace_probe_print(const char *name, struct seq_file *m,
struct ftrace_probe_ops *ops, void *data) unsigned long ip, void *data)
{ {
long count = (long)data; long count = (long)data;
seq_printf(m, "%ps:", (void *)ip); seq_printf(m, "%ps:%s", (void *)ip, name);
if (ops == &traceon_probe_ops)
seq_printf(m, "traceon");
else
seq_printf(m, "traceoff");
if (count == -1) if (count == -1)
seq_printf(m, ":unlimited\n"); seq_printf(m, ":unlimited\n");
...@@ -284,26 +307,61 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip, ...@@ -284,26 +307,61 @@ ftrace_trace_onoff_print(struct seq_file *m, unsigned long ip,
} }
static int static int
ftrace_trace_onoff_unreg(char *glob, char *cmd, char *param) ftrace_traceon_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data)
{ {
struct ftrace_probe_ops *ops; return ftrace_probe_print("traceon", m, ip, data);
}
/* we register both traceon and traceoff to this callback */
if (strcmp(cmd, "traceon") == 0)
ops = &traceon_probe_ops;
else
ops = &traceoff_probe_ops;
unregister_ftrace_function_probe_func(glob, ops); static int
ftrace_traceoff_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data)
{
return ftrace_probe_print("traceoff", m, ip, data);
}
return 0; static int
ftrace_stacktrace_print(struct seq_file *m, unsigned long ip,
struct ftrace_probe_ops *ops, void *data)
{
return ftrace_probe_print("stacktrace", m, ip, data);
} }
static struct ftrace_probe_ops traceon_count_probe_ops = {
.func = ftrace_traceon_count,
.print = ftrace_traceon_print,
};
static struct ftrace_probe_ops traceoff_count_probe_ops = {
.func = ftrace_traceoff_count,
.print = ftrace_traceoff_print,
};
static struct ftrace_probe_ops stacktrace_count_probe_ops = {
.func = ftrace_stacktrace_count,
.print = ftrace_stacktrace_print,
};
static struct ftrace_probe_ops traceon_probe_ops = {
.func = ftrace_traceon,
.print = ftrace_traceon_print,
};
static struct ftrace_probe_ops traceoff_probe_ops = {
.func = ftrace_traceoff,
.print = ftrace_traceoff_print,
};
static struct ftrace_probe_ops stacktrace_probe_ops = {
.func = ftrace_stacktrace,
.print = ftrace_stacktrace_print,
};
static int static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash, ftrace_trace_probe_callback(struct ftrace_probe_ops *ops,
char *glob, char *cmd, char *param, int enable) struct ftrace_hash *hash, char *glob,
char *cmd, char *param, int enable)
{ {
struct ftrace_probe_ops *ops;
void *count = (void *)-1; void *count = (void *)-1;
char *number; char *number;
int ret; int ret;
...@@ -312,14 +370,10 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash, ...@@ -312,14 +370,10 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
if (!enable) if (!enable)
return -EINVAL; return -EINVAL;
if (glob[0] == '!') if (glob[0] == '!') {
return ftrace_trace_onoff_unreg(glob+1, cmd, param); unregister_ftrace_function_probe_func(glob+1, ops);
return 0;
/* we register both traceon and traceoff to this callback */ }
if (strcmp(cmd, "traceon") == 0)
ops = &traceon_probe_ops;
else
ops = &traceoff_probe_ops;
if (!param) if (!param)
goto out_reg; goto out_reg;
...@@ -343,6 +397,34 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash, ...@@ -343,6 +397,34 @@ ftrace_trace_onoff_callback(struct ftrace_hash *hash,
return ret < 0 ? ret : 0; return ret < 0 ? ret : 0;
} }
static int
ftrace_trace_onoff_callback(struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable)
{
struct ftrace_probe_ops *ops;
/* we register both traceon and traceoff to this callback */
if (strcmp(cmd, "traceon") == 0)
ops = param ? &traceon_count_probe_ops : &traceon_probe_ops;
else
ops = param ? &traceoff_count_probe_ops : &traceoff_probe_ops;
return ftrace_trace_probe_callback(ops, hash, glob, cmd,
param, enable);
}
static int
ftrace_stacktrace_callback(struct ftrace_hash *hash,
char *glob, char *cmd, char *param, int enable)
{
struct ftrace_probe_ops *ops;
ops = param ? &stacktrace_count_probe_ops : &stacktrace_probe_ops;
return ftrace_trace_probe_callback(ops, hash, glob, cmd,
param, enable);
}
static struct ftrace_func_command ftrace_traceon_cmd = { static struct ftrace_func_command ftrace_traceon_cmd = {
.name = "traceon", .name = "traceon",
.func = ftrace_trace_onoff_callback, .func = ftrace_trace_onoff_callback,
...@@ -353,6 +435,11 @@ static struct ftrace_func_command ftrace_traceoff_cmd = { ...@@ -353,6 +435,11 @@ static struct ftrace_func_command ftrace_traceoff_cmd = {
.func = ftrace_trace_onoff_callback, .func = ftrace_trace_onoff_callback,
}; };
static struct ftrace_func_command ftrace_stacktrace_cmd = {
.name = "stacktrace",
.func = ftrace_stacktrace_callback,
};
static int __init init_func_cmd_traceon(void) static int __init init_func_cmd_traceon(void)
{ {
int ret; int ret;
...@@ -364,6 +451,12 @@ static int __init init_func_cmd_traceon(void) ...@@ -364,6 +451,12 @@ static int __init init_func_cmd_traceon(void)
ret = register_ftrace_command(&ftrace_traceon_cmd); ret = register_ftrace_command(&ftrace_traceon_cmd);
if (ret) if (ret)
unregister_ftrace_command(&ftrace_traceoff_cmd); unregister_ftrace_command(&ftrace_traceoff_cmd);
ret = register_ftrace_command(&ftrace_stacktrace_cmd);
if (ret) {
unregister_ftrace_command(&ftrace_traceoff_cmd);
unregister_ftrace_command(&ftrace_traceon_cmd);
}
return ret; return ret;
} }
#else #else
......
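The trace_functions.c rework above folds the old traceon/traceoff plumbing into ftrace_trace_probe_callback() and reuses it for the new stacktrace command, so writing e.g. 'kfree:stacktrace:5' to set_ftrace_filter records five stack traces when kfree() is hit. A minimal sketch (command name and callback are hypothetical) of how a further command could plug into the same hooks:

static int
example_cmd_callback(struct ftrace_hash *hash, char *glob,
		     char *cmd, char *param, int enable)
{
	/* parse 'param', pick a struct ftrace_probe_ops, then hand off to
	 * ftrace_trace_probe_callback() exactly like the callbacks above */
	return 0;
}

static struct ftrace_func_command example_cmd = {
	.name	= "example",
	.func	= example_cmd_callback,
};

/* Registered once at init, mirroring init_func_cmd_traceon():
 *	ret = register_ftrace_command(&example_cmd);
 */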
...@@ -218,7 +218,7 @@ int __trace_graph_entry(struct trace_array *tr,
{
	struct ftrace_event_call *call = &event_funcgraph_entry;
	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ent_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
...@@ -265,7 +265,7 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
...@@ -323,7 +323,7 @@ void __trace_graph_return(struct trace_array *tr,
{
	struct ftrace_event_call *call = &event_funcgraph_exit;
	struct ring_buffer_event *event;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ftrace_graph_ret_entry *entry;

	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
...@@ -350,7 +350,7 @@ void trace_graph_return(struct ftrace_graph_ret *trace)

	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = tr->data[cpu];
+	data = per_cpu_ptr(tr->trace_buffer.data, cpu);
	disabled = atomic_inc_return(&data->disabled);
	if (likely(disabled == 1)) {
		pc = preempt_count();
...@@ -560,9 +560,9 @@ get_return_for_leaf(struct trace_iterator *iter,
			 * We need to consume the current entry to see
			 * the next one.
			 */
-			ring_buffer_consume(iter->tr->buffer, iter->cpu,
+			ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
					    NULL, NULL);
-			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
+			event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
						 NULL, NULL);
		}
......
...@@ -33,6 +33,7 @@ enum { ...@@ -33,6 +33,7 @@ enum {
static int trace_type __read_mostly; static int trace_type __read_mostly;
static int save_flags; static int save_flags;
static bool function_enabled;
static void stop_irqsoff_tracer(struct trace_array *tr, int graph); static void stop_irqsoff_tracer(struct trace_array *tr, int graph);
static int start_irqsoff_tracer(struct trace_array *tr, int graph); static int start_irqsoff_tracer(struct trace_array *tr, int graph);
...@@ -121,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr, ...@@ -121,7 +122,7 @@ static int func_prolog_dec(struct trace_array *tr,
if (!irqs_disabled_flags(*flags)) if (!irqs_disabled_flags(*flags))
return 0; return 0;
*data = tr->data[cpu]; *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled); disabled = atomic_inc_return(&(*data)->disabled);
if (likely(disabled == 1)) if (likely(disabled == 1))
...@@ -175,7 +176,7 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set) ...@@ -175,7 +176,7 @@ static int irqsoff_set_flag(u32 old_flags, u32 bit, int set)
per_cpu(tracing_cpu, cpu) = 0; per_cpu(tracing_cpu, cpu) = 0;
tracing_max_latency = 0; tracing_max_latency = 0;
tracing_reset_online_cpus(irqsoff_trace); tracing_reset_online_cpus(&irqsoff_trace->trace_buffer);
return start_irqsoff_tracer(irqsoff_trace, set); return start_irqsoff_tracer(irqsoff_trace, set);
} }
...@@ -380,7 +381,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip) ...@@ -380,7 +381,7 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
if (per_cpu(tracing_cpu, cpu)) if (per_cpu(tracing_cpu, cpu))
return; return;
data = tr->data[cpu]; data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) || atomic_read(&data->disabled)) if (unlikely(!data) || atomic_read(&data->disabled))
return; return;
...@@ -418,7 +419,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip) ...@@ -418,7 +419,7 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
if (!tracer_enabled) if (!tracer_enabled)
return; return;
data = tr->data[cpu]; data = per_cpu_ptr(tr->trace_buffer.data, cpu);
if (unlikely(!data) || if (unlikely(!data) ||
!data->critical_start || atomic_read(&data->disabled)) !data->critical_start || atomic_read(&data->disabled))
...@@ -528,15 +529,60 @@ void trace_preempt_off(unsigned long a0, unsigned long a1) ...@@ -528,15 +529,60 @@ void trace_preempt_off(unsigned long a0, unsigned long a1)
} }
#endif /* CONFIG_PREEMPT_TRACER */ #endif /* CONFIG_PREEMPT_TRACER */
static int start_irqsoff_tracer(struct trace_array *tr, int graph) static int register_irqsoff_function(int graph, int set)
{ {
int ret = 0; int ret;
if (!graph) /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
ret = register_ftrace_function(&trace_ops); if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
else return 0;
if (graph)
ret = register_ftrace_graph(&irqsoff_graph_return, ret = register_ftrace_graph(&irqsoff_graph_return,
&irqsoff_graph_entry); &irqsoff_graph_entry);
else
ret = register_ftrace_function(&trace_ops);
if (!ret)
function_enabled = true;
return ret;
}
static void unregister_irqsoff_function(int graph)
{
if (!function_enabled)
return;
if (graph)
unregister_ftrace_graph();
else
unregister_ftrace_function(&trace_ops);
function_enabled = false;
}
static void irqsoff_function_set(int set)
{
if (set)
register_irqsoff_function(is_graph(), 1);
else
unregister_irqsoff_function(is_graph());
}
static int irqsoff_flag_changed(struct tracer *tracer, u32 mask, int set)
{
if (mask & TRACE_ITER_FUNCTION)
irqsoff_function_set(set);
return trace_keep_overwrite(tracer, mask, set);
}
static int start_irqsoff_tracer(struct trace_array *tr, int graph)
{
int ret;
ret = register_irqsoff_function(graph, 0);
if (!ret && tracing_is_enabled()) if (!ret && tracing_is_enabled())
tracer_enabled = 1; tracer_enabled = 1;
...@@ -550,10 +596,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph) ...@@ -550,10 +596,7 @@ static void stop_irqsoff_tracer(struct trace_array *tr, int graph)
{ {
tracer_enabled = 0; tracer_enabled = 0;
if (!graph) unregister_irqsoff_function(graph);
unregister_ftrace_function(&trace_ops);
else
unregister_ftrace_graph();
} }
static void __irqsoff_tracer_init(struct trace_array *tr) static void __irqsoff_tracer_init(struct trace_array *tr)
...@@ -561,14 +604,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr) ...@@ -561,14 +604,14 @@ static void __irqsoff_tracer_init(struct trace_array *tr)
save_flags = trace_flags; save_flags = trace_flags;
/* non overwrite screws up the latency tracers */ /* non overwrite screws up the latency tracers */
set_tracer_flag(TRACE_ITER_OVERWRITE, 1); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0; tracing_max_latency = 0;
irqsoff_trace = tr; irqsoff_trace = tr;
/* make sure that the tracer is visible */ /* make sure that the tracer is visible */
smp_wmb(); smp_wmb();
tracing_reset_online_cpus(tr); tracing_reset_online_cpus(&tr->trace_buffer);
if (start_irqsoff_tracer(tr, is_graph())) if (start_irqsoff_tracer(tr, is_graph()))
printk(KERN_ERR "failed to start irqsoff tracer\n"); printk(KERN_ERR "failed to start irqsoff tracer\n");
...@@ -581,8 +624,8 @@ static void irqsoff_tracer_reset(struct trace_array *tr) ...@@ -581,8 +624,8 @@ static void irqsoff_tracer_reset(struct trace_array *tr)
stop_irqsoff_tracer(tr, is_graph()); stop_irqsoff_tracer(tr, is_graph());
set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
} }
static void irqsoff_tracer_start(struct trace_array *tr) static void irqsoff_tracer_start(struct trace_array *tr)
...@@ -615,7 +658,7 @@ static struct tracer irqsoff_tracer __read_mostly = ...@@ -615,7 +658,7 @@ static struct tracer irqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line, .print_line = irqsoff_print_line,
.flags = &tracer_flags, .flags = &tracer_flags,
.set_flag = irqsoff_set_flag, .set_flag = irqsoff_set_flag,
.flag_changed = trace_keep_overwrite, .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_irqsoff, .selftest = trace_selftest_startup_irqsoff,
#endif #endif
...@@ -649,7 +692,7 @@ static struct tracer preemptoff_tracer __read_mostly = ...@@ -649,7 +692,7 @@ static struct tracer preemptoff_tracer __read_mostly =
.print_line = irqsoff_print_line, .print_line = irqsoff_print_line,
.flags = &tracer_flags, .flags = &tracer_flags,
.set_flag = irqsoff_set_flag, .set_flag = irqsoff_set_flag,
.flag_changed = trace_keep_overwrite, .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptoff, .selftest = trace_selftest_startup_preemptoff,
#endif #endif
...@@ -685,7 +728,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly = ...@@ -685,7 +728,7 @@ static struct tracer preemptirqsoff_tracer __read_mostly =
.print_line = irqsoff_print_line, .print_line = irqsoff_print_line,
.flags = &tracer_flags, .flags = &tracer_flags,
.set_flag = irqsoff_set_flag, .set_flag = irqsoff_set_flag,
.flag_changed = trace_keep_overwrite, .flag_changed = irqsoff_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_preemptirqsoff, .selftest = trace_selftest_startup_preemptirqsoff,
#endif #endif
......
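The irqsoff changes above add a .flag_changed hook so that flipping the 'function' trace option while the latency tracer is running registers or unregisters the function (or function-graph) callback on the fly, instead of only protecting the overwrite flag. Condensed from the hunk for readability (not a literal excerpt):

static int example_flag_changed(struct tracer *tracer, u32 mask, int set)
{
	/* react to the 'function' option being toggled under us */
	if (mask & TRACE_ITER_FUNCTION)
		irqsoff_function_set(set);	/* register/unregister the ftrace callback */

	/* still refuse to clear overwrite, as trace_keep_overwrite() did before */
	return trace_keep_overwrite(tracer, mask, set);
}

The wakeup tracer later in this diff gets the same treatment with wakeup_flag_changed().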
...@@ -26,7 +26,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
	trace_init_global_iter(&iter);

	for_each_tracing_cpu(cpu) {
-		atomic_inc(&iter.tr->data[cpu]->disabled);
+		atomic_inc(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	old_userobj = trace_flags;
...@@ -43,17 +43,17 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
	iter.iter_flags |= TRACE_FILE_LAT_FMT;
	iter.pos = -1;

-	if (cpu_file == TRACE_PIPE_ALL_CPU) {
+	if (cpu_file == RING_BUFFER_ALL_CPUS) {
		for_each_tracing_cpu(cpu) {
			iter.buffer_iter[cpu] =
-			ring_buffer_read_prepare(iter.tr->buffer, cpu);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu);
			ring_buffer_read_start(iter.buffer_iter[cpu]);
			tracing_iter_reset(&iter, cpu);
		}
	} else {
		iter.cpu_file = cpu_file;
		iter.buffer_iter[cpu_file] =
-			ring_buffer_read_prepare(iter.tr->buffer, cpu_file);
+			ring_buffer_read_prepare(iter.trace_buffer->buffer, cpu_file);
		ring_buffer_read_start(iter.buffer_iter[cpu_file]);
		tracing_iter_reset(&iter, cpu_file);
	}
...@@ -83,7 +83,7 @@ static void ftrace_dump_buf(int skip_lines, long cpu_file)
	trace_flags = old_userobj;

	for_each_tracing_cpu(cpu) {
-		atomic_dec(&iter.tr->data[cpu]->disabled);
+		atomic_dec(&per_cpu_ptr(iter.trace_buffer->data, cpu)->disabled);
	}

	for_each_tracing_cpu(cpu)
...@@ -115,7 +115,7 @@ static int kdb_ftdump(int argc, const char **argv)
		    !cpu_online(cpu_file))
			return KDB_BADINT;
	} else {
-		cpu_file = TRACE_PIPE_ALL_CPU;
+		cpu_file = RING_BUFFER_ALL_CPUS;
	}

	kdb_trap_printk++;
......
...@@ -31,7 +31,7 @@ static void mmio_reset_data(struct trace_array *tr)
	overrun_detected = false;
	prev_overruns = 0;

-	tracing_reset_online_cpus(tr);
+	tracing_reset_online_cpus(&tr->trace_buffer);
}

static int mmio_trace_init(struct trace_array *tr)
...@@ -128,7 +128,7 @@ static void mmio_close(struct trace_iterator *iter)
static unsigned long count_overruns(struct trace_iterator *iter)
{
	unsigned long cnt = atomic_xchg(&dropped_count, 0);
-	unsigned long over = ring_buffer_overruns(iter->tr->buffer);
+	unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);

	if (over > prev_overruns)
		cnt += over - prev_overruns;
...@@ -309,7 +309,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
				struct mmiotrace_rw *rw)
{
	struct ftrace_event_call *call = &event_mmiotrace_rw;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_rw *entry;
	int pc = preempt_count();
...@@ -330,7 +330,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
void mmio_trace_rw(struct mmiotrace_rw *rw)
{
	struct trace_array *tr = mmio_trace_array;
-	struct trace_array_cpu *data = tr->data[smp_processor_id()];
+	struct trace_array_cpu *data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_rw(tr, data, rw);
}

...@@ -339,7 +339,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
				struct mmiotrace_map *map)
{
	struct ftrace_event_call *call = &event_mmiotrace_map;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct trace_mmiotrace_map *entry;
	int pc = preempt_count();
...@@ -363,7 +363,7 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
	struct trace_array_cpu *data;

	preempt_disable();
-	data = tr->data[smp_processor_id()];
+	data = per_cpu_ptr(tr->trace_buffer.data, smp_processor_id());
	__trace_mmiotrace_map(tr, data, map);
	preempt_enable();
}
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
/* must be a power of 2 */ /* must be a power of 2 */
#define EVENT_HASHSIZE 128 #define EVENT_HASHSIZE 128
DECLARE_RWSEM(trace_event_mutex); DECLARE_RWSEM(trace_event_sem);
static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly; static struct hlist_head event_hash[EVENT_HASHSIZE] __read_mostly;
...@@ -37,6 +37,22 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s) ...@@ -37,6 +37,22 @@ int trace_print_seq(struct seq_file *m, struct trace_seq *s)
return ret; return ret;
} }
enum print_line_t trace_print_bputs_msg_only(struct trace_iterator *iter)
{
struct trace_seq *s = &iter->seq;
struct trace_entry *entry = iter->ent;
struct bputs_entry *field;
int ret;
trace_assign_type(field, entry);
ret = trace_seq_puts(s, field->str);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return TRACE_TYPE_HANDLED;
}
enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter) enum print_line_t trace_print_bprintk_msg_only(struct trace_iterator *iter)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
...@@ -397,6 +413,32 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len) ...@@ -397,6 +413,32 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
} }
EXPORT_SYMBOL(ftrace_print_hex_seq); EXPORT_SYMBOL(ftrace_print_hex_seq);
int ftrace_raw_output_prep(struct trace_iterator *iter,
struct trace_event *trace_event)
{
struct ftrace_event_call *event;
struct trace_seq *s = &iter->seq;
struct trace_seq *p = &iter->tmp_seq;
struct trace_entry *entry;
int ret;
event = container_of(trace_event, struct ftrace_event_call, event);
entry = iter->ent;
if (entry->type != event->event.type) {
WARN_ON_ONCE(1);
return TRACE_TYPE_UNHANDLED;
}
trace_seq_init(p);
ret = trace_seq_printf(s, "%s: ", event->name);
if (!ret)
return TRACE_TYPE_PARTIAL_LINE;
return 0;
}
EXPORT_SYMBOL(ftrace_raw_output_prep);
#ifdef CONFIG_KRETPROBES #ifdef CONFIG_KRETPROBES
static inline const char *kretprobed(const char *name) static inline const char *kretprobed(const char *name)
{ {
...@@ -617,7 +659,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts) ...@@ -617,7 +659,7 @@ lat_print_timestamp(struct trace_iterator *iter, u64 next_ts)
{ {
unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE; unsigned long verbose = trace_flags & TRACE_ITER_VERBOSE;
unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS; unsigned long in_ns = iter->iter_flags & TRACE_FILE_TIME_IN_NS;
unsigned long long abs_ts = iter->ts - iter->tr->time_start; unsigned long long abs_ts = iter->ts - iter->trace_buffer->time_start;
unsigned long long rel_ts = next_ts - iter->ts; unsigned long long rel_ts = next_ts - iter->ts;
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
...@@ -783,12 +825,12 @@ static int trace_search_list(struct list_head **list) ...@@ -783,12 +825,12 @@ static int trace_search_list(struct list_head **list)
void trace_event_read_lock(void) void trace_event_read_lock(void)
{ {
down_read(&trace_event_mutex); down_read(&trace_event_sem);
} }
void trace_event_read_unlock(void) void trace_event_read_unlock(void)
{ {
up_read(&trace_event_mutex); up_read(&trace_event_sem);
} }
/** /**
...@@ -811,7 +853,7 @@ int register_ftrace_event(struct trace_event *event) ...@@ -811,7 +853,7 @@ int register_ftrace_event(struct trace_event *event)
unsigned key; unsigned key;
int ret = 0; int ret = 0;
down_write(&trace_event_mutex); down_write(&trace_event_sem);
if (WARN_ON(!event)) if (WARN_ON(!event))
goto out; goto out;
...@@ -866,14 +908,14 @@ int register_ftrace_event(struct trace_event *event) ...@@ -866,14 +908,14 @@ int register_ftrace_event(struct trace_event *event)
ret = event->type; ret = event->type;
out: out:
up_write(&trace_event_mutex); up_write(&trace_event_sem);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(register_ftrace_event); EXPORT_SYMBOL_GPL(register_ftrace_event);
/* /*
* Used by module code with the trace_event_mutex held for write. * Used by module code with the trace_event_sem held for write.
*/ */
int __unregister_ftrace_event(struct trace_event *event) int __unregister_ftrace_event(struct trace_event *event)
{ {
...@@ -888,9 +930,9 @@ int __unregister_ftrace_event(struct trace_event *event) ...@@ -888,9 +930,9 @@ int __unregister_ftrace_event(struct trace_event *event)
*/ */
int unregister_ftrace_event(struct trace_event *event) int unregister_ftrace_event(struct trace_event *event)
{ {
down_write(&trace_event_mutex); down_write(&trace_event_sem);
__unregister_ftrace_event(event); __unregister_ftrace_event(event);
up_write(&trace_event_mutex); up_write(&trace_event_sem);
return 0; return 0;
} }
...@@ -1217,6 +1259,64 @@ static struct trace_event trace_user_stack_event = { ...@@ -1217,6 +1259,64 @@ static struct trace_event trace_user_stack_event = {
.funcs = &trace_user_stack_funcs, .funcs = &trace_user_stack_funcs,
}; };
/* TRACE_BPUTS */
static enum print_line_t
trace_bputs_print(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct trace_entry *entry = iter->ent;
struct trace_seq *s = &iter->seq;
struct bputs_entry *field;
trace_assign_type(field, entry);
if (!seq_print_ip_sym(s, field->ip, flags))
goto partial;
if (!trace_seq_puts(s, ": "))
goto partial;
if (!trace_seq_puts(s, field->str))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static enum print_line_t
trace_bputs_raw(struct trace_iterator *iter, int flags,
struct trace_event *event)
{
struct bputs_entry *field;
struct trace_seq *s = &iter->seq;
trace_assign_type(field, iter->ent);
if (!trace_seq_printf(s, ": %lx : ", field->ip))
goto partial;
if (!trace_seq_puts(s, field->str))
goto partial;
return TRACE_TYPE_HANDLED;
partial:
return TRACE_TYPE_PARTIAL_LINE;
}
static struct trace_event_functions trace_bputs_funcs = {
.trace = trace_bputs_print,
.raw = trace_bputs_raw,
};
static struct trace_event trace_bputs_event = {
.type = TRACE_BPUTS,
.funcs = &trace_bputs_funcs,
};
/* TRACE_BPRINT */ /* TRACE_BPRINT */
static enum print_line_t static enum print_line_t
trace_bprint_print(struct trace_iterator *iter, int flags, trace_bprint_print(struct trace_iterator *iter, int flags,
...@@ -1329,6 +1429,7 @@ static struct trace_event *events[] __initdata = { ...@@ -1329,6 +1429,7 @@ static struct trace_event *events[] __initdata = {
&trace_wake_event, &trace_wake_event,
&trace_stack_event, &trace_stack_event,
&trace_user_stack_event, &trace_user_stack_event,
&trace_bputs_event,
&trace_bprint_event, &trace_bprint_event,
&trace_print_event, &trace_print_event,
NULL NULL
......
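TRACE_BPUTS gets its own struct trace_event with separate 'trace' and 'raw' printers, slotted into the __initdata events[] table above. For orientation, a minimal sketch of how an event type hooks into this registration path (names below are illustrative, not from the commit):

static enum print_line_t
example_event_trace(struct trace_iterator *iter, int flags,
		    struct trace_event *event)
{
	if (!trace_seq_puts(&iter->seq, "example event\n"))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}

static struct trace_event_functions example_event_funcs = {
	.trace	= example_event_trace,
};

static struct trace_event example_event = {
	.type	= 0,	/* 0 asks register_ftrace_event() to assign a type id */
	.funcs	= &example_event_funcs,
};

/* int ret = register_ftrace_event(&example_event); */

The rename from trace_event_mutex to trace_event_sem in the same hunk simply makes the name match the fact that it is an rw_semaphore, not a mutex.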
...@@ -4,6 +4,8 @@
#include <linux/trace_seq.h>
#include "trace.h"

+extern enum print_line_t
+trace_print_bputs_msg_only(struct trace_iterator *iter);
extern enum print_line_t
trace_print_bprintk_msg_only(struct trace_iterator *iter);
extern enum print_line_t
...@@ -31,7 +33,7 @@ trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
/* used by module unregistering */
extern int __unregister_ftrace_event(struct trace_event *event);

-extern struct rw_semaphore trace_event_mutex;
+extern struct rw_semaphore trace_event_sem;

#define MAX_MEMHEX_BYTES	8
#define HEX_CHARS		(MAX_MEMHEX_BYTES*2 + 1)
......
...@@ -28,7 +28,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
			   unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_context_switch;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;

...@@ -69,7 +69,7 @@ probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *n
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_switch_trace(ctx_trace, prev, next, flags, pc);
...@@ -86,7 +86,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
	struct ftrace_event_call *call = &event_wakeup;
	struct ring_buffer_event *event;
	struct ctx_switch_entry *entry;
-	struct ring_buffer *buffer = tr->buffer;
+	struct ring_buffer *buffer = tr->trace_buffer.buffer;

	event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
					  sizeof(*entry), flags, pc);
...@@ -123,7 +123,7 @@ probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
	pc = preempt_count();
	local_irq_save(flags);
	cpu = raw_smp_processor_id();
-	data = ctx_trace->data[cpu];
+	data = per_cpu_ptr(ctx_trace->trace_buffer.data, cpu);

	if (likely(!atomic_read(&data->disabled)))
		tracing_sched_wakeup_trace(ctx_trace, wakee, current,
......
...@@ -37,6 +37,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace); ...@@ -37,6 +37,7 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace);
static void wakeup_graph_return(struct ftrace_graph_ret *trace); static void wakeup_graph_return(struct ftrace_graph_ret *trace);
static int save_flags; static int save_flags;
static bool function_enabled;
#define TRACE_DISPLAY_GRAPH 1 #define TRACE_DISPLAY_GRAPH 1
...@@ -89,7 +90,7 @@ func_prolog_preempt_disable(struct trace_array *tr, ...@@ -89,7 +90,7 @@ func_prolog_preempt_disable(struct trace_array *tr,
if (cpu != wakeup_current_cpu) if (cpu != wakeup_current_cpu)
goto out_enable; goto out_enable;
*data = tr->data[cpu]; *data = per_cpu_ptr(tr->trace_buffer.data, cpu);
disabled = atomic_inc_return(&(*data)->disabled); disabled = atomic_inc_return(&(*data)->disabled);
if (unlikely(disabled != 1)) if (unlikely(disabled != 1))
goto out; goto out;
...@@ -134,15 +135,60 @@ static struct ftrace_ops trace_ops __read_mostly = ...@@ -134,15 +135,60 @@ static struct ftrace_ops trace_ops __read_mostly =
}; };
#endif /* CONFIG_FUNCTION_TRACER */ #endif /* CONFIG_FUNCTION_TRACER */
static int start_func_tracer(int graph) static int register_wakeup_function(int graph, int set)
{ {
int ret; int ret;
if (!graph) /* 'set' is set if TRACE_ITER_FUNCTION is about to be set */
ret = register_ftrace_function(&trace_ops); if (function_enabled || (!set && !(trace_flags & TRACE_ITER_FUNCTION)))
else return 0;
if (graph)
ret = register_ftrace_graph(&wakeup_graph_return, ret = register_ftrace_graph(&wakeup_graph_return,
&wakeup_graph_entry); &wakeup_graph_entry);
else
ret = register_ftrace_function(&trace_ops);
if (!ret)
function_enabled = true;
return ret;
}
static void unregister_wakeup_function(int graph)
{
if (!function_enabled)
return;
if (graph)
unregister_ftrace_graph();
else
unregister_ftrace_function(&trace_ops);
function_enabled = false;
}
static void wakeup_function_set(int set)
{
if (set)
register_wakeup_function(is_graph(), 1);
else
unregister_wakeup_function(is_graph());
}
static int wakeup_flag_changed(struct tracer *tracer, u32 mask, int set)
{
if (mask & TRACE_ITER_FUNCTION)
wakeup_function_set(set);
return trace_keep_overwrite(tracer, mask, set);
}
static int start_func_tracer(int graph)
{
int ret;
ret = register_wakeup_function(graph, 0);
if (!ret && tracing_is_enabled()) if (!ret && tracing_is_enabled())
tracer_enabled = 1; tracer_enabled = 1;
...@@ -156,10 +202,7 @@ static void stop_func_tracer(int graph) ...@@ -156,10 +202,7 @@ static void stop_func_tracer(int graph)
{ {
tracer_enabled = 0; tracer_enabled = 0;
if (!graph) unregister_wakeup_function(graph);
unregister_ftrace_function(&trace_ops);
else
unregister_ftrace_graph();
} }
#ifdef CONFIG_FUNCTION_GRAPH_TRACER #ifdef CONFIG_FUNCTION_GRAPH_TRACER
...@@ -353,7 +396,7 @@ probe_wakeup_sched_switch(void *ignore, ...@@ -353,7 +396,7 @@ probe_wakeup_sched_switch(void *ignore,
/* disable local data, not wakeup_cpu data */ /* disable local data, not wakeup_cpu data */
cpu = raw_smp_processor_id(); cpu = raw_smp_processor_id();
disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
if (likely(disabled != 1)) if (likely(disabled != 1))
goto out; goto out;
...@@ -365,7 +408,7 @@ probe_wakeup_sched_switch(void *ignore, ...@@ -365,7 +408,7 @@ probe_wakeup_sched_switch(void *ignore,
goto out_unlock; goto out_unlock;
/* The task we are waiting for is waking up */ /* The task we are waiting for is waking up */
data = wakeup_trace->data[wakeup_cpu]; data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
__trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc); __trace_function(wakeup_trace, CALLER_ADDR0, CALLER_ADDR1, flags, pc);
tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc); tracing_sched_switch_trace(wakeup_trace, prev, next, flags, pc);
...@@ -387,7 +430,7 @@ probe_wakeup_sched_switch(void *ignore, ...@@ -387,7 +430,7 @@ probe_wakeup_sched_switch(void *ignore,
arch_spin_unlock(&wakeup_lock); arch_spin_unlock(&wakeup_lock);
local_irq_restore(flags); local_irq_restore(flags);
out: out:
atomic_dec(&wakeup_trace->data[cpu]->disabled); atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
} }
static void __wakeup_reset(struct trace_array *tr) static void __wakeup_reset(struct trace_array *tr)
...@@ -405,7 +448,7 @@ static void wakeup_reset(struct trace_array *tr) ...@@ -405,7 +448,7 @@ static void wakeup_reset(struct trace_array *tr)
{ {
unsigned long flags; unsigned long flags;
tracing_reset_online_cpus(tr); tracing_reset_online_cpus(&tr->trace_buffer);
local_irq_save(flags); local_irq_save(flags);
arch_spin_lock(&wakeup_lock); arch_spin_lock(&wakeup_lock);
...@@ -435,7 +478,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) ...@@ -435,7 +478,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
return; return;
pc = preempt_count(); pc = preempt_count();
disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled); disabled = atomic_inc_return(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
if (unlikely(disabled != 1)) if (unlikely(disabled != 1))
goto out; goto out;
...@@ -458,7 +501,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) ...@@ -458,7 +501,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
local_save_flags(flags); local_save_flags(flags);
data = wakeup_trace->data[wakeup_cpu]; data = per_cpu_ptr(wakeup_trace->trace_buffer.data, wakeup_cpu);
data->preempt_timestamp = ftrace_now(cpu); data->preempt_timestamp = ftrace_now(cpu);
tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc); tracing_sched_wakeup_trace(wakeup_trace, p, current, flags, pc);
...@@ -472,7 +515,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success) ...@@ -472,7 +515,7 @@ probe_wakeup(void *ignore, struct task_struct *p, int success)
out_locked: out_locked:
arch_spin_unlock(&wakeup_lock); arch_spin_unlock(&wakeup_lock);
out: out:
atomic_dec(&wakeup_trace->data[cpu]->disabled); atomic_dec(&per_cpu_ptr(wakeup_trace->trace_buffer.data, cpu)->disabled);
} }
static void start_wakeup_tracer(struct trace_array *tr) static void start_wakeup_tracer(struct trace_array *tr)
...@@ -543,8 +586,8 @@ static int __wakeup_tracer_init(struct trace_array *tr) ...@@ -543,8 +586,8 @@ static int __wakeup_tracer_init(struct trace_array *tr)
save_flags = trace_flags; save_flags = trace_flags;
/* non overwrite screws up the latency tracers */ /* non overwrite screws up the latency tracers */
set_tracer_flag(TRACE_ITER_OVERWRITE, 1); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, 1);
set_tracer_flag(TRACE_ITER_LATENCY_FMT, 1); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, 1);
tracing_max_latency = 0; tracing_max_latency = 0;
wakeup_trace = tr; wakeup_trace = tr;
...@@ -573,8 +616,8 @@ static void wakeup_tracer_reset(struct trace_array *tr) ...@@ -573,8 +616,8 @@ static void wakeup_tracer_reset(struct trace_array *tr)
/* make sure we put back any tasks we are tracing */ /* make sure we put back any tasks we are tracing */
wakeup_reset(tr); wakeup_reset(tr);
set_tracer_flag(TRACE_ITER_LATENCY_FMT, lat_flag); set_tracer_flag(tr, TRACE_ITER_LATENCY_FMT, lat_flag);
set_tracer_flag(TRACE_ITER_OVERWRITE, overwrite_flag); set_tracer_flag(tr, TRACE_ITER_OVERWRITE, overwrite_flag);
} }
static void wakeup_tracer_start(struct trace_array *tr) static void wakeup_tracer_start(struct trace_array *tr)
...@@ -600,7 +643,7 @@ static struct tracer wakeup_tracer __read_mostly = ...@@ -600,7 +643,7 @@ static struct tracer wakeup_tracer __read_mostly =
.print_line = wakeup_print_line, .print_line = wakeup_print_line,
.flags = &tracer_flags, .flags = &tracer_flags,
.set_flag = wakeup_set_flag, .set_flag = wakeup_set_flag,
.flag_changed = trace_keep_overwrite, .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup, .selftest = trace_selftest_startup_wakeup,
#endif #endif
...@@ -622,7 +665,7 @@ static struct tracer wakeup_rt_tracer __read_mostly = ...@@ -622,7 +665,7 @@ static struct tracer wakeup_rt_tracer __read_mostly =
.print_line = wakeup_print_line, .print_line = wakeup_print_line,
.flags = &tracer_flags, .flags = &tracer_flags,
.set_flag = wakeup_set_flag, .set_flag = wakeup_set_flag,
.flag_changed = trace_keep_overwrite, .flag_changed = wakeup_flag_changed,
#ifdef CONFIG_FTRACE_SELFTEST #ifdef CONFIG_FTRACE_SELFTEST
.selftest = trace_selftest_startup_wakeup, .selftest = trace_selftest_startup_wakeup,
#endif #endif
......
...@@ -20,13 +20,24 @@ ...@@ -20,13 +20,24 @@
#define STACK_TRACE_ENTRIES 500 #define STACK_TRACE_ENTRIES 500
#ifdef CC_USING_FENTRY
# define fentry 1
#else
# define fentry 0
#endif
static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
{ [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
static unsigned stack_dump_index[STACK_TRACE_ENTRIES]; static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
/*
* Reserve one entry for the passed in ip. This will allow
* us to remove most or all of the stack size overhead
* added by the stack tracer itself.
*/
static struct stack_trace max_stack_trace = { static struct stack_trace max_stack_trace = {
.max_entries = STACK_TRACE_ENTRIES, .max_entries = STACK_TRACE_ENTRIES - 1,
.entries = stack_dump_trace, .entries = &stack_dump_trace[1],
}; };
static unsigned long max_stack_size; static unsigned long max_stack_size;
...@@ -39,25 +50,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex); ...@@ -39,25 +50,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex);
int stack_tracer_enabled; int stack_tracer_enabled;
static int last_stack_tracer_enabled; static int last_stack_tracer_enabled;
static inline void check_stack(void) static inline void
check_stack(unsigned long ip, unsigned long *stack)
{ {
unsigned long this_size, flags; unsigned long this_size, flags;
unsigned long *p, *top, *start; unsigned long *p, *top, *start;
static int tracer_frame;
int frame_size = ACCESS_ONCE(tracer_frame);
int i; int i;
this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1); this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
this_size = THREAD_SIZE - this_size; this_size = THREAD_SIZE - this_size;
/* Remove the frame of the tracer */
this_size -= frame_size;
if (this_size <= max_stack_size) if (this_size <= max_stack_size)
return; return;
/* we do not handle interrupt stacks yet */ /* we do not handle interrupt stacks yet */
if (!object_is_on_stack(&this_size)) if (!object_is_on_stack(stack))
return; return;
local_irq_save(flags); local_irq_save(flags);
arch_spin_lock(&max_stack_lock); arch_spin_lock(&max_stack_lock);
/* In case another CPU set the tracer_frame on us */
if (unlikely(!frame_size))
this_size -= tracer_frame;
/* a race could have already updated it */ /* a race could have already updated it */
if (this_size <= max_stack_size) if (this_size <= max_stack_size)
goto out; goto out;
...@@ -69,11 +89,19 @@ static inline void check_stack(void) ...@@ -69,11 +89,19 @@ static inline void check_stack(void)
save_stack_trace(&max_stack_trace); save_stack_trace(&max_stack_trace);
/*
* Add the passed in ip from the function tracer.
* Searching for this on the stack will skip over
* most of the overhead from the stack tracer itself.
*/
stack_dump_trace[0] = ip;
max_stack_trace.nr_entries++;
/* /*
* Now find where in the stack these are. * Now find where in the stack these are.
*/ */
i = 0; i = 0;
start = &this_size; start = stack;
top = (unsigned long *) top = (unsigned long *)
(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
...@@ -97,6 +125,18 @@ static inline void check_stack(void) ...@@ -97,6 +125,18 @@ static inline void check_stack(void)
found = 1; found = 1;
/* Start the search from here */ /* Start the search from here */
start = p + 1; start = p + 1;
/*
* We do not want to show the overhead
* of the stack tracer stack in the
* max stack. If we haven't figured
* out what that is, then figure it out
* now.
*/
if (unlikely(!tracer_frame) && i == 1) {
tracer_frame = (p - stack) *
sizeof(unsigned long);
max_stack_size -= tracer_frame;
}
} }
} }
...@@ -113,6 +153,7 @@ static void ...@@ -113,6 +153,7 @@ static void
stack_trace_call(unsigned long ip, unsigned long parent_ip, stack_trace_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs) struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
unsigned long stack;
int cpu; int cpu;
preempt_disable_notrace(); preempt_disable_notrace();
...@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip, ...@@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
if (per_cpu(trace_active, cpu)++ != 0) if (per_cpu(trace_active, cpu)++ != 0)
goto out; goto out;
check_stack(); /*
* When fentry is used, the traced function does not get
* its stack frame set up, and we lose the parent.
* The ip is pretty useless because the function tracer
* was called before that function set up its stack frame.
* In this case, we use the parent ip.
*
* By adding the return address of either the parent ip
* or the current ip we can disregard most of the stack usage
* caused by the stack tracer itself.
*
* The function tracer always reports the address of where the
* mcount call was, but the stack will hold the return address.
*/
if (fentry)
ip = parent_ip;
else
ip += MCOUNT_INSN_SIZE;
check_stack(ip, &stack);
out: out:
per_cpu(trace_active, cpu)--; per_cpu(trace_active, cpu)--;
...@@ -371,6 +431,8 @@ static __init int stack_trace_init(void) ...@@ -371,6 +431,8 @@ static __init int stack_trace_init(void)
struct dentry *d_tracer; struct dentry *d_tracer;
d_tracer = tracing_init_dentry(); d_tracer = tracing_init_dentry();
if (!d_tracer)
return 0;
trace_create_file("stack_max_size", 0644, d_tracer, trace_create_file("stack_max_size", 0644, d_tracer,
&max_stack_size, &stack_max_size_fops); &max_stack_size, &stack_max_size_fops);
......
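The stack tracer comments above describe the calibration trick: seed stack_dump_trace[0] with the traced ip, locate that ip on the real stack, and treat everything below that slot as overhead added by the stack tracer itself (tracer_frame), which is then subtracted from max_stack_size and from later measurements. A worked example with illustrative numbers (not from the commit):

/* Suppose the saved ip is found six words into the stack on x86-64. */
static unsigned long example_tracer_frame(unsigned long *stack, unsigned long *p)
{
	/* p == stack + 6  ->  6 * sizeof(unsigned long) == 48 bytes of
	 * stack used by the tracer itself before the traced function's
	 * own frame begins; this is the amount deducted as tracer_frame. */
	return (p - stack) * sizeof(unsigned long);
}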
...@@ -307,6 +307,8 @@ static int tracing_stat_init(void)
	struct dentry *d_tracing;

	d_tracing = tracing_init_dentry();
+	if (!d_tracing)
+		return 0;

	stat_dir = debugfs_create_dir("trace_stat", d_tracing);
	if (!stat_dir)
......