Commit 4fcdae83 authored by Steven Rostedt, committed by Thomas Gleixner

ftrace: comment code

This is the first installment of adding documentation to ftrace.
Expect many more patches of this kind in the near future.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ab46428c
@@ -64,26 +64,79 @@ cycle_t ftrace_now(int cpu)
	return cpu_clock(cpu);
}
/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store the trace entries. The
 * page descriptors of those pages are used to hold the linked
 * list itself, by chaining the lru item of each page in the
 * per-CPU buffer.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array global_trace;
static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);
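As a hedged userspace sketch of the layout described above: the kernel chains the real struct page descriptors through their lru member, so the node type and names below (buf_page, alloc_cpu_buffer) are illustrative assumptions, not kernel source.

#include <stdlib.h>

#define PAGE_SZ 4096

struct buf_page {
	struct buf_page *prev, *next;	/* stands in for page->lru */
	char data[PAGE_SZ];		/* trace entries are stored here */
};

/* Build a circular list of nr pages, as each CPU's buffer does. */
static struct buf_page *alloc_cpu_buffer(int nr)
{
	struct buf_page *head = NULL, *p;

	while (nr-- > 0) {
		p = calloc(1, sizeof(*p));
		if (!p)
			return NULL;	/* leaks on failure; fine for a sketch */
		if (!head) {
			head = p->prev = p->next = p;
		} else {
			/* link the new page in before the head */
			p->prev = head->prev;
			p->next = head;
			head->prev->next = p;
			head->prev = p;
		}
	}
	return head;
}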
/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while they continue examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset for
 * the global_trace so the tracing can continue.
 */
static struct trace_array max_tr;
static DEFINE_PER_CPU(struct trace_array_cpu, max_data);
/* tracer_enabled is used to toggle activation of a tracer */
static int tracer_enabled = 1;
/*
 * trace_nr_entries is the number of entries that are allocated
 * for a buffer. Note, the number of entries is always rounded
 * to a multiple of ENTRIES_PER_PAGE.
 */
static unsigned long trace_nr_entries = 65536UL;
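As a hedged illustration of that rounding (the exact expression in the allocation path is not part of this hunk), the page math would look something like the following; ENTRIES_PER_PAGE is assumed to be PAGE_SIZE divided by the entry size.

	/* round the requested entry count up to whole pages */
	pages = DIV_ROUND_UP(trace_nr_entries, ENTRIES_PER_PAGE);
	nr_entries = pages * ENTRIES_PER_PAGE;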
/* trace_types holds a linked list of available tracers. */
static struct tracer *trace_types __read_mostly;
/* current_trace points to the tracer that is currently active */
static struct tracer *current_trace __read_mostly;
/*
 * max_tracer_type_len is used to simplify the allocation of
 * buffers to read userspace tracer names. We keep track of
 * the longest tracer name registered.
 */
static int max_tracer_type_len;
/*
* trace_types_lock is used to protect the trace_types list.
* This lock is also used to keep user access serialized.
* Accesses from userspace will grab this lock while userspace
* activities happen inside the kernel.
*/
static DEFINE_MUTEX(trace_types_lock);
/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds iter_ctrl options */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * Simply wakes up any task that is blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
void trace_wake_up(void)
{
	/*
@@ -117,6 +170,14 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
	return nsecs / 1000;
}
/*
 * trace_flag_type is an enumeration that holds different
 * states recorded when a trace occurs. These are:
 *  IRQS_OFF     - interrupts were disabled
 *  NEED_RESCHED - a reschedule is requested
 *  HARDIRQ      - inside an interrupt handler
 *  SOFTIRQ      - inside a softirq handler
 */
enum trace_flag_type {
	TRACE_FLAG_IRQS_OFF = 0x01,
	TRACE_FLAG_NEED_RESCHED = 0x02,
@@ -124,10 +185,14 @@ enum trace_flag_type {
	TRACE_FLAG_SOFTIRQ = 0x08,
};
/*
* TRACE_ITER_SYM_MASK masks the options in trace_flags that
* control the output of kernel symbols.
*/
#define TRACE_ITER_SYM_MASK \
	(TRACE_ITER_PRINT_PARENT|TRACE_ITER_SYM_OFFSET|TRACE_ITER_SYM_ADDR)
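For illustration, selecting a single symbol-output style is a clear-then-set on trace_flags; this is a hypothetical snippet, not code from this patch:

	unsigned long flags = trace_flags;

	flags &= ~TRACE_ITER_SYM_MASK;	/* drop all symbol-output options */
	flags |= TRACE_ITER_SYM_OFFSET;	/* print the symbol+offset form only */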
/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
@@ -142,6 +207,15 @@ static const char *trace_options[] = {
	NULL
};
/*
* ftrace_max_lock is used to protect the swapping of buffers
* when taking a max snapshot. The buffers themselves are
* protected by per_cpu spinlocks. But the action of the swap
* needs its own lock.
*
* This is defined as a raw_spinlock_t in order to help
* with performance when lockdep debugging is enabled.
*/
static raw_spinlock_t ftrace_max_lock =
	(raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
@@ -172,6 +246,13 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
	tracing_record_cmdline(current);
}
/**
 * check_pages - integrity check of trace buffers
 * @data: the per-CPU buffer to check
 *
 * As a safety measure we check to make sure the data pages have not
 * been corrupted. TODO: make this configurable so it can be disabled,
 * since it adds a bit of overhead.
 */
void check_pages(struct trace_array_cpu *data)
{
	struct page *page, *tmp;
@@ -185,6 +266,13 @@ void check_pages(struct trace_array_cpu *data)
	}
}
/**
 * head_page - page address of the first page in a per-CPU buffer
 * @data: the per-CPU buffer to look at
 *
 * head_page returns the page address of the first page in
 * a per-CPU buffer. This also performs various consistency
 * checks to make sure the buffer has not been corrupted.
 */
void *head_page(struct trace_array_cpu *data)
{
	struct page *page;
@@ -199,6 +287,17 @@ void *head_page(struct trace_array_cpu *data)
	return page_address(page);
}
/**
 * trace_seq_printf - sequence printing of trace information
 * @s: trace sequence descriptor
 * @fmt: printf format string
 *
 * The tracer may use either sequence operations or its own
 * copy-to-user routines. To simplify formatting of a trace,
 * trace_seq_printf is used to store strings into a special
 * buffer (@s). Then the output may be either used by
 * the sequencer or pulled into another buffer.
 */
int
trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
@@ -222,6 +321,16 @@ trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
	return len;
}
/**
 * trace_seq_puts - trace sequence printing of a simple string
 * @s: trace sequence descriptor
 * @str: simple string to record
 *
 * The tracer may use either the sequence operations or its own
 * copy-to-user routines. This function records a simple string
 * into a special buffer (@s) for later retrieval by a sequencer
 * or other mechanism.
 */
static int
trace_seq_puts(struct trace_seq *s, const char *str)
{
@@ -304,6 +413,13 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
	trace_seq_reset(s);
}
/*
 * Flip the trace buffers between two trace descriptors.
 * Usually these are the buffers of the global_trace and
 * the max_tr, flipped to record a snapshot of the current trace.
 *
 * The ftrace_max_lock must be held.
 */
static void
flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
{
@@ -325,6 +441,15 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
	check_pages(tr2);
}
/**
* update_max_tr - snapshot all trace buffers from global_trace to max_tr
* @tr: tracer
* @tsk: the task with the latency
* @cpu: The cpu that initiated the trace.
*
* Flip the buffers between the @tr and the max_tr and record information
* about which task was the cause of this latency.
*/
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
@@ -349,6 +474,8 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
 * @tr - tracer
 * @tsk - task with the latency
 * @cpu - the cpu of the buffer to copy.
*
* Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
@@ -368,6 +495,12 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
	__raw_spin_unlock(&ftrace_max_lock);
}
/**
* register_tracer - register a tracer with the ftrace system.
* @type - the plugin for the tracer
*
* Register a new plugin tracer.
*/
int register_tracer(struct tracer *type)
{
	struct tracer *t;
...
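A hedged sketch of a minimal plugin registering itself through this entry point; the .name field is the only struct tracer member assumed here, and the init hook name is hypothetical:

	static struct tracer nop_tracer __read_mostly = {
		.name	= "nop",
	};

	static int __init init_nop_tracer(void)
	{
		return register_tracer(&nop_tracer);
	}
	device_initcall(init_nop_tracer);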
@@ -294,6 +294,13 @@ extern long ns2usecs(cycle_t nsec);
extern unsigned long trace_flags;
/*
 * trace_iterator_flags is an enumeration that defines the bit
 * positions into trace_flags that control the output.
 *
 * NOTE: These bits must match the trace_options array in
 * trace.c.
 */
enum trace_iterator_flags {
	TRACE_ITER_PRINT_PARENT = 0x01,
	TRACE_ITER_SYM_OFFSET = 0x02,
...