Commit a2a16d6a authored by Steven Rostedt

function-graph: add option to calculate graph time or not

Graph time is the time a function spends executing the other functions it
calls. Thus if function A calls B and graph-time is set, then the time
reported for A includes the time spent in B. This is the default behavior.
But if graph-time is off, the time spent executing B is subtracted from A.
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
parent cafb168a
...@@ -365,6 +365,7 @@ struct ftrace_ret_stack { ...@@ -365,6 +365,7 @@ struct ftrace_ret_stack {
unsigned long ret; unsigned long ret;
unsigned long func; unsigned long func;
unsigned long long calltime; unsigned long long calltime;
unsigned long long subtime;
}; };
/* /*
...@@ -376,8 +377,6 @@ extern void return_to_handler(void); ...@@ -376,8 +377,6 @@ extern void return_to_handler(void);
extern int extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth); ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth);
extern void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret);
/* /*
* Sometimes we don't want to trace a function with the function * Sometimes we don't want to trace a function with the function
......
...@@ -604,6 +604,7 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace) ...@@ -604,6 +604,7 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
static void profile_graph_return(struct ftrace_graph_ret *trace) static void profile_graph_return(struct ftrace_graph_ret *trace)
{ {
struct ftrace_profile_stat *stat; struct ftrace_profile_stat *stat;
unsigned long long calltime;
struct ftrace_profile *rec; struct ftrace_profile *rec;
unsigned long flags; unsigned long flags;
...@@ -612,9 +613,27 @@ static void profile_graph_return(struct ftrace_graph_ret *trace) ...@@ -612,9 +613,27 @@ static void profile_graph_return(struct ftrace_graph_ret *trace)
if (!stat->hash) if (!stat->hash)
goto out; goto out;
calltime = trace->rettime - trace->calltime;
if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
int index;
index = trace->depth;
/* Append this call time to the parent time to subtract */
if (index)
current->ret_stack[index - 1].subtime += calltime;
if (current->ret_stack[index].subtime < calltime)
calltime -= current->ret_stack[index].subtime;
else
calltime = 0;
}
rec = ftrace_find_profiled_func(stat, trace->func); rec = ftrace_find_profiled_func(stat, trace->func);
if (rec) if (rec)
rec->time += trace->rettime - trace->calltime; rec->time += calltime;
out: out:
local_irq_restore(flags); local_irq_restore(flags);
} }
......
...@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait); ...@@ -255,7 +255,8 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
/* trace_flags holds trace_options default values */ /* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK | unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME; TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
TRACE_ITER_GRAPH_TIME;
/** /**
* trace_wake_up - wake up tasks waiting for trace input * trace_wake_up - wake up tasks waiting for trace input
...@@ -317,6 +318,7 @@ static const char *trace_options[] = { ...@@ -317,6 +318,7 @@ static const char *trace_options[] = {
"latency-format", "latency-format",
"global-clock", "global-clock",
"sleep-time", "sleep-time",
"graph-time",
NULL NULL
}; };
......
...@@ -685,6 +685,7 @@ enum trace_iterator_flags { ...@@ -685,6 +685,7 @@ enum trace_iterator_flags {
TRACE_ITER_LATENCY_FMT = 0x40000, TRACE_ITER_LATENCY_FMT = 0x40000,
TRACE_ITER_GLOBAL_CLK = 0x80000, TRACE_ITER_GLOBAL_CLK = 0x80000,
TRACE_ITER_SLEEP_TIME = 0x100000, TRACE_ITER_SLEEP_TIME = 0x100000,
TRACE_ITER_GRAPH_TIME = 0x200000,
}; };
/* /*
......
...@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth) ...@@ -78,13 +78,14 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
current->ret_stack[index].ret = ret; current->ret_stack[index].ret = ret;
current->ret_stack[index].func = func; current->ret_stack[index].func = func;
current->ret_stack[index].calltime = calltime; current->ret_stack[index].calltime = calltime;
current->ret_stack[index].subtime = 0;
*depth = index; *depth = index;
return 0; return 0;
} }
/* Retrieve a function return address to the trace stack on thread info.*/ /* Retrieve a function return address to the trace stack on thread info.*/
void static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{ {
int index; int index;
...@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret) ...@@ -104,9 +105,6 @@ ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
trace->calltime = current->ret_stack[index].calltime; trace->calltime = current->ret_stack[index].calltime;
trace->overrun = atomic_read(&current->trace_overrun); trace->overrun = atomic_read(&current->trace_overrun);
trace->depth = index; trace->depth = index;
barrier();
current->curr_ret_stack--;
} }
/* /*
...@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void) ...@@ -121,6 +119,8 @@ unsigned long ftrace_return_to_handler(void)
ftrace_pop_return_trace(&trace, &ret); ftrace_pop_return_trace(&trace, &ret);
trace.rettime = trace_clock_local(); trace.rettime = trace_clock_local();
ftrace_graph_return(&trace); ftrace_graph_return(&trace);
barrier();
current->curr_ret_stack--;
if (unlikely(!ret)) { if (unlikely(!ret)) {
ftrace_graph_stop(); ftrace_graph_stop();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment