Commit b7f0c959 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

tracing: Pass trace_array into trace_buffer_unlock_commit()

In preparation for having trace options be per instance, the trace_array
needs to be passed to trace_buffer_unlock_commit().
trace_event_buffer_lock_reserve() already passes in the trace_event_file,
from which the trace_array can be derived.
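
For example, a caller that already holds a trace_event_file can hand its
tr member straight to the commit; a minimal sketch, mirroring the
event_trigger_unlock_commit() change below:

	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);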

Also add "__init" to function_test_events_call(), the function used by the
boot-up self test that runs the events together with function tracing.
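
Roughly, that annotation looks like the following sketch (event_tr is the
top-level trace_array the self test now looks up, as in the
function_test_events_call() hunk further down; the body is elided here):

	static struct trace_array *event_tr;

	static void __init
	function_test_events_call(unsigned long ip, unsigned long parent_ip,
				  struct ftrace_ops *op, struct pt_regs *pt_regs)
	{
		...
	}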
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 41907416
@@ -168,10 +168,12 @@ struct ring_buffer_event *
 trace_current_buffer_lock_reserve(struct ring_buffer **current_buffer,
				  int type, unsigned long len,
				  unsigned long flags, int pc);
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc);
-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs);
@@ -505,7 +507,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
 	enum event_trigger_type tt = ETT_NONE;

 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
+		trace_buffer_unlock_commit(file->tr, buffer, event, irq_flags, pc);

 	if (tt)
 		event_triggers_post_call(file, tt);
@@ -537,7 +539,7 @@ event_trigger_unlock_commit_regs(struct trace_event_file *file,
 	enum event_trigger_type tt = ETT_NONE;

 	if (!__event_trigger_test_discard(file, buffer, event, entry, &tt))
-		trace_buffer_unlock_commit_regs(buffer, event,
+		trace_buffer_unlock_commit_regs(file->tr, buffer, event,
						irq_flags, pc, regs);

 	if (tt)
@@ -103,7 +103,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 		memcpy((void *) t + sizeof(*t), data, len);

 		if (blk_tracer)
-			trace_buffer_unlock_commit(buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
 	}
 }
@@ -278,7 +278,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 		memcpy((void *) t + sizeof(*t), pdu_data, pdu_len);

 		if (blk_tracer) {
-			trace_buffer_unlock_commit(buffer, event, 0, pc);
+			trace_buffer_unlock_commit(blk_tr, buffer, event, 0, pc);
			return;
		}
 	}
@@ -1683,23 +1683,16 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
 	ring_buffer_unlock_commit(buffer, event);
 }

-static inline void
-__trace_buffer_unlock_commit(struct ring_buffer *buffer,
-			     struct ring_buffer_event *event,
-			     unsigned long flags, int pc)
+void trace_buffer_unlock_commit(struct trace_array *tr,
+				struct ring_buffer *buffer,
+				struct ring_buffer_event *event,
+				unsigned long flags, int pc)
 {
 	__buffer_unlock_commit(buffer, event);

 	ftrace_trace_stack(buffer, flags, 6, pc);
 	ftrace_trace_userstack(buffer, flags, pc);
 }
-
-void trace_buffer_unlock_commit(struct ring_buffer *buffer,
-				struct ring_buffer_event *event,
-				unsigned long flags, int pc)
-{
-	__trace_buffer_unlock_commit(buffer, event, flags, pc);
-}
 EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

 static struct ring_buffer *temp_buffer;
@@ -1741,7 +1734,8 @@ trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
 }
 EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

-void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
+void trace_buffer_unlock_commit_regs(struct trace_array *tr,
+				     struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
@@ -2891,7 +2891,9 @@ static __init void event_trace_self_tests(void)

 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);

-static void
+static struct trace_array *event_tr;
+
+static void __init
 function_test_events_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *op, struct pt_regs *pt_regs)
 {
@@ -2922,7 +2924,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;

-	trace_buffer_unlock_commit(buffer, event, flags, pc);
+	trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);

  out:
 	atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
@@ -2944,6 +2946,9 @@ static __init void event_trace_self_test_with_function(void)
		return;
 	}
 	pr_info("Running tests again, along with the function tracer\n");
+	event_tr = top_trace_array();
+	if (WARN_ON(!event_tr))
+		return;
 	event_trace_self_tests();
 	unregister_ftrace_function(&trace_ops);
 }
@@ -314,7 +314,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
 	entry->rw = *rw;

 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }

 void mmio_trace_rw(struct mmiotrace_rw *rw)
@@ -344,7 +344,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
 	entry->map = *map;

 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, 0, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, 0, pc);
 }

 void mmio_trace_mapping(struct mmiotrace_map *map)
@@ -388,7 +388,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
 	entry->next_cpu = task_cpu(next);

 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }

 static void
@@ -416,7 +416,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
 	entry->next_cpu = task_cpu(wakee);

 	if (!call_filter_check_discard(call, entry, buffer, event))
-		trace_buffer_unlock_commit(buffer, event, flags, pc);
+		trace_buffer_unlock_commit(tr, buffer, event, flags, pc);
 }

 static void notrace