Commit a1a138d0 authored by Masami Hiramatsu, committed by Frederic Weisbecker

tracing/kprobes: Use global event perf buffers in kprobe tracer

Use the new percpu global event buffers instead of an on-stack buffer in the
kprobe tracer while tracing through perf.
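Both profile handlers now take a per-CPU slot from the global perf trace buffers rather than building the event in an on-stack variable-length array. A minimal sketch of that buffer-selection pattern is shown below; the helper name get_profile_raw_data() is hypothetical and only for illustration, while trace_profile_buf, trace_profile_buf_nmi, local_irq_save(), rcu_dereference() and per_cpu_ptr() are the symbols the patch actually uses:

/*
 * Illustrative sketch only; this helper is hypothetical and not part of
 * the patch. It shows how each handler picks its per-CPU scratch buffer:
 * IRQs are disabled so the slot cannot be reused by an interrupt hitting
 * a probe on the same CPU, and the same critical section acts as the RCU
 * read side for the rcu_dereference() of the global buffer pointers.
 */
static char *get_profile_raw_data(unsigned long *irq_flags)
{
	char *raw_data;

	local_irq_save(*irq_flags);

	if (in_nmi())
		raw_data = rcu_dereference(trace_profile_buf_nmi);
	else
		raw_data = rcu_dereference(trace_profile_buf);

	if (!raw_data) {
		/* Buffers are only allocated while perf is using the event */
		local_irq_restore(*irq_flags);
		return NULL;
	}

	return per_cpu_ptr(raw_data, smp_processor_id());
}

In the patch itself, IRQs stay disabled until perf_tp_event() has consumed the entry, which is why both handlers finish with local_irq_restore() at the end: label.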
Signed-off-by: Masami Hiramatsu <mhiramat@redhat.com>
Acked-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: Jim Keniston <jkenisto@us.ibm.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Frank Ch. Eigler <fche@redhat.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Jason Baron <jbaron@redhat.com>
Cc: K.Prasad <prasad@linux.vnet.ibm.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: Li Zefan <lizf@cn.fujitsu.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <20090925182011.10157.60140.stgit@omoto>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
parent d7a4b414
@@ -1149,35 +1149,49 @@ static __kprobes int kprobe_profile_func(struct kprobe *kp,
 	struct trace_probe *tp = container_of(kp, struct trace_probe, rp.kp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
-	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	__size = SIZEOF_KPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
+
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, __cpu);
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
-		/*
-		 * Zero dead bytes from alignment to avoid stack leak
-		 * to userspace
-		 */
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
-
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->ip = (unsigned long)kp->addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ip, 1, entry, size);
-	} while (0);
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
+
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->ip = (unsigned long)kp->addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }
@@ -1188,33 +1202,50 @@ static __kprobes int kretprobe_profile_func(struct kretprobe_instance *ri,
 	struct trace_probe *tp = container_of(ri->rp, struct trace_probe, rp);
 	struct ftrace_event_call *call = &tp->call;
 	struct kretprobe_trace_entry *entry;
-	int size, __size, i, pc;
+	struct trace_entry *ent;
+	int size, __size, i, pc, __cpu;
 	unsigned long irq_flags;
+	char *raw_data;
 
-	local_save_flags(irq_flags);
 	pc = preempt_count();
 
 	__size = SIZEOF_KRETPROBE_TRACE_ENTRY(tp->nr_args);
 	size = ALIGN(__size + sizeof(u32), sizeof(u64));
 	size -= sizeof(u32);
+	if (WARN_ONCE(size > FTRACE_MAX_PROFILE_SIZE,
+		     "profile buffer not large enough"))
+		return 0;
+
+	/*
+	 * Protect the non nmi buffer
+	 * This also protects the rcu read side
+	 */
+	local_irq_save(irq_flags);
+	__cpu = smp_processor_id();
+
+	if (in_nmi())
+		raw_data = rcu_dereference(trace_profile_buf_nmi);
+	else
+		raw_data = rcu_dereference(trace_profile_buf);
+
+	if (!raw_data)
+		goto end;
+
+	raw_data = per_cpu_ptr(raw_data, __cpu);
+
+	/* Zero dead bytes from alignment to avoid buffer leak to userspace */
+	*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+	entry = (struct kretprobe_trace_entry *)raw_data;
+	ent = &entry->ent;
 
-	do {
-		char raw_data[size];
-		struct trace_entry *ent;
-
-		*(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
-		entry = (struct kretprobe_trace_entry *)raw_data;
-		ent = &entry->ent;
-
-		tracing_generic_entry_update(ent, irq_flags, pc);
-		ent->type = call->id;
-		entry->nargs = tp->nr_args;
-		entry->func = (unsigned long)tp->rp.kp.addr;
-		entry->ret_ip = (unsigned long)ri->ret_addr;
-		for (i = 0; i < tp->nr_args; i++)
-			entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
-		perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
-	} while (0);
+	tracing_generic_entry_update(ent, irq_flags, pc);
+	ent->type = call->id;
+	entry->nargs = tp->nr_args;
+	entry->func = (unsigned long)tp->rp.kp.addr;
+	entry->ret_ip = (unsigned long)ri->ret_addr;
+	for (i = 0; i < tp->nr_args; i++)
+		entry->args[i] = call_fetch(&tp->args[i].fetch, regs);
+	perf_tp_event(call->id, entry->ret_ip, 1, entry, size);
+end:
+	local_irq_restore(irq_flags);
 	return 0;
 }