Commit f80836c8 authored by Ingo Molnar


Merge branch 'tip/tracing/core-7' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
parents 598357eb ff5f149b
@@ -70,18 +70,25 @@ struct trace_iterator {
 };
 
+struct trace_event;
+
 typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
-					      int flags);
-struct trace_event {
-	struct hlist_node	node;
-	struct list_head	list;
-	int			type;
+					      int flags, struct trace_event *event);
+
+struct trace_event_functions {
 	trace_print_func	trace;
 	trace_print_func	raw;
 	trace_print_func	hex;
 	trace_print_func	binary;
 };
 
+struct trace_event {
+	struct hlist_node	node;
+	struct list_head	list;
+	int			type;
+	struct trace_event_functions	*funcs;
+};
+
 extern int register_ftrace_event(struct trace_event *event);
 extern int unregister_ftrace_event(struct trace_event *event);
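
The hunk above splits the output callbacks out of struct trace_event into a shared struct trace_event_functions, so many events can point at one callback table and each callback is told which trace_event invoked it. A minimal sketch of the new layout follows; the names example_print and example_funcs and the type numbers are hypothetical, not part of this commit.

#include <linux/ftrace_event.h>

static enum print_line_t example_print(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	/* a real handler would format iter->seq here */
	return TRACE_TYPE_HANDLED;
}

/* one callback table ... */
static struct trace_event_functions example_funcs = {
	.trace	= example_print,	/* raw/hex/binary left NULL in this sketch */
};

/* ... shared by two otherwise independent events */
static struct trace_event example_event_a = {
	.type	= 100,			/* made-up type numbers */
	.funcs	= &example_funcs,
};

static struct trace_event example_event_b = {
	.type	= 101,
	.funcs	= &example_funcs,
};

Each event would still be registered individually with register_ftrace_event(); only the print callbacks are shared.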
@@ -113,29 +120,70 @@ void tracing_record_cmdline(struct task_struct *tsk);
 
 struct event_filter;
 
+enum trace_reg {
+	TRACE_REG_REGISTER,
+	TRACE_REG_UNREGISTER,
+	TRACE_REG_PERF_REGISTER,
+	TRACE_REG_PERF_UNREGISTER,
+};
+
+struct ftrace_event_call;
+
+struct ftrace_event_class {
+	char			*system;
+	void			*probe;
+#ifdef CONFIG_PERF_EVENTS
+	void			*perf_probe;
+#endif
+	int			(*reg)(struct ftrace_event_call *event,
+				       enum trace_reg type);
+	int			(*define_fields)(struct ftrace_event_call *);
+	struct list_head	*(*get_fields)(struct ftrace_event_call *);
+	struct list_head	fields;
+	int			(*raw_init)(struct ftrace_event_call *);
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED_BIT,
+	TRACE_EVENT_FL_FILTERED_BIT,
+};
+
+enum {
+	TRACE_EVENT_FL_ENABLED		= (1 << TRACE_EVENT_FL_ENABLED_BIT),
+	TRACE_EVENT_FL_FILTERED		= (1 << TRACE_EVENT_FL_FILTERED_BIT),
+};
+
 struct ftrace_event_call {
 	struct list_head	list;
+	struct ftrace_event_class *class;
 	char			*name;
-	char			*system;
 	struct dentry		*dir;
-	struct trace_event	*event;
-	int			enabled;
-	int			(*regfunc)(struct ftrace_event_call *);
-	void			(*unregfunc)(struct ftrace_event_call *);
-	int			id;
+	struct trace_event	event;
 	const char		*print_fmt;
-	int			(*raw_init)(struct ftrace_event_call *);
-	int			(*define_fields)(struct ftrace_event_call *);
-	struct list_head	fields;
-	int			filter_active;
 	struct event_filter	*filter;
 	void			*mod;
 	void			*data;
 
+	/*
+	 * 32 bit flags:
+	 *   bit 1:		enabled
+	 *   bit 2:		filter_active
+	 *
+	 * Changes to flags must hold the event_mutex.
+	 *
+	 * Note: Reads of flags do not hold the event_mutex since
+	 *       they occur in critical sections. But the way flags
+	 *       is currently used, these changes do not affect the code
+	 *       except that when a change is made, it may have a slight
+	 *       delay in propagating the changes to other CPUs due to
+	 *       caching and such.
+	 */
+	unsigned int		flags;
+
+#ifdef CONFIG_PERF_EVENTS
 	int			perf_refcount;
 	struct hlist_head	*perf_events;
-	int			(*perf_event_enable)(struct ftrace_event_call *);
-	void			(*perf_event_disable)(struct ftrace_event_call *);
+#endif
 };
 
 #define PERF_MAX_TRACE_SIZE	2048
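
The enabled and filter_active ints collapse into bits of a single flags word, with the locking rule spelled out in the comment above. A sketch of how a writer would flip the filter bit, assuming the tracing core's event_mutex is visible at the call site (hypothetical helper, not in this commit):

static void example_set_filtered(struct ftrace_event_call *call, bool on)
{
	mutex_lock(&event_mutex);	/* writers must hold event_mutex */
	if (on)
		call->flags |= TRACE_EVENT_FL_FILTERED;
	else
		call->flags &= ~TRACE_EVENT_FL_FILTERED;
	mutex_unlock(&event_mutex);
}

Readers such as filter_check_discard() (updated later in this merge) only test the bit and tolerate the propagation delay the comment mentions.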
......
@@ -103,22 +103,6 @@ struct perf_event_attr;
 #define __SC_TEST5(t5, a5, ...)	__SC_TEST(t5); __SC_TEST4(__VA_ARGS__)
 #define __SC_TEST6(t6, a6, ...)	__SC_TEST(t6); __SC_TEST5(__VA_ARGS__)
 
-#ifdef CONFIG_PERF_EVENTS
-
-#define TRACE_SYS_ENTER_PERF_INIT(sname)				\
-	.perf_event_enable = perf_sysenter_enable,			\
-	.perf_event_disable = perf_sysenter_disable,
-
-#define TRACE_SYS_EXIT_PERF_INIT(sname)					\
-	.perf_event_enable = perf_sysexit_enable,			\
-	.perf_event_disable = perf_sysexit_disable,
-#else
-#define TRACE_SYS_ENTER_PERF(sname)
-#define TRACE_SYS_ENTER_PERF_INIT(sname)
-#define TRACE_SYS_EXIT_PERF(sname)
-#define TRACE_SYS_EXIT_PERF_INIT(sname)
-#endif /* CONFIG_PERF_EVENTS */
-
 #ifdef CONFIG_FTRACE_SYSCALLS
 #define __SC_STR_ADECL1(t, a)		#a
 #define __SC_STR_ADECL2(t, a, ...)	#a, __SC_STR_ADECL1(__VA_ARGS__)
@@ -134,54 +118,43 @@ struct perf_event_attr;
 #define __SC_STR_TDECL5(t, a, ...)	#t, __SC_STR_TDECL4(__VA_ARGS__)
 #define __SC_STR_TDECL6(t, a, ...)	#t, __SC_STR_TDECL5(__VA_ARGS__)
 
+extern struct ftrace_event_class event_class_syscall_enter;
+extern struct ftrace_event_class event_class_syscall_exit;
+extern struct trace_event_functions enter_syscall_print_funcs;
+extern struct trace_event_functions exit_syscall_print_funcs;
+
 #define SYSCALL_TRACE_ENTER_EVENT(sname)				\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_enter_##sname;		\
-	static struct trace_event enter_syscall_print_##sname = {	\
-		.trace			= print_syscall_enter,		\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_enter_##sname = {					\
 		.name			= "sys_enter"#sname,		\
-		.system			= "syscalls",			\
-		.event			= &enter_syscall_print_##sname,	\
-		.raw_init		= init_syscall_trace,		\
-		.define_fields		= syscall_enter_define_fields,	\
-		.regfunc		= reg_event_syscall_enter,	\
-		.unregfunc		= unreg_event_syscall_enter,	\
+		.class			= &event_class_syscall_enter,	\
+		.event.funcs		= &enter_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_ENTER_PERF_INIT(sname)			\
 	}
 
 #define SYSCALL_TRACE_EXIT_EVENT(sname)					\
-	static const struct syscall_metadata __syscall_meta_##sname;	\
+	static struct syscall_metadata __syscall_meta_##sname;		\
 	static struct ftrace_event_call					\
 	__attribute__((__aligned__(4))) event_exit_##sname;		\
-	static struct trace_event exit_syscall_print_##sname = {	\
-		.trace			= print_syscall_exit,		\
-	};								\
 	static struct ftrace_event_call __used				\
 	  __attribute__((__aligned__(4)))				\
 	  __attribute__((section("_ftrace_events")))			\
 	  event_exit_##sname = {					\
 		.name			= "sys_exit"#sname,		\
-		.system			= "syscalls",			\
-		.event			= &exit_syscall_print_##sname,	\
-		.raw_init		= init_syscall_trace,		\
-		.define_fields		= syscall_exit_define_fields,	\
-		.regfunc		= reg_event_syscall_exit,	\
-		.unregfunc		= unreg_event_syscall_exit,	\
+		.class			= &event_class_syscall_exit,	\
+		.event.funcs		= &exit_syscall_print_funcs,	\
 		.data			= (void *)&__syscall_meta_##sname,\
-		TRACE_SYS_EXIT_PERF_INIT(sname)				\
 	}
 
 #define SYSCALL_METADATA(sname, nb)				\
 	SYSCALL_TRACE_ENTER_EVENT(sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta_##sname = {				\
@@ -191,12 +164,14 @@ struct perf_event_attr;
 		.args		= args_##sname,			\
 		.enter_event	= &event_enter_##sname,		\
 		.exit_event	= &event_exit_##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta_##sname.exit_fields), \
 	};
 
 #define SYSCALL_DEFINE0(sname)					\
 	SYSCALL_TRACE_ENTER_EVENT(_##sname);			\
 	SYSCALL_TRACE_EXIT_EVENT(_##sname);			\
-	static const struct syscall_metadata __used		\
+	static struct syscall_metadata __used			\
 	  __attribute__((__aligned__(4)))			\
 	  __attribute__((section("__syscalls_metadata")))	\
 	  __syscall_meta__##sname = {				\
@@ -204,6 +179,8 @@ struct perf_event_attr;
 		.nb_args	= 0,				\
 		.enter_event	= &event_enter__##sname,	\
 		.exit_event	= &event_exit__##sname,		\
+		.enter_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.enter_fields), \
+		.exit_fields	= LIST_HEAD_INIT(__syscall_meta__##sname.exit_fields), \
 	};							\
 	asmlinkage long sys_##sname(void)
 #else
......
@@ -20,12 +20,17 @@
 struct module;
 struct tracepoint;
 
+struct tracepoint_func {
+	void *func;
+	void *data;
+};
+
 struct tracepoint {
 	const char *name;		/* Tracepoint name */
 	int state;			/* State. */
 	void (*regfunc)(void);
 	void (*unregfunc)(void);
-	void **funcs;
+	struct tracepoint_func *funcs;
 } __attribute__((aligned(32)));		/*
 					 * Aligned on 32 bytes because it is
 					 * globally visible and gcc happily
@@ -37,16 +42,19 @@ struct tracepoint {
  * Connect a probe to a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_register(const char *name, void *probe);
+extern int tracepoint_probe_register(const char *name, void *probe, void *data);
 
 /*
  * Disconnect a probe from a tracepoint.
  * Internal API, should not be used directly.
  */
-extern int tracepoint_probe_unregister(const char *name, void *probe);
+extern int
+tracepoint_probe_unregister(const char *name, void *probe, void *data);
 
-extern int tracepoint_probe_register_noupdate(const char *name, void *probe);
-extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe);
+extern int tracepoint_probe_register_noupdate(const char *name, void *probe,
+					      void *data);
+extern int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
+						void *data);
 extern void tracepoint_probe_update_all(void);
 
 struct tracepoint_iter {
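
With the extra void *data argument, every probe now receives a private pointer as its first parameter, and the same (probe, data) pair identifies the registration when unregistering. A hedged sketch against the low-level API declared above, using the sched_switch tracepoint whose probes elsewhere in this merge take (void *, struct task_struct *prev, struct task_struct *next); the counter and helper names are made up:

#include <linux/tracepoint.h>
#include <linux/sched.h>

static unsigned long my_switch_count;	/* hypothetical per-probe state */

static void my_sched_switch_probe(void *data,
				  struct task_struct *prev,
				  struct task_struct *next)
{
	(*(unsigned long *)data)++;	/* "data" is whatever was registered */
}

static int my_probe_attach(void)
{
	return tracepoint_probe_register("sched_switch",
					 (void *)my_sched_switch_probe,
					 &my_switch_count);
}

static void my_probe_detach(void)
{
	tracepoint_probe_unregister("sched_switch",
				    (void *)my_sched_switch_probe,
				    &my_switch_count);
}

In-tree code normally goes through the generated register_trace_sched_switch()/unregister_trace_sched_switch() wrappers instead, as the probe updates later in this merge show.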
@@ -102,17 +110,27 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 /*
  * it_func[0] is never NULL because there is at least one element in the array
  * when the array itself is non NULL.
+ *
+ * Note, the proto and args passed in include "__data" as the first parameter.
+ * The reason for this is to handle the "void" prototype. If a tracepoint
+ * has a "void" prototype, then it is invalid to declare a function
+ * as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
+ * "void *data", whereas the DECLARE_TRACE() will pass in "void *data, proto".
  */
 #define __DO_TRACE(tp, proto, args)					\
 	do {								\
-		void **it_func;						\
+		struct tracepoint_func *it_func_ptr;			\
+		void *it_func;						\
+		void *__data;						\
 									\
 		rcu_read_lock_sched_notrace();				\
-		it_func = rcu_dereference_sched((tp)->funcs);		\
-		if (it_func) {						\
+		it_func_ptr = rcu_dereference_sched((tp)->funcs);	\
+		if (it_func_ptr) {					\
 			do {						\
-				((void(*)(proto))(*it_func))(args);	\
-			} while (*(++it_func));				\
+				it_func = (it_func_ptr)->func;		\
+				__data = (it_func_ptr)->data;		\
+				((void(*)(proto))(it_func))(args);	\
+			} while ((++it_func_ptr)->func);		\
 		}							\
 		rcu_read_unlock_sched_notrace();			\
 	} while (0)
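
Unrolled, the new loop walks an array of tracepoint_func entries instead of bare function pointers and prepends each entry's data to the call. A hand-expanded sketch for a hypothetical tracepoint whose proto is "int arg" (so the callback proto is "void *__data, int arg"):

static inline void example_do_trace(struct tracepoint *tp, int arg)
{
	struct tracepoint_func *it_func_ptr;

	rcu_read_lock_sched_notrace();
	it_func_ptr = rcu_dereference_sched(tp->funcs);
	if (it_func_ptr) {
		do {
			void (*fn)(void *, int) =
				(void (*)(void *, int))it_func_ptr->func;

			/* the probe's private data is always argument 0 */
			fn(it_func_ptr->data, arg);
		} while ((++it_func_ptr)->func);
	}
	rcu_read_unlock_sched_notrace();
}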
@@ -122,24 +140,32 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
  */
-#define DECLARE_TRACE(name, proto, args)				\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	extern struct tracepoint __tracepoint_##name;			\
 	static inline void trace_##name(proto)				\
 	{								\
 		if (unlikely(__tracepoint_##name.state))		\
 			__DO_TRACE(&__tracepoint_##name,		\
-				TP_PROTO(proto), TP_ARGS(args));	\
+				TP_PROTO(data_proto),			\
+				TP_ARGS(data_args));			\
+	}								\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto), void *data)	\
+	{								\
+		return tracepoint_probe_register(#name, (void *)probe,	\
+						 data);			\
 	}								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto), void *data)	\
 	{								\
-		return tracepoint_probe_register(#name, (void *)probe);	\
+		return tracepoint_probe_unregister(#name, (void *)probe, \
+						   data);		\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline void						\
+	check_trace_callback_type_##name(void (*cb)(data_proto))	\
 	{								\
-		return tracepoint_probe_unregister(#name, (void *)probe);\
 	}
 
 #define DEFINE_TRACE_FN(name, reg, unreg)				\
 	static const char __tpstrtab_##name[]				\
 	__attribute__((section("__tracepoints_strings"))) = #name;	\
@@ -156,18 +182,23 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 	EXPORT_SYMBOL(__tracepoint_##name)
 
 #else /* !CONFIG_TRACEPOINTS */
-#define DECLARE_TRACE(name, proto, args)				\
-	static inline void _do_trace_##name(struct tracepoint *tp, proto) \
-	{ }								\
+#define __DECLARE_TRACE(name, proto, args, data_proto, data_args)	\
 	static inline void trace_##name(proto)				\
 	{ }								\
-	static inline int register_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	register_trace_##name(void (*probe)(data_proto),		\
+			      void *data)				\
 	{								\
 		return -ENOSYS;						\
 	}								\
-	static inline int unregister_trace_##name(void (*probe)(proto))	\
+	static inline int						\
+	unregister_trace_##name(void (*probe)(data_proto),		\
+				void *data)				\
 	{								\
 		return -ENOSYS;						\
-	}
+	}								\
+	static inline void check_trace_callback_type_##name(void (*cb)(data_proto)) \
+	{								\
+	}
 
 #define DEFINE_TRACE_FN(name, reg, unreg)
@@ -176,6 +207,29 @@ static inline void tracepoint_update_probe_range(struct tracepoint *begin,
 #define EXPORT_TRACEPOINT_SYMBOL(name)
 
 #endif /* CONFIG_TRACEPOINTS */
+
+/*
+ * The need for the DECLARE_TRACE_NOARGS() is to handle the prototype
+ * (void). "void" is a special value in a function prototype and can
+ * not be combined with other arguments. Since the DECLARE_TRACE()
+ * macro adds a data element at the beginning of the prototype,
+ * we need a way to differentiate "(void *data, proto)" from
+ * "(void *data, void)". The second prototype is invalid.
+ *
+ * DECLARE_TRACE_NOARGS() passes "void" as the tracepoint prototype
+ * and "void *__data" as the callback prototype.
+ *
+ * DECLARE_TRACE() passes "proto" as the tracepoint prototype and
+ * "void *__data, proto" as the callback prototype.
+ */
+#define DECLARE_TRACE_NOARGS(name)					\
+		__DECLARE_TRACE(name, void, , void *__data, __data)
+
+#define DECLARE_TRACE(name, proto, args)				\
+		__DECLARE_TRACE(name, PARAMS(proto), PARAMS(args),	\
+				PARAMS(void *__data, proto),		\
+				PARAMS(__data, args))
+
 #endif /* DECLARE_TRACE */
 
 #ifndef TRACE_EVENT
......
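Taken together, for a hypothetical DECLARE_TRACE(foo, TP_PROTO(int val), TP_ARGS(val)) the generated API now looks roughly like the sketch below: the trace_foo() call site is unchanged, registration gains a data pointer, and the probe prototype gains void *__data up front. The names here (foo, foo_probe, foo_hits) are illustrative only.

/* generated (roughly):
 *   void trace_foo(int val);
 *   int  register_trace_foo(void (*probe)(void *__data, int val), void *data);
 *   int  unregister_trace_foo(void (*probe)(void *__data, int val), void *data);
 *   void check_trace_callback_type_foo(void (*cb)(void *__data, int val));
 */
static unsigned long foo_hits;		/* hypothetical private state */

static void foo_probe(void *__data, int val)
{
	(*(unsigned long *)__data)++;
}

static int foo_attach(void)
{
	return register_trace_foo(foo_probe, &foo_hits);
}

static void foo_detach(void)
{
	unregister_trace_foo(foo_probe, &foo_hits);
}
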
@@ -25,6 +25,8 @@ struct syscall_metadata {
 	int		nb_args;
 	const char	**types;
 	const char	**args;
+	struct list_head enter_fields;
+	struct list_head exit_fields;
 
 	struct ftrace_event_call *enter_event;
 	struct ftrace_event_call *exit_event;
@@ -34,16 +36,16 @@ struct syscall_metadata {
 
 extern unsigned long arch_syscall_addr(int nr);
 extern int init_syscall_trace(struct ftrace_event_call *call);
-extern int syscall_enter_define_fields(struct ftrace_event_call *call);
-extern int syscall_exit_define_fields(struct ftrace_event_call *call);
 extern int reg_event_syscall_enter(struct ftrace_event_call *call);
 extern void unreg_event_syscall_enter(struct ftrace_event_call *call);
 extern int reg_event_syscall_exit(struct ftrace_event_call *call);
 extern void unreg_event_syscall_exit(struct ftrace_event_call *call);
 extern int
 ftrace_format_syscall(struct ftrace_event_call *call, struct trace_seq *s);
-enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags);
-enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags);
+enum print_line_t print_syscall_enter(struct trace_iterator *iter, int flags,
+				      struct trace_event *event);
+enum print_line_t print_syscall_exit(struct trace_iterator *iter, int flags,
+				     struct trace_event *event);
 #endif
 
 #ifdef CONFIG_PERF_EVENTS
......
@@ -3234,7 +3234,8 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 }
 
 static void
-ftrace_graph_probe_sched_switch(struct task_struct *prev, struct task_struct *next)
+ftrace_graph_probe_sched_switch(void *ignore,
+			struct task_struct *prev, struct task_struct *next)
 {
 	unsigned long long timestamp;
 	int index;
@@ -3288,7 +3289,7 @@ static int start_graph_tracing(void)
 	} while (ret == -EAGAIN);
 
 	if (!ret) {
-		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch);
+		ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 		if (ret)
 			pr_info("ftrace_graph: Couldn't activate tracepoint"
 				" probe to kernel_sched_switch\n");
@@ -3364,7 +3365,7 @@ void unregister_ftrace_graph(void)
 	ftrace_graph_entry = ftrace_graph_entry_stub;
 	ftrace_shutdown(FTRACE_STOP_FUNC_RET);
 	unregister_pm_notifier(&ftrace_suspend_notifier);
-	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch);
+	unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
 
  out:
 	mutex_unlock(&ftrace_lock);
......
...@@ -95,7 +95,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id, ...@@ -95,7 +95,8 @@ static inline void kmemtrace_free(enum kmemtrace_type_id type_id,
trace_wake_up(); trace_wake_up();
} }
static void kmemtrace_kmalloc(unsigned long call_site, static void kmemtrace_kmalloc(void *ignore,
unsigned long call_site,
const void *ptr, const void *ptr,
size_t bytes_req, size_t bytes_req,
size_t bytes_alloc, size_t bytes_alloc,
...@@ -105,7 +106,8 @@ static void kmemtrace_kmalloc(unsigned long call_site, ...@@ -105,7 +106,8 @@ static void kmemtrace_kmalloc(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, -1); bytes_req, bytes_alloc, gfp_flags, -1);
} }
static void kmemtrace_kmem_cache_alloc(unsigned long call_site, static void kmemtrace_kmem_cache_alloc(void *ignore,
unsigned long call_site,
const void *ptr, const void *ptr,
size_t bytes_req, size_t bytes_req,
size_t bytes_alloc, size_t bytes_alloc,
...@@ -115,7 +117,8 @@ static void kmemtrace_kmem_cache_alloc(unsigned long call_site, ...@@ -115,7 +117,8 @@ static void kmemtrace_kmem_cache_alloc(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, -1); bytes_req, bytes_alloc, gfp_flags, -1);
} }
static void kmemtrace_kmalloc_node(unsigned long call_site, static void kmemtrace_kmalloc_node(void *ignore,
unsigned long call_site,
const void *ptr, const void *ptr,
size_t bytes_req, size_t bytes_req,
size_t bytes_alloc, size_t bytes_alloc,
...@@ -126,7 +129,8 @@ static void kmemtrace_kmalloc_node(unsigned long call_site, ...@@ -126,7 +129,8 @@ static void kmemtrace_kmalloc_node(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, node); bytes_req, bytes_alloc, gfp_flags, node);
} }
static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, static void kmemtrace_kmem_cache_alloc_node(void *ignore,
unsigned long call_site,
const void *ptr, const void *ptr,
size_t bytes_req, size_t bytes_req,
size_t bytes_alloc, size_t bytes_alloc,
...@@ -137,12 +141,14 @@ static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site, ...@@ -137,12 +141,14 @@ static void kmemtrace_kmem_cache_alloc_node(unsigned long call_site,
bytes_req, bytes_alloc, gfp_flags, node); bytes_req, bytes_alloc, gfp_flags, node);
} }
static void kmemtrace_kfree(unsigned long call_site, const void *ptr) static void
kmemtrace_kfree(void *ignore, unsigned long call_site, const void *ptr)
{ {
kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr); kmemtrace_free(KMEMTRACE_TYPE_KMALLOC, call_site, ptr);
} }
static void kmemtrace_kmem_cache_free(unsigned long call_site, const void *ptr) static void kmemtrace_kmem_cache_free(void *ignore,
unsigned long call_site, const void *ptr)
{ {
kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr); kmemtrace_free(KMEMTRACE_TYPE_CACHE, call_site, ptr);
} }
...@@ -151,34 +157,34 @@ static int kmemtrace_start_probes(void) ...@@ -151,34 +157,34 @@ static int kmemtrace_start_probes(void)
{ {
int err; int err;
err = register_trace_kmalloc(kmemtrace_kmalloc); err = register_trace_kmalloc(kmemtrace_kmalloc, NULL);
if (err) if (err)
return err; return err;
err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); err = register_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
if (err) if (err)
return err; return err;
err = register_trace_kmalloc_node(kmemtrace_kmalloc_node); err = register_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
if (err) if (err)
return err; return err;
err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); err = register_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
if (err) if (err)
return err; return err;
err = register_trace_kfree(kmemtrace_kfree); err = register_trace_kfree(kmemtrace_kfree, NULL);
if (err) if (err)
return err; return err;
err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free); err = register_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
return err; return err;
} }
static void kmemtrace_stop_probes(void) static void kmemtrace_stop_probes(void)
{ {
unregister_trace_kmalloc(kmemtrace_kmalloc); unregister_trace_kmalloc(kmemtrace_kmalloc, NULL);
unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc); unregister_trace_kmem_cache_alloc(kmemtrace_kmem_cache_alloc, NULL);
unregister_trace_kmalloc_node(kmemtrace_kmalloc_node); unregister_trace_kmalloc_node(kmemtrace_kmalloc_node, NULL);
unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node); unregister_trace_kmem_cache_alloc_node(kmemtrace_kmem_cache_alloc_node, NULL);
unregister_trace_kfree(kmemtrace_kfree); unregister_trace_kfree(kmemtrace_kfree, NULL);
unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free); unregister_trace_kmem_cache_free(kmemtrace_kmem_cache_free, NULL);
} }
static int kmem_trace_init(struct trace_array *tr) static int kmem_trace_init(struct trace_array *tr)
...@@ -237,7 +243,8 @@ struct kmemtrace_user_event_alloc { ...@@ -237,7 +243,8 @@ struct kmemtrace_user_event_alloc {
}; };
static enum print_line_t static enum print_line_t
kmemtrace_print_alloc(struct trace_iterator *iter, int flags) kmemtrace_print_alloc(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct kmemtrace_alloc_entry *entry; struct kmemtrace_alloc_entry *entry;
...@@ -257,7 +264,8 @@ kmemtrace_print_alloc(struct trace_iterator *iter, int flags) ...@@ -257,7 +264,8 @@ kmemtrace_print_alloc(struct trace_iterator *iter, int flags)
} }
static enum print_line_t static enum print_line_t
kmemtrace_print_free(struct trace_iterator *iter, int flags) kmemtrace_print_free(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct kmemtrace_free_entry *entry; struct kmemtrace_free_entry *entry;
...@@ -275,7 +283,8 @@ kmemtrace_print_free(struct trace_iterator *iter, int flags) ...@@ -275,7 +283,8 @@ kmemtrace_print_free(struct trace_iterator *iter, int flags)
} }
static enum print_line_t static enum print_line_t
kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags) kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct kmemtrace_alloc_entry *entry; struct kmemtrace_alloc_entry *entry;
...@@ -309,7 +318,8 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags) ...@@ -309,7 +318,8 @@ kmemtrace_print_alloc_user(struct trace_iterator *iter, int flags)
} }
static enum print_line_t static enum print_line_t
kmemtrace_print_free_user(struct trace_iterator *iter, int flags) kmemtrace_print_free_user(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct kmemtrace_free_entry *entry; struct kmemtrace_free_entry *entry;
...@@ -463,18 +473,26 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter) ...@@ -463,18 +473,26 @@ static enum print_line_t kmemtrace_print_line(struct trace_iterator *iter)
} }
} }
static struct trace_event kmem_trace_alloc = { static struct trace_event_functions kmem_trace_alloc_funcs = {
.type = TRACE_KMEM_ALLOC,
.trace = kmemtrace_print_alloc, .trace = kmemtrace_print_alloc,
.binary = kmemtrace_print_alloc_user, .binary = kmemtrace_print_alloc_user,
}; };
static struct trace_event kmem_trace_free = { static struct trace_event kmem_trace_alloc = {
.type = TRACE_KMEM_FREE, .type = TRACE_KMEM_ALLOC,
.funcs = &kmem_trace_alloc_funcs,
};
static struct trace_event_functions kmem_trace_free_funcs = {
.trace = kmemtrace_print_free, .trace = kmemtrace_print_free,
.binary = kmemtrace_print_free_user, .binary = kmemtrace_print_free_user,
}; };
static struct trace_event kmem_trace_free = {
.type = TRACE_KMEM_FREE,
.funcs = &kmem_trace_free_funcs,
};
static struct tracer kmem_tracer __read_mostly = { static struct tracer kmem_tracer __read_mostly = {
.name = "kmemtrace", .name = "kmemtrace",
.init = kmem_trace_init, .init = kmem_trace_init,
......
@@ -1936,7 +1936,7 @@ static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
 	}
 
 	if (event)
-		return event->trace(iter, sym_flags);
+		return event->funcs->trace(iter, sym_flags, event);
 
 	if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
 		goto partial;
@@ -1962,7 +1962,7 @@ static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event)
-		return event->raw(iter, 0);
+		return event->funcs->raw(iter, 0, event);
 
 	if (!trace_seq_printf(s, "%d ?\n", entry->type))
 		goto partial;
@@ -1989,7 +1989,7 @@ static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
 
 	event = ftrace_find_event(entry->type);
 	if (event) {
-		enum print_line_t ret = event->hex(iter, 0);
+		enum print_line_t ret = event->funcs->hex(iter, 0, event);
 		if (ret != TRACE_TYPE_HANDLED)
 			return ret;
 	}
@@ -2014,7 +2014,8 @@ static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
 	}
 
 	event = ftrace_find_event(entry->type);
-	return event ? event->binary(iter, 0) : TRACE_TYPE_HANDLED;
+	return event ? event->funcs->binary(iter, 0, event) :
+		TRACE_TYPE_HANDLED;
 }
 
 int trace_empty(struct trace_iterator *iter)
......
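Passing the struct trace_event into the print callbacks is what lets a handler recover the object its trace_event is embedded in, instead of re-looking it up by type; trace_kprobe.c later in this merge does exactly that with container_of(event, struct trace_probe, call.event). A hedged sketch of the pattern with made-up names:

#include <linux/kernel.h>
#include <linux/ftrace_event.h>

struct example_probe {
	struct ftrace_event_call call;	/* trace_event lives at call.event */
	const char *label;		/* hypothetical per-probe state */
};

static enum print_line_t example_trace(struct trace_iterator *iter, int flags,
				       struct trace_event *event)
{
	struct example_probe *p =
		container_of(event, struct example_probe, call.event);

	if (!trace_seq_printf(&iter->seq, "%s\n", p->label))
		return TRACE_TYPE_PARTIAL_LINE;
	return TRACE_TYPE_HANDLED;
}
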
@@ -405,12 +405,12 @@ void ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 		   int pc);
 #else
-static inline void ftrace_trace_stack(struct trace_array *tr,
+static inline void ftrace_trace_stack(struct ring_buffer *buffer,
 				      unsigned long flags, int skip, int pc)
 {
 }
 
-static inline void ftrace_trace_userstack(struct trace_array *tr,
+static inline void ftrace_trace_userstack(struct ring_buffer *buffer,
 					  unsigned long flags, int pc)
 {
 }
@@ -778,12 +778,15 @@ extern void print_subsystem_event_filter(struct event_subsystem *system,
 					 struct trace_seq *s);
 extern int filter_assign_type(const char *type);
 
+struct list_head *
+trace_get_fields(struct ftrace_event_call *event_call);
+
 static inline int
 filter_check_discard(struct ftrace_event_call *call, void *rec,
 		     struct ring_buffer *buffer,
 		     struct ring_buffer_event *event)
 {
-	if (unlikely(call->filter_active) &&
+	if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
 	    !filter_match_preds(call->filter, rec)) {
 		ring_buffer_discard_commit(buffer, event);
 		return 1;
......
@@ -143,7 +143,7 @@ static void branch_trace_reset(struct trace_array *tr)
 }
 
 static enum print_line_t trace_branch_print(struct trace_iterator *iter,
-					    int flags)
+					    int flags, struct trace_event *event)
 {
 	struct trace_branch *field;
 
@@ -167,9 +167,13 @@ static void branch_print_header(struct seq_file *s)
 		    "    |\n");
 }
 
+static struct trace_event_functions trace_branch_funcs = {
+	.trace		= trace_branch_print,
+};
+
 static struct trace_event trace_branch_event = {
 	.type		= TRACE_BRANCH,
-	.trace		= trace_branch_print,
+	.funcs		= &trace_branch_funcs,
 };
 
 static struct tracer branch_trace __read_mostly =
......
@@ -56,7 +56,13 @@ static int perf_trace_event_init(struct ftrace_event_call *tp_event,
 		}
 	}
 
-	ret = tp_event->perf_event_enable(tp_event);
+	if (tp_event->class->reg)
+		ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER);
+	else
+		ret = tracepoint_probe_register(tp_event->name,
+						tp_event->class->perf_probe,
+						tp_event);
+
 	if (ret)
 		goto fail;
 
@@ -89,7 +95,8 @@ int perf_trace_init(struct perf_event *p_event)
 
 	mutex_lock(&event_mutex);
 	list_for_each_entry(tp_event, &ftrace_events, list) {
-		if (tp_event->id == event_id && tp_event->perf_event_enable &&
+		if (tp_event->event.type == event_id &&
+		    tp_event->class && tp_event->class->perf_probe &&
 		    try_module_get(tp_event->mod)) {
 			ret = perf_trace_event_init(tp_event, p_event);
 			break;
@@ -128,7 +135,12 @@ void perf_trace_destroy(struct perf_event *p_event)
 	if (--tp_event->perf_refcount > 0)
 		return;
 
-	tp_event->perf_event_disable(tp_event);
+	if (tp_event->class->reg)
+		tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER);
+	else
+		tracepoint_probe_unregister(tp_event->name,
+					    tp_event->class->perf_probe,
+					    tp_event);
 
 	free_percpu(tp_event->perf_events);
 	tp_event->perf_events = NULL;
......
...@@ -500,8 +500,10 @@ static struct ftrace_event_field * ...@@ -500,8 +500,10 @@ static struct ftrace_event_field *
find_event_field(struct ftrace_event_call *call, char *name) find_event_field(struct ftrace_event_call *call, char *name)
{ {
struct ftrace_event_field *field; struct ftrace_event_field *field;
struct list_head *head;
list_for_each_entry(field, &call->fields, link) { head = trace_get_fields(call);
list_for_each_entry(field, head, link) {
if (!strcmp(field->name, name)) if (!strcmp(field->name, name))
return field; return field;
} }
...@@ -545,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call) ...@@ -545,7 +547,7 @@ static void filter_disable_preds(struct ftrace_event_call *call)
struct event_filter *filter = call->filter; struct event_filter *filter = call->filter;
int i; int i;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
filter->n_preds = 0; filter->n_preds = 0;
for (i = 0; i < MAX_FILTER_PRED; i++) for (i = 0; i < MAX_FILTER_PRED; i++)
...@@ -572,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call) ...@@ -572,7 +574,7 @@ void destroy_preds(struct ftrace_event_call *call)
{ {
__free_preds(call->filter); __free_preds(call->filter);
call->filter = NULL; call->filter = NULL;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
} }
static struct event_filter *__alloc_preds(void) static struct event_filter *__alloc_preds(void)
...@@ -611,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call) ...@@ -611,7 +613,7 @@ static int init_preds(struct ftrace_event_call *call)
if (call->filter) if (call->filter)
return 0; return 0;
call->filter_active = 0; call->flags &= ~TRACE_EVENT_FL_FILTERED;
call->filter = __alloc_preds(); call->filter = __alloc_preds();
if (IS_ERR(call->filter)) if (IS_ERR(call->filter))
return PTR_ERR(call->filter); return PTR_ERR(call->filter);
...@@ -625,10 +627,10 @@ static int init_subsystem_preds(struct event_subsystem *system) ...@@ -625,10 +627,10 @@ static int init_subsystem_preds(struct event_subsystem *system)
int err; int err;
list_for_each_entry(call, &ftrace_events, list) { list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields) if (!call->class || !call->class->define_fields)
continue; continue;
if (strcmp(call->system, system->name) != 0) if (strcmp(call->class->system, system->name) != 0)
continue; continue;
err = init_preds(call); err = init_preds(call);
...@@ -644,10 +646,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system) ...@@ -644,10 +646,10 @@ static void filter_free_subsystem_preds(struct event_subsystem *system)
struct ftrace_event_call *call; struct ftrace_event_call *call;
list_for_each_entry(call, &ftrace_events, list) { list_for_each_entry(call, &ftrace_events, list) {
if (!call->define_fields) if (!call->class || !call->class->define_fields)
continue; continue;
if (strcmp(call->system, system->name) != 0) if (strcmp(call->class->system, system->name) != 0)
continue; continue;
filter_disable_preds(call); filter_disable_preds(call);
...@@ -1249,10 +1251,10 @@ static int replace_system_preds(struct event_subsystem *system, ...@@ -1249,10 +1251,10 @@ static int replace_system_preds(struct event_subsystem *system,
list_for_each_entry(call, &ftrace_events, list) { list_for_each_entry(call, &ftrace_events, list) {
struct event_filter *filter = call->filter; struct event_filter *filter = call->filter;
if (!call->define_fields) if (!call->class || !call->class->define_fields)
continue; continue;
if (strcmp(call->system, system->name) != 0) if (strcmp(call->class->system, system->name) != 0)
continue; continue;
/* try to see if the filter can be applied */ /* try to see if the filter can be applied */
...@@ -1266,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system, ...@@ -1266,7 +1268,7 @@ static int replace_system_preds(struct event_subsystem *system,
if (err) if (err)
filter_disable_preds(call); filter_disable_preds(call);
else { else {
call->filter_active = 1; call->flags |= TRACE_EVENT_FL_FILTERED;
replace_filter_string(filter, filter_string); replace_filter_string(filter, filter_string);
} }
fail = false; fail = false;
...@@ -1315,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string) ...@@ -1315,7 +1317,7 @@ int apply_event_filter(struct ftrace_event_call *call, char *filter_string)
if (err) if (err)
append_filter_err(ps, call->filter); append_filter_err(ps, call->filter);
else else
call->filter_active = 1; call->flags |= TRACE_EVENT_FL_FILTERED;
out: out:
filter_opstack_clear(ps); filter_opstack_clear(ps);
postfix_clear(ps); postfix_clear(ps);
...@@ -1393,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id, ...@@ -1393,7 +1395,7 @@ int ftrace_profile_set_filter(struct perf_event *event, int event_id,
mutex_lock(&event_mutex); mutex_lock(&event_mutex);
list_for_each_entry(call, &ftrace_events, list) { list_for_each_entry(call, &ftrace_events, list) {
if (call->id == event_id) if (call->event.type == event_id)
break; break;
} }
......
@@ -127,7 +127,7 @@ ftrace_define_fields_##name(struct ftrace_event_call *event_call)	\
 
 static int ftrace_raw_init_event(struct ftrace_event_call *call)
 {
-	INIT_LIST_HEAD(&call->fields);
+	INIT_LIST_HEAD(&call->class->fields);
 	return 0;
 }
 
@@ -153,17 +153,21 @@ static int ftrace_raw_init_event(struct ftrace_event_call *call)
 #define F_printk(fmt, args...) #fmt ", "  __stringify(args)
 
 #undef FTRACE_ENTRY
-#define FTRACE_ENTRY(call, struct_name, type, tstruct, print)		\
+#define FTRACE_ENTRY(call, struct_name, etype, tstruct, print)		\
+									\
+struct ftrace_event_class event_class_ftrace_##call = {		\
+	.system			= __stringify(TRACE_SYSTEM),		\
+	.define_fields		= ftrace_define_fields_##call,		\
+	.raw_init		= ftrace_raw_init_event,		\
+};									\
 									\
 struct ftrace_event_call __used						\
 __attribute__((__aligned__(4)))						\
 __attribute__((section("_ftrace_events"))) event_##call = {		\
 	.name			= #call,				\
-	.id			= type,					\
-	.system			= __stringify(TRACE_SYSTEM),		\
-	.raw_init		= ftrace_raw_init_event,		\
+	.event.type		= etype,				\
+	.class			= &event_class_ftrace_##call,		\
 	.print_fmt		= print,				\
-	.define_fields		= ftrace_define_fields_##call,		\
 };									\
 
 #include "trace_entries.h"
...@@ -1025,7 +1025,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent, ...@@ -1025,7 +1025,7 @@ print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
if (!event) if (!event)
return TRACE_TYPE_UNHANDLED; return TRACE_TYPE_UNHANDLED;
ret = event->trace(iter, sym_flags); ret = event->funcs->trace(iter, sym_flags, event);
if (ret != TRACE_TYPE_HANDLED) if (ret != TRACE_TYPE_HANDLED)
return ret; return ret;
} }
...@@ -1112,7 +1112,8 @@ print_graph_function(struct trace_iterator *iter) ...@@ -1112,7 +1112,8 @@ print_graph_function(struct trace_iterator *iter)
} }
static enum print_line_t static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags) print_graph_function_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
return print_graph_function(iter); return print_graph_function(iter);
} }
...@@ -1225,14 +1226,18 @@ void graph_trace_close(struct trace_iterator *iter) ...@@ -1225,14 +1226,18 @@ void graph_trace_close(struct trace_iterator *iter)
} }
} }
static struct trace_event_functions graph_functions = {
.trace = print_graph_function_event,
};
static struct trace_event graph_trace_entry_event = { static struct trace_event graph_trace_entry_event = {
.type = TRACE_GRAPH_ENT, .type = TRACE_GRAPH_ENT,
.trace = print_graph_function_event, .funcs = &graph_functions,
}; };
static struct trace_event graph_trace_ret_event = { static struct trace_event graph_trace_ret_event = {
.type = TRACE_GRAPH_RET, .type = TRACE_GRAPH_RET,
.trace = print_graph_function_event, .funcs = &graph_functions
}; };
static struct tracer graph_trace __read_mostly = { static struct tracer graph_trace __read_mostly = {
......
...@@ -324,8 +324,8 @@ struct trace_probe { ...@@ -324,8 +324,8 @@ struct trace_probe {
unsigned long nhit; unsigned long nhit;
unsigned int flags; /* For TP_FLAG_* */ unsigned int flags; /* For TP_FLAG_* */
const char *symbol; /* symbol name */ const char *symbol; /* symbol name */
struct ftrace_event_class class;
struct ftrace_event_call call; struct ftrace_event_call call;
struct trace_event event;
ssize_t size; /* trace entry size */ ssize_t size; /* trace entry size */
unsigned int nr_args; unsigned int nr_args;
struct probe_arg args[]; struct probe_arg args[];
...@@ -404,6 +404,7 @@ static struct trace_probe *alloc_trace_probe(const char *group, ...@@ -404,6 +404,7 @@ static struct trace_probe *alloc_trace_probe(const char *group,
goto error; goto error;
} }
tp->call.class = &tp->class;
tp->call.name = kstrdup(event, GFP_KERNEL); tp->call.name = kstrdup(event, GFP_KERNEL);
if (!tp->call.name) if (!tp->call.name)
goto error; goto error;
...@@ -413,8 +414,8 @@ static struct trace_probe *alloc_trace_probe(const char *group, ...@@ -413,8 +414,8 @@ static struct trace_probe *alloc_trace_probe(const char *group,
goto error; goto error;
} }
tp->call.system = kstrdup(group, GFP_KERNEL); tp->class.system = kstrdup(group, GFP_KERNEL);
if (!tp->call.system) if (!tp->class.system)
goto error; goto error;
INIT_LIST_HEAD(&tp->list); INIT_LIST_HEAD(&tp->list);
...@@ -443,7 +444,7 @@ static void free_trace_probe(struct trace_probe *tp) ...@@ -443,7 +444,7 @@ static void free_trace_probe(struct trace_probe *tp)
for (i = 0; i < tp->nr_args; i++) for (i = 0; i < tp->nr_args; i++)
free_probe_arg(&tp->args[i]); free_probe_arg(&tp->args[i]);
kfree(tp->call.system); kfree(tp->call.class->system);
kfree(tp->call.name); kfree(tp->call.name);
kfree(tp->symbol); kfree(tp->symbol);
kfree(tp); kfree(tp);
...@@ -456,7 +457,7 @@ static struct trace_probe *find_probe_event(const char *event, ...@@ -456,7 +457,7 @@ static struct trace_probe *find_probe_event(const char *event,
list_for_each_entry(tp, &probe_list, list) list_for_each_entry(tp, &probe_list, list)
if (strcmp(tp->call.name, event) == 0 && if (strcmp(tp->call.name, event) == 0 &&
strcmp(tp->call.system, group) == 0) strcmp(tp->call.class->system, group) == 0)
return tp; return tp;
return NULL; return NULL;
} }
...@@ -481,7 +482,7 @@ static int register_trace_probe(struct trace_probe *tp) ...@@ -481,7 +482,7 @@ static int register_trace_probe(struct trace_probe *tp)
mutex_lock(&probe_lock); mutex_lock(&probe_lock);
/* register as an event */ /* register as an event */
old_tp = find_probe_event(tp->call.name, tp->call.system); old_tp = find_probe_event(tp->call.name, tp->call.class->system);
if (old_tp) { if (old_tp) {
/* delete old event */ /* delete old event */
unregister_trace_probe(old_tp); unregister_trace_probe(old_tp);
...@@ -904,7 +905,7 @@ static int probes_seq_show(struct seq_file *m, void *v) ...@@ -904,7 +905,7 @@ static int probes_seq_show(struct seq_file *m, void *v)
int i; int i;
seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p'); seq_printf(m, "%c", probe_is_return(tp) ? 'r' : 'p');
seq_printf(m, ":%s/%s", tp->call.system, tp->call.name); seq_printf(m, ":%s/%s", tp->call.class->system, tp->call.name);
if (!tp->symbol) if (!tp->symbol)
seq_printf(m, " 0x%p", tp->rp.kp.addr); seq_printf(m, " 0x%p", tp->rp.kp.addr);
...@@ -1061,8 +1062,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs) ...@@ -1061,8 +1062,8 @@ static __kprobes void kprobe_trace_func(struct kprobe *kp, struct pt_regs *regs)
size = sizeof(*entry) + tp->size; size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size, event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
irq_flags, pc); size, irq_flags, pc);
if (!event) if (!event)
return; return;
...@@ -1094,8 +1095,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, ...@@ -1094,8 +1095,8 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
size = sizeof(*entry) + tp->size; size = sizeof(*entry) + tp->size;
event = trace_current_buffer_lock_reserve(&buffer, call->id, size, event = trace_current_buffer_lock_reserve(&buffer, call->event.type,
irq_flags, pc); size, irq_flags, pc);
if (!event) if (!event)
return; return;
...@@ -1112,18 +1113,17 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri, ...@@ -1112,18 +1113,17 @@ static __kprobes void kretprobe_trace_func(struct kretprobe_instance *ri,
/* Event entry printers */ /* Event entry printers */
enum print_line_t enum print_line_t
print_kprobe_event(struct trace_iterator *iter, int flags) print_kprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct kprobe_trace_entry_head *field; struct kprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp; struct trace_probe *tp;
u8 *data; u8 *data;
int i; int i;
field = (struct kprobe_trace_entry_head *)iter->ent; field = (struct kprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type); tp = container_of(event, struct trace_probe, call.event);
tp = container_of(event, struct trace_probe, event);
if (!trace_seq_printf(s, "%s: (", tp->call.name)) if (!trace_seq_printf(s, "%s: (", tp->call.name))
goto partial; goto partial;
...@@ -1149,18 +1149,17 @@ print_kprobe_event(struct trace_iterator *iter, int flags) ...@@ -1149,18 +1149,17 @@ print_kprobe_event(struct trace_iterator *iter, int flags)
} }
enum print_line_t enum print_line_t
print_kretprobe_event(struct trace_iterator *iter, int flags) print_kretprobe_event(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct kretprobe_trace_entry_head *field; struct kretprobe_trace_entry_head *field;
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct trace_event *event;
struct trace_probe *tp; struct trace_probe *tp;
u8 *data; u8 *data;
int i; int i;
field = (struct kretprobe_trace_entry_head *)iter->ent; field = (struct kretprobe_trace_entry_head *)iter->ent;
event = ftrace_find_event(field->ent.type); tp = container_of(event, struct trace_probe, call.event);
tp = container_of(event, struct trace_probe, event);
if (!trace_seq_printf(s, "%s: (", tp->call.name)) if (!trace_seq_printf(s, "%s: (", tp->call.name))
goto partial; goto partial;
...@@ -1217,8 +1216,6 @@ static void probe_event_disable(struct ftrace_event_call *call) ...@@ -1217,8 +1216,6 @@ static void probe_event_disable(struct ftrace_event_call *call)
static int probe_event_raw_init(struct ftrace_event_call *event_call) static int probe_event_raw_init(struct ftrace_event_call *event_call)
{ {
INIT_LIST_HEAD(&event_call->fields);
return 0; return 0;
} }
...@@ -1353,7 +1350,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp, ...@@ -1353,7 +1350,7 @@ static __kprobes void kprobe_perf_func(struct kprobe *kp,
"profile buffer not large enough")) "profile buffer not large enough"))
return; return;
entry = perf_trace_buf_prepare(size, call->id, regs, &rctx); entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry) if (!entry)
return; return;
...@@ -1385,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri, ...@@ -1385,7 +1382,7 @@ static __kprobes void kretprobe_perf_func(struct kretprobe_instance *ri,
"profile buffer not large enough")) "profile buffer not large enough"))
return; return;
entry = perf_trace_buf_prepare(size, call->id, regs, &rctx); entry = perf_trace_buf_prepare(size, call->event.type, regs, &rctx);
if (!entry) if (!entry)
return; return;
...@@ -1426,6 +1423,26 @@ static void probe_perf_disable(struct ftrace_event_call *call) ...@@ -1426,6 +1423,26 @@ static void probe_perf_disable(struct ftrace_event_call *call)
} }
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */
static __kprobes
int kprobe_register(struct ftrace_event_call *event, enum trace_reg type)
{
switch (type) {
case TRACE_REG_REGISTER:
return probe_event_enable(event);
case TRACE_REG_UNREGISTER:
probe_event_disable(event);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
return probe_perf_enable(event);
case TRACE_REG_PERF_UNREGISTER:
probe_perf_disable(event);
return 0;
#endif
}
return 0;
}
static __kprobes static __kprobes
int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs) int kprobe_dispatcher(struct kprobe *kp, struct pt_regs *regs)
...@@ -1455,6 +1472,14 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs) ...@@ -1455,6 +1472,14 @@ int kretprobe_dispatcher(struct kretprobe_instance *ri, struct pt_regs *regs)
return 0; /* We don't tweek kernel, so just return 0 */ return 0; /* We don't tweek kernel, so just return 0 */
} }
static struct trace_event_functions kretprobe_funcs = {
.trace = print_kretprobe_event
};
static struct trace_event_functions kprobe_funcs = {
.trace = print_kprobe_event
};
static int register_probe_event(struct trace_probe *tp) static int register_probe_event(struct trace_probe *tp)
{ {
struct ftrace_event_call *call = &tp->call; struct ftrace_event_call *call = &tp->call;
...@@ -1462,36 +1487,31 @@ static int register_probe_event(struct trace_probe *tp) ...@@ -1462,36 +1487,31 @@ static int register_probe_event(struct trace_probe *tp)
/* Initialize ftrace_event_call */ /* Initialize ftrace_event_call */
if (probe_is_return(tp)) { if (probe_is_return(tp)) {
tp->event.trace = print_kretprobe_event; INIT_LIST_HEAD(&call->class->fields);
call->raw_init = probe_event_raw_init; call->event.funcs = &kretprobe_funcs;
call->define_fields = kretprobe_event_define_fields; call->class->raw_init = probe_event_raw_init;
call->class->define_fields = kretprobe_event_define_fields;
} else { } else {
tp->event.trace = print_kprobe_event; INIT_LIST_HEAD(&call->class->fields);
call->raw_init = probe_event_raw_init; call->event.funcs = &kprobe_funcs;
call->define_fields = kprobe_event_define_fields; call->class->raw_init = probe_event_raw_init;
call->class->define_fields = kprobe_event_define_fields;
} }
if (set_print_fmt(tp) < 0) if (set_print_fmt(tp) < 0)
return -ENOMEM; return -ENOMEM;
call->event = &tp->event; ret = register_ftrace_event(&call->event);
call->id = register_ftrace_event(&tp->event); if (!ret) {
if (!call->id) {
kfree(call->print_fmt); kfree(call->print_fmt);
return -ENODEV; return -ENODEV;
} }
call->enabled = 0; call->flags = 0;
call->regfunc = probe_event_enable; call->class->reg = kprobe_register;
call->unregfunc = probe_event_disable;
#ifdef CONFIG_PERF_EVENTS
call->perf_event_enable = probe_perf_enable;
call->perf_event_disable = probe_perf_disable;
#endif
call->data = tp; call->data = tp;
ret = trace_add_event_call(call); ret = trace_add_event_call(call);
if (ret) { if (ret) {
pr_info("Failed to register kprobe event: %s\n", call->name); pr_info("Failed to register kprobe event: %s\n", call->name);
kfree(call->print_fmt); kfree(call->print_fmt);
unregister_ftrace_event(&tp->event); unregister_ftrace_event(&call->event);
} }
return ret; return ret;
} }
......
@@ -25,7 +25,7 @@ extern void trace_event_read_unlock(void);
 extern struct trace_event *ftrace_find_event(int type);
 
 extern enum print_line_t trace_nop_print(struct trace_iterator *iter,
-					 int flags);
+					 int flags, struct trace_event *event);
 extern int
 trace_print_lat_fmt(struct trace_seq *s, struct trace_entry *entry);
......
...@@ -50,7 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -50,7 +50,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
} }
static void static void
probe_sched_switch(struct task_struct *prev, struct task_struct *next) probe_sched_switch(void *ignore, struct task_struct *prev, struct task_struct *next)
{ {
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
...@@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -108,7 +108,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
} }
static void static void
probe_sched_wakeup(struct task_struct *wakee, int success) probe_sched_wakeup(void *ignore, struct task_struct *wakee, int success)
{ {
struct trace_array_cpu *data; struct trace_array_cpu *data;
unsigned long flags; unsigned long flags;
...@@ -138,21 +138,21 @@ static int tracing_sched_register(void) ...@@ -138,21 +138,21 @@ static int tracing_sched_register(void)
{ {
int ret; int ret;
ret = register_trace_sched_wakeup(probe_sched_wakeup); ret = register_trace_sched_wakeup(probe_sched_wakeup, NULL);
if (ret) { if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint" pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup\n"); " probe to kernel_sched_wakeup\n");
return ret; return ret;
} }
ret = register_trace_sched_wakeup_new(probe_sched_wakeup); ret = register_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
if (ret) { if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint" pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup_new\n"); " probe to kernel_sched_wakeup_new\n");
goto fail_deprobe; goto fail_deprobe;
} }
ret = register_trace_sched_switch(probe_sched_switch); ret = register_trace_sched_switch(probe_sched_switch, NULL);
if (ret) { if (ret) {
pr_info("sched trace: Couldn't activate tracepoint" pr_info("sched trace: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n"); " probe to kernel_sched_switch\n");
...@@ -161,17 +161,17 @@ static int tracing_sched_register(void) ...@@ -161,17 +161,17 @@ static int tracing_sched_register(void)
return ret; return ret;
fail_deprobe_wake_new: fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_sched_wakeup); unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
fail_deprobe: fail_deprobe:
unregister_trace_sched_wakeup(probe_sched_wakeup); unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
return ret; return ret;
} }
static void tracing_sched_unregister(void) static void tracing_sched_unregister(void)
{ {
unregister_trace_sched_switch(probe_sched_switch); unregister_trace_sched_switch(probe_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_sched_wakeup); unregister_trace_sched_wakeup_new(probe_sched_wakeup, NULL);
unregister_trace_sched_wakeup(probe_sched_wakeup); unregister_trace_sched_wakeup(probe_sched_wakeup, NULL);
} }
static void tracing_start_sched_switch(void) static void tracing_start_sched_switch(void)
......
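The scheduler-tracer hunks above show the new tracepoint probe convention introduced by this series: every probe gains a leading private-data pointer, and the register/unregister helpers take that pointer as a second argument (NULL when the probe keeps no state). A hedged sketch of a probe that does carry state; my_ctx, my_sched_switch_probe and my_register are hypothetical names:

        #include <trace/events/sched.h>

        /* Hypothetical per-registration state, handed back as the probe's first argument. */
        struct my_ctx {
                unsigned long nr_switches;
        };

        static struct my_ctx my_ctx;

        static void my_sched_switch_probe(void *data,
                                          struct task_struct *prev,
                                          struct task_struct *next)
        {
                struct my_ctx *ctx = data;      /* the pointer given at registration time */

                ctx->nr_switches++;
        }

        static int my_register(void)
        {
                /* the same (probe, data) pair must be passed when unregistering */
                return register_trace_sched_switch(my_sched_switch_probe, &my_ctx);
        }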
...@@ -98,7 +98,8 @@ static int report_latency(cycle_t delta) ...@@ -98,7 +98,8 @@ static int report_latency(cycle_t delta)
return 1; return 1;
} }
static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) static void
probe_wakeup_migrate_task(void *ignore, struct task_struct *task, int cpu)
{ {
if (task != wakeup_task) if (task != wakeup_task)
return; return;
...@@ -107,7 +108,8 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu) ...@@ -107,7 +108,8 @@ static void probe_wakeup_migrate_task(struct task_struct *task, int cpu)
} }
static void notrace static void notrace
probe_wakeup_sched_switch(struct task_struct *prev, struct task_struct *next) probe_wakeup_sched_switch(void *ignore,
struct task_struct *prev, struct task_struct *next)
{ {
struct trace_array_cpu *data; struct trace_array_cpu *data;
cycle_t T0, T1, delta; cycle_t T0, T1, delta;
...@@ -199,7 +201,7 @@ static void wakeup_reset(struct trace_array *tr) ...@@ -199,7 +201,7 @@ static void wakeup_reset(struct trace_array *tr)
} }
static void static void
probe_wakeup(struct task_struct *p, int success) probe_wakeup(void *ignore, struct task_struct *p, int success)
{ {
struct trace_array_cpu *data; struct trace_array_cpu *data;
int cpu = smp_processor_id(); int cpu = smp_processor_id();
...@@ -263,28 +265,28 @@ static void start_wakeup_tracer(struct trace_array *tr) ...@@ -263,28 +265,28 @@ static void start_wakeup_tracer(struct trace_array *tr)
{ {
int ret; int ret;
ret = register_trace_sched_wakeup(probe_wakeup); ret = register_trace_sched_wakeup(probe_wakeup, NULL);
if (ret) { if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint" pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup\n"); " probe to kernel_sched_wakeup\n");
return; return;
} }
ret = register_trace_sched_wakeup_new(probe_wakeup); ret = register_trace_sched_wakeup_new(probe_wakeup, NULL);
if (ret) { if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint" pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_wakeup_new\n"); " probe to kernel_sched_wakeup_new\n");
goto fail_deprobe; goto fail_deprobe;
} }
ret = register_trace_sched_switch(probe_wakeup_sched_switch); ret = register_trace_sched_switch(probe_wakeup_sched_switch, NULL);
if (ret) { if (ret) {
pr_info("sched trace: Couldn't activate tracepoint" pr_info("sched trace: Couldn't activate tracepoint"
" probe to kernel_sched_switch\n"); " probe to kernel_sched_switch\n");
goto fail_deprobe_wake_new; goto fail_deprobe_wake_new;
} }
ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task); ret = register_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
if (ret) { if (ret) {
pr_info("wakeup trace: Couldn't activate tracepoint" pr_info("wakeup trace: Couldn't activate tracepoint"
" probe to kernel_sched_migrate_task\n"); " probe to kernel_sched_migrate_task\n");
...@@ -311,19 +313,19 @@ static void start_wakeup_tracer(struct trace_array *tr) ...@@ -311,19 +313,19 @@ static void start_wakeup_tracer(struct trace_array *tr)
return; return;
fail_deprobe_wake_new: fail_deprobe_wake_new:
unregister_trace_sched_wakeup_new(probe_wakeup); unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
fail_deprobe: fail_deprobe:
unregister_trace_sched_wakeup(probe_wakeup); unregister_trace_sched_wakeup(probe_wakeup, NULL);
} }
static void stop_wakeup_tracer(struct trace_array *tr) static void stop_wakeup_tracer(struct trace_array *tr)
{ {
tracer_enabled = 0; tracer_enabled = 0;
unregister_ftrace_function(&trace_ops); unregister_ftrace_function(&trace_ops);
unregister_trace_sched_switch(probe_wakeup_sched_switch); unregister_trace_sched_switch(probe_wakeup_sched_switch, NULL);
unregister_trace_sched_wakeup_new(probe_wakeup); unregister_trace_sched_wakeup_new(probe_wakeup, NULL);
unregister_trace_sched_wakeup(probe_wakeup); unregister_trace_sched_wakeup(probe_wakeup, NULL);
unregister_trace_sched_migrate_task(probe_wakeup_migrate_task); unregister_trace_sched_migrate_task(probe_wakeup_migrate_task, NULL);
} }
static int __wakeup_tracer_init(struct trace_array *tr) static int __wakeup_tracer_init(struct trace_array *tr)
......
...@@ -15,6 +15,54 @@ static int sys_refcount_exit; ...@@ -15,6 +15,54 @@ static int sys_refcount_exit;
static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls); static DECLARE_BITMAP(enabled_exit_syscalls, NR_syscalls);
static int syscall_enter_register(struct ftrace_event_call *event,
enum trace_reg type);
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type);
static int syscall_enter_define_fields(struct ftrace_event_call *call);
static int syscall_exit_define_fields(struct ftrace_event_call *call);
static struct list_head *
syscall_get_enter_fields(struct ftrace_event_call *call)
{
struct syscall_metadata *entry = call->data;
return &entry->enter_fields;
}
static struct list_head *
syscall_get_exit_fields(struct ftrace_event_call *call)
{
struct syscall_metadata *entry = call->data;
return &entry->exit_fields;
}
struct trace_event_functions enter_syscall_print_funcs = {
.trace = print_syscall_enter,
};
struct trace_event_functions exit_syscall_print_funcs = {
.trace = print_syscall_exit,
};
struct ftrace_event_class event_class_syscall_enter = {
.system = "syscalls",
.reg = syscall_enter_register,
.define_fields = syscall_enter_define_fields,
.get_fields = syscall_get_enter_fields,
.raw_init = init_syscall_trace,
};
struct ftrace_event_class event_class_syscall_exit = {
.system = "syscalls",
.reg = syscall_exit_register,
.define_fields = syscall_exit_define_fields,
.get_fields = syscall_get_exit_fields,
.raw_init = init_syscall_trace,
};
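Unlike most events, syscall events keep their field lists inside struct syscall_metadata (one per direction), which is why the two classes above supply a get_fields() hook instead of relying on the shared class->fields list. A rough sketch of how a caller could resolve the right list either way; my_event_fields is an illustrative name and the kernel's own helper may differ:

        /* Sketch: resolving an event's field list through its class. */
        static struct list_head *my_event_fields(struct ftrace_event_call *call)
        {
                if (call->class->get_fields)
                        return call->class->get_fields(call);   /* e.g. syscall_get_enter_fields() */

                return &call->class->fields;                    /* the common per-class list */
        }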
extern unsigned long __start_syscalls_metadata[]; extern unsigned long __start_syscalls_metadata[];
extern unsigned long __stop_syscalls_metadata[]; extern unsigned long __stop_syscalls_metadata[];
...@@ -53,7 +101,8 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr) ...@@ -53,7 +101,8 @@ static struct syscall_metadata *syscall_nr_to_meta(int nr)
} }
enum print_line_t enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags) print_syscall_enter(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent; struct trace_entry *ent = iter->ent;
...@@ -68,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags) ...@@ -68,7 +117,7 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
if (!entry) if (!entry)
goto end; goto end;
if (entry->enter_event->id != ent->type) { if (entry->enter_event->event.type != ent->type) {
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
goto end; goto end;
} }
...@@ -105,7 +154,8 @@ print_syscall_enter(struct trace_iterator *iter, int flags) ...@@ -105,7 +154,8 @@ print_syscall_enter(struct trace_iterator *iter, int flags)
} }
enum print_line_t enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags) print_syscall_exit(struct trace_iterator *iter, int flags,
struct trace_event *event)
{ {
struct trace_seq *s = &iter->seq; struct trace_seq *s = &iter->seq;
struct trace_entry *ent = iter->ent; struct trace_entry *ent = iter->ent;
...@@ -123,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags) ...@@ -123,7 +173,7 @@ print_syscall_exit(struct trace_iterator *iter, int flags)
return TRACE_TYPE_HANDLED; return TRACE_TYPE_HANDLED;
} }
if (entry->exit_event->id != ent->type) { if (entry->exit_event->event.type != ent->type) {
WARN_ON_ONCE(1); WARN_ON_ONCE(1);
return TRACE_TYPE_UNHANDLED; return TRACE_TYPE_UNHANDLED;
} }
...@@ -205,7 +255,7 @@ static void free_syscall_print_fmt(struct ftrace_event_call *call) ...@@ -205,7 +255,7 @@ static void free_syscall_print_fmt(struct ftrace_event_call *call)
kfree(call->print_fmt); kfree(call->print_fmt);
} }
int syscall_enter_define_fields(struct ftrace_event_call *call) static int syscall_enter_define_fields(struct ftrace_event_call *call)
{ {
struct syscall_trace_enter trace; struct syscall_trace_enter trace;
struct syscall_metadata *meta = call->data; struct syscall_metadata *meta = call->data;
...@@ -228,7 +278,7 @@ int syscall_enter_define_fields(struct ftrace_event_call *call) ...@@ -228,7 +278,7 @@ int syscall_enter_define_fields(struct ftrace_event_call *call)
return ret; return ret;
} }
int syscall_exit_define_fields(struct ftrace_event_call *call) static int syscall_exit_define_fields(struct ftrace_event_call *call)
{ {
struct syscall_trace_exit trace; struct syscall_trace_exit trace;
int ret; int ret;
...@@ -243,7 +293,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call) ...@@ -243,7 +293,7 @@ int syscall_exit_define_fields(struct ftrace_event_call *call)
return ret; return ret;
} }
void ftrace_syscall_enter(struct pt_regs *regs, long id) void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{ {
struct syscall_trace_enter *entry; struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
...@@ -265,7 +315,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) ...@@ -265,7 +315,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
event = trace_current_buffer_lock_reserve(&buffer, event = trace_current_buffer_lock_reserve(&buffer,
sys_data->enter_event->id, size, 0, 0); sys_data->enter_event->event.type, size, 0, 0);
if (!event) if (!event)
return; return;
...@@ -278,7 +328,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id) ...@@ -278,7 +328,7 @@ void ftrace_syscall_enter(struct pt_regs *regs, long id)
trace_current_buffer_unlock_commit(buffer, event, 0, 0); trace_current_buffer_unlock_commit(buffer, event, 0, 0);
} }
void ftrace_syscall_exit(struct pt_regs *regs, long ret) void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{ {
struct syscall_trace_exit *entry; struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
...@@ -297,7 +347,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret) ...@@ -297,7 +347,7 @@ void ftrace_syscall_exit(struct pt_regs *regs, long ret)
return; return;
event = trace_current_buffer_lock_reserve(&buffer, event = trace_current_buffer_lock_reserve(&buffer,
sys_data->exit_event->id, sizeof(*entry), 0, 0); sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
if (!event) if (!event)
return; return;
...@@ -320,7 +370,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call) ...@@ -320,7 +370,7 @@ int reg_event_syscall_enter(struct ftrace_event_call *call)
return -ENOSYS; return -ENOSYS;
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
if (!sys_refcount_enter) if (!sys_refcount_enter)
ret = register_trace_sys_enter(ftrace_syscall_enter); ret = register_trace_sys_enter(ftrace_syscall_enter, NULL);
if (!ret) { if (!ret) {
set_bit(num, enabled_enter_syscalls); set_bit(num, enabled_enter_syscalls);
sys_refcount_enter++; sys_refcount_enter++;
...@@ -340,7 +390,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call) ...@@ -340,7 +390,7 @@ void unreg_event_syscall_enter(struct ftrace_event_call *call)
sys_refcount_enter--; sys_refcount_enter--;
clear_bit(num, enabled_enter_syscalls); clear_bit(num, enabled_enter_syscalls);
if (!sys_refcount_enter) if (!sys_refcount_enter)
unregister_trace_sys_enter(ftrace_syscall_enter); unregister_trace_sys_enter(ftrace_syscall_enter, NULL);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
} }
...@@ -354,7 +404,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call) ...@@ -354,7 +404,7 @@ int reg_event_syscall_exit(struct ftrace_event_call *call)
return -ENOSYS; return -ENOSYS;
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
if (!sys_refcount_exit) if (!sys_refcount_exit)
ret = register_trace_sys_exit(ftrace_syscall_exit); ret = register_trace_sys_exit(ftrace_syscall_exit, NULL);
if (!ret) { if (!ret) {
set_bit(num, enabled_exit_syscalls); set_bit(num, enabled_exit_syscalls);
sys_refcount_exit++; sys_refcount_exit++;
...@@ -374,7 +424,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call) ...@@ -374,7 +424,7 @@ void unreg_event_syscall_exit(struct ftrace_event_call *call)
sys_refcount_exit--; sys_refcount_exit--;
clear_bit(num, enabled_exit_syscalls); clear_bit(num, enabled_exit_syscalls);
if (!sys_refcount_exit) if (!sys_refcount_exit)
unregister_trace_sys_exit(ftrace_syscall_exit); unregister_trace_sys_exit(ftrace_syscall_exit, NULL);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
} }
...@@ -434,7 +484,7 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls); ...@@ -434,7 +484,7 @@ static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter; static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit; static int sys_perf_refcount_exit;
static void perf_syscall_enter(struct pt_regs *regs, long id) static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{ {
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct syscall_trace_enter *rec; struct syscall_trace_enter *rec;
...@@ -461,7 +511,7 @@ static void perf_syscall_enter(struct pt_regs *regs, long id) ...@@ -461,7 +511,7 @@ static void perf_syscall_enter(struct pt_regs *regs, long id)
return; return;
rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size, rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
sys_data->enter_event->id, regs, &rctx); sys_data->enter_event->event.type, regs, &rctx);
if (!rec) if (!rec)
return; return;
...@@ -482,7 +532,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call) ...@@ -482,7 +532,7 @@ int perf_sysenter_enable(struct ftrace_event_call *call)
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
if (!sys_perf_refcount_enter) if (!sys_perf_refcount_enter)
ret = register_trace_sys_enter(perf_syscall_enter); ret = register_trace_sys_enter(perf_syscall_enter, NULL);
if (ret) { if (ret) {
pr_info("event trace: Could not activate" pr_info("event trace: Could not activate"
"syscall entry trace point"); "syscall entry trace point");
...@@ -504,11 +554,11 @@ void perf_sysenter_disable(struct ftrace_event_call *call) ...@@ -504,11 +554,11 @@ void perf_sysenter_disable(struct ftrace_event_call *call)
sys_perf_refcount_enter--; sys_perf_refcount_enter--;
clear_bit(num, enabled_perf_enter_syscalls); clear_bit(num, enabled_perf_enter_syscalls);
if (!sys_perf_refcount_enter) if (!sys_perf_refcount_enter)
unregister_trace_sys_enter(perf_syscall_enter); unregister_trace_sys_enter(perf_syscall_enter, NULL);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
} }
static void perf_syscall_exit(struct pt_regs *regs, long ret) static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{ {
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct syscall_trace_exit *rec; struct syscall_trace_exit *rec;
...@@ -538,7 +588,7 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret) ...@@ -538,7 +588,7 @@ static void perf_syscall_exit(struct pt_regs *regs, long ret)
return; return;
rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size, rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
sys_data->exit_event->id, regs, &rctx); sys_data->exit_event->event.type, regs, &rctx);
if (!rec) if (!rec)
return; return;
...@@ -558,7 +608,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call) ...@@ -558,7 +608,7 @@ int perf_sysexit_enable(struct ftrace_event_call *call)
mutex_lock(&syscall_trace_lock); mutex_lock(&syscall_trace_lock);
if (!sys_perf_refcount_exit) if (!sys_perf_refcount_exit)
ret = register_trace_sys_exit(perf_syscall_exit); ret = register_trace_sys_exit(perf_syscall_exit, NULL);
if (ret) { if (ret) {
pr_info("event trace: Could not activate" pr_info("event trace: Could not activate"
"syscall exit trace point"); "syscall exit trace point");
...@@ -580,9 +630,50 @@ void perf_sysexit_disable(struct ftrace_event_call *call) ...@@ -580,9 +630,50 @@ void perf_sysexit_disable(struct ftrace_event_call *call)
sys_perf_refcount_exit--; sys_perf_refcount_exit--;
clear_bit(num, enabled_perf_exit_syscalls); clear_bit(num, enabled_perf_exit_syscalls);
if (!sys_perf_refcount_exit) if (!sys_perf_refcount_exit)
unregister_trace_sys_exit(perf_syscall_exit); unregister_trace_sys_exit(perf_syscall_exit, NULL);
mutex_unlock(&syscall_trace_lock); mutex_unlock(&syscall_trace_lock);
} }
#endif /* CONFIG_PERF_EVENTS */ #endif /* CONFIG_PERF_EVENTS */
static int syscall_enter_register(struct ftrace_event_call *event,
enum trace_reg type)
{
switch (type) {
case TRACE_REG_REGISTER:
return reg_event_syscall_enter(event);
case TRACE_REG_UNREGISTER:
unreg_event_syscall_enter(event);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
return perf_sysenter_enable(event);
case TRACE_REG_PERF_UNREGISTER:
perf_sysenter_disable(event);
return 0;
#endif
}
return 0;
}
static int syscall_exit_register(struct ftrace_event_call *event,
enum trace_reg type)
{
switch (type) {
case TRACE_REG_REGISTER:
return reg_event_syscall_exit(event);
case TRACE_REG_UNREGISTER:
unreg_event_syscall_exit(event);
return 0;
#ifdef CONFIG_PERF_EVENTS
case TRACE_REG_PERF_REGISTER:
return perf_sysexit_enable(event);
case TRACE_REG_PERF_UNREGISTER:
perf_sysexit_disable(event);
return 0;
#endif
}
return 0;
}
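syscall_enter_register() and syscall_exit_register() above implement the new class->reg() callback, which replaces the separate regfunc/unregfunc and perf enable/disable pointers with a single dispatcher keyed by enum trace_reg. Enabling an event then becomes one indirect call; the helper below is an illustrative sketch only (my_enable_event is not a real kernel function):

        /* Sketch: driving the unified reg() callback from the caller's side. */
        static int my_enable_event(struct ftrace_event_call *call, bool for_perf)
        {
                enum trace_reg type = for_perf ? TRACE_REG_PERF_REGISTER
                                               : TRACE_REG_REGISTER;

                return call->class->reg(call, type);    /* e.g. syscall_enter_register() */
        }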
...@@ -49,7 +49,8 @@ static void cpu_workqueue_stat_free(struct kref *kref) ...@@ -49,7 +49,8 @@ static void cpu_workqueue_stat_free(struct kref *kref)
/* Insertion of a work */ /* Insertion of a work */
static void static void
probe_workqueue_insertion(struct task_struct *wq_thread, probe_workqueue_insertion(void *ignore,
struct task_struct *wq_thread,
struct work_struct *work) struct work_struct *work)
{ {
int cpu = cpumask_first(&wq_thread->cpus_allowed); int cpu = cpumask_first(&wq_thread->cpus_allowed);
...@@ -70,7 +71,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread, ...@@ -70,7 +71,8 @@ probe_workqueue_insertion(struct task_struct *wq_thread,
/* Execution of a work */ /* Execution of a work */
static void static void
probe_workqueue_execution(struct task_struct *wq_thread, probe_workqueue_execution(void *ignore,
struct task_struct *wq_thread,
struct work_struct *work) struct work_struct *work)
{ {
int cpu = cpumask_first(&wq_thread->cpus_allowed); int cpu = cpumask_first(&wq_thread->cpus_allowed);
...@@ -90,7 +92,8 @@ probe_workqueue_execution(struct task_struct *wq_thread, ...@@ -90,7 +92,8 @@ probe_workqueue_execution(struct task_struct *wq_thread,
} }
/* Creation of a cpu workqueue thread */ /* Creation of a cpu workqueue thread */
static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) static void probe_workqueue_creation(void *ignore,
struct task_struct *wq_thread, int cpu)
{ {
struct cpu_workqueue_stats *cws; struct cpu_workqueue_stats *cws;
unsigned long flags; unsigned long flags;
...@@ -114,7 +117,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu) ...@@ -114,7 +117,8 @@ static void probe_workqueue_creation(struct task_struct *wq_thread, int cpu)
} }
/* Destruction of a cpu workqueue thread */ /* Destruction of a cpu workqueue thread */
static void probe_workqueue_destruction(struct task_struct *wq_thread) static void
probe_workqueue_destruction(void *ignore, struct task_struct *wq_thread)
{ {
/* Workqueue only execute on one cpu */ /* Workqueue only execute on one cpu */
int cpu = cpumask_first(&wq_thread->cpus_allowed); int cpu = cpumask_first(&wq_thread->cpus_allowed);
...@@ -259,19 +263,19 @@ int __init trace_workqueue_early_init(void) ...@@ -259,19 +263,19 @@ int __init trace_workqueue_early_init(void)
{ {
int ret, cpu; int ret, cpu;
ret = register_trace_workqueue_insertion(probe_workqueue_insertion); ret = register_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
if (ret) if (ret)
goto out; goto out;
ret = register_trace_workqueue_execution(probe_workqueue_execution); ret = register_trace_workqueue_execution(probe_workqueue_execution, NULL);
if (ret) if (ret)
goto no_insertion; goto no_insertion;
ret = register_trace_workqueue_creation(probe_workqueue_creation); ret = register_trace_workqueue_creation(probe_workqueue_creation, NULL);
if (ret) if (ret)
goto no_execution; goto no_execution;
ret = register_trace_workqueue_destruction(probe_workqueue_destruction); ret = register_trace_workqueue_destruction(probe_workqueue_destruction, NULL);
if (ret) if (ret)
goto no_creation; goto no_creation;
...@@ -283,11 +287,11 @@ int __init trace_workqueue_early_init(void) ...@@ -283,11 +287,11 @@ int __init trace_workqueue_early_init(void)
return 0; return 0;
no_creation: no_creation:
unregister_trace_workqueue_creation(probe_workqueue_creation); unregister_trace_workqueue_creation(probe_workqueue_creation, NULL);
no_execution: no_execution:
unregister_trace_workqueue_execution(probe_workqueue_execution); unregister_trace_workqueue_execution(probe_workqueue_execution, NULL);
no_insertion: no_insertion:
unregister_trace_workqueue_insertion(probe_workqueue_insertion); unregister_trace_workqueue_insertion(probe_workqueue_insertion, NULL);
out: out:
pr_warning("trace_workqueue: unable to trace workqueues\n"); pr_warning("trace_workqueue: unable to trace workqueues\n");
......
...@@ -54,7 +54,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE]; ...@@ -54,7 +54,7 @@ static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
*/ */
struct tracepoint_entry { struct tracepoint_entry {
struct hlist_node hlist; struct hlist_node hlist;
void **funcs; struct tracepoint_func *funcs;
int refcount; /* Number of times armed. 0 if disarmed. */ int refcount; /* Number of times armed. 0 if disarmed. */
char name[0]; char name[0];
}; };
...@@ -64,12 +64,12 @@ struct tp_probes { ...@@ -64,12 +64,12 @@ struct tp_probes {
struct rcu_head rcu; struct rcu_head rcu;
struct list_head list; struct list_head list;
} u; } u;
void *probes[0]; struct tracepoint_func probes[0];
}; };
static inline void *allocate_probes(int count) static inline void *allocate_probes(int count)
{ {
struct tp_probes *p = kmalloc(count * sizeof(void *) struct tp_probes *p = kmalloc(count * sizeof(struct tracepoint_func)
+ sizeof(struct tp_probes), GFP_KERNEL); + sizeof(struct tp_probes), GFP_KERNEL);
return p == NULL ? NULL : p->probes; return p == NULL ? NULL : p->probes;
} }
...@@ -79,7 +79,7 @@ static void rcu_free_old_probes(struct rcu_head *head) ...@@ -79,7 +79,7 @@ static void rcu_free_old_probes(struct rcu_head *head)
kfree(container_of(head, struct tp_probes, u.rcu)); kfree(container_of(head, struct tp_probes, u.rcu));
} }
static inline void release_probes(void *old) static inline void release_probes(struct tracepoint_func *old)
{ {
if (old) { if (old) {
struct tp_probes *tp_probes = container_of(old, struct tp_probes *tp_probes = container_of(old,
...@@ -95,15 +95,16 @@ static void debug_print_probes(struct tracepoint_entry *entry) ...@@ -95,15 +95,16 @@ static void debug_print_probes(struct tracepoint_entry *entry)
if (!tracepoint_debug || !entry->funcs) if (!tracepoint_debug || !entry->funcs)
return; return;
for (i = 0; entry->funcs[i]; i++) for (i = 0; entry->funcs[i].func; i++)
printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]); printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i].func);
} }
static void * static struct tracepoint_func *
tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) tracepoint_entry_add_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{ {
int nr_probes = 0; int nr_probes = 0;
void **old, **new; struct tracepoint_func *old, *new;
WARN_ON(!probe); WARN_ON(!probe);
...@@ -111,8 +112,9 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) ...@@ -111,8 +112,9 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
old = entry->funcs; old = entry->funcs;
if (old) { if (old) {
/* (N -> N+1), (N != 0, 1) probes */ /* (N -> N+1), (N != 0, 1) probes */
for (nr_probes = 0; old[nr_probes]; nr_probes++) for (nr_probes = 0; old[nr_probes].func; nr_probes++)
if (old[nr_probes] == probe) if (old[nr_probes].func == probe &&
old[nr_probes].data == data)
return ERR_PTR(-EEXIST); return ERR_PTR(-EEXIST);
} }
/* + 2 : one for new probe, one for NULL func */ /* + 2 : one for new probe, one for NULL func */
...@@ -120,9 +122,10 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) ...@@ -120,9 +122,10 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
if (new == NULL) if (new == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
if (old) if (old)
memcpy(new, old, nr_probes * sizeof(void *)); memcpy(new, old, nr_probes * sizeof(struct tracepoint_func));
new[nr_probes] = probe; new[nr_probes].func = probe;
new[nr_probes + 1] = NULL; new[nr_probes].data = data;
new[nr_probes + 1].func = NULL;
entry->refcount = nr_probes + 1; entry->refcount = nr_probes + 1;
entry->funcs = new; entry->funcs = new;
debug_print_probes(entry); debug_print_probes(entry);
...@@ -130,10 +133,11 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe) ...@@ -130,10 +133,11 @@ tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
} }
static void * static void *
tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) tracepoint_entry_remove_probe(struct tracepoint_entry *entry,
void *probe, void *data)
{ {
int nr_probes = 0, nr_del = 0, i; int nr_probes = 0, nr_del = 0, i;
void **old, **new; struct tracepoint_func *old, *new;
old = entry->funcs; old = entry->funcs;
...@@ -142,8 +146,10 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) ...@@ -142,8 +146,10 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
debug_print_probes(entry); debug_print_probes(entry);
/* (N -> M), (N > 1, M >= 0) probes */ /* (N -> M), (N > 1, M >= 0) probes */
for (nr_probes = 0; old[nr_probes]; nr_probes++) { for (nr_probes = 0; old[nr_probes].func; nr_probes++) {
if ((!probe || old[nr_probes] == probe)) if (!probe ||
(old[nr_probes].func == probe &&
old[nr_probes].data == data))
nr_del++; nr_del++;
} }
...@@ -160,10 +166,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe) ...@@ -160,10 +166,11 @@ tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
new = allocate_probes(nr_probes - nr_del + 1); new = allocate_probes(nr_probes - nr_del + 1);
if (new == NULL) if (new == NULL)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
for (i = 0; old[i]; i++) for (i = 0; old[i].func; i++)
if ((probe && old[i] != probe)) if (probe &&
(old[i].func != probe || old[i].data != data))
new[j++] = old[i]; new[j++] = old[i];
new[nr_probes - nr_del] = NULL; new[nr_probes - nr_del].func = NULL;
entry->refcount = nr_probes - nr_del; entry->refcount = nr_probes - nr_del;
entry->funcs = new; entry->funcs = new;
} }
...@@ -315,18 +322,19 @@ static void tracepoint_update_probes(void) ...@@ -315,18 +322,19 @@ static void tracepoint_update_probes(void)
module_update_tracepoints(); module_update_tracepoints();
} }
static void *tracepoint_add_probe(const char *name, void *probe) static struct tracepoint_func *
tracepoint_add_probe(const char *name, void *probe, void *data)
{ {
struct tracepoint_entry *entry; struct tracepoint_entry *entry;
void *old; struct tracepoint_func *old;
entry = get_tracepoint(name); entry = get_tracepoint(name);
if (!entry) { if (!entry) {
entry = add_tracepoint(name); entry = add_tracepoint(name);
if (IS_ERR(entry)) if (IS_ERR(entry))
return entry; return (struct tracepoint_func *)entry;
} }
old = tracepoint_entry_add_probe(entry, probe); old = tracepoint_entry_add_probe(entry, probe, data);
if (IS_ERR(old) && !entry->refcount) if (IS_ERR(old) && !entry->refcount)
remove_tracepoint(entry); remove_tracepoint(entry);
return old; return old;
...@@ -340,12 +348,12 @@ static void *tracepoint_add_probe(const char *name, void *probe) ...@@ -340,12 +348,12 @@ static void *tracepoint_add_probe(const char *name, void *probe)
* Returns 0 if ok, error value on error. * Returns 0 if ok, error value on error.
* The probe address must at least be aligned on the architecture pointer size. * The probe address must at least be aligned on the architecture pointer size.
*/ */
int tracepoint_probe_register(const char *name, void *probe) int tracepoint_probe_register(const char *name, void *probe, void *data)
{ {
void *old; struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex); mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe); old = tracepoint_add_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex); mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old)) if (IS_ERR(old))
return PTR_ERR(old); return PTR_ERR(old);
...@@ -356,15 +364,16 @@ int tracepoint_probe_register(const char *name, void *probe) ...@@ -356,15 +364,16 @@ int tracepoint_probe_register(const char *name, void *probe)
} }
EXPORT_SYMBOL_GPL(tracepoint_probe_register); EXPORT_SYMBOL_GPL(tracepoint_probe_register);
static void *tracepoint_remove_probe(const char *name, void *probe) static struct tracepoint_func *
tracepoint_remove_probe(const char *name, void *probe, void *data)
{ {
struct tracepoint_entry *entry; struct tracepoint_entry *entry;
void *old; struct tracepoint_func *old;
entry = get_tracepoint(name); entry = get_tracepoint(name);
if (!entry) if (!entry)
return ERR_PTR(-ENOENT); return ERR_PTR(-ENOENT);
old = tracepoint_entry_remove_probe(entry, probe); old = tracepoint_entry_remove_probe(entry, probe, data);
if (IS_ERR(old)) if (IS_ERR(old))
return old; return old;
if (!entry->refcount) if (!entry->refcount)
...@@ -382,12 +391,12 @@ static void *tracepoint_remove_probe(const char *name, void *probe) ...@@ -382,12 +391,12 @@ static void *tracepoint_remove_probe(const char *name, void *probe)
* itself uses stop_machine(), which insures that every preempt disabled section * itself uses stop_machine(), which insures that every preempt disabled section
* have finished. * have finished.
*/ */
int tracepoint_probe_unregister(const char *name, void *probe) int tracepoint_probe_unregister(const char *name, void *probe, void *data)
{ {
void *old; struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex); mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe); old = tracepoint_remove_probe(name, probe, data);
mutex_unlock(&tracepoints_mutex); mutex_unlock(&tracepoints_mutex);
if (IS_ERR(old)) if (IS_ERR(old))
return PTR_ERR(old); return PTR_ERR(old);
...@@ -418,12 +427,13 @@ static void tracepoint_add_old_probes(void *old) ...@@ -418,12 +427,13 @@ static void tracepoint_add_old_probes(void *old)
* *
* caller must call tracepoint_probe_update_all() * caller must call tracepoint_probe_update_all()
*/ */
int tracepoint_probe_register_noupdate(const char *name, void *probe) int tracepoint_probe_register_noupdate(const char *name, void *probe,
void *data)
{ {
void *old; struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex); mutex_lock(&tracepoints_mutex);
old = tracepoint_add_probe(name, probe); old = tracepoint_add_probe(name, probe, data);
if (IS_ERR(old)) { if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex); mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old); return PTR_ERR(old);
...@@ -441,12 +451,13 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate); ...@@ -441,12 +451,13 @@ EXPORT_SYMBOL_GPL(tracepoint_probe_register_noupdate);
* *
* caller must call tracepoint_probe_update_all() * caller must call tracepoint_probe_update_all()
*/ */
int tracepoint_probe_unregister_noupdate(const char *name, void *probe) int tracepoint_probe_unregister_noupdate(const char *name, void *probe,
void *data)
{ {
void *old; struct tracepoint_func *old;
mutex_lock(&tracepoints_mutex); mutex_lock(&tracepoints_mutex);
old = tracepoint_remove_probe(name, probe); old = tracepoint_remove_probe(name, probe, data);
if (IS_ERR(old)) { if (IS_ERR(old)) {
mutex_unlock(&tracepoints_mutex); mutex_unlock(&tracepoints_mutex);
return PTR_ERR(old); return PTR_ERR(old);
......
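The tracepoint.c changes above replace the bare void ** probe array with a NULL-terminated array of struct tracepoint_func, so each registered probe carries its own data pointer and duplicates are detected on the (func, data) pair. A rough sketch of how such an array is walked when a tracepoint fires; the real dispatch lives in the DECLARE_TRACE/DO_TRACE macros, and my_dispatch/my_probe_t are illustrative names:

        /* Sketch: walking a NULL-func-terminated tracepoint_func array. */
        typedef void (*my_probe_t)(void *data, struct task_struct *prev,
                                   struct task_struct *next);

        static void my_dispatch(struct tracepoint_func *funcs,
                                struct task_struct *prev, struct task_struct *next)
        {
                struct tracepoint_func *it;

                for (it = funcs; it && it->func; it++)
                        ((my_probe_t)it->func)(it->data, prev, next);   /* data goes first */
        }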
...@@ -172,12 +172,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location) ...@@ -172,12 +172,12 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
return; return;
} }
static void trace_kfree_skb_hit(struct sk_buff *skb, void *location) static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
{ {
trace_drop_common(skb, location); trace_drop_common(skb, location);
} }
static void trace_napi_poll_hit(struct napi_struct *napi) static void trace_napi_poll_hit(void *ignore, struct napi_struct *napi)
{ {
struct dm_hw_stat_delta *new_stat; struct dm_hw_stat_delta *new_stat;
...@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state) ...@@ -225,12 +225,12 @@ static int set_all_monitor_traces(int state)
switch (state) { switch (state) {
case TRACE_ON: case TRACE_ON:
rc |= register_trace_kfree_skb(trace_kfree_skb_hit); rc |= register_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= register_trace_napi_poll(trace_napi_poll_hit); rc |= register_trace_napi_poll(trace_napi_poll_hit, NULL);
break; break;
case TRACE_OFF: case TRACE_OFF:
rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit); rc |= unregister_trace_kfree_skb(trace_kfree_skb_hit, NULL);
rc |= unregister_trace_napi_poll(trace_napi_poll_hit); rc |= unregister_trace_napi_poll(trace_napi_poll_hit, NULL);
tracepoint_synchronize_unregister(); tracepoint_synchronize_unregister();
......
...@@ -7,7 +7,5 @@ ...@@ -7,7 +7,5 @@
DECLARE_TRACE(subsys_event, DECLARE_TRACE(subsys_event,
TP_PROTO(struct inode *inode, struct file *file), TP_PROTO(struct inode *inode, struct file *file),
TP_ARGS(inode, file)); TP_ARGS(inode, file));
DECLARE_TRACE(subsys_eventb, DECLARE_TRACE_NOARGS(subsys_eventb);
TP_PROTO(void),
TP_ARGS());
#endif #endif
...@@ -13,7 +13,8 @@ ...@@ -13,7 +13,8 @@
* Here the caller only guarantees locking for struct file and struct inode. * Here the caller only guarantees locking for struct file and struct inode.
* Locking must therefore be done in the probe to use the dentry. * Locking must therefore be done in the probe to use the dentry.
*/ */
static void probe_subsys_event(struct inode *inode, struct file *file) static void probe_subsys_event(void *ignore,
struct inode *inode, struct file *file)
{ {
path_get(&file->f_path); path_get(&file->f_path);
dget(file->f_path.dentry); dget(file->f_path.dentry);
...@@ -23,7 +24,7 @@ static void probe_subsys_event(struct inode *inode, struct file *file) ...@@ -23,7 +24,7 @@ static void probe_subsys_event(struct inode *inode, struct file *file)
path_put(&file->f_path); path_put(&file->f_path);
} }
static void probe_subsys_eventb(void) static void probe_subsys_eventb(void *ignore)
{ {
printk(KERN_INFO "Event B is encountered\n"); printk(KERN_INFO "Event B is encountered\n");
} }
...@@ -32,9 +33,9 @@ static int __init tp_sample_trace_init(void) ...@@ -32,9 +33,9 @@ static int __init tp_sample_trace_init(void)
{ {
int ret; int ret;
ret = register_trace_subsys_event(probe_subsys_event); ret = register_trace_subsys_event(probe_subsys_event, NULL);
WARN_ON(ret); WARN_ON(ret);
ret = register_trace_subsys_eventb(probe_subsys_eventb); ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL);
WARN_ON(ret); WARN_ON(ret);
return 0; return 0;
...@@ -44,8 +45,8 @@ module_init(tp_sample_trace_init); ...@@ -44,8 +45,8 @@ module_init(tp_sample_trace_init);
static void __exit tp_sample_trace_exit(void) static void __exit tp_sample_trace_exit(void)
{ {
unregister_trace_subsys_eventb(probe_subsys_eventb); unregister_trace_subsys_eventb(probe_subsys_eventb, NULL);
unregister_trace_subsys_event(probe_subsys_event); unregister_trace_subsys_event(probe_subsys_event, NULL);
tracepoint_synchronize_unregister(); tracepoint_synchronize_unregister();
} }
......
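The sample module above also shows the effect of DECLARE_TRACE_NOARGS: a tracepoint declared without arguments still hands its probe the per-registration data pointer, so the empty TP_PROTO/TP_ARGS special case goes away. A hedged sketch of a matching probe and its registration (my_eventb_probe and my_init are illustrative names):

        /* Sketch: probe for a DECLARE_TRACE_NOARGS tracepoint. */
        static void my_eventb_probe(void *data)
        {
                pr_info("subsys_eventb fired, private data at %p\n", data);
        }

        static int __init my_init(void)
        {
                /* NULL: this probe keeps no per-registration state */
                return register_trace_subsys_eventb(my_eventb_probe, NULL);
        }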
...@@ -12,7 +12,8 @@ ...@@ -12,7 +12,8 @@
* Here the caller only guarantees locking for struct file and struct inode. * Here the caller only guarantees locking for struct file and struct inode.
* Locking must therefore be done in the probe to use the dentry. * Locking must therefore be done in the probe to use the dentry.
*/ */
static void probe_subsys_event(struct inode *inode, struct file *file) static void probe_subsys_event(void *ignore,
struct inode *inode, struct file *file)
{ {
printk(KERN_INFO "Event is encountered with inode number %lu\n", printk(KERN_INFO "Event is encountered with inode number %lu\n",
inode->i_ino); inode->i_ino);
...@@ -22,7 +23,7 @@ static int __init tp_sample_trace_init(void) ...@@ -22,7 +23,7 @@ static int __init tp_sample_trace_init(void)
{ {
int ret; int ret;
ret = register_trace_subsys_event(probe_subsys_event); ret = register_trace_subsys_event(probe_subsys_event, NULL);
WARN_ON(ret); WARN_ON(ret);
return 0; return 0;
...@@ -32,7 +33,7 @@ module_init(tp_sample_trace_init); ...@@ -32,7 +33,7 @@ module_init(tp_sample_trace_init);
static void __exit tp_sample_trace_exit(void) static void __exit tp_sample_trace_exit(void)
{ {
unregister_trace_subsys_event(probe_subsys_event); unregister_trace_subsys_event(probe_subsys_event, NULL);
tracepoint_synchronize_unregister(); tracepoint_synchronize_unregister();
} }
......