Commit 9837acff authored by Linus Torvalds

Merge tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing updates from Steven Rostedt:
 "This set has a few minor updates, but the big change is the redesign
  of the trampoline logic.

  The trampoline logic of 3.17 required a descriptor for every function
  that is registered to be traced and uses a trampoline.  Currently,
  only the function graph tracer uses a trampoline, but if you were to
  trace all 32,000 (give or take a few thousand) functions with the
  function graph tracer, it would create 32,000 descriptors just to let
  us know that each of those functions has a trampoline associated with
  it.  That takes up a fair bit of memory when there's a better way to
  do it.

  The redesign now reuses the hash tables of the ftrace_ops (the
  structure that registers the function graph tracer).  The hash tables
  tell ftrace what the tracer wants to trace or doesn't want to trace.
  There are two of them: one lists what to trace, the other lists what
  not to trace.  If the first one is empty, it means all functions
  should be traced, otherwise only the ones that are listed should be.
  If the second one is empty, all functions may be traced; if any are
  listed, then those should not be traced even if they appear in the
  first hash table.

  It took a bit of massaging, but now these hashes can be used to keep
  track of what has a trampoline and what does not, and allows the
  ftrace accounting to work.  Now we can trace all functions when using
  the function graph trampoline, and avoid needing to create any special
  descriptors to hold all the functions that are being traced"
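
The two-hash rule above is compact enough to model in a few lines of plain C. Below is a toy sketch of just the semantics (stand-in types and lookup, not kernel code); the kernel's real version is the hash_contains_ip() helper added in the ftrace.c diff further down:

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-in for struct ftrace_hash: a flat list of instruction ips. */
struct toy_hash {
	const unsigned long *ips;	/* NULL/zero count means "empty hash" */
	int count;
};

static bool toy_hash_empty(const struct toy_hash *hash)
{
	return !hash || !hash->count;
}

static bool toy_lookup_ip(const struct toy_hash *hash, unsigned long ip)
{
	for (int i = 0; i < hash->count; i++)
		if (hash->ips[i] == ip)
			return true;
	return false;
}

/*
 * The rule from the pull message: an empty filter hash matches every
 * function, a non-empty one matches only the listed functions, and the
 * notrace hash removes its entries from whatever the filter allowed.
 */
static bool toy_contains_ip(unsigned long ip,
			    const struct toy_hash *filter,
			    const struct toy_hash *notrace)
{
	return (toy_hash_empty(filter) || toy_lookup_ip(filter, ip)) &&
	       (toy_hash_empty(notrace) || !toy_lookup_ip(notrace, ip));
}

int main(void)
{
	const unsigned long filtered[] = { 0x1000, 0x2000 };
	const unsigned long skipped[]  = { 0x2000 };
	struct toy_hash filter  = { filtered, 2 };
	struct toy_hash notrace = { skipped, 1 };
	struct toy_hash empty   = { NULL, 0 };

	/* Listed in filter, not in notrace: traced. */
	printf("%d\n", toy_contains_ip(0x1000, &filter, &notrace)); /* 1 */
	/* Listed in both: notrace wins. */
	printf("%d\n", toy_contains_ip(0x2000, &filter, &notrace)); /* 0 */
	/* Empty filter matches everything not in notrace. */
	printf("%d\n", toy_contains_ip(0x3000, &empty, &notrace));  /* 1 */
	return 0;
}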

* tag 'trace-3.18' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  ftrace: Only disable ftrace_enabled to test buffer in selftest
  ftrace: Add sanity check when unregistering last ftrace_ops
  kernel: trace_syscalls: Replace rcu_assign_pointer() with RCU_INIT_POINTER()
  tracing: generate RCU warnings even when tracepoints are disabled
  ftrace: Replace tramp_hash with old_*_hash to save space
  ftrace: Annotate the ops operation on update
  ftrace: Grab any ops for a rec for enabled_functions output
  ftrace: Remove freeing of old_hash from ftrace_hash_move()
  ftrace: Set callback to ftrace_stub when no ops are registered
  ftrace: Add helper function ftrace_ops_get_func()
  ftrace: Add separate function for non recursive callbacks
parents ca321885 3ddee63a
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -56,6 +56,8 @@ struct ftrace_ops;
 typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
+
 /*
  * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
  * set in the flags member.
@@ -89,6 +91,9 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
  * INITIALIZED - The ftrace_ops has already been initialized (first use time
  *            register_ftrace_function() is called, it will initialized the ops)
  * DELETED - The ops are being deleted, do not let them be registered again.
+ * ADDING  - The ops is in the process of being added.
+ * REMOVING - The ops is in the process of being removed.
+ * MODIFYING - The ops is in the process of changing its filter functions.
  */
 enum {
	FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -100,6 +105,9 @@ enum {
	FTRACE_OPS_FL_STUB = 1 << 6,
	FTRACE_OPS_FL_INITIALIZED = 1 << 7,
	FTRACE_OPS_FL_DELETED = 1 << 8,
+	FTRACE_OPS_FL_ADDING = 1 << 9,
+	FTRACE_OPS_FL_REMOVING = 1 << 10,
+	FTRACE_OPS_FL_MODIFYING = 1 << 11,
 };

 #ifdef CONFIG_DYNAMIC_FTRACE
@@ -132,7 +140,7 @@ struct ftrace_ops {
	int nr_trampolines;
	struct ftrace_ops_hash local_hash;
	struct ftrace_ops_hash *func_hash;
-	struct ftrace_hash *tramp_hash;
+	struct ftrace_ops_hash old_hash;
	unsigned long trampoline;
 #endif
 };
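
The last hunk swaps the single tramp_hash pointer for a second ftrace_ops_hash, the same container already used for local_hash and func_hash. Its shape can be reconstructed from the members these patches touch (filter_hash, notrace_hash, and the regex_lock taken in the ftrace.c hunks below); shown here only for orientation:

/* Reconstructed for reference from the fields used in this series;
 * kernel context assumed, so this fragment is illustrative only.
 * The real definition lives in include/linux/ftrace.h. */
struct ftrace_ops_hash {
	struct ftrace_hash	*notrace_hash;
	struct ftrace_hash	*filter_hash;
	struct mutex		regex_lock;
};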
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -157,6 +157,12 @@ extern void syscall_unregfunc(void);
  * Make sure the alignment of the structure in the __tracepoints section will
  * not add unwanted padding between the beginning of the section and the
  * structure. Force alignment to the same alignment as the section start.
+ *
+ * When lockdep is enabled, we make sure to always do the RCU portions of
+ * the tracepoint code, regardless of whether tracing is on or we match the
+ * condition. This lets us find RCU issues triggered with tracepoints even
+ * when this tracepoint is off. This code has no purpose other than poking
+ * RCU a bit.
  */
 #define __DECLARE_TRACE(name, proto, args, cond, data_proto, data_args) \
	extern struct tracepoint __tracepoint_##name;			\
@@ -167,6 +173,11 @@ extern void syscall_unregfunc(void);
				TP_PROTO(data_proto),			\
				TP_ARGS(data_args),			\
				TP_CONDITION(cond),,);			\
+		if (IS_ENABLED(CONFIG_LOCKDEP)) {			\
+			rcu_read_lock_sched_notrace();			\
+			rcu_dereference_sched(__tracepoint_##name.funcs);\
+			rcu_read_unlock_sched_notrace();		\
+		}							\
	}								\
	__DECLARE_TRACE_RCU(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(cond), PARAMS(data_proto), PARAMS(data_args))	\
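
Note that the added IS_ENABLED(CONFIG_LOCKDEP) test is an ordinary C conditional on a compile-time constant rather than an #ifdef, so the RCU calls are type-checked in every configuration and simply compile away when lockdep is off. A toy illustration of that pattern (stand-in config macro, not the kernel's kconfig machinery):

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(CONFIG_*) machinery: a macro
 * that is a plain compile-time constant, 0 or 1. */
#define CONFIG_EXTRA_CHECKS 0

static void extra_checks(void)
{
	puts("running extra checks");
}

static void do_work(void)
{
	/*
	 * Unlike an #ifdef, the branch body is still compiled and
	 * type-checked in every configuration; with the constant 0
	 * the compiler simply discards the call.
	 */
	if (CONFIG_EXTRA_CHECKS)
		extra_checks();
	puts("work done");
}

int main(void)
{
	do_work();
	return 0;
}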
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -113,6 +113,9 @@ ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
 static struct ftrace_ops global_ops;
 static struct ftrace_ops control_ops;

+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+				   struct ftrace_ops *op, struct pt_regs *regs);
+
 #if ARCH_SUPPORTS_FTRACE_OPS
 static void ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip,
				 struct ftrace_ops *op, struct pt_regs *regs);
@@ -250,19 +253,25 @@ static void update_ftrace_function(void)
 {
	ftrace_func_t func;

+	/*
+	 * Prepare the ftrace_ops that the arch callback will use.
+	 * If there's only one ftrace_ops registered, the ftrace_ops_list
+	 * will point to the ops we want.
+	 */
+	set_function_trace_op = ftrace_ops_list;
+
+	/* If there's no ftrace_ops registered, just call the stub function */
+	if (ftrace_ops_list == &ftrace_list_end) {
+		func = ftrace_stub;
+
	/*
	 * If we are at the end of the list and this ops is
	 * recursion safe and not dynamic and the arch supports passing ops,
	 * then have the mcount trampoline call the function directly.
	 */
-	if (ftrace_ops_list == &ftrace_list_end ||
-	    (ftrace_ops_list->next == &ftrace_list_end &&
-	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
-	     (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
-	     !FTRACE_FORCE_LIST_FUNC)) {
-		/* Set the ftrace_ops that the arch callback uses */
-		set_function_trace_op = ftrace_ops_list;
-		func = ftrace_ops_list->func;
+	} else if (ftrace_ops_list->next == &ftrace_list_end) {
+		func = ftrace_ops_get_func(ftrace_ops_list);
+
	} else {
		/* Just use the default ftrace_ops */
		set_function_trace_op = &ftrace_list_end;
@@ -1048,6 +1057,12 @@ static struct pid * const ftrace_swapper_pid = &init_struct_pid;

 static struct ftrace_ops *removed_ops;

+/*
+ * Set when doing a global update, like enabling all recs or disabling them.
+ * It is not set when just updating a single ftrace_ops.
+ */
+static bool update_all_ops;
+
 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
 # error Dynamic ftrace depends on MCOUNT_RECORD
 #endif
@@ -1307,7 +1322,6 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
	struct ftrace_func_entry *entry;
	struct hlist_node *tn;
	struct hlist_head *hhd;
-	struct ftrace_hash *old_hash;
	struct ftrace_hash *new_hash;
	int size = src->count;
	int bits = 0;
@@ -1352,15 +1366,28 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
	 */
	ftrace_hash_rec_disable_modify(ops, enable);

-	old_hash = *dst;
	rcu_assign_pointer(*dst, new_hash);
-	free_ftrace_hash_rcu(old_hash);

	ftrace_hash_rec_enable_modify(ops, enable);

	return 0;
 }

+static bool hash_contains_ip(unsigned long ip,
+			     struct ftrace_ops_hash *hash)
+{
+	/*
+	 * The function record is a match if it exists in the filter
+	 * hash and not in the notrace hash. Note, an empty hash is
+	 * considered a match for the filter hash, but an empty
+	 * notrace hash is considered not in the notrace hash.
+	 */
+	return (ftrace_hash_empty(hash->filter_hash) ||
+		ftrace_lookup_ip(hash->filter_hash, ip)) &&
+		(ftrace_hash_empty(hash->notrace_hash) ||
+		!ftrace_lookup_ip(hash->notrace_hash, ip));
+}
+
 /*
  * Test the hashes for this ops to see if we want to call
  * the ops->func or not.
@@ -1376,8 +1403,7 @@ ftrace_hash_move(struct ftrace_ops *ops, int enable,
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
 {
-	struct ftrace_hash *filter_hash;
-	struct ftrace_hash *notrace_hash;
+	struct ftrace_ops_hash hash;
	int ret;

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
@@ -1390,13 +1416,10 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip, void *regs)
		return 0;
 #endif

-	filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
-	notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);
+	hash.filter_hash = rcu_dereference_raw_notrace(ops->func_hash->filter_hash);
+	hash.notrace_hash = rcu_dereference_raw_notrace(ops->func_hash->notrace_hash);

-	if ((ftrace_hash_empty(filter_hash) ||
-	     ftrace_lookup_ip(filter_hash, ip)) &&
-	    (ftrace_hash_empty(notrace_hash) ||
-	     !ftrace_lookup_ip(notrace_hash, ip)))
+	if (hash_contains_ip(ip, &hash))
		ret = 1;
	else
		ret = 0;
@@ -1508,46 +1531,6 @@ static bool test_rec_ops_needs_regs(struct dyn_ftrace *rec)
	return keep_regs;
 }

-static void ftrace_remove_tramp(struct ftrace_ops *ops,
-				struct dyn_ftrace *rec)
-{
-	/* If TRAMP is not set, no ops should have a trampoline for this */
-	if (!(rec->flags & FTRACE_FL_TRAMP))
-		return;
-
-	rec->flags &= ~FTRACE_FL_TRAMP;
-
-	if ((!ftrace_hash_empty(ops->func_hash->filter_hash) &&
-	     !ftrace_lookup_ip(ops->func_hash->filter_hash, rec->ip)) ||
-	    ftrace_lookup_ip(ops->func_hash->notrace_hash, rec->ip))
-		return;
-	/*
-	 * The tramp_hash entry will be removed at time
-	 * of update.
-	 */
-	ops->nr_trampolines--;
-}
-
-static void ftrace_clear_tramps(struct dyn_ftrace *rec, struct ftrace_ops *ops)
-{
-	struct ftrace_ops *op;
-
-	/* If TRAMP is not set, no ops should have a trampoline for this */
-	if (!(rec->flags & FTRACE_FL_TRAMP))
-		return;
-
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		/*
-		 * This function is called to clear other tramps
-		 * not the one that is being updated.
-		 */
-		if (op == ops)
-			continue;
-		if (op->nr_trampolines)
-			ftrace_remove_tramp(op, rec);
-	} while_for_each_ftrace_op(op);
-}
-
 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				     int filter_hash,
				     bool inc)
@@ -1636,18 +1619,16 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
			 * function, and the ops has a trampoline registered
			 * for it, then we can call it directly.
			 */
-			if (ftrace_rec_count(rec) == 1 && ops->trampoline) {
+			if (ftrace_rec_count(rec) == 1 && ops->trampoline)
				rec->flags |= FTRACE_FL_TRAMP;
-				ops->nr_trampolines++;
-			} else {
+			else
				/*
				 * If we are adding another function callback
				 * to this function, and the previous had a
				 * custom trampoline in use, then we need to go
				 * back to the default trampoline.
				 */
-				ftrace_clear_tramps(rec, ops);
-			}
+				rec->flags &= ~FTRACE_FL_TRAMP;

			/*
			 * If any ops wants regs saved for this function
@@ -1660,9 +1641,6 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
			return;
		rec->flags--;

-		if (ops->trampoline && !ftrace_rec_count(rec))
-			ftrace_remove_tramp(ops, rec);
-
		/*
		 * If the rec had REGS enabled and the ops that is
		 * being removed had REGS set, then see if there is
@@ -1676,6 +1654,17 @@ static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
				rec->flags &= ~FTRACE_FL_REGS;
		}

+		/*
+		 * If the rec had TRAMP enabled, then it needs to
+		 * be cleared, as TRAMP can only be enabled if
+		 * there is only a single ops attached to it.
+		 * In other words, always disable it on decrementing.
+		 * In the future, we may set it if rec count is
+		 * decremented to one, and the ops that is left
+		 * has a trampoline.
+		 */
+		rec->flags &= ~FTRACE_FL_TRAMP;
+
		/*
		 * flags will be cleared in ftrace_check_record()
		 * if rec count is zero.
@@ -1894,22 +1883,73 @@ int ftrace_test_record(struct dyn_ftrace *rec, int enable)
	return ftrace_check_record(rec, enable, 0);
 }

+static struct ftrace_ops *
+ftrace_find_tramp_ops_any(struct dyn_ftrace *rec)
+{
+	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;
+
+	do_for_each_ftrace_op(op, ftrace_ops_list) {
+
+		if (!op->trampoline)
+			continue;
+
+		if (hash_contains_ip(ip, op->func_hash))
+			return op;
+	} while_for_each_ftrace_op(op);
+
+	return NULL;
+}
+
 static struct ftrace_ops *
 ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
 {
	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;

-	/* Removed ops need to be tested first */
-	if (removed_ops && removed_ops->tramp_hash) {
-		if (ftrace_lookup_ip(removed_ops->tramp_hash, rec->ip))
+	/*
+	 * Need to check removed ops first.
+	 * If they are being removed, and this rec has a tramp,
+	 * and this rec is in the ops list, then it would be the
+	 * one with the tramp.
+	 */
+	if (removed_ops) {
+		if (hash_contains_ip(ip, &removed_ops->old_hash))
			return removed_ops;
	}

+	/*
+	 * Need to find the current trampoline for a rec.
+	 * Now, a trampoline is only attached to a rec if there
+	 * was a single 'ops' attached to it. But this can be called
+	 * when we are adding another op to the rec or removing the
+	 * current one. Thus, if the op is being added, we can
+	 * ignore it because it hasn't attached itself to the rec
+	 * yet. That means we just need to find the op that has a
+	 * trampoline and is not being added.
+	 */
	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		if (!op->tramp_hash)
+
+		if (!op->trampoline)
			continue;

-		if (ftrace_lookup_ip(op->tramp_hash, rec->ip))
+		/*
+		 * If the ops is being added, it hasn't gotten to
+		 * the point to be removed from this tree yet.
+		 */
+		if (op->flags & FTRACE_OPS_FL_ADDING)
+			continue;
+
+		/*
+		 * If the ops is not being added and has a trampoline,
+		 * then it must be the one that we want!
+		 */
+		if (hash_contains_ip(ip, op->func_hash))
+			return op;
+
+		/* If the ops is being modified, it may be in the old hash. */
+		if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
+		    hash_contains_ip(ip, &op->old_hash))
			return op;
+
	} while_for_each_ftrace_op(op);
@@ -1921,10 +1961,11 @@ static struct ftrace_ops *
 ftrace_find_tramp_ops_new(struct dyn_ftrace *rec)
 {
	struct ftrace_ops *op;
+	unsigned long ip = rec->ip;

	do_for_each_ftrace_op(op, ftrace_ops_list) {
		/* pass rec in as regs to have non-NULL val */
-		if (ftrace_ops_test(op, rec->ip, rec))
+		if (hash_contains_ip(ip, op->func_hash))
			return op;
	} while_for_each_ftrace_op(op);
@@ -2231,92 +2272,6 @@ void __weak arch_ftrace_update_code(int command)
	ftrace_run_stop_machine(command);
 }

-static int ftrace_save_ops_tramp_hash(struct ftrace_ops *ops)
-{
-	struct ftrace_page *pg;
-	struct dyn_ftrace *rec;
-	int size, bits;
-	int ret;
-
-	size = ops->nr_trampolines;
-	bits = 0;
-	/*
-	 * Make the hash size about 1/2 the # found
-	 */
-	for (size /= 2; size; size >>= 1)
-		bits++;
-
-	ops->tramp_hash = alloc_ftrace_hash(bits);
-	/*
-	 * TODO: a failed allocation is going to screw up
-	 * the accounting of what needs to be modified
-	 * and not. For now, we kill ftrace if we fail
-	 * to allocate here. But there are ways around this,
-	 * but that will take a little more work.
-	 */
-	if (!ops->tramp_hash)
-		return -ENOMEM;
-
-	do_for_each_ftrace_rec(pg, rec) {
-		if (ftrace_rec_count(rec) == 1 &&
-		    ftrace_ops_test(ops, rec->ip, rec)) {
-
-			/*
-			 * If another ops adds to a rec, the rec will
-			 * lose its trampoline and never get it back
-			 * until all ops are off of it.
-			 */
-			if (!(rec->flags & FTRACE_FL_TRAMP))
-				continue;
-
-			/* This record had better have a trampoline */
-			if (FTRACE_WARN_ON(!(rec->flags & FTRACE_FL_TRAMP_EN)))
-				return -1;
-
-			ret = add_hash_entry(ops->tramp_hash, rec->ip);
-			if (ret < 0)
-				return ret;
-		}
-	} while_for_each_ftrace_rec();
-
-	/* The number of recs in the hash must match nr_trampolines */
-	if (FTRACE_WARN_ON(ops->tramp_hash->count != ops->nr_trampolines))
-		pr_warn("count=%ld trampolines=%d\n",
-			ops->tramp_hash->count,
-			ops->nr_trampolines);
-
-	return 0;
-}
-
-static int ftrace_save_tramp_hashes(void)
-{
-	struct ftrace_ops *op;
-	int ret;
-
-	/*
-	 * Now that any trampoline is being used, we need to save the
-	 * hashes for the ops that have them. This allows the mapping
-	 * back from the record to the ops that has the trampoline to
-	 * know what code is being replaced. Modifying code must always
-	 * verify what it is changing.
-	 */
-	do_for_each_ftrace_op(op, ftrace_ops_list) {
-		/* The tramp_hash is recreated each time. */
-		free_ftrace_hash(op->tramp_hash);
-		op->tramp_hash = NULL;
-
-		if (op->nr_trampolines) {
-			ret = ftrace_save_ops_tramp_hash(op);
-			if (ret)
-				return ret;
-		}
-	} while_for_each_ftrace_op(op);
-
-	return 0;
-}
-
 static void ftrace_run_update_code(int command)
 {
	int ret;
@@ -2336,9 +2291,13 @@ static void ftrace_run_update_code(int command)

	ret = ftrace_arch_code_modify_post_process();
	FTRACE_WARN_ON(ret);
+}

-	ret = ftrace_save_tramp_hashes();
-	FTRACE_WARN_ON(ret);
+static void ftrace_run_modify_code(struct ftrace_ops *ops, int command)
+{
+	ops->flags |= FTRACE_OPS_FL_MODIFYING;
+	ftrace_run_update_code(command);
+	ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
 }

 static ftrace_func_t saved_ftrace_func;
@@ -2362,6 +2321,13 @@ static void ftrace_startup_enable(int command)
	ftrace_run_update_code(command);
 }

+static void ftrace_startup_all(int command)
+{
+	update_all_ops = true;
+	ftrace_startup_enable(command);
+	update_all_ops = false;
+}
+
 static int ftrace_startup(struct ftrace_ops *ops, int command)
 {
	int ret;
@@ -2376,12 +2342,22 @@ static int ftrace_startup(struct ftrace_ops *ops, int command)
	ftrace_start_up++;
	command |= FTRACE_UPDATE_CALLS;

-	ops->flags |= FTRACE_OPS_FL_ENABLED;
+	/*
+	 * Note that ftrace probes use this to start up
+	 * and modify functions they will probe. But we still
+	 * set the ADDING flag for modification, as probes
+	 * do not have trampolines. If they add them in the
+	 * future, then the probes will need to distinguish
+	 * between adding and updating probes.
+	 */
+	ops->flags |= FTRACE_OPS_FL_ENABLED | FTRACE_OPS_FL_ADDING;

	ftrace_hash_rec_enable(ops, 1);

	ftrace_startup_enable(command);

+	ops->flags &= ~FTRACE_OPS_FL_ADDING;
+
	return 0;
 }
@@ -2431,11 +2407,35 @@ static int ftrace_shutdown(struct ftrace_ops *ops, int command)
	 * If the ops uses a trampoline, then it needs to be
	 * tested first on update.
	 */
+	ops->flags |= FTRACE_OPS_FL_REMOVING;
	removed_ops = ops;

+	/* The trampoline logic checks the old hashes */
+	ops->old_hash.filter_hash = ops->func_hash->filter_hash;
+	ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
+
	ftrace_run_update_code(command);

+	/*
+	 * If there's no more ops registered with ftrace, run a
+	 * sanity check to make sure all rec flags are cleared.
+	 */
+	if (ftrace_ops_list == &ftrace_list_end) {
+		struct ftrace_page *pg;
+		struct dyn_ftrace *rec;
+
+		do_for_each_ftrace_rec(pg, rec) {
+			if (FTRACE_WARN_ON_ONCE(rec->flags))
+				pr_warn(" %pS flags:%lx\n",
+					(void *)rec->ip, rec->flags);
+		} while_for_each_ftrace_rec();
+	}
+
+	ops->old_hash.filter_hash = NULL;
+	ops->old_hash.notrace_hash = NULL;
+
	removed_ops = NULL;
+	ops->flags &= ~FTRACE_OPS_FL_REMOVING;

	/*
	 * Dynamic ops may be freed, we must make sure that all
@@ -2960,8 +2960,8 @@ static int t_show(struct seq_file *m, void *v)
		if (rec->flags & FTRACE_FL_TRAMP_EN) {
			struct ftrace_ops *ops;

-			ops = ftrace_find_tramp_ops_curr(rec);
-			if (ops && ops->trampoline)
+			ops = ftrace_find_tramp_ops_any(rec);
+			if (ops)
				seq_printf(m, "\ttramp: %pS",
					   (void *)ops->trampoline);
			else
@@ -3348,7 +3348,7 @@ static void __enable_ftrace_function_probe(void)
	if (ftrace_probe_registered) {
		/* still need to update the function call sites */
		if (ftrace_enabled)
-			ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+			ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS);
		return;
	}
@@ -3399,6 +3399,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
 {
	struct ftrace_func_probe *entry;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+	struct ftrace_hash *old_hash = *orig_hash;
	struct ftrace_hash *hash;
	struct ftrace_page *pg;
	struct dyn_ftrace *rec;
@@ -3417,7 +3418,7 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
	mutex_lock(&trace_probe_ops.func_hash->regex_lock);

-	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
+	hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, old_hash);
	if (!hash) {
		count = -ENOMEM;
		goto out;
@@ -3476,7 +3477,9 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
	} while_for_each_ftrace_rec();

	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
-	if (ret < 0)
+	if (!ret)
+		free_ftrace_hash_rcu(old_hash);
+	else
		count = ret;

	__enable_ftrace_function_probe();
@@ -3503,6 +3506,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
	struct ftrace_func_probe *entry;
	struct ftrace_func_probe *p;
	struct ftrace_hash **orig_hash = &trace_probe_ops.func_hash->filter_hash;
+	struct ftrace_hash *old_hash = *orig_hash;
	struct list_head free_list;
	struct ftrace_hash *hash;
	struct hlist_node *tmp;
@@ -3510,6 +3514,7 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
	int type = MATCH_FULL;
	int i, len = 0;
	char *search;
+	int ret;

	if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
		glob = NULL;
@@ -3568,8 +3573,11 @@ __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
	 * Remove after the disable is called. Otherwise, if the last
	 * probe is removed, a null hash means *all enabled*.
	 */
-	ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
+	ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
	synchronize_sched();
+	if (!ret)
+		free_ftrace_hash_rcu(old_hash);
+
	list_for_each_entry_safe(entry, p, &free_list, free_list) {
		list_del(&entry->free_list);
		ftrace_free_entry(entry);
@@ -3759,7 +3767,7 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
 static void ftrace_ops_update_code(struct ftrace_ops *ops)
 {
	if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
-		ftrace_run_update_code(FTRACE_UPDATE_CALLS);
+		ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS);
 }

 static int
@@ -3767,6 +3775,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
		unsigned long ip, int remove, int reset, int enable)
 {
	struct ftrace_hash **orig_hash;
+	struct ftrace_hash *old_hash;
	struct ftrace_hash *hash;
	int ret;
@@ -3801,10 +3810,12 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
	}

	mutex_lock(&ftrace_lock);
+	old_hash = *orig_hash;
	ret = ftrace_hash_move(ops, enable, orig_hash, hash);
-	if (!ret)
+	if (!ret) {
		ftrace_ops_update_code(ops);
+		free_ftrace_hash_rcu(old_hash);
+	}
	mutex_unlock(&ftrace_lock);

  out_regex_unlock:
@@ -4013,6 +4024,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
	struct seq_file *m = (struct seq_file *)file->private_data;
	struct ftrace_iterator *iter;
	struct ftrace_hash **orig_hash;
+	struct ftrace_hash *old_hash;
	struct trace_parser *parser;
	int filter_hash;
	int ret;
@@ -4042,11 +4054,13 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
			orig_hash = &iter->ops->func_hash->notrace_hash;

		mutex_lock(&ftrace_lock);
+		old_hash = *orig_hash;
		ret = ftrace_hash_move(iter->ops, filter_hash,
				       orig_hash, iter->hash);
-		if (!ret)
+		if (!ret) {
			ftrace_ops_update_code(iter->ops);
+			free_ftrace_hash_rcu(old_hash);
+		}

		mutex_unlock(&ftrace_lock);
	}
@@ -4678,6 +4692,7 @@ core_initcall(ftrace_nodyn_init);
 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
 static inline void ftrace_startup_enable(int command) { }
+static inline void ftrace_startup_all(int command) { }
 /* Keep as macros so we do not need to define the commands */
 # define ftrace_startup(ops, command)					\
	({								\
@@ -4827,6 +4842,56 @@ static void ftrace_ops_no_ops(unsigned long ip, unsigned long parent_ip)
 }
 #endif

+/*
+ * If there's only one function registered but it does not support
+ * recursion, this function will be called by the mcount trampoline.
+ * This function will handle recursion protection.
+ */
+static void ftrace_ops_recurs_func(unsigned long ip, unsigned long parent_ip,
+				   struct ftrace_ops *op, struct pt_regs *regs)
+{
+	int bit;
+
+	bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX);
+	if (bit < 0)
+		return;
+
+	op->func(ip, parent_ip, op, regs);
+
+	trace_clear_recursion(bit);
+}
+
+/**
+ * ftrace_ops_get_func - get the function a trampoline should call
+ * @ops: the ops to get the function for
+ *
+ * Normally the mcount trampoline will call the ops->func, but there
+ * are times that it should not. For example, if the ops does not
+ * have its own recursion protection, then it should call the
+ * ftrace_ops_recurs_func() instead.
+ *
+ * Returns the function that the trampoline should call for @ops.
+ */
+ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops)
+{
+	/*
+	 * If this is a dynamic ops or we force list func,
+	 * then it needs to call the list anyway.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC || FTRACE_FORCE_LIST_FUNC)
+		return ftrace_ops_list_func;
+
+	/*
+	 * If the func handles its own recursion, call it directly.
+	 * Otherwise call the recursion protected function that
+	 * will call the ftrace ops function.
+	 */
+	if (!(ops->flags & FTRACE_OPS_FL_RECURSION_SAFE))
+		return ftrace_ops_recurs_func;
+
+	return ops->func;
+}
+
 static void clear_ftrace_swapper(void)
 {
	struct task_struct *p;
@@ -4927,7 +4992,8 @@ static int ftrace_pid_add(int p)
	set_ftrace_pid_task(pid);

	ftrace_update_pid_func();
-	ftrace_startup_enable(0);
+
+	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
	return 0;
@@ -4956,7 +5022,7 @@ static void ftrace_pid_reset(void)
	}

	ftrace_update_pid_func();
-	ftrace_startup_enable(0);
+	ftrace_startup_all(0);

	mutex_unlock(&ftrace_lock);
 }
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -382,6 +382,8 @@ static int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
+
+	ftrace_enabled = 1;
	tracing_start();

	/* we should only have one item */
@@ -679,6 +681,8 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
	/* check the trace buffer */
	ret = trace_test_buffer(&tr->trace_buffer, &count);
+
+	ftrace_enabled = 1;
	trace->reset(tr);
	tracing_start();
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -425,7 +425,7 @@ static void unreg_event_syscall_enter(struct ftrace_event_file *file,
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_enter--;
-	rcu_assign_pointer(tr->enter_syscall_files[num], NULL);
+	RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
	if (!tr->sys_refcount_enter)
		unregister_trace_sys_enter(ftrace_syscall_enter, tr);
	mutex_unlock(&syscall_trace_lock);
@@ -463,7 +463,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file,
		return;
	mutex_lock(&syscall_trace_lock);
	tr->sys_refcount_exit--;
-	rcu_assign_pointer(tr->exit_syscall_files[num], NULL);
+	RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
	if (!tr->sys_refcount_exit)
		unregister_trace_sys_exit(ftrace_syscall_exit, tr);
	mutex_unlock(&syscall_trace_lock);
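
One pattern recurs across the ftrace.c hunks above: ftrace_hash_move() no longer frees the hash it replaces, so each caller now snapshots old_hash = *orig_hash before the move and frees it only if the move succeeded, after readers can no longer hold the old pointer (__unregister_ftrace_function_probe() waits in synchronize_sched() before the free). A single-threaded toy rendering of that ordering, with the kernel calls named in comments (stand-in types, not ftrace code):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct hash { char name[32]; };

static struct hash *current_hash;	/* plays the role of *orig_hash */

/* Stand-in for ftrace_hash_move(): publish the new hash, but leave
 * freeing the old one to the caller, as the 3.18 change does. */
static int hash_move(struct hash **dst, struct hash *new_hash)
{
	*dst = new_hash;	/* kernel: rcu_assign_pointer(*dst, new_hash) */
	return 0;
}

int main(void)
{
	struct hash *new_hash = malloc(sizeof(*new_hash));
	struct hash *old_hash;

	current_hash = malloc(sizeof(*current_hash));
	strcpy(current_hash->name, "old filters");
	strcpy(new_hash->name, "new filters");

	old_hash = current_hash;	/* old_hash = *orig_hash */
	if (!hash_move(&current_hash, new_hash)) {
		/* kernel: synchronize_sched() here when readers may still
		 * hold the old pointer, then free_ftrace_hash_rcu(old_hash) */
		free(old_hash);
	}
	printf("active: %s\n", current_hash->name);
	free(current_hash);
	return 0;
}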