Commit 594d2815 authored by Linus Torvalds

Merge tag 'trace-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace

Pull tracing updates from Steven Rostedt:

 - Remove unused ftrace_direct_funcs variables

 - Fix a possible NULL pointer dereference race in eventfs

 - Update do_div() usage in trace event benchmark test

 - Speed up direct function registration with an asynchronous RCU
   callback (a short sketch of the pattern follows this list).

   The synchronization was done in the registration code and this caused
   delays when registering direct callbacks. Move the freeing of the old
   hash to a call_rcu_tasks() callback so that registration is no longer
   delayed.

 - Replace simple_strtoul() usage with kstrtoul()
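
A minimal sketch of the deferred-free pattern described above, with a
hypothetical example_hash structure standing in for struct ftrace_hash
(the names below are illustrative, not the ftrace code):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_hash {
	struct rcu_head	rcu;	/* embedded so call_rcu() can queue us */
	unsigned long	data;
};

static void example_free_cb(struct rcu_head *rhp)
{
	/* Runs only after a full grace period, so no reader can still
	 * hold a reference to the old hash. */
	kfree(container_of(rhp, struct example_hash, rcu));
}

static void example_replace(struct example_hash *old_hash)
{
	/* Queue the old hash for freeing instead of blocking in
	 * synchronize_rcu(); the registering caller returns at once. */
	call_rcu(&old_hash->rcu, example_free_cb);
}

The actual change below uses call_rcu_tasks(), since the direct-call
hash is protected by RCU-tasks rather than classic RCU, but the shape
of the pattern is the same.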

* tag 'trace-v6.10' of git://git.kernel.org/pub/scm/linux/kernel/git/trace/linux-trace:
  eventfs: Fix a possible null pointer dereference in eventfs_find_events()
  ftrace: Fix possible use-after-free issue in ftrace_location()
  ftrace: Remove unused global 'ftrace_direct_func_count'
  ftrace: Remove unused list 'ftrace_direct_funcs'
  tracing: Improve benchmark test performance by using do_div()
  ftrace: Use asynchronous grace period for register_ftrace_direct()
  ftrace: Replaces simple_strtoul in ftrace
parents 70a66320 d4e9a968
@@ -345,10 +345,9 @@ static struct eventfs_inode *eventfs_find_events(struct dentry *dentry)
 		 * If the ei is being freed, the ownership of the children
 		 * doesn't matter.
 		 */
-		if (ei->is_freed) {
-			ei = NULL;
-			break;
-		}
+		if (ei->is_freed)
+			return NULL;
+
 		// Walk upwards until you find the events inode
 	} while (!ei->is_events);
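
A hedged sketch of the bug pattern the hunk above fixes (hypothetical
node structure, not the eventfs code): bailing out of the loop with a
NULL pointer relies on every later user re-checking it, while returning
early removes the hazard at its source.

#include <linux/types.h>

struct node {
	struct node	*parent;
	bool		is_root;
	bool		is_freed;
};

static struct node *find_root(struct node *n)
{
	do {
		/* Return immediately; with the old 'n = NULL; break;'
		 * style, a NULL n would escape to the code below. */
		if (n->is_freed)
			return NULL;
		n = n->parent;
	} while (!n->is_root);

	return n;	/* never NULL here */
}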
@@ -83,7 +83,6 @@ static inline void early_trace_init(void) { }

 struct module;
 struct ftrace_hash;
-struct ftrace_direct_func;

 #if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
 	defined(CONFIG_DYNAMIC_FTRACE)
@@ -414,7 +413,6 @@ struct ftrace_func_entry {
 };

 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
-extern int ftrace_direct_func_count;
 unsigned long ftrace_find_rec_direct(unsigned long ip);
 int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr);
 int unregister_ftrace_direct(struct ftrace_ops *ops, unsigned long addr,
@@ -426,7 +424,6 @@ void ftrace_stub_direct_tramp(void);
 #else
 struct ftrace_ops;
-# define ftrace_direct_func_count 0
 static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
 {
 	return 0;
@@ -125,17 +125,6 @@ int function_graph_enter(unsigned long ret, unsigned long func,
 {
 	struct ftrace_graph_ent trace;

-#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
-	/*
-	 * Skip graph tracing if the return location is served by direct trampoline,
-	 * since call sequence and return addresses are unpredictable anyway.
-	 * Ex: BPF trampoline may call original function and may skip frame
-	 * depending on type of BPF programs attached.
-	 */
-	if (ftrace_direct_func_count &&
-	    ftrace_find_rec_direct(ret - MCOUNT_INSN_SIZE))
-		return -EBUSY;
-#endif
 	trace.func = func;
 	trace.depth = ++current->curr_ret_depth;
@@ -1595,12 +1595,15 @@ static struct dyn_ftrace *lookup_rec(unsigned long start, unsigned long end)
 unsigned long ftrace_location_range(unsigned long start, unsigned long end)
 {
 	struct dyn_ftrace *rec;
+	unsigned long ip = 0;

+	rcu_read_lock();
 	rec = lookup_rec(start, end);
 	if (rec)
-		return rec->ip;
+		ip = rec->ip;
+	rcu_read_unlock();

-	return 0;
+	return ip;
 }

 /**
@@ -1614,25 +1617,22 @@ unsigned long ftrace_location_range(unsigned long start, unsigned long end)
  */
 unsigned long ftrace_location(unsigned long ip)
 {
-	struct dyn_ftrace *rec;
+	unsigned long loc;
 	unsigned long offset;
 	unsigned long size;

-	rec = lookup_rec(ip, ip);
-	if (!rec) {
+	loc = ftrace_location_range(ip, ip);
+	if (!loc) {
 		if (!kallsyms_lookup_size_offset(ip, &size, &offset))
 			goto out;

 		/* map sym+0 to __fentry__ */
 		if (!offset)
-			rec = lookup_rec(ip, ip + size - 1);
+			loc = ftrace_location_range(ip, ip + size - 1);
 	}

-	if (rec)
-		return rec->ip;
-
 out:
-	return 0;
+	return loc;
 }
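
The two hunks above and the synchronize_rcu() calls added further down
are the two halves of one RCU discipline: lookups run under
rcu_read_lock(), and record pages are only freed after a grace period.
A minimal sketch with hypothetical names (cur_rec is an illustrative
stand-in for the ftrace page lists, not real ftrace state):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct record {
	unsigned long ip;
};

static struct record __rcu *cur_rec;

/* Reader side, mirroring the ftrace_location_range() change above */
static unsigned long record_ip(void)
{
	struct record *r;
	unsigned long ip = 0;

	rcu_read_lock();
	r = rcu_dereference(cur_rec);
	if (r)
		ip = r->ip;	/* r cannot be freed while we hold the lock */
	rcu_read_unlock();

	return ip;
}

/* Updater side, mirroring the synchronize_rcu() call sites below */
static void record_remove(void)
{
	struct record *old = rcu_dereference_protected(cur_rec, 1);

	rcu_assign_pointer(cur_rec, NULL);
	synchronize_rcu();	/* wait out all pre-existing readers */
	kfree(old);
}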
 /**
@@ -2538,7 +2538,6 @@ ftrace_find_unique_ops(struct dyn_ftrace *rec)
 /* Protected by rcu_tasks for reading, and direct_mutex for writing */
 static struct ftrace_hash __rcu *direct_functions = EMPTY_HASH;
 static DEFINE_MUTEX(direct_mutex);
-int ftrace_direct_func_count;

 /*
  * Search the direct_functions hash to see if the given instruction pointer
@@ -4201,12 +4200,12 @@ static int
 add_rec_by_index(struct ftrace_hash *hash, struct ftrace_glob *func_g,
 		 int clear_filter)
 {
-	long index = simple_strtoul(func_g->search, NULL, 0);
+	long index;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;

 	/* The index starts at 1 */
-	if (--index < 0)
+	if (kstrtoul(func_g->search, 0, &index) || --index < 0)
 		return 0;

 	do_for_each_ftrace_rec(pg, rec) {
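
The swap above tightens input parsing: simple_strtoul() silently stops
at the first non-digit and cannot report overflow, while kstrtoul()
rejects both through its return code. A small sketch of the difference
(hypothetical helper and buffer, not the ftrace code):

#include <linux/kstrtox.h>
#include <linux/errno.h>

static int parse_index(const char *buf, unsigned long *index)
{
	/*
	 * simple_strtoul("12abc", NULL, 0) would happily return 12;
	 * kstrtoul("12abc", 0, index) returns -EINVAL instead, and
	 * overflowing input returns -ERANGE.
	 */
	return kstrtoul(buf, 0, index);	/* 0 on success */
}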
@@ -5317,14 +5316,6 @@ ftrace_set_addr(struct ftrace_ops *ops, unsigned long *ips, unsigned int cnt,
 #ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS

-struct ftrace_direct_func {
-	struct list_head	next;
-	unsigned long		addr;
-	int			count;
-};
-
-static LIST_HEAD(ftrace_direct_funcs);
-
 static int register_ftrace_function_nolock(struct ftrace_ops *ops);

 /*
@@ -5365,6 +5356,13 @@ static void remove_direct_functions_hash(struct ftrace_hash *hash, unsigned long
 	}
 }

+static void register_ftrace_direct_cb(struct rcu_head *rhp)
+{
+	struct ftrace_hash *fhp = container_of(rhp, struct ftrace_hash, rcu);
+
+	free_ftrace_hash(fhp);
+}
+
 /**
  * register_ftrace_direct - Call a custom trampoline directly
  * for multiple functions registered in @ops
@@ -5463,10 +5461,8 @@ int register_ftrace_direct(struct ftrace_ops *ops, unsigned long addr)
  out_unlock:
 	mutex_unlock(&direct_mutex);

-	if (free_hash && free_hash != EMPTY_HASH) {
-		synchronize_rcu_tasks();
-		free_ftrace_hash(free_hash);
-	}
+	if (free_hash && free_hash != EMPTY_HASH)
+		call_rcu_tasks(&free_hash->rcu, register_ftrace_direct_cb);

 	if (new_hash)
 		free_ftrace_hash(new_hash);
@@ -5816,9 +5812,8 @@ __setup("ftrace_graph_notrace=", set_graph_notrace_function);

 static int __init set_graph_max_depth_function(char *str)
 {
-	if (!str)
+	if (!str || kstrtouint(str, 0, &fgraph_max_depth))
 		return 0;
-	fgraph_max_depth = simple_strtoul(str, NULL, 0);
 	return 1;
 }
 __setup("ftrace_graph_max_depth=", set_graph_max_depth_function);
@@ -6595,6 +6590,8 @@ static int ftrace_process_locs(struct module *mod,
 	/* We should have used all pages unless we skipped some */
 	if (pg_unuse) {
 		WARN_ON(!skipped);
+		/* Need to synchronize with ftrace_location_range() */
+		synchronize_rcu();
 		ftrace_free_pages(pg_unuse);
 	}
 	return ret;
@@ -6808,6 +6805,9 @@ void ftrace_release_mod(struct module *mod)
  out_unlock:
 	mutex_unlock(&ftrace_lock);

+	/* Need to synchronize with ftrace_location_range() */
+	if (tmp_page)
+		synchronize_rcu();

 	for (pg = tmp_page; pg; pg = tmp_page) {

 		/* Needs to be called outside of ftrace_lock */
@@ -7141,6 +7141,7 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 	unsigned long start = (unsigned long)(start_ptr);
 	unsigned long end = (unsigned long)(end_ptr);
 	struct ftrace_page **last_pg = &ftrace_pages_start;
+	struct ftrace_page *tmp_page = NULL;
 	struct ftrace_page *pg;
 	struct dyn_ftrace *rec;
 	struct dyn_ftrace key;
@@ -7182,12 +7183,8 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 		ftrace_update_tot_cnt--;
 		if (!pg->index) {
 			*last_pg = pg->next;
-			if (pg->records) {
-				free_pages((unsigned long)pg->records, pg->order);
-				ftrace_number_of_pages -= 1 << pg->order;
-			}
-			ftrace_number_of_groups--;
-			kfree(pg);
+			pg->next = tmp_page;
+			tmp_page = pg;
 			pg = container_of(last_pg, struct ftrace_page, next);
 			if (!(*last_pg))
 				ftrace_pages = pg;
@@ -7204,6 +7201,11 @@ void ftrace_free_mem(struct module *mod, void *start_ptr, void *end_ptr)
 		clear_func_from_hashes(func);
 		kfree(func);
 	}
+	/* Need to synchronize with ftrace_location_range() */
+	if (tmp_page) {
+		synchronize_rcu();
+		ftrace_free_pages(tmp_page);
+	}
 }

 void __init ftrace_free_init_mem(void)
@@ -104,7 +104,7 @@ static void trace_do_benchmark(void)

 	stddev = 0;
 	delta = bm_total;
-	delta = div64_u64(delta, bm_cnt);
+	do_div(delta, (u32)bm_cnt);
 	avg = delta;

 	if (stddev > 0) {
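
do_div() performs a 64-by-32 division, which is cheaper than the
64-by-64 div64_u64() on 32-bit architectures, hence the benchmark
speedup; note its unusual calling convention. A minimal sketch
(hypothetical helper and values, not the benchmark code):

#include <linux/types.h>
#include <asm/div64.h>

static u64 average(u64 total, u32 count)
{
	/* do_div() is a macro: it replaces 'total' with the quotient
	 * in place and returns the remainder, e.g. 1000000 / 37 leaves
	 * total == 27027 and returns 1. */
	u32 rem = do_div(total, count);

	(void)rem;	/* only the quotient is needed here */
	return total;
}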