Commit 415be6c2 authored by Linus Torvalds

Merge tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Various bug fixes:

   - Two small memory leaks in error paths.

   - A missed return error code on an error path.

   - A fix to check the tracing ring buffer CPU when it doesn't exist
     (caused by setting maxcpus on the command line to a value smaller
     than the actual number of CPUs, and then onlining them manually).

   - A fix to have the reset of boot tracers called by late_initcall_sync()
     instead of just late_initcall(). Some of the tracers register via
     late_initcall(), and if the clear happens before the tracer is
     registered, it will never start even though it was told to via the
     kernel command line"

* tag 'trace-v4.13-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix freeing of filter in create_filter() when set_str is false
  tracing: Fix kmemleak in tracing_map_array_free()
  ftrace: Check for null ret_stack on profile function graph entry function
  ring-buffer: Have ring_buffer_alloc_read_page() return error on offline CPU
  tracing: Missing error code in tracer_alloc_buffers()
  tracing: Call clear_boot_tracer() at lateinit_sync
parents 1cffe595 8b0db1a5
kernel/trace/ftrace.c
@@ -889,6 +889,10 @@ static int profile_graph_entry(struct ftrace_graph_ent *trace)
 	function_profile_call(trace->func, 0, NULL, NULL);
 
+	/* If function graph is shutting down, ret_stack can be NULL */
+	if (!current->ret_stack)
+		return 0;
+
 	if (index >= 0 && index < FTRACE_RETFUNC_DEPTH)
 		current->ret_stack[index].subtime = 0;
...
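
For context on the hunk above: the function graph profiler's entry callback can still fire while unregister_ftrace_graph() tears down each task's ret_stack, so the callback must not assume the pointer is valid. A minimal userspace sketch of the same guard-before-dereference pattern, with purely illustrative names (the real fix also relies on the kernel's scheduling guarantees, not the NULL check alone):

#include <stdio.h>

struct task {
	long *ret_stack;	/* freed by a concurrent shutdown path */
};

/* Bail out early instead of dereferencing a pointer that the
 * teardown path may have already cleared. */
static int profile_entry(struct task *t, int index)
{
	if (!t->ret_stack)	/* shutdown in progress: record nothing */
		return 0;
	t->ret_stack[index] = 0;
	return 1;
}

int main(void)
{
	struct task t = { .ret_stack = NULL };	/* as if torn down */

	printf("recorded: %d\n", profile_entry(&t, 3));
	return 0;
}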
kernel/trace/ring_buffer.c
@@ -4386,15 +4386,19 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
  * the page that was allocated, with the read page of the buffer.
  *
  * Returns:
- *  The page allocated, or NULL on error.
+ *  The page allocated, or ERR_PTR
  */
 void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 {
-	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
+	struct ring_buffer_per_cpu *cpu_buffer;
 	struct buffer_data_page *bpage = NULL;
 	unsigned long flags;
 	struct page *page;
 
+	if (!cpumask_test_cpu(cpu, buffer->cpumask))
+		return ERR_PTR(-ENODEV);
+
+	cpu_buffer = buffer->buffers[cpu];
 	local_irq_save(flags);
 	arch_spin_lock(&cpu_buffer->lock);
@@ -4412,7 +4416,7 @@ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
 	page = alloc_pages_node(cpu_to_node(cpu),
 				GFP_KERNEL | __GFP_NORETRY, 0);
 	if (!page)
-		return NULL;
+		return ERR_PTR(-ENOMEM);
 
 	bpage = page_address(page);
@@ -4467,8 +4471,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
  *
  * for example:
  *	rpage = ring_buffer_alloc_read_page(buffer, cpu);
- *	if (!rpage)
- *		return error;
+ *	if (IS_ERR(rpage))
+ *		return PTR_ERR(rpage);
  *	ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0);
  *	if (ret >= 0)
  *		process_page(rpage, ret);
...
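
The hunks above switch ring_buffer_alloc_read_page() to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() convention from <linux/err.h>: a negative errno is encoded in the top range of pointer values, so a single return slot can carry either a valid page or the reason for failure (-ENODEV for a CPU that never came online, -ENOMEM for allocation failure). A standalone sketch of how the encoding works; the helpers here are simplified re-implementations for illustration, and kernel code should use the real <linux/err.h>:

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Simplified stand-ins for the <linux/err.h> helpers. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

/* Hypothetical allocator mirroring the new contract of
 * ring_buffer_alloc_read_page(): a usable pointer on success,
 * ERR_PTR(-errno) on failure. */
static void *alloc_read_page(int cpu_online)
{
	static char page[4096];

	if (!cpu_online)
		return ERR_PTR(-ENODEV);	/* CPU never came up */
	return page;
}

int main(void)
{
	void *p = alloc_read_page(0);

	if (IS_ERR(p))
		printf("alloc failed: %ld\n", PTR_ERR(p));
	return 0;
}

Every caller that used to test !page must be converted to IS_ERR(), which is exactly what the remaining hunks below do.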
kernel/trace/ring_buffer_benchmark.c
@@ -113,7 +113,7 @@ static enum event_status read_page(int cpu)
 	int i;
 
 	bpage = ring_buffer_alloc_read_page(buffer, cpu);
-	if (!bpage)
+	if (IS_ERR(bpage))
 		return EVENT_DROPPED;
 
 	ret = ring_buffer_read_page(buffer, &bpage, PAGE_SIZE, cpu, 1);
...
kernel/trace/trace.c
@@ -6598,7 +6598,7 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 {
 	struct ftrace_buffer_info *info = filp->private_data;
 	struct trace_iterator *iter = &info->iter;
-	ssize_t ret;
+	ssize_t ret = 0;
 	ssize_t size;
 
 	if (!count)
@@ -6612,10 +6612,15 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (!info->spare) {
 		info->spare = ring_buffer_alloc_read_page(iter->trace_buffer->buffer,
 							  iter->cpu_file);
-		info->spare_cpu = iter->cpu_file;
+		if (IS_ERR(info->spare)) {
+			ret = PTR_ERR(info->spare);
+			info->spare = NULL;
+		} else {
+			info->spare_cpu = iter->cpu_file;
+		}
 	}
 	if (!info->spare)
-		return -ENOMEM;
+		return ret;
 
 	/* Do we have previous read data to read? */
 	if (info->read < PAGE_SIZE)
@@ -6790,8 +6795,9 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
 		ref->ref = 1;
 		ref->buffer = iter->trace_buffer->buffer;
 		ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
-		if (!ref->page) {
-			ret = -ENOMEM;
+		if (IS_ERR(ref->page)) {
+			ret = PTR_ERR(ref->page);
+			ref->page = NULL;
 			kfree(ref);
 			break;
 		}
@@ -8293,6 +8299,7 @@ __init static int tracer_alloc_buffers(void)
 	if (ret < 0)
 		goto out_free_cpumask;
 	/* Used for event triggers */
+	ret = -ENOMEM;
 	temp_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
 	if (!temp_buffer)
 		goto out_rm_hp_state;
@@ -8407,4 +8414,4 @@ __init static int clear_boot_tracer(void)
 }
 
 fs_initcall(tracer_init_tracefs);
-late_initcall(clear_boot_tracer);
+late_initcall_sync(clear_boot_tracer);
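
The last hunk works because the kernel drains each initcall level completely before starting the next: everything registered with late_initcall() has run by the time the late_initcall_sync() entries execute, so clear_boot_tracer() can no longer run ahead of a tracer that registers at late_initcall(). A toy model of that two-phase ordering, with illustrative names rather than the kernel's real initcall machinery:

#include <stdio.h>

static int tracer_registered;

/* Phase 1 (late_initcall level): a tracer registers itself. */
static void register_boot_tracer(void)
{
	tracer_registered = 1;
}

/* Phase 2 (late_initcall_sync level): runs only after every
 * phase-1 call has completed, so the registration is visible. */
static void clear_boot_tracer(void)
{
	printf("clearing boot tracer; tracer %s\n",
	       tracer_registered ? "was registered (ok)" : "was missed!");
}

int main(void)
{
	void (*late[])(void) = { register_boot_tracer };
	void (*late_sync[])(void) = { clear_boot_tracer };
	unsigned int i;

	/* The init code drains one level fully before the next one. */
	for (i = 0; i < sizeof(late) / sizeof(*late); i++)
		late[i]();
	for (i = 0; i < sizeof(late_sync) / sizeof(*late_sync); i++)
		late_sync[i]();
	return 0;
}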
kernel/trace/trace_events_filter.c
@@ -1959,6 +1959,10 @@ static int create_filter(struct trace_event_call *call,
 		if (err && set_str)
 			append_filter_err(ps, filter);
 	}
+	if (err && !set_str) {
+		free_event_filter(filter);
+		filter = NULL;
+	}
 	create_filter_finish(ps);
 
 	*filterp = filter;
...
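
The leak fixed above is a classic out-parameter pitfall: when parsing failed and set_str was false, the half-built filter was neither freed nor reachable by any caller that would free it. The rule the patch enforces, release the object on error and store NULL so ownership stays unambiguous, sketched in standalone C with illustrative names:

#include <errno.h>
#include <stdlib.h>

struct filter {
	char *prog;
};

static void free_filter(struct filter *f)
{
	if (f) {
		free(f->prog);
		free(f);
	}
}

/* A non-zero return now guarantees *filterp is NULL, so callers
 * never have to guess whether a failed create left memory behind. */
static int create_filter(const char *str, int keep_on_err,
			 struct filter **filterp)
{
	struct filter *f = calloc(1, sizeof(*f));
	int err = -EINVAL;	/* pretend parsing "str" failed */

	(void)str;
	if (err && !keep_on_err) {
		free_filter(f);		/* don't leak the half-built object */
		f = NULL;		/* and don't hand it to the caller */
	}
	*filterp = f;
	return err;
}

int main(void)
{
	struct filter *f;

	if (create_filter("x == 1", 0, &f))
		return 1;	/* f is NULL here: nothing to free */
	free_filter(f);
	return 0;
}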
kernel/trace/tracing_map.c
@@ -221,16 +221,19 @@ void tracing_map_array_free(struct tracing_map_array *a)
 	if (!a)
 		return;
 
-	if (!a->pages) {
-		kfree(a);
-		return;
-	}
+	if (!a->pages)
+		goto free;
 
 	for (i = 0; i < a->n_pages; i++) {
 		if (!a->pages[i])
 			break;
 		free_page((unsigned long)a->pages[i]);
 	}
+
+	kfree(a->pages);
+
+ free:
+	kfree(a);
 }
 
 struct tracing_map_array *tracing_map_array_alloc(unsigned int n_elts,
...
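
The kmemleak report came from the middle level of a three-level structure: the old code freed each page and (on the early path) the container, but never the pages[] pointer array itself. The reworked flow releases all three levels, innermost first. A standalone sketch of that teardown order, with tracing_map_array re-modeled in plain C for illustration (free() stands in for the kernel's free_page()/kfree()):

#include <stdlib.h>

struct map_array {
	unsigned int n_pages;
	void **pages;		/* array of per-page allocations */
};

/* Free innermost to outermost: each page, then the pages[] array
 * holding them (the level the old code leaked), then the container. */
static void map_array_free(struct map_array *a)
{
	unsigned int i;

	if (!a)
		return;
	if (!a->pages)
		goto free;
	for (i = 0; i < a->n_pages; i++)
		free(a->pages[i]);
	free(a->pages);		/* the previously leaked allocation */
free:
	free(a);
}

int main(void)
{
	struct map_array *a = calloc(1, sizeof(*a));

	a->n_pages = 2;
	a->pages = calloc(a->n_pages, sizeof(void *));
	a->pages[0] = malloc(4096);
	a->pages[1] = malloc(4096);
	map_array_free(a);	/* all three levels released */
	return 0;
}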