Commit ae9f4939 authored by Linus Torvalds

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Misc fixlets"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf: Fix error return code
  ftrace: Fix strncpy() use, use strlcpy() instead of strncpy()
  perf: Fix strncpy() use, use strlcpy() instead of strncpy()
  perf: Fix strncpy() use, always make sure it's NUL terminated
  perf: Fix ring_buffer perf_output_space() boundary calculation
  perf/x86: Fix uninitialized pt_regs in intel_pmu_drain_bts_buffer()
parents 93263e52 c4814202
@@ -314,10 +314,11 @@ int intel_pmu_drain_bts_buffer(void)
     if (top <= at)
         return 0;
 
+    memset(&regs, 0, sizeof(regs));
+
     ds->bts_index = ds->bts_buffer_base;
 
     perf_sample_data_init(&data, 0, event->hw.last_period);
-    regs.ip = 0;
 
     /*
      * Prepare a generic sample, i.e. fill in the invariant fields.
...
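Note on the hunk above: the fix zeroes the entire on-stack pt_regs before the sample is filled in, rather than clearing only regs.ip, so no uninitialized stack bytes leak into the recorded sample. A minimal userspace sketch of the difference (struct fake_regs is a hypothetical stand-in, not the real struct pt_regs):

#include <stdio.h>
#include <string.h>

struct fake_regs {                      /* hypothetical stand-in for struct pt_regs */
    unsigned long ip;
    unsigned long sp;
    unsigned long flags;
};

int main(void)
{
    struct fake_regs regs;              /* on-stack, contents indeterminate */

    regs.ip = 0;                        /* old approach: clears one field only;
                                         * sp and flags keep whatever garbage
                                         * happened to be on the stack */

    memset(&regs, 0, sizeof(regs));     /* new approach: every field is zeroed */
    printf("ip=%lu sp=%lu flags=%lu\n", regs.ip, regs.sp, regs.flags);
    return 0;
}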
@@ -4737,7 +4737,8 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
     } else {
         if (arch_vma_name(mmap_event->vma)) {
             name = strncpy(tmp, arch_vma_name(mmap_event->vma),
-                           sizeof(tmp));
+                           sizeof(tmp) - 1);
+            tmp[sizeof(tmp) - 1] = '\0';
             goto got_name;
         }
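For reference, strncpy() does not append a terminating NUL when the source is at least as long as the size argument, which is why the copy above is capped at sizeof(tmp) - 1 and the last byte is terminated by hand. A small userspace sketch of the same idiom (the buffer size and name are made up for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
    char tmp[8];
    const char *name = "a_rather_long_vma_name";

    strncpy(tmp, name, sizeof(tmp) - 1);    /* leaves room for the NUL */
    tmp[sizeof(tmp) - 1] = '\0';            /* terminate explicitly    */

    printf("%s\n", tmp);                    /* safe: prints "a_rathe"  */
    return 0;
}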
@@ -5986,6 +5987,7 @@ int perf_pmu_register(struct pmu *pmu, char *name, int type)
     if (pmu->pmu_cpu_context)
         goto got_cpu_context;
 
+    ret = -ENOMEM;
     pmu->pmu_cpu_context = alloc_percpu(struct perf_cpu_context);
     if (!pmu->pmu_cpu_context)
         goto free_dev;
...
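The single added line above restores the usual kernel error-path convention: set the error code before each step that can fail, so a goto into the unwind labels never returns a stale (or zero) value when an allocation fails. A rough userspace sketch of that pattern, with hypothetical names rather than the real perf_pmu_register() internals:

#include <errno.h>
#include <stdlib.h>

struct ctx {
    void *dev;
    void *cpu_context;
};

static int register_thing(struct ctx *c)
{
    int ret;

    ret = -ENOMEM;
    c->dev = malloc(64);
    if (!c->dev)
        goto out;               /* ret already holds -ENOMEM */

    ret = -ENOMEM;              /* re-arm before the next fallible step */
    c->cpu_context = malloc(64);
    if (!c->cpu_context)
        goto free_dev;          /* without the line above, a stale ret
                                 * (possibly 0) would be returned here */

    return 0;

free_dev:
    free(c->dev);
out:
    return ret;
}

int main(void)
{
    struct ctx c = { 0 };

    return register_thing(&c) ? 1 : 0;
}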
@@ -16,7 +16,7 @@ struct ring_buffer {
     int         page_order;    /* allocation order  */
 #endif
     int         nr_pages;      /* nr of data pages  */
-    int         writable;      /* are we writable   */
+    int         overwrite;     /* can overwrite itself */
 
     atomic_t    poll;          /* POLL_ for wakeups */
...
@@ -18,12 +18,24 @@
 static bool perf_output_space(struct ring_buffer *rb, unsigned long tail,
                               unsigned long offset, unsigned long head)
 {
-    unsigned long mask;
+    unsigned long sz = perf_data_size(rb);
+    unsigned long mask = sz - 1;
 
-    if (!rb->writable)
+    /*
+     * check if user-writable
+     * overwrite : over-write its own tail
+     * !overwrite: buffer possibly drops events.
+     */
+    if (rb->overwrite)
         return true;
 
-    mask = perf_data_size(rb) - 1;
+    /*
+     * verify that payload is not bigger than buffer
+     * otherwise masking logic may fail to detect
+     * the "not enough space" condition
+     */
+    if ((head - offset) > sz)
+        return false;
 
     offset = (offset - tail) & mask;
     head   = (head   - tail) & mask;
@@ -212,7 +224,9 @@ ring_buffer_init(struct ring_buffer *rb, long watermark, int flags)
         rb->watermark = max_size / 2;
 
     if (flags & RING_BUFFER_WRITABLE)
-        rb->writable = 1;
+        rb->overwrite = 0;
+    else
+        rb->overwrite = 1;
 
     atomic_set(&rb->refcount, 1);
...
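The new check above matters because the buffer size is a power of two and positions are reduced modulo that size with "& mask"; if a single record is larger than the whole buffer, the masked arithmetic wraps around and can no longer express "not enough space". A userspace sketch of the calculation (the trailing signed comparison mirrors the remainder of the function, which is not shown in the hunk):

#include <stdbool.h>
#include <stdio.h>

static bool output_space(unsigned long sz, unsigned long tail,
                         unsigned long offset, unsigned long head,
                         bool overwrite)
{
    unsigned long mask = sz - 1;        /* sz is a power of two        */

    if (overwrite)                      /* may overwrite its own tail  */
        return true;

    if ((head - offset) > sz)           /* payload larger than buffer  */
        return false;

    offset = (offset - tail) & mask;
    head   = (head   - tail) & mask;

    return (int)(head - offset) >= 0;   /* would we pass the reader?   */
}

int main(void)
{
    /* 4 KiB buffer, reader at 0: a 16 KiB record must be rejected,
     * but masking alone would make head and offset look equal. */
    printf("%d\n", output_space(4096, 0, 512, 512 + 16384, false));
    return 0;
}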
@@ -3440,14 +3440,14 @@ static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
 
 static int __init set_ftrace_notrace(char *str)
 {
-    strncpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
+    strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
     return 1;
 }
 __setup("ftrace_notrace=", set_ftrace_notrace);
 
 static int __init set_ftrace_filter(char *str)
 {
-    strncpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
+    strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
     return 1;
 }
 __setup("ftrace_filter=", set_ftrace_filter);
...
@@ -132,7 +132,7 @@ static char *default_bootup_tracer;
 
 static int __init set_cmdline_ftrace(char *str)
 {
-    strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
+    strlcpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
     default_bootup_tracer = bootup_tracer_buf;
     /* We are using ftrace early, expand it */
     ring_buffer_expanded = 1;
@@ -162,7 +162,7 @@ static char *trace_boot_options __initdata;
 
 static int __init set_trace_boot_options(char *str)
 {
-    strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
+    strlcpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
     trace_boot_options = trace_boot_options_buf;
     return 0;
 }
...
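These hunks, like the ftrace.c ones above, switch the boot-parameter handlers to strlcpy(), which always NUL-terminates the destination even when the source has to be truncated (strncpy() would leave an unterminated buffer in that case). glibc does not provide strlcpy(), so the sketch below defines a minimal stand-in purely for illustration:

#include <stdio.h>
#include <string.h>

/* minimal stand-in for the kernel's strlcpy(), for illustration only */
static size_t my_strlcpy(char *dst, const char *src, size_t size)
{
    size_t len = strlen(src);

    if (size) {
        size_t copy = (len >= size) ? size - 1 : len;

        memcpy(dst, src, copy);
        dst[copy] = '\0';               /* always terminated */
    }
    return len;                         /* length it tried to create */
}

int main(void)
{
    char buf[8];

    my_strlcpy(buf, "function_graph", sizeof(buf));
    printf("%s\n", buf);                /* "functio", safely terminated */
    return 0;
}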