Commit 2711ca23 authored by Steven Rostedt, committed by Steven Rostedt

ring-buffer: Move zeroing out excess in page to ring buffer code

Currently the trace splice code zeros out the excess bytes in the page before
sending it off to userspace.

This is to make sure userspace does not get anything it should not
when reading the pages, because the excess data was never initialized
to zero before writing (for performance reasons).

But the splice code has no business doing this work; it should be
done by the ring buffer. With the latest changes for recording lost
events, the splice code gets it wrong anyway.

Move the zeroing out of excess bytes into the ring buffer code.
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent b3230c8b
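
For readers outside the kernel tree, the pattern the patch moves into
ring_buffer_read_page() looks roughly like this. This is a minimal sketch:
the struct layout, the BUF_PAGE_SIZE value, and the function name are
simplified assumptions for illustration, not the kernel's actual definitions.

	/*
	 * Sketch of the tail-zeroing pattern (simplified assumptions,
	 * not the kernel's real buffer_data_page layout).
	 */
	#include <string.h>

	#define BUF_PAGE_SIZE 4096

	struct data_page {
		unsigned long commit;              /* bytes of valid data in the page */
		unsigned char data[BUF_PAGE_SIZE]; /* payload handed to userspace */
	};

	/* Zero everything past the commit index so stale bytes never leak out. */
	static void zero_page_excess(struct data_page *page)
	{
		if (page->commit < BUF_PAGE_SIZE)
			memset(&page->data[page->commit], 0,
			       BUF_PAGE_SIZE - page->commit);
	}

Doing this once in the ring buffer, right where the commit index (including
any stored missed-events count) is known, avoids every consumer having to
repeat it.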
kernel/trace/ring_buffer.c
@@ -3902,12 +3902,12 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		ret = read;
 
 		cpu_buffer->lost_events = 0;
+
+		commit = local_read(&bpage->commit);
 		/*
 		 * Set a flag in the commit field if we lost events
 		 */
 		if (missed_events) {
-			commit = local_read(&bpage->commit);
-
 			/* If there is room at the end of the page to save the
 			 * missed events, then record it there.
 			 */
@@ -3915,10 +3915,17 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 			memcpy(&bpage->data[commit], &missed_events,
 			       sizeof(missed_events));
 			local_add(RB_MISSED_STORED, &bpage->commit);
+			commit += sizeof(missed_events);
 		}
 		local_add(RB_MISSED_EVENTS, &bpage->commit);
 	}
 
+	/*
+	 * This page may be off to user land. Zero it out here.
+	 */
+	if (commit < BUF_PAGE_SIZE)
+		memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+
  out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
kernel/trace/trace.c
@@ -3661,7 +3661,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 		      size_t count, loff_t *ppos)
 {
 	struct ftrace_buffer_info *info = filp->private_data;
-	unsigned int pos;
 	ssize_t ret;
 	size_t size;
@@ -3688,11 +3687,6 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
 	if (ret < 0)
 		return 0;
 
-	pos = ring_buffer_page_len(info->spare);
-
-	if (pos < PAGE_SIZE)
-		memset(info->spare + pos, 0, PAGE_SIZE - pos);
-
  read:
 	size = PAGE_SIZE - info->read;
 	if (size > count)