Commit 139f8400 authored by Tzvetomir Stoyanov (VMware), committed by Steven Rostedt (Google)

ring-buffer: Page size per ring buffer

Currently the size of one sub buffer page is global for all ring buffers and
is hard coded to one system page. In order to introduce a configurable ring
buffer sub page size, the internal logic must first be refactored to work
with a sub page size per ring buffer.
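
In outline, the compile-time constants become per-buffer fields. A minimal
sketch of the shape of the change (names and comments taken from the patch
below):

        /* Before: one size for every ring buffer, fixed at compile time */
        #define BUF_PAGE_SIZE     (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
        #define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

        /* After: each trace_buffer carries its own sizes, set at allocation */
        struct trace_buffer {
                /* ... */
                unsigned int subbuf_size;   /* data bytes per sub buffer page */
                unsigned int max_data_size; /* subbuf_size minus event header */
        };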

Link: https://lore.kernel.org/linux-trace-devel/20211213094825.61876-3-tz.stoyanov@gmail.com
Link: https://lore.kernel.org/linux-trace-kernel/20231219185628.009147038@goodmis.org

Cc: Masami Hiramatsu <mhiramat@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Vincent Donnefort <vdonnefort@google.com>
Cc: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Tzvetomir Stoyanov (VMware) <tz.stoyanov@gmail.com>
Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
parent d5cfbdfc
@@ -200,7 +200,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
 struct trace_seq;
 
 int ring_buffer_print_entry_header(struct trace_seq *s);
-int ring_buffer_print_page_header(struct trace_seq *s);
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s);
 
 enum ring_buffer_flags {
         RB_FL_OVERWRITE = 1 << 0,
...
@@ -374,11 +374,6 @@ static inline bool test_time_stamp(u64 delta)
         return !!(delta & TS_DELTA_TEST);
 }
 
-#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)
-
-/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
-#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))
-
 struct rb_irq_work {
         struct irq_work work;
         wait_queue_head_t waiters;
@@ -510,6 +505,9 @@ struct trace_buffer {
         struct rb_irq_work irq_work;
         bool time_stamp_abs;
+
+        unsigned int subbuf_size;
+        unsigned int max_data_size;
 };
 
 struct ring_buffer_iter {
@@ -523,10 +521,11 @@ struct ring_buffer_iter {
         u64 read_stamp;
         u64 page_stamp;
         struct ring_buffer_event *event;
+        size_t event_size;
         int missed_events;
 };
 
-int ring_buffer_print_page_header(struct trace_seq *s)
+int ring_buffer_print_page_header(struct trace_buffer *buffer, struct trace_seq *s)
 {
         struct buffer_data_page field;
@@ -550,7 +549,7 @@ int ring_buffer_print_page_header(struct trace_seq *s)
         trace_seq_printf(s, "\tfield: char data;\t"
                          "offset:%u;\tsize:%u;\tsigned:%u;\n",
                          (unsigned int)offsetof(typeof(field), data),
-                         (unsigned int)BUF_PAGE_SIZE,
+                         (unsigned int)buffer->subbuf_size,
                          (unsigned int)is_signed_type(char));
 
         return !trace_seq_has_overflowed(s);
@@ -1625,7 +1624,13 @@ struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
         if (!zalloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
                 goto fail_free_buffer;
 
-        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+        /* Default buffer page size - one system page */
+        buffer->subbuf_size = PAGE_SIZE - BUF_PAGE_HDR_SIZE;
+
+        /* Max payload is buffer page size - header (8bytes) */
+        buffer->max_data_size = buffer->subbuf_size - (sizeof(u32) * 2);
+
+        nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
         buffer->flags = flags;
         buffer->clock = trace_clock_local;
         buffer->reader_lock_key = key;
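
With the default of one system page, and taking a 4096-byte page with a
16-byte buffer_data_page header as illustrative values (both are architecture
and configuration dependent), this works out to subbuf_size = 4096 - 16 = 4080
and max_data_size = 4080 - 8 = 4072 bytes of event payload per sub buffer.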
@@ -1944,7 +1949,7 @@ static void update_pages_handler(struct work_struct *work)
  * @size: the new size.
  * @cpu_id: the cpu buffer to resize
  *
- * Minimum size is 2 * BUF_PAGE_SIZE.
+ * Minimum size is 2 * buffer->subbuf_size.
  *
  * Returns 0 on success and < 0 on failure.
  */
@@ -1966,7 +1971,7 @@ int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
             !cpumask_test_cpu(cpu_id, buffer->cpumask))
                 return 0;
 
-        nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
+        nr_pages = DIV_ROUND_UP(size, buffer->subbuf_size);
 
         /* we need a minimum of two pages */
         if (nr_pages < 2)
@@ -2213,7 +2218,7 @@ rb_iter_head_event(struct ring_buffer_iter *iter)
          */
         barrier();
 
-        if ((iter->head + length) > commit || length > BUF_PAGE_SIZE)
+        if ((iter->head + length) > commit || length > iter->event_size)
                 /* Writer corrupted the read? */
                 goto reset;
@@ -2446,6 +2451,7 @@ static inline void
 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
               unsigned long tail, struct rb_event_info *info)
 {
+        unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
         struct buffer_page *tail_page = info->tail_page;
         struct ring_buffer_event *event;
         unsigned long length = info->length;
@@ -2454,13 +2460,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
          * Only the event that crossed the page boundary
          * must fill the old tail_page with padding.
          */
-        if (tail >= BUF_PAGE_SIZE) {
+        if (tail >= bsize) {
                 /*
                  * If the page was filled, then we still need
                  * to update the real_end. Reset it to zero
                  * and the reader will ignore it.
                  */
-                if (tail == BUF_PAGE_SIZE)
+                if (tail == bsize)
                         tail_page->real_end = 0;
 
                 local_sub(length, &tail_page->write);
@@ -2488,7 +2494,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
          * If we are less than the minimum size, we don't need to
          * worry about it.
          */
-        if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
+        if (tail > (bsize - RB_EVNT_MIN_SIZE)) {
                 /* No room for any events */
 
                 /* Mark the rest of the page with padding */
@@ -2503,19 +2509,19 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
         }
 
         /* Put in a discarded event */
-        event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
+        event->array[0] = (bsize - tail) - RB_EVNT_HDR_SIZE;
         event->type_len = RINGBUF_TYPE_PADDING;
         /* time delta must be non zero */
         event->time_delta = 1;
 
         /* account for padding bytes */
-        local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);
+        local_add(bsize - tail, &cpu_buffer->entries_bytes);
 
         /* Make sure the padding is visible before the tail_page->write update */
         smp_wmb();
 
         /* Set write to end of buffer */
-        length = (tail + length) - BUF_PAGE_SIZE;
+        length = (tail + length) - bsize;
         local_sub(length, &tail_page->write);
 }
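
To make the padding arithmetic concrete, here is a standalone sketch
(userspace C, not kernel code; the sub buffer size and the 4-byte event
header stand-in are illustrative assumptions, the real values come from the
architecture and the ring_buffer_event layout):

#include <stdio.h>

int main(void)
{
        /* Illustrative stand-ins for buffer->subbuf_size, the commit
         * point when the page filled, and RB_EVNT_HDR_SIZE.
         */
        unsigned long bsize = 4080;
        unsigned long tail  = 4000;
        unsigned long hdr   = 4;

        /* The discarded padding event must cover every byte from tail
         * to the end of the sub buffer; event->array[0] stores only the
         * payload length, excluding the event header itself.
         */
        unsigned long payload = (bsize - tail) - hdr;

        printf("padding event: %lu byte payload, %lu bytes total\n",
               payload, payload + hdr);        /* 76 and 80 */
        return 0;
}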
@@ -3469,7 +3475,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
         tail = write - info->length;
 
         /* See if we shot pass the end of this buffer page */
-        if (unlikely(write > BUF_PAGE_SIZE)) {
+        if (unlikely(write > cpu_buffer->buffer->subbuf_size)) {
                 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE);
                 return rb_move_tail(cpu_buffer, tail, info);
         }
@@ -3600,7 +3606,7 @@ rb_reserve_next_event(struct trace_buffer *buffer,
         if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) {
                 add_ts_default = RB_ADD_STAMP_ABSOLUTE;
                 info.length += RB_LEN_TIME_EXTEND;
-                if (info.length > BUF_MAX_DATA_SIZE)
+                if (info.length > cpu_buffer->buffer->max_data_size)
                         goto out_fail;
         } else {
                 add_ts_default = RB_ADD_STAMP_NONE;
@@ -3675,7 +3681,7 @@ ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
         if (unlikely(atomic_read(&cpu_buffer->record_disabled)))
                 goto out;
 
-        if (unlikely(length > BUF_MAX_DATA_SIZE))
+        if (unlikely(length > buffer->max_data_size))
                 goto out;
 
         if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -3825,7 +3831,7 @@ int ring_buffer_write(struct trace_buffer *buffer,
         if (atomic_read(&cpu_buffer->record_disabled))
                 goto out;
 
-        if (length > BUF_MAX_DATA_SIZE)
+        if (length > buffer->max_data_size)
                 goto out;
 
         if (unlikely(trace_recursive_lock(cpu_buffer)))
@@ -4405,6 +4411,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
         struct buffer_page *reader = NULL;
+        unsigned long bsize = READ_ONCE(cpu_buffer->buffer->subbuf_size);
         unsigned long overwrite;
         unsigned long flags;
         int nr_loops = 0;
@@ -4540,7 +4547,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 #define USECS_WAIT      1000000
         for (nr_loops = 0; nr_loops < USECS_WAIT; nr_loops++) {
                 /* If the write is past the end of page, a writer is still updating it */
-                if (likely(!reader || rb_page_write(reader) <= BUF_PAGE_SIZE))
+                if (likely(!reader || rb_page_write(reader) <= bsize))
                         break;
 
                 udelay(1);
@@ -4984,7 +4991,8 @@ ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
                 return NULL;
 
         /* Holds the entire event: data and meta data */
-        iter->event = kmalloc(BUF_PAGE_SIZE, flags);
+        iter->event_size = buffer->subbuf_size;
+        iter->event = kmalloc(iter->event_size, flags);
         if (!iter->event) {
                 kfree(iter);
                 return NULL;
@@ -5102,14 +5110,14 @@ unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
         /*
          * Earlier, this method returned
-         *      BUF_PAGE_SIZE * buffer->nr_pages
+         *      buffer->subbuf_size * buffer->nr_pages
          * Since the nr_pages field is now removed, we have converted this to
          * return the per cpu buffer value.
          */
         if (!cpumask_test_cpu(cpu, buffer->cpumask))
                 return 0;
 
-        return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages;
+        return buffer->subbuf_size * buffer->buffers[cpu]->nr_pages;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_size);
@@ -5123,8 +5131,8 @@ unsigned long ring_buffer_max_event_size(struct trace_buffer *buffer)
 {
         /* If abs timestamp is requested, events have a timestamp too */
         if (ring_buffer_time_stamp_abs(buffer))
-                return BUF_MAX_DATA_SIZE - RB_LEN_TIME_EXTEND;
-        return BUF_MAX_DATA_SIZE;
+                return buffer->max_data_size - RB_LEN_TIME_EXTEND;
+        return buffer->max_data_size;
 }
 EXPORT_SYMBOL_GPL(ring_buffer_max_event_size);
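
Because the maximum payload is now a per-buffer property, callers should
query it at runtime instead of assuming a compile-time constant. A hedged
usage sketch (the caller context is hypothetical; ring_buffer_max_event_size()
is the real API changed above):

        /* Size a scratch buffer from the ring buffer's runtime limit
         * rather than the old compile-time BUF_MAX_DATA_SIZE.
         */
        unsigned long max = ring_buffer_max_event_size(buffer);
        void *scratch = kmalloc(max, GFP_KERNEL);

        if (!scratch)
                return -ENOMEM;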
@@ -5730,7 +5738,7 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
                 /* If there is room at the end of the page to save the
                  * missed events, then record it there.
                  */
-                if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+                if (buffer->subbuf_size - commit >= sizeof(missed_events)) {
                         memcpy(&bpage->data[commit], &missed_events,
                                sizeof(missed_events));
                         local_add(RB_MISSED_STORED, &bpage->commit);
@@ -5742,8 +5750,8 @@ int ring_buffer_read_page(struct trace_buffer *buffer,
         /*
          * This page may be off to user land. Zero it out here.
          */
-        if (commit < BUF_PAGE_SIZE)
-                memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit);
+        if (commit < buffer->subbuf_size)
+                memset(&bpage->data[commit], 0, buffer->subbuf_size - commit);
 
  out_unlock:
         raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
...
@@ -5018,7 +5018,7 @@ static int tracing_release(struct inode *inode, struct file *file)
         return 0;
 }
 
-static int tracing_release_generic_tr(struct inode *inode, struct file *file)
+int tracing_release_generic_tr(struct inode *inode, struct file *file)
 {
         struct trace_array *tr = inode->i_private;
...
@@ -616,6 +616,7 @@ void tracing_reset_all_online_cpus(void);
 void tracing_reset_all_online_cpus_unlocked(void);
 int tracing_open_generic(struct inode *inode, struct file *filp);
 int tracing_open_generic_tr(struct inode *inode, struct file *filp);
+int tracing_release_generic_tr(struct inode *inode, struct file *file);
 int tracing_open_file_tr(struct inode *inode, struct file *filp);
 int tracing_release_file_tr(struct inode *inode, struct file *filp);
 int tracing_single_release_file_tr(struct inode *inode, struct file *filp);
...
@@ -1893,9 +1893,9 @@ subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
 }
 
 static ssize_t
-show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+show_header_page_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 {
-        int (*func)(struct trace_seq *s) = filp->private_data;
+        struct trace_array *tr = filp->private_data;
         struct trace_seq *s;
         int r;
@@ -1908,7 +1908,31 @@ show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
 
         trace_seq_init(s);
 
-        func(s);
+        ring_buffer_print_page_header(tr->array_buffer.buffer, s);
+        r = simple_read_from_buffer(ubuf, cnt, ppos,
+                                    s->buffer, trace_seq_used(s));
+
+        kfree(s);
+
+        return r;
+}
+
+static ssize_t
+show_header_event_file(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
+{
+        struct trace_seq *s;
+        int r;
+
+        if (*ppos)
+                return 0;
+
+        s = kmalloc(sizeof(*s), GFP_KERNEL);
+        if (!s)
+                return -ENOMEM;
+
+        trace_seq_init(s);
+
+        ring_buffer_print_entry_header(s);
         r = simple_read_from_buffer(ubuf, cnt, ppos,
                                     s->buffer, trace_seq_used(s));
@@ -2165,10 +2189,18 @@ static const struct file_operations ftrace_tr_enable_fops = {
         .release = subsystem_release,
 };
 
-static const struct file_operations ftrace_show_header_fops = {
-        .open = tracing_open_generic,
-        .read = show_header,
+static const struct file_operations ftrace_show_header_page_fops = {
+        .open = tracing_open_generic_tr,
+        .read = show_header_page_file,
         .llseek = default_llseek,
+        .release = tracing_release_generic_tr,
+};
+
+static const struct file_operations ftrace_show_header_event_fops = {
+        .open = tracing_open_generic_tr,
+        .read = show_header_event_file,
+        .llseek = default_llseek,
+        .release = tracing_release_generic_tr,
 };
 
 static int
@@ -3794,17 +3826,16 @@ static int events_callback(const char *name, umode_t *mode, void **data,
                 return 1;
         }
 
-        if (strcmp(name, "header_page") == 0)
-                *data = ring_buffer_print_page_header;
-
-        else if (strcmp(name, "header_event") == 0)
-                *data = ring_buffer_print_entry_header;
-        else
+        if (strcmp(name, "header_page") == 0) {
+                *mode = TRACE_MODE_READ;
+                *fops = &ftrace_show_header_page_fops;
+        } else if (strcmp(name, "header_event") == 0) {
+                *mode = TRACE_MODE_READ;
+                *fops = &ftrace_show_header_event_fops;
+        } else
                 return 0;
 
-        *mode = TRACE_MODE_READ;
-        *fops = &ftrace_show_header_fops;
-
         return 1;
 }
...