tracing: Make struct ring_buffer less ambiguous

As there are two struct ring_buffers in the kernel, it causes some
confusion, the other one being the perf ring buffer. It was agreed that,
as neither of the ring buffers is generic enough to be used globally,
they should be renamed as:

   perf's ring_buffer -> perf_buffer
   ftrace's ring_buffer -> trace_buffer

This implements the changes to the ring buffer that ftrace uses.
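
For context, a minimal, hypothetical sketch (not part of this patch) of what
a writer looks like after the rename. Only the struct tag changes; the
ring_buffer_*() function names, the ring_buffer_alloc() macro and the
RB_FL_OVERWRITE flag are untouched by this patch. The module function and
variable names below are invented for illustration:

   #include <linux/errno.h>
   #include <linux/ring_buffer.h>
   #include <linux/string.h>

   /* Hypothetical user of the renamed struct. */
   static struct trace_buffer *buf;  /* was: static struct ring_buffer *buf; */

   static int example_write(void)
   {
           struct ring_buffer_event *event;

           if (!buf) {
                   /* 4K of data pages per CPU, overwrite oldest data when full. */
                   buf = ring_buffer_alloc(4096, RB_FL_OVERWRITE);
                   if (!buf)
                           return -ENOMEM;
           }

           /* Reserve 8 bytes of payload, fill it, then commit the event. */
           event = ring_buffer_lock_reserve(buf, 8);
           if (!event)
                   return -EBUSY;

           memcpy(ring_buffer_event_data(event), "example", 8);
           return ring_buffer_unlock_commit(buf, event);
   }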

Link: https://lore.kernel.org/r/20191213140531.116b3200@gandalf.local.home
Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
@@ -32,7 +32,7 @@
 #define OP_BUFFER_FLAGS 0
-static struct ring_buffer *op_ring_buffer;
+static struct trace_buffer *op_ring_buffer;
 DEFINE_PER_CPU(struct oprofile_cpu_buffer, op_cpu_buffer);
 static void wq_sync_buffer(struct work_struct *work);
...
@@ -6,7 +6,7 @@
 #include <linux/seq_file.h>
 #include <linux/poll.h>
-struct ring_buffer;
+struct trace_buffer;
 struct ring_buffer_iter;
 /*
@@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
 * else
 * ring_buffer_unlock_commit(buffer, event);
 */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
 struct ring_buffer_event *event);
 /*
 * size is in bytes for each per CPU buffer.
 */
-struct ring_buffer *
+struct trace_buffer *
 __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
 /*
@@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
 __ring_buffer_alloc((size), (flags), &__key); \
 })
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 struct file *filp, poll_table *poll_table);
 #define RING_BUFFER_ALL_CPUS -1
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
 unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
 struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
 unsigned long length, void *data);
-void ring_buffer_nest_start(struct ring_buffer *buffer);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
-void ring_buffer_nest_end(struct ring_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
 unsigned long *lost_events);
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
 void ring_buffer_read_prepare_sync(void);
 void ring_buffer_read_start(struct ring_buffer_iter *iter);
 void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
 void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
 int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset(struct trace_buffer *buffer);
 #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
-struct ring_buffer *buffer_b, int cpu);
+struct trace_buffer *buffer_b, int cpu);
 #else
 static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
-struct ring_buffer *buffer_b, int cpu)
+struct trace_buffer *buffer_b, int cpu)
 {
 return -ENODEV;
 }
 #endif
-bool ring_buffer_empty(struct ring_buffer *buffer);
+bool ring_buffer_empty(struct trace_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
-void ring_buffer_record_disable(struct ring_buffer *buffer);
+void ring_buffer_record_disable(struct trace_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
-bool ring_buffer_record_is_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
 int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
 u64 (*clock)(void));
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
 size_t len, int cpu, int full);
 struct trace_seq;
...
@@ -153,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry,
 struct trace_event_file;
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
 struct trace_event_file *trace_file,
 int type, unsigned long len,
 unsigned long flags, int pc);
@@ -210,7 +210,7 @@ extern int trace_event_reg(struct trace_event_call *event,
 enum trace_reg type, void *data);
 struct trace_event_buffer {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 struct ring_buffer_event *event;
 struct trace_event_file *trace_file;
 void *entry;
...
@@ -570,7 +570,7 @@ static inline notrace int trace_event_get_offsets_##call( \
 * enum event_trigger_type __tt = ETT_NONE;
 * struct ring_buffer_event *event;
 * struct trace_event_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
+ * struct trace_buffer *buffer;
 * unsigned long irq_flags;
 * int __data_size;
 * int pc;
...
@@ -68,7 +68,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
 {
 struct blk_io_trace *t;
 struct ring_buffer_event *event = NULL;
-struct ring_buffer *buffer = NULL;
+struct trace_buffer *buffer = NULL;
 int pc = 0;
 int cpu = smp_processor_id();
 bool blk_tracer = blk_tracer_enabled;
@@ -215,7 +215,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
 {
 struct task_struct *tsk = current;
 struct ring_buffer_event *event = NULL;
-struct ring_buffer *buffer = NULL;
+struct trace_buffer *buffer = NULL;
 struct blk_io_trace *t;
 unsigned long flags = 0;
 unsigned long *sequence;
...
@@ -443,7 +443,7 @@ enum {
 struct ring_buffer_per_cpu {
 int cpu;
 atomic_t record_disabled;
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 raw_spinlock_t reader_lock; /* serialize readers */
 arch_spinlock_t lock;
 struct lock_class_key lock_key;
@@ -482,7 +482,7 @@ struct ring_buffer_per_cpu {
 struct rb_irq_work irq_work;
 };
-struct ring_buffer {
+struct trace_buffer {
 unsigned flags;
 int cpus;
 atomic_t record_disabled;
@@ -518,7 +518,7 @@ struct ring_buffer_iter {
 *
 * Returns the number of pages used by a per_cpu buffer of the ring buffer.
 */
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu)
 {
 return buffer->buffers[cpu]->nr_pages;
 }
@@ -530,7 +530,7 @@ size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu)
 *
 * Returns the number of pages that have content in the ring buffer.
 */
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu)
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu)
 {
 size_t read;
 size_t cnt;
@@ -573,7 +573,7 @@ static void rb_wake_up_waiters(struct irq_work *work)
 * as data is added to any of the @buffer's cpu buffers. Otherwise
 * it will wait for data to be added to a specific cpu buffer.
 */
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full)
 {
 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
 DEFINE_WAIT(wait);
@@ -684,7 +684,7 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full)
 * Returns EPOLLIN | EPOLLRDNORM if data exists in the buffers,
 * zero otherwise.
 */
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
 struct file *filp, poll_table *poll_table)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -742,13 +742,13 @@ __poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
 /* Up this if you want to test the TIME_EXTENTS and normalization */
 #define DEBUG_SHIFT 0
-static inline u64 rb_time_stamp(struct ring_buffer *buffer)
+static inline u64 rb_time_stamp(struct trace_buffer *buffer)
 {
 /* shift to debug/test normalization and TIME_EXTENTS */
 return buffer->clock() << DEBUG_SHIFT;
 }
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu)
 {
 u64 time;
@@ -760,7 +760,7 @@ u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_time_stamp);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
 int cpu, u64 *ts)
 {
 /* Just stupid testing the normalize function and deltas */
@@ -1283,7 +1283,7 @@ static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
 }
 static struct ring_buffer_per_cpu *
-rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu)
+rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 struct buffer_page *bpage;
@@ -1374,10 +1374,10 @@ static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
-struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
+struct trace_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
 struct lock_class_key *key)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 long nr_pages;
 int bsize;
 int cpu;
@@ -1447,7 +1447,7 @@ EXPORT_SYMBOL_GPL(__ring_buffer_alloc);
 * @buffer: the buffer to free.
 */
 void
-ring_buffer_free(struct ring_buffer *buffer)
+ring_buffer_free(struct trace_buffer *buffer)
 {
 int cpu;
@@ -1463,18 +1463,18 @@ ring_buffer_free(struct ring_buffer *buffer)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_free);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
 u64 (*clock)(void))
 {
 buffer->clock = clock;
 }
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs)
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs)
 {
 buffer->time_stamp_abs = abs;
 }
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer)
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer)
 {
 return buffer->time_stamp_abs;
 }
@@ -1712,7 +1712,7 @@ static void update_pages_handler(struct work_struct *work)
 *
 * Returns 0 on success and < 0 on failure.
 */
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size,
 int cpu_id)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -1891,7 +1891,7 @@ int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
 }
 EXPORT_SYMBOL_GPL(ring_buffer_resize);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val)
 {
 mutex_lock(&buffer->mutex);
 if (val)
@@ -2206,7 +2206,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 {
 struct buffer_page *tail_page = info->tail_page;
 struct buffer_page *commit_page = cpu_buffer->commit_page;
-struct ring_buffer *buffer = cpu_buffer->buffer;
+struct trace_buffer *buffer = cpu_buffer->buffer;
 struct buffer_page *next_page;
 int ret;
@@ -2609,7 +2609,7 @@ static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 }
 static __always_inline void
-rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
+rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
 {
 size_t nr_pages;
 size_t dirty;
@@ -2733,7 +2733,7 @@ trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 * Call this function before calling another ring_buffer_lock_reserve() and
 * call ring_buffer_nest_end() after the nested ring_buffer_unlock_commit().
 */
-void ring_buffer_nest_start(struct ring_buffer *buffer)
+void ring_buffer_nest_start(struct trace_buffer *buffer)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 int cpu;
@@ -2753,7 +2753,7 @@ void ring_buffer_nest_start(struct ring_buffer *buffer)
 * Must be called after ring_buffer_nest_start() and after the
 * ring_buffer_unlock_commit().
 */
-void ring_buffer_nest_end(struct ring_buffer *buffer)
+void ring_buffer_nest_end(struct trace_buffer *buffer)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 int cpu;
@@ -2775,7 +2775,7 @@ void ring_buffer_nest_end(struct ring_buffer *buffer)
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
 struct ring_buffer_event *event)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -2868,7 +2868,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 }
 static __always_inline struct ring_buffer_event *
-rb_reserve_next_event(struct ring_buffer *buffer,
+rb_reserve_next_event(struct trace_buffer *buffer,
 struct ring_buffer_per_cpu *cpu_buffer,
 unsigned long length)
 {
@@ -2961,7 +2961,7 @@ rb_reserve_next_event(struct ring_buffer *buffer,
 * If NULL is returned, then nothing has been allocated or locked.
 */
 struct ring_buffer_event *
-ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
+ring_buffer_lock_reserve(struct trace_buffer *buffer, unsigned long length)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 struct ring_buffer_event *event;
@@ -3062,7 +3062,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
 struct ring_buffer_event *event)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3113,7 +3113,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
 */
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
 unsigned long length,
 void *data)
 {
@@ -3193,7 +3193,7 @@ static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
 *
 * The caller should call synchronize_rcu() after this.
 */
-void ring_buffer_record_disable(struct ring_buffer *buffer)
+void ring_buffer_record_disable(struct trace_buffer *buffer)
 {
 atomic_inc(&buffer->record_disabled);
 }
@@ -3206,7 +3206,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable);
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
-void ring_buffer_record_enable(struct ring_buffer *buffer)
+void ring_buffer_record_enable(struct trace_buffer *buffer)
 {
 atomic_dec(&buffer->record_disabled);
 }
@@ -3223,7 +3223,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_enable);
 * it works like an on/off switch, where as the disable() version
 * must be paired with a enable().
 */
-void ring_buffer_record_off(struct ring_buffer *buffer)
+void ring_buffer_record_off(struct trace_buffer *buffer)
 {
 unsigned int rd;
 unsigned int new_rd;
@@ -3246,7 +3246,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_off);
 * it works like an on/off switch, where as the enable() version
 * must be paired with a disable().
 */
-void ring_buffer_record_on(struct ring_buffer *buffer)
+void ring_buffer_record_on(struct trace_buffer *buffer)
 {
 unsigned int rd;
 unsigned int new_rd;
@@ -3264,7 +3264,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_on);
 *
 * Returns true if the ring buffer is in a state that it accepts writes.
 */
-bool ring_buffer_record_is_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_on(struct trace_buffer *buffer)
 {
 return !atomic_read(&buffer->record_disabled);
 }
@@ -3280,7 +3280,7 @@ bool ring_buffer_record_is_on(struct ring_buffer *buffer)
 * ring_buffer_record_disable(), as that is a temporary disabling of
 * the ring buffer.
 */
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer)
 {
 return !(atomic_read(&buffer->record_disabled) & RB_BUFFER_OFF);
 }
@@ -3295,7 +3295,7 @@ bool ring_buffer_record_is_set_on(struct ring_buffer *buffer)
 *
 * The caller should call synchronize_rcu() after this.
 */
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3315,7 +3315,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu);
 * Note, multiple disables will need the same number of enables
 * to truly enable the writing (much like preempt_disable).
 */
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3345,7 +3345,7 @@ rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu)
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu)
 {
 unsigned long flags;
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3378,7 +3378,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts);
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to read from.
 */
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long ret;
@@ -3398,7 +3398,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu);
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the entries from.
 */
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3417,7 +3417,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu);
 * @buffer: The ring buffer
 * @cpu: The per CPU buffer to get the number of overruns from
 */
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long ret;
@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu);
 * @cpu: The per CPU buffer to get the number of overruns from
 */
 unsigned long
-ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long ret;
@@ -3462,7 +3462,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu);
 * @cpu: The per CPU buffer to get the number of overruns from
 */
 unsigned long
-ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long ret;
@@ -3483,7 +3483,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu);
 * @cpu: The per CPU buffer to get the number of events read
 */
 unsigned long
-ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu)
+ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -3502,7 +3502,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu);
 * Returns the total number of entries in the ring buffer
 * (all CPU entries)
 */
-unsigned long ring_buffer_entries(struct ring_buffer *buffer)
+unsigned long ring_buffer_entries(struct trace_buffer *buffer)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long entries = 0;
@@ -3525,7 +3525,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_entries);
 * Returns the total number of overruns in the ring buffer
 * (all CPU entries)
 */
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long overruns = 0;
@@ -3949,7 +3949,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_peek);
 static struct ring_buffer_event *
 rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 struct ring_buffer_per_cpu *cpu_buffer;
 struct ring_buffer_event *event;
 int nr_loops = 0;
@@ -4077,7 +4077,7 @@ rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked)
 * not consume the data.
 */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
 unsigned long *lost_events)
 {
 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4141,7 +4141,7 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 * and eventually empty the ring buffer if the producer is slower.
 */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
 unsigned long *lost_events)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
@@ -4201,7 +4201,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_consume);
 * This overall must be paired with ring_buffer_read_finish.
 */
 struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags)
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 struct ring_buffer_iter *iter;
@@ -4332,7 +4332,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_read);
 * ring_buffer_size - return the size of the ring buffer (in bytes)
 * @buffer: The ring buffer.
 */
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu)
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu)
 {
 /*
 * Earlier, this method returned
@@ -4398,7 +4398,7 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 * @buffer: The ring buffer to reset a per cpu buffer of
 * @cpu: The CPU buffer to be reset
 */
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 unsigned long flags;
@@ -4435,7 +4435,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu);
 * ring_buffer_reset - reset a ring buffer
 * @buffer: The ring buffer to reset all cpu buffers
 */
-void ring_buffer_reset(struct ring_buffer *buffer)
+void ring_buffer_reset(struct trace_buffer *buffer)
 {
 int cpu;
@@ -4448,7 +4448,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_reset);
 * rind_buffer_empty - is the ring buffer empty?
 * @buffer: The ring buffer to test
 */
-bool ring_buffer_empty(struct ring_buffer *buffer)
+bool ring_buffer_empty(struct trace_buffer *buffer)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long flags;
@@ -4478,7 +4478,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty);
 * @buffer: The ring buffer
 * @cpu: The CPU buffer to test
 */
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 unsigned long flags;
@@ -4510,8 +4510,8 @@ EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu);
 * it is expected that the tracer handles the cpu buffer not being
 * used at the moment.
 */
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
-struct ring_buffer *buffer_b, int cpu)
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+struct trace_buffer *buffer_b, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer_a;
 struct ring_buffer_per_cpu *cpu_buffer_b;
@@ -4590,7 +4590,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu);
 * Returns:
 * The page allocated, or ERR_PTR
 */
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu)
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu)
 {
 struct ring_buffer_per_cpu *cpu_buffer;
 struct buffer_data_page *bpage = NULL;
@@ -4637,7 +4637,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page);
 *
 * Free a page allocated from ring_buffer_alloc_read_page.
 */
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data)
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data)
 {
 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 struct buffer_data_page *bpage = data;
@@ -4697,7 +4697,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_free_read_page);
 * >=0 if data has been transferred, returns the offset of consumed data.
 * <0 if no data has been transferred.
 */
-int ring_buffer_read_page(struct ring_buffer *buffer,
+int ring_buffer_read_page(struct trace_buffer *buffer,
 void **data_page, size_t len, int cpu, int full)
 {
 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
@@ -4868,12 +4868,12 @@ EXPORT_SYMBOL_GPL(ring_buffer_read_page);
 */
 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 long nr_pages_same;
 int cpu_i;
 unsigned long nr_pages;
-buffer = container_of(node, struct ring_buffer, node);
+buffer = container_of(node, struct trace_buffer, node);
 if (cpumask_test_cpu(cpu, buffer->cpumask))
 return 0;
@@ -4923,7 +4923,7 @@ int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node)
 static struct task_struct *rb_threads[NR_CPUS] __initdata;
 struct rb_test_data {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 unsigned long events;
 unsigned long bytes_written;
 unsigned long bytes_alloc;
@@ -5065,7 +5065,7 @@ static __init int rb_hammer_test(void *arg)
 static __init int test_ringbuffer(void)
 {
 struct task_struct *rb_hammer;
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 int cpu;
 int ret = 0;
...
@@ -29,7 +29,7 @@ static int reader_finish;
 static DECLARE_COMPLETION(read_start);
 static DECLARE_COMPLETION(read_done);
-static struct ring_buffer *buffer;
+static struct trace_buffer *buffer;
 static struct task_struct *producer;
 static struct task_struct *consumer;
 static unsigned long read;
...
@@ -163,7 +163,7 @@ static union trace_eval_map_item *trace_eval_maps;
 #endif /* CONFIG_TRACE_EVAL_MAP_FILE */
 static int tracing_set_tracer(struct trace_array *tr, const char *buf);
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 unsigned long flags, int pc);
 #define MAX_TRACER_SIZE 100
@@ -338,7 +338,7 @@ int tracing_check_open_get_tr(struct trace_array *tr)
 }
 int call_filter_check_discard(struct trace_event_call *call, void *rec,
-struct ring_buffer *buffer,
+struct trace_buffer *buffer,
 struct ring_buffer_event *event)
 {
 if (unlikely(call->flags & TRACE_EVENT_FL_FILTERED) &&
@@ -747,22 +747,22 @@ static inline void trace_access_lock_init(void)
 #endif
 #ifdef CONFIG_STACKTRACE
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs);
 static inline void ftrace_trace_stack(struct trace_array *tr,
-struct ring_buffer *buffer,
+struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs);
 #else
-static inline void __ftrace_trace_stack(struct ring_buffer *buffer,
+static inline void __ftrace_trace_stack(struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs)
 {
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
-struct ring_buffer *buffer,
+struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs)
 {
@@ -780,7 +780,7 @@ trace_event_setup(struct ring_buffer_event *event,
 }
 static __always_inline struct ring_buffer_event *
-__trace_buffer_lock_reserve(struct ring_buffer *buffer,
+__trace_buffer_lock_reserve(struct trace_buffer *buffer,
 int type,
 unsigned long len,
 unsigned long flags, int pc)
@@ -825,7 +825,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
 static __always_inline void
-__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
+__buffer_unlock_commit(struct trace_buffer *buffer, struct ring_buffer_event *event)
 {
 __this_cpu_write(trace_taskinfo_save, true);
@@ -848,7 +848,7 @@ __buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *eve
 int __trace_puts(unsigned long ip, const char *str, int size)
 {
 struct ring_buffer_event *event;
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 struct print_entry *entry;
 unsigned long irq_flags;
 int alloc;
@@ -898,7 +898,7 @@ EXPORT_SYMBOL_GPL(__trace_puts);
 int __trace_bputs(unsigned long ip, const char *str)
 {
 struct ring_buffer_event *event;
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 struct bputs_entry *entry;
 unsigned long irq_flags;
 int size = sizeof(struct bputs_entry);
@@ -1964,7 +1964,7 @@ int __init register_tracer(struct tracer *type)
 static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 {
-struct ring_buffer *buffer = buf->buffer;
+struct trace_buffer *buffer = buf->buffer;
 if (!buffer)
 return;
@@ -1980,7 +1980,7 @@ static void tracing_reset_cpu(struct array_buffer *buf, int cpu)
 void tracing_reset_online_cpus(struct array_buffer *buf)
 {
-struct ring_buffer *buffer = buf->buffer;
+struct trace_buffer *buffer = buf->buffer;
 int cpu;
 if (!buffer)
@@ -2098,7 +2098,7 @@ int is_tracing_stopped(void)
 */
 void tracing_start(void)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 unsigned long flags;
 if (tracing_disabled)
@@ -2135,7 +2135,7 @@ void tracing_start(void)
 static void tracing_start_tr(struct trace_array *tr)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 unsigned long flags;
 if (tracing_disabled)
@@ -2172,7 +2172,7 @@ static void tracing_start_tr(struct trace_array *tr)
 */
 void tracing_stop(void)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 unsigned long flags;
 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
@@ -2200,7 +2200,7 @@ void tracing_stop(void)
 static void tracing_stop_tr(struct trace_array *tr)
 {
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 unsigned long flags;
 /* If global, we need to also stop the max tracer */
@@ -2442,7 +2442,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned short type,
 EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
 struct ring_buffer_event *
-trace_buffer_lock_reserve(struct ring_buffer *buffer,
+trace_buffer_lock_reserve(struct trace_buffer *buffer,
 int type,
 unsigned long len,
 unsigned long flags, int pc)
@@ -2561,10 +2561,10 @@ void trace_buffered_event_disable(void)
 preempt_enable();
 }
-static struct ring_buffer *temp_buffer;
+static struct trace_buffer *temp_buffer;
 struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_rb,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_rb,
 struct trace_event_file *trace_file,
 int type, unsigned long len,
 unsigned long flags, int pc)
@@ -2689,7 +2689,7 @@ EXPORT_SYMBOL_GPL(trace_event_buffer_commit);
 # define STACK_SKIP 3
 void trace_buffer_unlock_commit_regs(struct trace_array *tr,
-struct ring_buffer *buffer,
+struct trace_buffer *buffer,
 struct ring_buffer_event *event,
 unsigned long flags, int pc,
 struct pt_regs *regs)
@@ -2710,7 +2710,7 @@ void trace_buffer_unlock_commit_regs(struct trace_array *tr,
 * Similar to trace_buffer_unlock_commit_regs() but do not dump stack.
 */
 void
-trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer,
+trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
 struct ring_buffer_event *event)
 {
 __buffer_unlock_commit(buffer, event);
@@ -2845,7 +2845,7 @@ trace_function(struct trace_array *tr,
 int pc)
 {
 struct trace_event_call *call = &event_function;
-struct ring_buffer *buffer = tr->array_buffer.buffer;
+struct trace_buffer *buffer = tr->array_buffer.buffer;
 struct ring_buffer_event *event;
 struct ftrace_entry *entry;
@@ -2883,7 +2883,7 @@ struct ftrace_stacks {
 static DEFINE_PER_CPU(struct ftrace_stacks, ftrace_stacks);
 static DEFINE_PER_CPU(int, ftrace_stack_reserve);
-static void __ftrace_trace_stack(struct ring_buffer *buffer,
+static void __ftrace_trace_stack(struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs)
 {
@@ -2958,7 +2958,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
 }
 static inline void ftrace_trace_stack(struct trace_array *tr,
-struct ring_buffer *buffer,
+struct trace_buffer *buffer,
 unsigned long flags,
 int skip, int pc, struct pt_regs *regs)
 {
@@ -2971,7 +2971,7 @@ static inline void ftrace_trace_stack(struct trace_array *tr,
 void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
 int pc)
 {
-struct ring_buffer *buffer = tr->array_buffer.buffer;
+struct trace_buffer *buffer = tr->array_buffer.buffer;
 if (rcu_is_watching()) {
 __ftrace_trace_stack(buffer, flags, skip, pc, NULL);
@@ -3018,7 +3018,7 @@ EXPORT_SYMBOL_GPL(trace_dump_stack);
 static DEFINE_PER_CPU(int, user_stack_count);
 static void
-ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
+ftrace_trace_userstack(struct trace_buffer *buffer, unsigned long flags, int pc)
 {
 struct trace_event_call *call = &event_user_stack;
 struct ring_buffer_event *event;
@@ -3063,7 +3063,7 @@ ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
 preempt_enable();
 }
 #else /* CONFIG_USER_STACKTRACE_SUPPORT */
-static void ftrace_trace_userstack(struct ring_buffer *buffer,
+static void ftrace_trace_userstack(struct trace_buffer *buffer,
 unsigned long flags, int pc)
 {
 }
@@ -3188,7 +3188,7 @@ int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
 {
 struct trace_event_call *call = &event_bprint;
 struct ring_buffer_event *event;
-struct ring_buffer *buffer;
+struct trace_buffer *buffer;
 struct trace_array *tr = &global_trace;
 struct bprint_entry *entry;
 unsigned long flags;
@@ -3245,7 +3245,7 @@ EXPORT_SYMBOL_GPL(trace_vbprintk);
 __printf(3, 0)
 static int
-__trace_array_vprintk(struct ring_buffer *buffer,
+__trace_array_vprintk(struct trace_buffer *buffer,
 unsigned long ip, const char *fmt, va_list args)
 {
 struct trace_event_call *call = &event_print;
@@ -3326,7 +3326,7 @@ int trace_array_printk(struct trace_array *tr,
 EXPORT_SYMBOL_GPL(trace_array_printk);
 __printf(3, 4)
-int trace_array_printk_buf(struct ring_buffer *buffer,
+int trace_array_printk_buf(struct trace_buffer *buffer,
 unsigned long ip, const char *fmt, ...)
 {
 int ret;
@@ -3382,7 +3382,7 @@ static struct trace_entry *
 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
 unsigned long *missing_events, u64 *ent_ts)
 {
-struct ring_buffer *buffer = iter->array_buffer->buffer;
+struct trace_buffer *buffer = iter->array_buffer->buffer;
 struct trace_entry *ent, *next = NULL;
 unsigned long lost_events = 0, next_lost = 0;
 int cpu_file = iter->cpu_file;
@@ -6470,7 +6470,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
struct trace_array *tr = filp->private_data; struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
enum event_trigger_type tt = ETT_NONE; enum event_trigger_type tt = ETT_NONE;
struct ring_buffer *buffer; struct trace_buffer *buffer;
struct print_entry *entry; struct print_entry *entry;
unsigned long irq_flags; unsigned long irq_flags;
ssize_t written; ssize_t written;
...@@ -6550,7 +6550,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf, ...@@ -6550,7 +6550,7 @@ tracing_mark_raw_write(struct file *filp, const char __user *ubuf,
{ {
struct trace_array *tr = filp->private_data; struct trace_array *tr = filp->private_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer; struct trace_buffer *buffer;
struct raw_data_entry *entry; struct raw_data_entry *entry;
unsigned long irq_flags; unsigned long irq_flags;
ssize_t written; ssize_t written;
...@@ -7433,7 +7433,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file) ...@@ -7433,7 +7433,7 @@ static int tracing_buffers_release(struct inode *inode, struct file *file)
} }
struct buffer_ref { struct buffer_ref {
struct ring_buffer *buffer; struct trace_buffer *buffer;
void *page; void *page;
int cpu; int cpu;
refcount_t refcount; refcount_t refcount;
...@@ -8272,7 +8272,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf, ...@@ -8272,7 +8272,7 @@ rb_simple_write(struct file *filp, const char __user *ubuf,
size_t cnt, loff_t *ppos) size_t cnt, loff_t *ppos)
{ {
struct trace_array *tr = filp->private_data; struct trace_array *tr = filp->private_data;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
unsigned long val; unsigned long val;
int ret; int ret;
......
...@@ -178,7 +178,7 @@ struct trace_option_dentry; ...@@ -178,7 +178,7 @@ struct trace_option_dentry;
struct array_buffer { struct array_buffer {
struct trace_array *tr; struct trace_array *tr;
struct ring_buffer *buffer; struct trace_buffer *buffer;
struct trace_array_cpu __percpu *data; struct trace_array_cpu __percpu *data;
u64 time_start; u64 time_start;
int cpu; int cpu;
...@@ -705,7 +705,7 @@ struct dentry *tracing_init_dentry(void); ...@@ -705,7 +705,7 @@ struct dentry *tracing_init_dentry(void);
struct ring_buffer_event; struct ring_buffer_event;
struct ring_buffer_event * struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer, trace_buffer_lock_reserve(struct trace_buffer *buffer,
int type, int type,
unsigned long len, unsigned long len,
unsigned long flags, unsigned long flags,
...@@ -717,7 +717,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr, ...@@ -717,7 +717,7 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
int *ent_cpu, u64 *ent_ts); int *ent_cpu, u64 *ent_ts);
void trace_buffer_unlock_commit_nostack(struct ring_buffer *buffer, void trace_buffer_unlock_commit_nostack(struct trace_buffer *buffer,
struct ring_buffer_event *event); struct ring_buffer_event *event);
int trace_empty(struct trace_iterator *iter); int trace_empty(struct trace_iterator *iter);
...@@ -873,7 +873,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args); ...@@ -873,7 +873,7 @@ trace_vprintk(unsigned long ip, const char *fmt, va_list args);
extern int extern int
trace_array_vprintk(struct trace_array *tr, trace_array_vprintk(struct trace_array *tr,
unsigned long ip, const char *fmt, va_list args); unsigned long ip, const char *fmt, va_list args);
int trace_array_printk_buf(struct ring_buffer *buffer, int trace_array_printk_buf(struct trace_buffer *buffer,
unsigned long ip, const char *fmt, ...); unsigned long ip, const char *fmt, ...);
void trace_printk_seq(struct trace_seq *s); void trace_printk_seq(struct trace_seq *s);
enum print_line_t print_trace_line(struct trace_iterator *iter); enum print_line_t print_trace_line(struct trace_iterator *iter);
...@@ -1367,17 +1367,17 @@ struct trace_subsystem_dir { ...@@ -1367,17 +1367,17 @@ struct trace_subsystem_dir {
}; };
extern int call_filter_check_discard(struct trace_event_call *call, void *rec, extern int call_filter_check_discard(struct trace_event_call *call, void *rec,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event); struct ring_buffer_event *event);
void trace_buffer_unlock_commit_regs(struct trace_array *tr, void trace_buffer_unlock_commit_regs(struct trace_array *tr,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
unsigned long flags, int pc, unsigned long flags, int pc,
struct pt_regs *regs); struct pt_regs *regs);
static inline void trace_buffer_unlock_commit(struct trace_array *tr, static inline void trace_buffer_unlock_commit(struct trace_array *tr,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
unsigned long flags, int pc) unsigned long flags, int pc)
{ {
...@@ -1390,7 +1390,7 @@ void trace_buffered_event_disable(void); ...@@ -1390,7 +1390,7 @@ void trace_buffered_event_disable(void);
void trace_buffered_event_enable(void); void trace_buffered_event_enable(void);
static inline void static inline void
__trace_event_discard_commit(struct ring_buffer *buffer, __trace_event_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event) struct ring_buffer_event *event)
{ {
if (this_cpu_read(trace_buffered_event) == event) { if (this_cpu_read(trace_buffered_event) == event) {
...@@ -1416,7 +1416,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer, ...@@ -1416,7 +1416,7 @@ __trace_event_discard_commit(struct ring_buffer *buffer,
*/ */
static inline bool static inline bool
__event_trigger_test_discard(struct trace_event_file *file, __event_trigger_test_discard(struct trace_event_file *file,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
void *entry, void *entry,
enum event_trigger_type *tt) enum event_trigger_type *tt)
...@@ -1451,7 +1451,7 @@ __event_trigger_test_discard(struct trace_event_file *file, ...@@ -1451,7 +1451,7 @@ __event_trigger_test_discard(struct trace_event_file *file,
*/ */
static inline void static inline void
event_trigger_unlock_commit(struct trace_event_file *file, event_trigger_unlock_commit(struct trace_event_file *file,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc) void *entry, unsigned long irq_flags, int pc)
{ {
...@@ -1482,7 +1482,7 @@ event_trigger_unlock_commit(struct trace_event_file *file, ...@@ -1482,7 +1482,7 @@ event_trigger_unlock_commit(struct trace_event_file *file,
*/ */
static inline void static inline void
event_trigger_unlock_commit_regs(struct trace_event_file *file, event_trigger_unlock_commit_regs(struct trace_event_file *file,
struct ring_buffer *buffer, struct trace_buffer *buffer,
struct ring_buffer_event *event, struct ring_buffer_event *event,
void *entry, unsigned long irq_flags, int pc, void *entry, unsigned long irq_flags, int pc,
struct pt_regs *regs) struct pt_regs *regs)
......
...@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect) ...@@ -32,10 +32,10 @@ probe_likely_condition(struct ftrace_likely_data *f, int val, int expect)
{ {
struct trace_event_call *call = &event_branch; struct trace_event_call *call = &event_branch;
struct trace_array *tr = branch_tracer; struct trace_array *tr = branch_tracer;
struct trace_buffer *buffer;
struct trace_array_cpu *data; struct trace_array_cpu *data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_branch *entry; struct trace_branch *entry;
struct ring_buffer *buffer;
unsigned long flags; unsigned long flags;
int pc; int pc;
const char *p; const char *p;
......
...@@ -3391,8 +3391,8 @@ static void __init ...@@ -3391,8 +3391,8 @@ static void __init
function_test_events_call(unsigned long ip, unsigned long parent_ip, function_test_events_call(unsigned long ip, unsigned long parent_ip,
struct ftrace_ops *op, struct pt_regs *pt_regs) struct ftrace_ops *op, struct pt_regs *pt_regs)
{ {
struct trace_buffer *buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer;
struct ftrace_entry *entry; struct ftrace_entry *entry;
unsigned long flags; unsigned long flags;
long disabled; long disabled;
......
...@@ -879,7 +879,7 @@ static notrace void trace_event_raw_event_synth(void *__data, ...@@ -879,7 +879,7 @@ static notrace void trace_event_raw_event_synth(void *__data,
struct trace_event_file *trace_file = __data; struct trace_event_file *trace_file = __data;
struct synth_trace_event *entry; struct synth_trace_event *entry;
struct trace_event_buffer fbuffer; struct trace_event_buffer fbuffer;
struct ring_buffer *buffer; struct trace_buffer *buffer;
struct synth_event *event; struct synth_event *event;
unsigned int i, n_u64; unsigned int i, n_u64;
int fields_size = 0; int fields_size = 0;
......
...@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr, ...@@ -101,7 +101,7 @@ int __trace_graph_entry(struct trace_array *tr,
{ {
struct trace_event_call *call = &event_funcgraph_entry; struct trace_event_call *call = &event_funcgraph_entry;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ent_entry *entry; struct ftrace_graph_ent_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT, event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
...@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr, ...@@ -221,7 +221,7 @@ void __trace_graph_return(struct trace_array *tr,
{ {
struct trace_event_call *call = &event_funcgraph_exit; struct trace_event_call *call = &event_funcgraph_exit;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ftrace_graph_ret_entry *entry; struct ftrace_graph_ret_entry *entry;
event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET, event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
......
...@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample) ...@@ -104,7 +104,7 @@ static void trace_hwlat_sample(struct hwlat_sample *sample)
{ {
struct trace_array *tr = hwlat_trace; struct trace_array *tr = hwlat_trace;
struct trace_event_call *call = &event_hwlat; struct trace_event_call *call = &event_hwlat;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct hwlat_entry *entry; struct hwlat_entry *entry;
unsigned long flags; unsigned long flags;
......
...@@ -1175,8 +1175,8 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs, ...@@ -1175,8 +1175,8 @@ __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
struct trace_event_file *trace_file) struct trace_event_file *trace_file)
{ {
struct kprobe_trace_entry_head *entry; struct kprobe_trace_entry_head *entry;
struct trace_buffer *buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, dsize, pc; int size, dsize, pc;
unsigned long irq_flags; unsigned long irq_flags;
struct trace_event_call *call = trace_probe_event_call(&tk->tp); struct trace_event_call *call = trace_probe_event_call(&tk->tp);
...@@ -1223,8 +1223,8 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri, ...@@ -1223,8 +1223,8 @@ __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
struct trace_event_file *trace_file) struct trace_event_file *trace_file)
{ {
struct kretprobe_trace_entry_head *entry; struct kretprobe_trace_entry_head *entry;
struct trace_buffer *buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer;
int size, pc, dsize; int size, pc, dsize;
unsigned long irq_flags; unsigned long irq_flags;
struct trace_event_call *call = trace_probe_event_call(&tk->tp); struct trace_event_call *call = trace_probe_event_call(&tk->tp);
......
...@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr, ...@@ -297,7 +297,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
struct mmiotrace_rw *rw) struct mmiotrace_rw *rw)
{ {
struct trace_event_call *call = &event_mmiotrace_rw; struct trace_event_call *call = &event_mmiotrace_rw;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_rw *entry; struct trace_mmiotrace_rw *entry;
int pc = preempt_count(); int pc = preempt_count();
...@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr, ...@@ -327,7 +327,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
struct mmiotrace_map *map) struct mmiotrace_map *map)
{ {
struct trace_event_call *call = &event_mmiotrace_map; struct trace_event_call *call = &event_mmiotrace_map;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct trace_mmiotrace_map *entry; struct trace_mmiotrace_map *entry;
int pc = preempt_count(); int pc = preempt_count();
......
...@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr, ...@@ -378,7 +378,7 @@ tracing_sched_switch_trace(struct trace_array *tr,
unsigned long flags, int pc) unsigned long flags, int pc)
{ {
struct trace_event_call *call = &event_context_switch; struct trace_event_call *call = &event_context_switch;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ctx_switch_entry *entry; struct ctx_switch_entry *entry;
...@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr, ...@@ -408,7 +408,7 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
struct trace_event_call *call = &event_wakeup; struct trace_event_call *call = &event_wakeup;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ctx_switch_entry *entry; struct ctx_switch_entry *entry;
struct ring_buffer *buffer = tr->array_buffer.buffer; struct trace_buffer *buffer = tr->array_buffer.buffer;
event = trace_buffer_lock_reserve(buffer, TRACE_WAKE, event = trace_buffer_lock_reserve(buffer, TRACE_WAKE,
sizeof(*entry), flags, pc); sizeof(*entry), flags, pc);
......
...@@ -317,7 +317,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id) ...@@ -317,7 +317,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
struct syscall_trace_enter *entry; struct syscall_trace_enter *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer; struct trace_buffer *buffer;
unsigned long irq_flags; unsigned long irq_flags;
unsigned long args[6]; unsigned long args[6];
int pc; int pc;
...@@ -367,7 +367,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret) ...@@ -367,7 +367,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
struct syscall_trace_exit *entry; struct syscall_trace_exit *entry;
struct syscall_metadata *sys_data; struct syscall_metadata *sys_data;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer; struct trace_buffer *buffer;
unsigned long irq_flags; unsigned long irq_flags;
int pc; int pc;
int syscall_nr; int syscall_nr;
......
...@@ -938,8 +938,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu, ...@@ -938,8 +938,8 @@ static void __uprobe_trace_func(struct trace_uprobe *tu,
struct trace_event_file *trace_file) struct trace_event_file *trace_file)
{ {
struct uprobe_trace_entry_head *entry; struct uprobe_trace_entry_head *entry;
struct trace_buffer *buffer;
struct ring_buffer_event *event; struct ring_buffer_event *event;
struct ring_buffer *buffer;
void *data; void *data;
int size, esize; int size, esize;
struct trace_event_call *call = trace_probe_event_call(&tu->tp); struct trace_event_call *call = trace_probe_event_call(&tu->tp);
......
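For reference, a minimal sketch (not part of the patch) of what a caller looks like after this rename: only the struct tag changes from ring_buffer to trace_buffer, while every ring_buffer_*() function keeps its existing name and signature. The buffer owner my_buffer, the helper names my_trace_init()/my_trace_write(), and the chosen size and flags below are hypothetical, purely for illustration.

	/*
	 * Hypothetical caller, shown only to illustrate the rename:
	 * the struct tag is now trace_buffer, but the ring_buffer_*()
	 * API itself is unchanged.
	 */
	#include <linux/ring_buffer.h>
	#include <linux/string.h>
	#include <linux/errno.h>

	static struct trace_buffer *my_buffer;	/* was: struct ring_buffer * */

	static int __init my_trace_init(void)
	{
		/* one page per CPU; overwrite the oldest events when full */
		my_buffer = ring_buffer_alloc(PAGE_SIZE, RB_FL_OVERWRITE);
		return my_buffer ? 0 : -ENOMEM;
	}

	static int my_trace_write(const void *data, unsigned long len)
	{
		struct ring_buffer_event *event;

		event = ring_buffer_lock_reserve(my_buffer, len);
		if (!event)
			return -EBUSY;

		memcpy(ring_buffer_event_data(event), data, len);

		return ring_buffer_unlock_commit(my_buffer, event);
	}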