Commit 2289d567 authored by Steven Rostedt (Red Hat), committed by Steven Rostedt

ring-buffer: Force inline of hotpath helper functions

There are several small helper functions in ring_buffer.c that are used in the
hot path. For some reason, even though they are marked inline, gcc tends not
to enforce it. Make sure these functions are always inlined.

Link: http://lkml.kernel.org/r/20161121183700.GW26852@two.firstfloor.org
Reported-by: Andi Kleen <andi@firstfloor.org>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
parent 52ffabe3
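
For context on the change below: in the kernel, __always_inline is a wrapper around gcc's always_inline function attribute, which turns the inline keyword from a hint into a requirement. A standalone sketch of the difference follows; the macro definition mirrors the kernel's compiler headers of this era, and the add_hint/add_forced/demo names are hypothetical, for illustration only.

/* Standalone illustration (not kernel code).
 * Compile with: gcc -O2 -c demo.c */

/* In the kernel this macro comes from the compiler headers
 * (e.g. include/linux/compiler-gcc.h); reproduced here so the
 * file stands alone. */
#define __always_inline inline __attribute__((always_inline))

/* Plain "inline" is only a hint: gcc may still emit an
 * out-of-line function and a call, depending on its heuristics. */
static inline int add_hint(int a, int b)
{
	return a + b;
}

/* always_inline makes gcc inline the body at every call site;
 * if it cannot (e.g. recursion), compilation fails with an error
 * rather than silently falling back to a call. */
static __always_inline int add_forced(int a, int b)
{
	return a + b;
}

int demo(int x)
{
	return add_hint(x, 1) + add_forced(x, 2);
}
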
@@ -1829,48 +1829,48 @@ void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
 }
 EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);
 
-static inline void *
+static __always_inline void *
 __rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
 {
 	return bpage->data + index;
 }
 
-static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
+static __always_inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
 {
 	return bpage->page->data + index;
 }
 
-static inline struct ring_buffer_event *
+static __always_inline struct ring_buffer_event *
 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return __rb_page_index(cpu_buffer->reader_page,
 			       cpu_buffer->reader_page->read);
 }
 
-static inline struct ring_buffer_event *
+static __always_inline struct ring_buffer_event *
 rb_iter_head_event(struct ring_buffer_iter *iter)
 {
 	return __rb_page_index(iter->head_page, iter->head);
 }
 
-static inline unsigned rb_page_commit(struct buffer_page *bpage)
+static __always_inline unsigned rb_page_commit(struct buffer_page *bpage)
 {
 	return local_read(&bpage->page->commit);
 }
 
 /* Size is determined by what has been committed */
-static inline unsigned rb_page_size(struct buffer_page *bpage)
+static __always_inline unsigned rb_page_size(struct buffer_page *bpage)
 {
 	return rb_page_commit(bpage);
 }
 
-static inline unsigned
+static __always_inline unsigned
 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return rb_page_commit(cpu_buffer->commit_page);
 }
 
-static inline unsigned
+static __always_inline unsigned
 rb_event_index(struct ring_buffer_event *event)
 {
 	unsigned long addr = (unsigned long)event;
...