Commit b971f061 authored by Robert Richter

Merge commit 'tip/tracing/core' into oprofile/core

Conflicts:
	drivers/oprofile/cpu_buffer.c
Signed-off-by: Robert Richter <robert.richter@amd.com>
parents cb6e943c c1ab9cab
@@ -159,7 +159,7 @@ int op_cpu_buffer_write_commit(struct op_entry *entry)
 struct op_sample *op_cpu_buffer_read_entry(struct op_entry *entry, int cpu)
 {
 	struct ring_buffer_event *e;
-	e = ring_buffer_consume(op_ring_buffer, cpu, NULL);
+	e = ring_buffer_consume(op_ring_buffer, cpu, NULL, NULL);
 	if (!e)
 		return NULL;
......
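The extra NULL above only satisfies ring_buffer_consume()'s new fourth parameter; oprofile does not act on the lost-event count. A minimal sketch of a consumer that does use it might look as follows (drain_cpu_samples() and process_sample() are illustrative names, not part of this patch):

/* Illustrative only: drain one CPU's buffer and report any drops. */
static void drain_cpu_samples(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *e;
	unsigned long lost = 0;
	u64 ts;

	while ((e = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
		/* payload layout is up to whoever filled the buffer */
		process_sample(ring_buffer_event_data(e));

		if (lost)
			pr_info("cpu%d: %lu events lost\n", cpu, lost);
	}
}

Passing NULL, as the oprofile code does, simply opts out of the accounting.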
@@ -58,6 +58,7 @@ struct trace_iterator {
 	/* The below is zeroed out in pipe_read */
 	struct trace_seq	seq;
 	struct trace_entry	*ent;
+	unsigned long		lost_events;
 	int			leftover;
 	int			cpu;
 	u64			ts;
......
@@ -465,8 +465,7 @@ static inline void __module_get(struct module *module)
 	if (module) {
 		preempt_disable();
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 		preempt_enable();
 	}
 }
@@ -480,8 +479,7 @@ static inline int try_module_get(struct module *module)
 	if (likely(module_is_live(module))) {
 		__this_cpu_inc(module->refptr->incs);
-		trace_module_get(module, _THIS_IP_,
-				 __this_cpu_read(module->refptr->incs));
+		trace_module_get(module, _THIS_IP_);
 	} else
 		ret = 0;
......
@@ -120,9 +120,11 @@ int ring_buffer_write(struct ring_buffer *buffer,
 		      unsigned long length, void *data);
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events);
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts);
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events);
 struct ring_buffer_iter *
 ring_buffer_read_start(struct ring_buffer *buffer, int cpu);
......
@@ -51,11 +51,14 @@ TRACE_EVENT(module_free,
 	TP_printk("%s", __get_str(name))
 );
+#ifdef CONFIG_MODULE_UNLOAD
+/* trace_module_get/put are only used if CONFIG_MODULE_UNLOAD is defined */
 DECLARE_EVENT_CLASS(module_refcnt,
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
-	TP_ARGS(mod, ip, refcnt),
+	TP_ARGS(mod, ip),
 	TP_STRUCT__entry(
 		__field(	unsigned long,	ip		)
@@ -65,7 +68,7 @@ DECLARE_EVENT_CLASS(module_refcnt,
 	TP_fast_assign(
 		__entry->ip	= ip;
-		__entry->refcnt	= refcnt;
+		__entry->refcnt	= __this_cpu_read(mod->refptr->incs) + __this_cpu_read(mod->refptr->decs);
 		__assign_str(name, mod->name);
 	),
@@ -75,17 +78,18 @@ DECLARE_EVENT_CLASS(module_refcnt,
 DEFINE_EVENT(module_refcnt, module_get,
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
 DEFINE_EVENT(module_refcnt, module_put,
-	TP_PROTO(struct module *mod, unsigned long ip, int refcnt),
+	TP_PROTO(struct module *mod, unsigned long ip),
-	TP_ARGS(mod, ip, refcnt)
+	TP_ARGS(mod, ip)
 );
+#endif /* CONFIG_MODULE_UNLOAD */
 TRACE_EVENT(module_request,
......
@@ -100,18 +100,7 @@ TRACE_EVENT(signal_deliver,
 		  __entry->sa_handler, __entry->sa_flags)
 );
-/**
- * signal_overflow_fail - called when signal queue is overflow
- * @sig: signal number
- * @group: signal to process group or not (bool)
- * @info: pointer to struct siginfo
- *
- * Kernel fails to generate 'sig' signal with 'info' siginfo, because
- * siginfo queue is overflow, and the signal is dropped.
- * 'group' is not 0 if the signal will be sent to a process group.
- * 'sig' is always one of RT signals.
- */
-TRACE_EVENT(signal_overflow_fail,
+DECLARE_EVENT_CLASS(signal_queue_overflow,
 	TP_PROTO(int sig, int group, struct siginfo *info),
@@ -134,6 +123,24 @@ TRACE_EVENT(signal_overflow_fail,
 		  __entry->sig, __entry->group, __entry->errno, __entry->code)
 );
+/**
+ * signal_overflow_fail - called when signal queue is overflow
+ * @sig: signal number
+ * @group: signal to process group or not (bool)
+ * @info: pointer to struct siginfo
+ *
+ * Kernel fails to generate 'sig' signal with 'info' siginfo, because
+ * siginfo queue is overflow, and the signal is dropped.
+ * 'group' is not 0 if the signal will be sent to a process group.
+ * 'sig' is always one of RT signals.
+ */
+DEFINE_EVENT(signal_queue_overflow, signal_overflow_fail,
+	TP_PROTO(int sig, int group, struct siginfo *info),
+	TP_ARGS(sig, group, info)
+);
 /**
  * signal_lose_info - called when siginfo is lost
  * @sig: signal number
@@ -145,28 +152,13 @@ TRACE_EVENT(signal_overflow_fail,
  * 'group' is not 0 if the signal will be sent to a process group.
  * 'sig' is always one of non-RT signals.
  */
-TRACE_EVENT(signal_lose_info,
+DEFINE_EVENT(signal_queue_overflow, signal_lose_info,
 	TP_PROTO(int sig, int group, struct siginfo *info),
-	TP_ARGS(sig, group, info),
-	TP_STRUCT__entry(
-		__field(	int,	sig	)
-		__field(	int,	group	)
-		__field(	int,	errno	)
-		__field(	int,	code	)
-	),
-	TP_fast_assign(
-		__entry->sig	= sig;
-		__entry->group	= group;
-		TP_STORE_SIGINFO(__entry, info);
-	),
-	TP_printk("sig=%d group=%d errno=%d code=%d",
-		  __entry->sig, __entry->group, __entry->errno, __entry->code)
+	TP_ARGS(sig, group, info)
 );
 #endif /* _TRACE_SIGNAL_H */
 /* This part must be outside protection */
......
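Both this file and the module.h change above rely on the same DECLARE_EVENT_CLASS/DEFINE_EVENT pattern: the field layout, assignment, and print format are written once in the class, and each event only restates its name and prototype. A generic sketch of the shape, with made-up names purely for illustration:

DECLARE_EVENT_CLASS(foo_template,

	TP_PROTO(int value),

	TP_ARGS(value),

	TP_STRUCT__entry(
		__field(	int,	value	)
	),

	TP_fast_assign(
		__entry->value = value;
	),

	TP_printk("value=%d", __entry->value)
);

/* Each event shares the class body; only the name differs. */
DEFINE_EVENT(foo_template, foo_start,
	TP_PROTO(int value),
	TP_ARGS(value)
);

DEFINE_EVENT(foo_template, foo_finish,
	TP_PROTO(int value),
	TP_ARGS(value)
);

The win is mostly generated-code size: the slow-path helpers are emitted once per class instead of once per event.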
@@ -154,9 +154,11 @@
  *
  *	field = (typeof(field))entry;
  *
- *	p = get_cpu_var(ftrace_event_seq);
+ *	p = &get_cpu_var(ftrace_event_seq);
  *	trace_seq_init(p);
- *	ret = trace_seq_printf(s, <TP_printk> "\n");
+ *	ret = trace_seq_printf(s, "%s: ", <call>);
+ *	if (ret)
+ *		ret = trace_seq_printf(s, <TP_printk> "\n");
  *	put_cpu();
  *	if (!ret)
  *		return TRACE_TYPE_PARTIAL_LINE;
@@ -450,38 +452,38 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *
  * static void ftrace_raw_event_<call>(proto)
  * {
+ *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
  *	struct ring_buffer_event *event;
  *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
  *	struct ring_buffer *buffer;
  *	unsigned long irq_flags;
+ *	int __data_size;
  *	int pc;
  *
  *	local_save_flags(irq_flags);
  *	pc = preempt_count();
  *
+ *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
+ *
  *	event = trace_current_buffer_lock_reserve(&buffer,
  *				  event_<call>.id,
- *				  sizeof(struct ftrace_raw_<call>),
+ *				  sizeof(*entry) + __data_size,
  *				  irq_flags, pc);
  *	if (!event)
  *		return;
  *	entry	= ring_buffer_event_data(event);
  *
- *	<assign>;  <-- Here we assign the entries by the __field and
+ *	{ <assign>; }  <-- Here we assign the entries by the __field and
  *			__array macros.
  *
- *	trace_current_buffer_unlock_commit(buffer, event, irq_flags, pc);
+ *	if (!filter_current_check_discard(buffer, event_call, entry, event))
+ *		trace_current_buffer_unlock_commit(buffer,
+ *						   event, irq_flags, pc);
  * }
  *
  * static int ftrace_raw_reg_event_<call>(struct ftrace_event_call *unused)
  * {
- *	int ret;
- *
- *	ret = register_trace_<call>(ftrace_raw_event_<call>);
- *	if (!ret)
- *		pr_info("event trace: Could not activate trace point "
- *			"probe to <call>");
- *	return ret;
+ *	return register_trace_<call>(ftrace_raw_event_<call>);
  * }
  *
  * static void ftrace_unreg_event_<call>(struct ftrace_event_call *unused)
@@ -493,6 +495,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *	.trace			= ftrace_raw_output_<call>, <-- stage 2
  * };
  *
+ * static const char print_fmt_<call>[] = <TP_printk>;
+ *
  * static struct ftrace_event_call __used
  * __attribute__((__aligned__(4)))
  * __attribute__((section("_ftrace_events"))) event_<call> = {
@@ -501,6 +505,8 @@ perf_trace_disable_##name(struct ftrace_event_call *unused) \
  *	.raw_init		= trace_event_raw_init,
  *	.regfunc		= ftrace_reg_event_<call>,
  *	.unregfunc		= ftrace_unreg_event_<call>,
+ *	.print_fmt		= print_fmt_<call>,
+ *	.define_fields		= ftrace_define_fields_<call>,
  * }
  *
  */
@@ -569,7 +575,6 @@ ftrace_raw_event_id_##call(struct ftrace_event_call *event_call,	\
 		return;						\
 	entry	= ring_buffer_event_data(event);		\
 								\
-								\
 	tstruct							\
 								\
 	{ assign; }						\
......
@@ -59,8 +59,6 @@
 #define CREATE_TRACE_POINTS
 #include <trace/events/module.h>
-EXPORT_TRACEPOINT_SYMBOL(module_get);
 #if 0
 #define DEBUGP printk
 #else
@@ -515,6 +513,9 @@ MODINFO_ATTR(srcversion);
 static char last_unloaded_module[MODULE_NAME_LEN+1];
 #ifdef CONFIG_MODULE_UNLOAD
+
+EXPORT_TRACEPOINT_SYMBOL(module_get);
+
 /* Init the unload section of the module. */
 static void module_unload_init(struct module *mod)
 {
@@ -867,8 +868,7 @@ void module_put(struct module *module)
 		smp_wmb(); /* see comment in module_refcount */
 		__this_cpu_inc(module->refptr->decs);
-		trace_module_put(module, _RET_IP_,
-				 __this_cpu_read(module->refptr->decs));
+		trace_module_put(module, _RET_IP_);
 		/* Maybe they're waiting for us to drop reference? */
 		if (unlikely(!module_is_live(module)))
 			wake_up_process(module->waiter);
......
@@ -319,6 +319,11 @@ EXPORT_SYMBOL_GPL(ring_buffer_event_data);
 #define TS_MASK		((1ULL << TS_SHIFT) - 1)
 #define TS_DELTA_TEST	(~TS_MASK)
+/* Flag when events were overwritten */
+#define RB_MISSED_EVENTS	(1 << 31)
+/* Missed count stored at end */
+#define RB_MISSED_STORED	(1 << 30)
 struct buffer_data_page {
 	u64		 time_stamp;	/* page time stamp */
 	local_t		 commit;	/* write committed index */
@@ -338,6 +343,7 @@ struct buffer_page {
 	local_t		 write;		/* index for next write */
 	unsigned	 read;		/* index for next read */
 	local_t		 entries;	/* entries on this page */
+	unsigned long	 real_end;	/* real end of data */
 	struct buffer_data_page *page;	/* Actual data page */
 };
@@ -417,6 +423,12 @@ int ring_buffer_print_page_header(struct trace_seq *s)
 			       (unsigned int)sizeof(field.commit),
 			       (unsigned int)is_signed_type(long));
+	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
+			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
+			       (unsigned int)offsetof(typeof(field), commit),
+			       1,
+			       (unsigned int)is_signed_type(long));
+
 	ret = trace_seq_printf(s, "\tfield: char data;\t"
 			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
 			       (unsigned int)offsetof(typeof(field), data),
@@ -440,6 +452,8 @@ struct ring_buffer_per_cpu {
 	struct buffer_page		*tail_page;	/* write to tail */
 	struct buffer_page		*commit_page;	/* committed pages */
 	struct buffer_page		*reader_page;
+	unsigned long			lost_events;
+	unsigned long			last_overrun;
 	local_t				commit_overrun;
 	local_t				overrun;
 	local_t				entries;
@@ -1761,6 +1775,13 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	event = __rb_page_index(tail_page, tail);
 	kmemcheck_annotate_bitfield(event, bitfield);
+	/*
+	 * Save the original length to the meta data.
+	 * This will be used by the reader to add lost event
+	 * counter.
+	 */
+	tail_page->real_end = tail;
+
 	/*
 	 * If this event is bigger than the minimum size, then
 	 * we need to be careful that we don't subtract the
@@ -2838,6 +2859,7 @@ static struct buffer_page *
 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	struct buffer_page *reader = NULL;
+	unsigned long overwrite;
 	unsigned long flags;
 	int nr_loops = 0;
 	int ret;
@@ -2879,6 +2901,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	local_set(&cpu_buffer->reader_page->write, 0);
 	local_set(&cpu_buffer->reader_page->entries, 0);
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
+	cpu_buffer->reader_page->real_end = 0;
 spin:
 	/*
@@ -2898,6 +2921,18 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	/* The reader page will be pointing to the new head */
 	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);
+	/*
+	 * We want to make sure we read the overruns after we set up our
+	 * pointers to the next object. The writer side does a
+	 * cmpxchg to cross pages which acts as the mb on the writer
+	 * side. Note, the reader will constantly fail the swap
+	 * while the writer is updating the pointers, so this
+	 * guarantees that the overwrite recorded here is the one we
+	 * want to compare with the last_overrun.
+	 */
+	smp_mb();
+	overwrite = local_read(&(cpu_buffer->overrun));
+
 	/*
 	 * Here's the tricky part.
 	 *
@@ -2929,6 +2964,11 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->reader_page = reader;
 	rb_reset_reader_page(cpu_buffer);
+	if (overwrite != cpu_buffer->last_overrun) {
+		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
+		cpu_buffer->last_overrun = overwrite;
+	}
+
 	goto again;
 out:
@@ -3005,8 +3045,14 @@ static void rb_advance_iter(struct ring_buffer_iter *iter)
 	rb_advance_iter(iter);
 }
+static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
+{
+	return cpu_buffer->lost_events;
+}
+
 static struct ring_buffer_event *
-rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
+rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
+	       unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct buffer_page *reader;
@@ -3058,6 +3104,8 @@ rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts)
 			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
 							 cpu_buffer->cpu, ts);
 		}
+		if (lost_events)
+			*lost_events = rb_lost_events(cpu_buffer);
 		return event;
 	default:
@@ -3168,12 +3216,14 @@ static inline int rb_ok_to_lock(void)
  * @buffer: The ring buffer to read
  * @cpu: The cpu to peak at
  * @ts: The timestamp counter of this event.
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * This will return the event that will be read next, but does
  * not consume the data.
  */
 struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+		 unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
 	struct ring_buffer_event *event;
@@ -3188,7 +3238,7 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
 	local_irq_save(flags);
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
-	event = rb_buffer_peek(cpu_buffer, ts);
+	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
 	if (event && event->type_len == RINGBUF_TYPE_PADDING)
 		rb_advance_reader(cpu_buffer);
 	if (dolock)
@@ -3230,13 +3280,17 @@ ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
 /**
  * ring_buffer_consume - return an event and consume it
  * @buffer: The ring buffer to get the next event from
+ * @cpu: the cpu to read the buffer from
+ * @ts: a variable to store the timestamp (may be NULL)
+ * @lost_events: a variable to store if events were lost (may be NULL)
  *
  * Returns the next event in the ring buffer, and that event is consumed.
  * Meaning, that sequential reads will keep returning a different event,
  * and eventually empty the ring buffer if the producer is slower.
  */
 struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
+ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+		    unsigned long *lost_events)
 {
 	struct ring_buffer_per_cpu *cpu_buffer;
 	struct ring_buffer_event *event = NULL;
@@ -3257,9 +3311,11 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
 	if (dolock)
 		spin_lock(&cpu_buffer->reader_lock);
-	event = rb_buffer_peek(cpu_buffer, ts);
-	if (event)
+	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
+	if (event) {
+		cpu_buffer->lost_events = 0;
 		rb_advance_reader(cpu_buffer);
+	}
 	if (dolock)
 		spin_unlock(&cpu_buffer->reader_lock);
@@ -3408,6 +3464,9 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
+	cpu_buffer->lost_events = 0;
+	cpu_buffer->last_overrun = 0;
 	rb_head_page_activate(cpu_buffer);
 }
@@ -3683,6 +3742,7 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	struct ring_buffer_event *event;
 	struct buffer_data_page *bpage;
 	struct buffer_page *reader;
+	unsigned long missed_events;
 	unsigned long flags;
 	unsigned int commit;
 	unsigned int read;
@@ -3719,6 +3779,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 	read = reader->read;
 	commit = rb_page_commit(reader);
+	/* Check if any events were dropped */
+	missed_events = cpu_buffer->lost_events;
+
 	/*
 	 * If this page has been partially read or
 	 * if len is not big enough to read the rest of the page or
@@ -3779,9 +3842,35 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		local_set(&reader->entries, 0);
 		reader->read = 0;
 		*data_page = bpage;
+
+		/*
+		 * Use the real_end for the data size,
+		 * This gives us a chance to store the lost events
+		 * on the page.
+		 */
+		if (reader->real_end)
+			local_set(&bpage->commit, reader->real_end);
 	}
 	ret = read;
+	cpu_buffer->lost_events = 0;
+	/*
+	 * Set a flag in the commit field if we lost events
+	 */
+	if (missed_events) {
+		commit = local_read(&bpage->commit);
+
+		/* If there is room at the end of the page to save the
+		 * missed events, then record it there.
+		 */
+		if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) {
+			memcpy(&bpage->data[commit], &missed_events,
+			       sizeof(missed_events));
+			local_add(RB_MISSED_STORED, &bpage->commit);
+		}
+		local_add(RB_MISSED_EVENTS, &bpage->commit);
+	}
+
 out_unlock:
 	spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
......
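On the read-page path above, the lost-event information ends up encoded in the high bits of bpage->commit, with the count optionally appended just past the real data. A sketch of how a consumer of ring_buffer_read_page() might decode it (the helper itself is hypothetical; only the flag layout and field names come from this patch):

/* Hypothetical helper: recover the missed-event count from a read page. */
static unsigned long bpage_missed_events(struct buffer_data_page *bpage)
{
	unsigned long commit = local_read(&bpage->commit);
	unsigned long missed = 0;

	if (commit & RB_MISSED_EVENTS) {
		if (commit & RB_MISSED_STORED) {
			/* the count was memcpy'd just past the real data */
			unsigned long size = commit &
				~(RB_MISSED_EVENTS | RB_MISSED_STORED);
			memcpy(&missed, &bpage->data[size], sizeof(missed));
		} else {
			/* events were lost, but there was no room for the count */
			missed = (unsigned long)-1;
		}
	}
	return missed;
}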
@@ -81,7 +81,7 @@ static enum event_status read_event(int cpu)
 	int *entry;
 	u64 ts;
-	event = ring_buffer_consume(buffer, cpu, &ts);
+	event = ring_buffer_consume(buffer, cpu, &ts, NULL);
 	if (!event)
 		return EVENT_DROPPED;
......
@@ -1545,7 +1545,8 @@ static void trace_iterator_increment(struct trace_iterator *iter)
 }
 static struct trace_entry *
-peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
+peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
+		unsigned long *lost_events)
 {
 	struct ring_buffer_event *event;
 	struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
@@ -1556,7 +1557,8 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 	if (buf_iter)
 		event = ring_buffer_iter_peek(buf_iter, ts);
 	else
-		event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
+		event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
+					 lost_events);
 	ftrace_enable_cpu();
@@ -1564,10 +1566,12 @@ peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
 }
 static struct trace_entry *
-__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
+__find_next_entry(struct trace_iterator *iter, int *ent_cpu,
+		  unsigned long *missing_events, u64 *ent_ts)
 {
 	struct ring_buffer *buffer = iter->tr->buffer;
 	struct trace_entry *ent, *next = NULL;
+	unsigned long lost_events, next_lost = 0;
 	int cpu_file = iter->cpu_file;
 	u64 next_ts = 0, ts;
 	int next_cpu = -1;
@@ -1580,7 +1584,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (cpu_file > TRACE_PIPE_ALL_CPU) {
 		if (ring_buffer_empty_cpu(buffer, cpu_file))
 			return NULL;
-		ent = peek_next_entry(iter, cpu_file, ent_ts);
+		ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
 		if (ent_cpu)
 			*ent_cpu = cpu_file;
@@ -1592,7 +1596,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 		if (ring_buffer_empty_cpu(buffer, cpu))
 			continue;
-		ent = peek_next_entry(iter, cpu, &ts);
+		ent = peek_next_entry(iter, cpu, &ts, &lost_events);
 		/*
 		 * Pick the entry with the smallest timestamp:
@@ -1601,6 +1605,7 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 			next = ent;
 			next_cpu = cpu;
 			next_ts = ts;
+			next_lost = lost_events;
 		}
 	}
@@ -1610,6 +1615,9 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 	if (ent_ts)
 		*ent_ts = next_ts;
+	if (missing_events)
+		*missing_events = next_lost;
 	return next;
 }
@@ -1617,13 +1625,14 @@ __find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
 					  int *ent_cpu, u64 *ent_ts)
 {
-	return __find_next_entry(iter, ent_cpu, ent_ts);
+	return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
 }
 /* Find the next real entry, and increment the iterator to the next entry */
 static void *find_next_entry_inc(struct trace_iterator *iter)
 {
-	iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
+	iter->ent = __find_next_entry(iter, &iter->cpu,
+				      &iter->lost_events, &iter->ts);
 	if (iter->ent)
 		trace_iterator_increment(iter);
@@ -1635,7 +1644,8 @@ static void trace_consume(struct trace_iterator *iter)
 {
 	/* Don't allow ftrace to trace into the ring buffers */
 	ftrace_disable_cpu();
-	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
+	ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
+			    &iter->lost_events);
 	ftrace_enable_cpu();
 }
@@ -2030,6 +2040,10 @@ static enum print_line_t print_trace_line(struct trace_iterator *iter)
 {
 	enum print_line_t ret;
+	if (iter->lost_events)
+		trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
+				 iter->cpu, iter->lost_events);
+
 	if (iter->trace && iter->trace->print_line) {
 		ret = iter->trace->print_line(iter);
 		if (ret != TRACE_TYPE_UNHANDLED)
......
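With the print_trace_line() hunk above, a drop shows up in the text output as its own line before the next event, roughly like this (the count and CPU number are illustrative):

CPU:1 [LOST 257 EVENTS]

The count comes from iter->lost_events, which find_next_entry_inc() fills in from the ring buffer's per-cpu lost-event accounting.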
@@ -490,9 +490,10 @@ get_return_for_leaf(struct trace_iterator *iter,
 			 * We need to consume the current entry to see
 			 * the next one.
 			 */
-			ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
+			ring_buffer_consume(iter->tr->buffer, iter->cpu,
+					    NULL, NULL);
 			event = ring_buffer_peek(iter->tr->buffer, iter->cpu,
-						 NULL);
+						 NULL, NULL);
 		}
 		if (!event)
......
@@ -30,7 +30,7 @@ static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
 	struct trace_entry *entry;
 	unsigned int loops = 0;
-	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
+	while ((event = ring_buffer_consume(tr->buffer, cpu, NULL, NULL))) {
 		entry = ring_buffer_event_data(event);
 		/*
......