Commit ec835f81 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'trace-v4.15-rc4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace

Pull tracing fixes from Steven Rostedt:
 "Two more small fixes

   - The conversion of enums into their actual numbers to display in the
     event format file had an off-by-one bug, that could cause an enum
     not to be converted, and break user space parsing tools.

   - A fix to a previous fix to bring back the context recursion checks.
     The interrupt case checks for NMI, IRQ and softirq, but the softirq
     returned the same number regardless if it was set or not, although
     the logic would force it to be set if it were hit"

* tag 'trace-v4.15-rc4-3' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-trace:
  tracing: Fix converting enum's from the map in trace_event_eval_update()
  ring-buffer: Fix duplicate results in mapping context to bits in recursive lock
parents 672bb0fa 1ebe1eaf
...@@ -2579,8 +2579,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) ...@@ -2579,8 +2579,7 @@ trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
bit = RB_CTX_NORMAL; bit = RB_CTX_NORMAL;
else else
bit = pc & NMI_MASK ? RB_CTX_NMI : bit = pc & NMI_MASK ? RB_CTX_NMI :
pc & HARDIRQ_MASK ? RB_CTX_IRQ : pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
pc & SOFTIRQ_OFFSET ? 2 : RB_CTX_SOFTIRQ;
if (unlikely(val & (1 << bit))) if (unlikely(val & (1 << bit)))
return 1; return 1;
......
...@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) ...@@ -2213,6 +2213,7 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
{ {
struct trace_event_call *call, *p; struct trace_event_call *call, *p;
const char *last_system = NULL; const char *last_system = NULL;
bool first = false;
int last_i; int last_i;
int i; int i;
...@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len) ...@@ -2220,15 +2221,28 @@ void trace_event_eval_update(struct trace_eval_map **map, int len)
list_for_each_entry_safe(call, p, &ftrace_events, list) { list_for_each_entry_safe(call, p, &ftrace_events, list) {
/* events are usually grouped together with systems */ /* events are usually grouped together with systems */
if (!last_system || call->class->system != last_system) { if (!last_system || call->class->system != last_system) {
first = true;
last_i = 0; last_i = 0;
last_system = call->class->system; last_system = call->class->system;
} }
/*
* Since calls are grouped by systems, the likelihood that the
* next call in the iteration belongs to the same system as the
* previous call is high. As an optimization, we skip searching
* for a map[] that matches the call's system if the last call
* was from the same system. That's what last_i is for. If the
* call has the same system as the previous call, then last_i
* will be the index of the first map[] that has a matching
* system.
*/
for (i = last_i; i < len; i++) { for (i = last_i; i < len; i++) {
if (call->class->system == map[i]->system) { if (call->class->system == map[i]->system) {
/* Save the first system if need be */ /* Save the first system if need be */
if (!last_i) if (first) {
last_i = i; last_i = i;
first = false;
}
update_event_printk(call, map[i]); update_event_printk(call, map[i]);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment