Commit 0e417fe1 authored by Ingo Molnar


Merge branch 'core/locking' of git://git.kernel.org/pub/scm/linux/kernel/git/frederic/random-tracing into core/locking
parents 53ba4f2f 913769f2
@@ -2298,7 +2298,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
-		debug_atomic_inc(redundant_hardirqs_on);
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
 		return;
 	}
 	/* we'll do an OFF -> ON transition: */
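For context (this note is not part of the commit itself): trace_hardirqs_on_caller() can run with both interrupts and preemption enabled, so the old debug_atomic_inc(), which warns unless interrupts are disabled, no longer fits here; the new code calls this_cpu_inc() directly and accepts that an occasional lost increment is harmless for a statistic. The sketch below, using a hypothetical per-CPU variable (sample_stats and both helper functions are invented for illustration), contrasts the open-coded per-CPU increment with this_cpu_inc().

#include <linux/percpu.h>

/* Illustrative sketch only; sample_stats and the helpers are hypothetical. */
DEFINE_PER_CPU(struct lockdep_stats, sample_stats);

static void open_coded_inc(void)
{
	struct lockdep_stats *stats;

	/*
	 * With preemption enabled, the task can migrate to another CPU
	 * between fetching the per-CPU pointer and doing the increment,
	 * so this read-modify-write can clash with that CPU's own
	 * updates; this is why the old debug_atomic_inc() insisted on
	 * interrupts being disabled.
	 */
	stats = &__get_cpu_var(sample_stats);
	stats->redundant_hardirqs_on++;
}

static void lossy_but_cheap_inc(void)
{
	/*
	 * this_cpu_inc() combines the CPU lookup and the increment into
	 * one preemption-safe operation (a single instruction on x86).
	 * Any remaining race against interrupts can at worst drop an
	 * occasional hit, which is acceptable for a debug statistic.
	 */
	this_cpu_inc(sample_stats.redundant_hardirqs_on);
}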
@@ -140,19 +140,13 @@ struct lockdep_stats {
 DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);
 
 #define debug_atomic_inc(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr++;				\
+	this_cpu_inc(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_dec(ptr)			{		\
-	struct lockdep_stats *__cpu_lockdep_stats;		\
-								\
 	WARN_ON_ONCE(!irqs_disabled());				\
-	__cpu_lockdep_stats = &__get_cpu_var(lockdep_stats);	\
-	__cpu_lockdep_stats->ptr--;				\
+	this_cpu_dec(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_read(ptr)		({		\
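As a usage note (hypothetical names, not taken from the commit): the converted macros follow the usual per-CPU statistics pattern, where each CPU bumps only its own copy of a counter and readers fold the copies together over all possible CPUs, much as debug_atomic_read() above does for lockdep_stats. A minimal sketch of that pattern, assuming an invented demo_stats structure:

#include <linux/percpu.h>

/* Hypothetical example of the per-CPU counter pattern; not from this commit. */
struct demo_stats {
	unsigned long	foo_events;
};

DEFINE_PER_CPU(struct demo_stats, demo_stats);

/* Writer side: bump the current CPU's copy, no locking needed. */
#define demo_stats_inc(field)	this_cpu_inc(demo_stats.field)

/* Reader side: fold all per-CPU copies into one approximate total. */
static unsigned long demo_stats_read_foo(void)
{
	unsigned long sum = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		sum += per_cpu(demo_stats, cpu).foo_events;

	return sum;
}

A caller would then write demo_stats_inc(foo_events); the total read back may be momentarily stale, which is fine for debug statistics.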