Commit ca58abcb authored by Peter Zijlstra, committed by Linus Torvalds

lockdep: sanitise CONFIG_PROVE_LOCKING

Ensure that all of the lock dependency tracking code is under
CONFIG_PROVE_LOCKING.  This allows us to use the held lock tracking code for
other purposes.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Acked-by: Jason Baron <jbaron@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 21f8ca3b
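
To make the intent of the commit message concrete, here is a minimal, self-contained sketch of the layering it establishes. All names and build commands below are illustrative, not the kernel's: code that builds the dependency graph is compiled only when CONFIG_PROVE_LOCKING is defined, while held-lock bookkeeping stays available to any CONFIG_LOCKDEP user.

/*
 * Illustrative sketch only -- simplified names, not kernel code.
 * Dependency-graph helpers are compiled only under CONFIG_PROVE_LOCKING;
 * held-lock bookkeeping is available to any CONFIG_LOCKDEP user.
 *
 * Hypothetical build examples:
 *   cc -DCONFIG_LOCKDEP -DCONFIG_PROVE_LOCKING sketch.c   # full proving
 *   cc -DCONFIG_LOCKDEP sketch.c                          # tracking only
 */
#include <stdio.h>

#ifdef CONFIG_LOCKDEP
/* Held-lock tracking: wanted by users beyond lock-order proving. */
static void track_held_lock(const char *name)
{
        printf("holding %s\n", name);
}
#endif

#ifdef CONFIG_PROVE_LOCKING
/* Dependency recording: only meaningful when proving lock ordering. */
static void record_dependency(const char *prev, const char *next)
{
        printf("dependency %s -> %s\n", prev, next);
}
#endif

int main(void)
{
#ifdef CONFIG_LOCKDEP
        track_held_lock("A");
#endif
#ifdef CONFIG_PROVE_LOCKING
        record_dependency("A", "B");
#endif
        return 0;
}

The hunks below apply this split in kernel/lockdep.c and adjust two guards in kernel/spinlock.c accordingly.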
kernel/lockdep.c

@@ -95,6 +95,7 @@ static int lockdep_initialized;
 unsigned long nr_list_entries;
 static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Allocate a lockdep entry. (assumes the graph_lock held, returns
  * with NULL on failure)
@@ -111,6 +112,7 @@ static struct lock_list *alloc_list_entry(void)
         }
         return list_entries + nr_list_entries++;
 }
+#endif
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -140,7 +142,9 @@ LIST_HEAD(all_lock_classes);
 static struct list_head classhash_table[CLASSHASH_SIZE];
 
 unsigned long nr_lock_chains;
+#ifdef CONFIG_PROVE_LOCKING
 static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
+#endif
 
 /*
  * We put the lock dependency chains into a hash-table as well, to cache
@@ -482,6 +486,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
         }
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Add a new dependency to the head of the list:
  */
@@ -541,6 +546,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
         return 0;
 }
+#endif
 
 static void print_kernel_version(void)
 {
@@ -549,6 +555,7 @@ static void print_kernel_version(void)
                 init_utsname()->version);
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * When a circular dependency is detected, print the
  * header first:
@@ -639,6 +646,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
         }
         return 1;
 }
+#endif
 
 static int very_verbose(struct lock_class *class)
 {
@@ -823,6 +831,7 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 #endif
 
+#ifdef CONFIG_PROVE_LOCKING
 static int
 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev,
                    struct held_lock *next)
@@ -1087,7 +1096,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
         return 0;
 }
-
+#endif
 /*
  * Is this the address of a static object:
@@ -1307,6 +1316,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
         return class;
 }
 
+#ifdef CONFIG_PROVE_LOCKING
 /*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
@@ -1381,6 +1391,7 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
         return 1;
 }
+#endif
 
 /*
  * We are building curr_chain_key incrementally, so double-check
...@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock) ...@@ -88,7 +88,7 @@ unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
* _raw_spin_lock_flags() code, because lockdep assumes * _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire: * that interrupts are not re-enabled during lock-acquire:
*/ */
#ifdef CONFIG_PROVE_LOCKING #ifdef CONFIG_LOCKDEP
_raw_spin_lock(lock); _raw_spin_lock(lock);
#else #else
_raw_spin_lock_flags(lock, &flags); _raw_spin_lock_flags(lock, &flags);
...@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas ...@@ -305,7 +305,7 @@ unsigned long __lockfunc _spin_lock_irqsave_nested(spinlock_t *lock, int subclas
* _raw_spin_lock_flags() code, because lockdep assumes * _raw_spin_lock_flags() code, because lockdep assumes
* that interrupts are not re-enabled during lock-acquire: * that interrupts are not re-enabled during lock-acquire:
*/ */
#ifdef CONFIG_PROVE_SPIN_LOCKING #ifdef CONFIG_LOCKDEP
_raw_spin_lock(lock); _raw_spin_lock(lock);
#else #else
_raw_spin_lock_flags(lock, &flags); _raw_spin_lock_flags(lock, &flags);
......
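
For context on the two kernel/spinlock.c hunks above: once lockdep tracks lock acquisitions at all (CONFIG_LOCKDEP), the plain _raw_spin_lock() path must be taken, not only when full proving is enabled, because lockdep assumes interrupts stay disabled across lock-acquire. The sketch below is a rough reconstruction of the surrounding function, assembled from the visible context lines; details outside the hunk are approximate, and it is kernel-internal code, not a standalone program.

unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
{
        unsigned long flags;

        local_irq_save(flags);
        preempt_disable();
        spin_acquire(&lock->dep_map, 0, 0, _RET_IP_);
        /*
         * lockdep assumes interrupts are not re-enabled during
         * lock-acquire, so take the plain raw lock whenever lockdep
         * is tracking acquisitions:
         */
#ifdef CONFIG_LOCKDEP
        _raw_spin_lock(lock);                   /* irqs stay off while spinning */
#else
        _raw_spin_lock_flags(lock, &flags);     /* may briefly re-enable irqs */
#endif
        return flags;
}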