Commit 54dbf96c authored by Paul E. McKenney, committed by Ingo Molnar

rcu: Suppress RCU lockdep warnings during early boot

RCU is used during very early boot, before RCU and lockdep have
been initialized.  So make the underlying primitives
(rcu_read_lock_held(), rcu_read_lock_bh_held(),
rcu_read_lock_sched_held(), and rcu_dereference_check()) check
for early boot via the rcu_scheduler_active flag.  This will
suppress false positives.

Also introduce a debug_lockdep_rcu_enabled() static inline
helper function, which tags the CONTINUE_PROVE_RCU case as
likely(), as suggested by Ingo Molnar.
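
For illustration, a minimal sketch of the kind of call site this change protects; the names (my_cfg, global_cfg, cfg_lock, cfg_get) are hypothetical and not part of this patch:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct my_cfg { int value; };

    static struct my_cfg *global_cfg;    /* hypothetical RCU-protected pointer */
    static DEFINE_SPINLOCK(cfg_lock);    /* hypothetical update-side lock */

    static struct my_cfg *cfg_get(void)
    {
            /*
             * Legal either inside an RCU read-side critical section or
             * with cfg_lock held.  If this runs during very early boot,
             * neither condition may be visible to lockdep yet, so the old
             * "debug_locks && !(c)" test could emit a spurious
             * lockdep_rcu_dereference() splat.  With the new
             * debug_lockdep_rcu_enabled() gate, the check stays disabled
             * until rcu_scheduler_active is set.
             */
            return rcu_dereference_check(global_cfg,
                                         rcu_read_lock_held() ||
                                         lockdep_is_held(&cfg_lock));
    }
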
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: laijs@cn.fujitsu.com
Cc: dipankar@in.ibm.com
Cc: mathieu.desnoyers@polymtl.ca
Cc: josh@joshtriplett.org
Cc: dvhltc@us.ibm.com
Cc: niv@us.ibm.com
Cc: peterz@infradead.org
Cc: rostedt@goodmis.org
Cc: Valdis.Kletnieks@vt.edu
Cc: dhowells@redhat.com
LKML-Reference: <1267631219-8713-2-git-send-email-paulmck@linux.vnet.ibm.com>
[ v2: removed incomplete debug_lockdep_rcu_update() bits ]
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 8d53dd54
@@ -97,6 +97,11 @@ extern struct lockdep_map rcu_sched_lock_map;
 # define rcu_read_release_sched() \
 	lock_release(&rcu_sched_lock_map, 1, _THIS_IP_)
 
+static inline int debug_lockdep_rcu_enabled(void)
+{
+	return likely(rcu_scheduler_active && debug_locks);
+}
+
 /**
  * rcu_read_lock_held - might we be in RCU read-side critical section?
  *
@@ -104,12 +109,14 @@ extern struct lockdep_map rcu_sched_lock_map;
  * an RCU read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_lock_map);
 }
 
 /**
@@ -119,12 +126,14 @@ static inline int rcu_read_lock_held(void)
  * an RCU-bh read-side critical section.  In absence of CONFIG_PROVE_LOCKING,
  * this assumes we are in an RCU-bh read-side critical section unless it can
  * prove otherwise.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 static inline int rcu_read_lock_bh_held(void)
 {
-	if (debug_locks)
-		return lock_is_held(&rcu_bh_lock_map);
-	return 1;
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
+	return lock_is_held(&rcu_bh_lock_map);
 }
 
 /**
@@ -135,15 +144,19 @@ static inline int rcu_read_lock_bh_held(void)
  * this assumes we are in an RCU-sched read-side critical section unless it
  * can prove otherwise.  Note that disabling of preemption (including
  * disabling irqs) counts as an RCU-sched read-side critical section.
+ *
+ * Check rcu_scheduler_active to prevent false positives during boot.
  */
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
 	int lockdep_opinion = 0;
 
+	if (!debug_lockdep_rcu_enabled())
+		return 1;
 	if (debug_locks)
 		lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
-	return lockdep_opinion || preempt_count() != 0 || !rcu_scheduler_active;
+	return lockdep_opinion || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -174,7 +187,7 @@ static inline int rcu_read_lock_bh_held(void)
 #ifdef CONFIG_PREEMPT
 static inline int rcu_read_lock_sched_held(void)
 {
-	return preempt_count() != 0 || !rcu_scheduler_active;
+	return !rcu_scheduler_active || preempt_count() != 0;
 }
 #else /* #ifdef CONFIG_PREEMPT */
 static inline int rcu_read_lock_sched_held(void)
@@ -198,7 +211,7 @@ static inline int rcu_read_lock_sched_held(void)
  */
 #define rcu_dereference_check(p, c) \
 	({ \
-		if (debug_locks && !(c)) \
+		if (debug_lockdep_rcu_enabled() && !(c)) \
 			lockdep_rcu_dereference(__FILE__, __LINE__); \
 		rcu_dereference_raw(p); \
 	})
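
A usage sketch for the *_held() helpers (the caller below is hypothetical, not from this patch): with the rcu_scheduler_active gate, a debug assertion like this can no longer fire before the scheduler is running:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>

    /*
     * Must be called with preemption disabled, i.e. from an RCU-sched
     * read-side critical section.  During early boot,
     * rcu_read_lock_sched_held() now returns 1 unconditionally, so this
     * warning cannot trigger before lockdep and RCU are initialized.
     */
    static void update_example_state(void)
    {
            WARN_ON_ONCE(!rcu_read_lock_sched_held());
            /* ... touch per-CPU or preempt-protected state here ... */
    }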