Commit 2c42818e authored by Paul E. McKenney

rcu: Abstract common code for RCU grace-period-wait primitives

Pull the code that waits for an RCU grace period into a single function,
which is then called by synchronize_rcu() and friends in the case of
TREE_RCU and TREE_PREEMPT_RCU, and from rcu_barrier() and friends in
the case of TINY_RCU and TINY_PREEMPT_RCU.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent f039d1f1
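
In short: each flavor's open-coded completion-based wait is replaced by a single helper, wait_rcu_gp(), which takes the flavor's call_rcu()-style function as a parameter. A condensed sketch of the resulting pattern (the actual definitions appear in the hunks below):

/* Accept any function with the call_rcu() signature. */
typedef void call_rcu_func_t(struct rcu_head *head,
			     void (*func)(struct rcu_head *head));

/*
 * Queue a callback through crf() and block until it is invoked,
 * i.e. until the corresponding grace period has elapsed.
 */
void wait_rcu_gp(call_rcu_func_t crf);

/* Grace-period-wait primitives then collapse to one-liners, e.g.: */
void synchronize_sched(void)
{
	if (rcu_blocking_is_gp())
		return;
	wait_rcu_gp(call_rcu_sched);
}
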
@@ -66,11 +66,73 @@ static inline void rcutorture_record_progress(unsigned long vernum)
#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (a) - (b))
/* Exported common interfaces */
#ifdef CONFIG_PREEMPT_RCU
/**
* call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all pre-existing RCU read-side
* critical sections have completed. However, the callback function
* might well execute concurrently with RCU read-side critical sections
* that started after call_rcu() was invoked. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
extern void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define call_rcu call_rcu_sched
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
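
For illustration only, a minimal update-side sketch of call_rcu(): the protected structure embeds a struct rcu_head, and the callback frees the structure once a grace period has elapsed. The names foo, foo_reclaim, and foo_remove are invented for this example.

#include <linux/rcupdate.h>	/* call_rcu(), struct rcu_head */
#include <linux/slab.h>		/* kfree() */

struct foo {
	int data;
	struct rcu_head rcu;		/* storage used to queue the callback */
};

static void foo_reclaim(struct rcu_head *head)
{
	struct foo *fp = container_of(head, struct foo, rcu);

	kfree(fp);			/* safe: all pre-existing readers have finished */
}

static void foo_remove(struct foo *fp)
{
	/* ... after unlinking fp from all RCU-protected structures ... */
	call_rcu(&fp->rcu, foo_reclaim);
}
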
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_bh() assumes
* that the read-side critical sections end on completion of a softirq
* handler. This means that read-side critical sections in process
* context must not be interrupted by softirqs. This interface is to be
* used when most of the read-side critical sections are in softirq context.
* RCU read-side critical sections are delimited by :
* - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
* OR
* - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
* These may be nested.
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
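
A hypothetical read-side counterpart for the _bh flavor, reusing the struct foo sketch above; in process context the critical section is bracketed by rcu_read_lock_bh()/rcu_read_unlock_bh() so that it cannot be interrupted by softirqs (gbl_foo is an invented pointer):

static struct foo __rcu *gbl_foo;

static int foo_get_data(void)
{
	struct foo *fp;
	int data = -1;

	rcu_read_lock_bh();		/* begin _bh read-side critical section */
	fp = rcu_dereference_bh(gbl_foo);
	if (fp)
		data = fp->data;
	rcu_read_unlock_bh();		/* end critical section */
	return data;
}
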
/**
* call_rcu_sched() - Queue an RCU for invocation after sched grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_sched() assumes
* that the read-side critical sections end on enabling of preemption
* or on voluntary preemption.
* RCU read-side critical sections are delimited by :
* - rcu_read_lock_sched() and rcu_read_unlock_sched(),
* OR
* anything that disables preemption.
* These may be nested.
*/
extern void call_rcu_sched(struct rcu_head *head,
			   void (*func)(struct rcu_head *rcu));
extern void synchronize_sched(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
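
Similarly, a sched-flavor reader (again a hypothetical sketch reusing gbl_foo from above) uses rcu_read_lock_sched()/rcu_read_unlock_sched(), or anything else that disables preemption:

static int foo_get_data_sched(void)
{
	struct foo *fp;
	int data = -1;

	rcu_read_lock_sched();		/* disables preemption */
	fp = rcu_dereference_sched(gbl_foo);
	if (fp)
		data = fp->data;
	rcu_read_unlock_sched();
	return data;
}
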
static inline void __rcu_read_lock_bh(void)
{
@@ -143,6 +205,15 @@ static inline void rcu_exit_nohz(void)
#endif /* #else #ifdef CONFIG_NO_HZ */
/*
* Infrastructure to implement the synchronize_() primitives in
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
typedef void call_rcu_func_t(struct rcu_head *head,
void (*func)(struct rcu_head *head));
void wait_rcu_gp(call_rcu_func_t crf);
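
Any flavor that supplies a call_rcu()-style function can reuse this helper for its blocking wait; a hypothetical one-liner (synchronize_myflavor and call_rcu_myflavor are invented names):

void synchronize_myflavor(void)
{
	wait_rcu_gp(call_rcu_myflavor);	/* block for one myflavor grace period */
}
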
#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU) || defined(CONFIG_TINY_PREEMPT_RCU)
@@ -723,61 +794,6 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
#define RCU_INIT_POINTER(p, v) \
	p = (typeof(*v) __force __rcu *)(v)
/* Infrastructure to implement the synchronize_() primitives. */
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
};
extern void wakeme_after_rcu(struct rcu_head *head);
#ifdef CONFIG_PREEMPT_RCU
/**
* call_rcu() - Queue an RCU callback for invocation after a grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all pre-existing RCU read-side
* critical sections have completed. However, the callback function
* might well execute concurrently with RCU read-side critical sections
* that started after call_rcu() was invoked. RCU read-side critical
* sections are delimited by rcu_read_lock() and rcu_read_unlock(),
* and may be nested.
*/
extern void call_rcu(struct rcu_head *head,
void (*func)(struct rcu_head *head));
#else /* #ifdef CONFIG_PREEMPT_RCU */
/* In classic RCU, call_rcu() is just call_rcu_sched(). */
#define call_rcu call_rcu_sched
#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
/**
* call_rcu_bh() - Queue an RCU for invocation after a quicker grace period.
* @head: structure to be used for queueing the RCU updates.
* @func: actual callback function to be invoked after the grace period
*
* The callback function will be invoked some time after a full grace
* period elapses, in other words after all currently executing RCU
* read-side critical sections have completed. call_rcu_bh() assumes
* that the read-side critical sections end on completion of a softirq
* handler. This means that read-side critical sections in process
* context must not be interrupted by softirqs. This interface is to be
* used when most of the read-side critical sections are in softirq context.
* RCU read-side critical sections are delimited by :
* - rcu_read_lock() and rcu_read_unlock(), if in interrupt context.
* OR
* - rcu_read_lock_bh() and rcu_read_unlock_bh(), if in process context.
* These may be nested.
*/
extern void call_rcu_bh(struct rcu_head *head,
void (*func)(struct rcu_head *head));
/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part of the
...
@@ -31,6 +31,16 @@ static inline void rcu_init(void)
{
}
static inline void rcu_barrier_bh(void)
{
wait_rcu_gp(call_rcu_bh);
}
static inline void rcu_barrier_sched(void)
{
wait_rcu_gp(call_rcu_sched);
}
#ifdef CONFIG_TINY_RCU
static inline void synchronize_rcu_expedited(void)
@@ -45,9 +55,13 @@ static inline void rcu_barrier(void)
#else /* #ifdef CONFIG_TINY_RCU */
void rcu_barrier(void);
void synchronize_rcu_expedited(void);
static inline void rcu_barrier(void)
{
wait_rcu_gp(call_rcu);
}
#endif /* #else #ifdef CONFIG_TINY_RCU */
static inline void synchronize_rcu_bh(void)
...
@@ -67,6 +67,8 @@ static inline void synchronize_rcu_bh_expedited(void)
}
extern void rcu_barrier(void);
extern void rcu_barrier_bh(void);
extern void rcu_barrier_sched(void);
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
...
@@ -94,11 +94,16 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
struct rcu_synchronize {
struct rcu_head head;
struct completion completion;
};
/*
 * Awaken the corresponding synchronize_rcu() instance now that a
 * grace period has elapsed.
 */
void wakeme_after_rcu(struct rcu_head *head)
static void wakeme_after_rcu(struct rcu_head *head)
{
	struct rcu_synchronize *rcu;
@@ -106,6 +111,20 @@ void wakeme_after_rcu(struct rcu_head *head)
	complete(&rcu->completion);
}
void wait_rcu_gp(call_rcu_func_t crf)
{
struct rcu_synchronize rcu;
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
crf(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(wait_rcu_gp);
#ifdef CONFIG_PROVE_RCU
/*
 * wrapper function to avoid #include problems.
...
@@ -281,34 +281,6 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu_bh);
void rcu_barrier_bh(void)
{
struct rcu_synchronize rcu;
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_bh(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_bh);
void rcu_barrier_sched(void)
{
struct rcu_synchronize rcu;
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_sched(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier_sched);
/*
 * Spawn the kthread that invokes RCU callbacks.
 */
...
@@ -697,20 +697,6 @@ void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
}
EXPORT_SYMBOL_GPL(call_rcu);
void rcu_barrier(void)
{
struct rcu_synchronize rcu;
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
...
@@ -1613,18 +1613,9 @@ EXPORT_SYMBOL_GPL(call_rcu_bh);
 */
void synchronize_sched(void)
{
struct rcu_synchronize rcu;
	if (rcu_blocking_is_gp())
		return;
wait_rcu_gp(call_rcu_sched);
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_sched(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_sched);
@@ -1639,18 +1630,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
 */
void synchronize_rcu_bh(void)
{
struct rcu_synchronize rcu;
	if (rcu_blocking_is_gp())
		return;
wait_rcu_gp(call_rcu_bh);
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu_bh(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
...
@@ -656,18 +656,9 @@ EXPORT_SYMBOL_GPL(call_rcu);
 */
void synchronize_rcu(void)
{
struct rcu_synchronize rcu;
	if (!rcu_scheduler_active)
		return;
wait_rcu_gp(call_rcu);
init_rcu_head_on_stack(&rcu.head);
init_completion(&rcu.completion);
/* Will wake me after RCU finished. */
call_rcu(&rcu.head, wakeme_after_rcu);
/* Wait for it. */
wait_for_completion(&rcu.completion);
destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
...