Commit 82fcecfa authored by Paul E. McKenney

rcu: Update comments and help text for no more RCU-bh updaters

This commit updates comments and help text to account for the fact that
RCU-bh update-side functions are now simple wrappers for their RCU or
RCU-sched counterparts.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 65cfe358
@@ -688,14 +688,10 @@ static inline void rcu_read_unlock(void)
 /**
  * rcu_read_lock_bh() - mark the beginning of an RCU-bh critical section
  *
- * This is equivalent of rcu_read_lock(), but to be used when updates
- * are being done using call_rcu_bh() or synchronize_rcu_bh(). Since
- * both call_rcu_bh() and synchronize_rcu_bh() consider completion of a
- * softirq handler to be a quiescent state, a process in RCU read-side
- * critical section must be protected by disabling softirqs. Read-side
- * critical sections in interrupt context can use just rcu_read_lock(),
- * though this should at least be commented to avoid confusing people
- * reading the code.
+ * This is the equivalent of rcu_read_lock(), but also disables softirqs.
+ * Note that synchronize_rcu() and friends may be used for the update
+ * side, although synchronize_rcu_bh() is available as a wrapper in the
+ * short term.  Longer term, the _bh update-side API will be eliminated.
  *
  * Note that rcu_read_lock_bh() and the matching rcu_read_unlock_bh()
  * must occur in the same context, for example, it is illegal to invoke
...
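To illustrate the usage pattern the rewritten comment describes, a minimal sketch of a _bh reader paired with a consolidated-API updater follows; struct foo, gptr, and the two helpers are hypothetical names invented for this example, not part of the commit.

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		int val;
	};

	static struct foo __rcu *gptr;	/* hypothetical RCU-protected pointer */

	/* Reader: the _bh variant also disables softirqs across the section. */
	static int foo_get_val(void)
	{
		struct foo *p;
		int ret = -1;

		rcu_read_lock_bh();
		p = rcu_dereference_bh(gptr);
		if (p)
			ret = p->val;
		rcu_read_unlock_bh();
		return ret;
	}

	/* Updater: plain synchronize_rcu() now waits out _bh readers as well. */
	static void foo_set_val(int v)
	{
		struct foo *newp = kmalloc(sizeof(*newp), GFP_KERNEL);
		struct foo *oldp;

		if (!newp)
			return;
		newp->val = v;
		oldp = rcu_dereference_protected(gptr, 1); /* single updater assumed */
		rcu_assign_pointer(gptr, newp);
		synchronize_rcu();	/* also covers rcu_read_lock_bh() readers */
		kfree(oldp);
	}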
@@ -36,13 +36,13 @@ do { \
  * @...: List of call_rcu() functions for the flavors to wait on.
  *
  * This macro waits concurrently for multiple flavors of RCU grace periods.
- * For example, synchronize_rcu_mult(call_rcu, call_rcu_bh) would wait
- * on concurrent RCU and RCU-bh grace periods.  Waiting on a give SRCU
+ * For example, synchronize_rcu_mult(call_rcu, call_rcu_sched) would wait
+ * on concurrent RCU and RCU-sched grace periods.  Waiting on a given SRCU
  * domain requires you to write a wrapper function for that SRCU domain's
  * call_srcu() function, supplying the corresponding srcu_struct.
  *
  * If Tiny RCU, tell _wait_rcu_gp() not to bother waiting for RCU
- * or RCU-bh, given that anywhere synchronize_rcu_mult() can be called
+ * or RCU-sched, given that anywhere synchronize_rcu_mult() can be called
  * is automatically a grace period.
  */
 #define synchronize_rcu_mult(...) \
...
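The SRCU wrapper that the comment above asks for can be sketched as follows; my_srcu and call_my_srcu are hypothetical names, and the final call mirrors the call_rcu/call_rcu_sched example from the comment.

	#include <linux/rcupdate_wait.h>
	#include <linux/srcu.h>

	DEFINE_SRCU(my_srcu);	/* hypothetical SRCU domain */

	/* Adapter with the call_rcu()-style signature that
	 * synchronize_rcu_mult() expects, supplying the srcu_struct. */
	static void call_my_srcu(struct rcu_head *head, rcu_callback_t func)
	{
		call_srcu(&my_srcu, head, func);
	}

	static void wait_on_three_flavors(void)
	{
		/* Wait concurrently for RCU, RCU-sched, and my_srcu grace periods. */
		synchronize_rcu_mult(call_rcu, call_rcu_sched, call_my_srcu);
	}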
@@ -58,18 +58,8 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 /**
  * synchronize_rcu_bh_expedited - Brute-force RCU-bh grace period
  *
- * Wait for an RCU-bh grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.  In fact,
- * if you are using synchronize_rcu_bh_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_rcu_bh() instead.
- *
- * Note that it is illegal to call this function while holding any lock
- * that is acquired by a CPU-hotplug notifier.  And yes, it is also illegal
- * to call this function from a CPU-hotplug notifier.  Failing to observe
- * these restriction will result in deadlock.
+ * This is a transitional API and will soon be removed, with all
+ * callers converted to synchronize_rcu_expedited().
  */
 static inline void synchronize_rcu_bh_expedited(void)
 {
...
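Under Tiny RCU, the transitional wrapper presumably reduces to a single call to the consolidated API the new comment names; the body below is an assumption for illustration, since the hunk shows only the comment.

	/* Assumed wrapper body: with the _bh flavor folded into RCU proper,
	 * an expedited RCU-bh grace period is just an expedited RCU grace
	 * period. */
	static inline void synchronize_rcu_bh_expedited(void)
	{
		synchronize_rcu_expedited();
	}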
@@ -229,11 +229,11 @@ config RCU_NOCB_CPU
 	  CPUs specified at boot time by the rcu_nocbs parameter.
 	  For each such CPU, a kthread ("rcuox/N") will be created to
 	  invoke callbacks, where the "N" is the CPU being offloaded,
-	  and where the "x" is "b" for RCU-bh, "p" for RCU-preempt, and
-	  "s" for RCU-sched.  Nothing prevents this kthread from running
-	  on the specified CPUs, but (1) the kthreads may be preempted
-	  between each callback, and (2) affinity or cgroups can be used
-	  to force the kthreads to run on whatever set of CPUs is desired.
+	  and where the "x" is "p" for RCU-preempt and "s" for RCU-sched.
+	  Nothing prevents this kthread from running on the specified
+	  CPUs, but (1) the kthreads may be preempted between each
+	  callback, and (2) affinity or cgroups can be used to force
+	  the kthreads to run on whatever set of CPUs is desired.
 	  Say Y here if you want to help to debug reduced OS jitter.
 	  Say N here if you are unsure.
...
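For concreteness, booting with rcu_nocbs=1-7 (a hypothetical CPU range) would offload callbacks from CPUs 1 through 7, creating kthreads rcuop/1 through rcuop/7 and rcuos/1 through rcuos/7 under the updated naming; these can then be confined to housekeeping CPUs via affinity settings or cgroups, as the help text notes.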
@@ -565,7 +565,8 @@ unsigned long rcu_sched_get_gp_seq(void)
 EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
 
 /*
- * Return the number of RCU-bh GPs completed thus far for debug & stats.
+ * Return the number of RCU GPs completed thus far for debug & stats.
+ * This is a transitional API and will soon be removed.
  */
 unsigned long rcu_bh_get_gp_seq(void)
 {
@@ -3069,13 +3070,13 @@ void kfree_call_rcu(struct rcu_head *head,
 EXPORT_SYMBOL_GPL(kfree_call_rcu);
 
 /*
- * Because a context switch is a grace period for RCU-sched and RCU-bh,
- * any blocking grace-period wait automatically implies a grace period
- * if there is only one CPU online at any point time during execution
- * of either synchronize_sched() or synchronize_rcu_bh().  It is OK to
- * occasionally incorrectly indicate that there are multiple CPUs online
- * when there was in fact only one the whole time, as this just adds
- * some overhead: RCU still operates correctly.
+ * Because a context switch is a grace period for RCU-sched, any blocking
+ * grace-period wait automatically implies a grace period if there
+ * is only one CPU online at any point in time during execution of either
+ * synchronize_sched() or synchronize_rcu_bh().  It is OK to occasionally
+ * incorrectly indicate that there are multiple CPUs online when there
+ * was in fact only one the whole time, as this just adds some overhead:
+ * RCU still operates correctly.
  */
 static int rcu_blocking_is_gp(void)
 {
...
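The rewritten comment's reasoning suggests an implementation along the following lines; a sketch only, since the function body lies outside the hunk.

	#include <linux/cpumask.h>
	#include <linux/kernel.h>

	/* Sketch: if at most one CPU is online, the fact that the caller can
	 * block at all means everything else has quiesced, so a blocking wait
	 * already implies a grace period.  A racy num_online_cpus() reading
	 * that overcounts merely falls back to the real grace-period
	 * machinery, the "just adds some overhead" case above. */
	static int rcu_blocking_is_gp(void)
	{
		might_sleep();	/* Check for illegal blocking in atomic context. */
		return num_online_cpus() <= 1;
	}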
@@ -298,7 +298,7 @@ EXPORT_SYMBOL_GPL(rcu_read_lock_held);
  *
  * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
  *
- * Note that rcu_read_lock() is disallowed if the CPU is either idle or
+ * Note that rcu_read_lock_bh() is disallowed if the CPU is either idle or
  * offline from an RCU perspective, so check for those as well.
  */
 int rcu_read_lock_bh_held(void)
...
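A typical consumer of rcu_read_lock_bh_held() is a lockdep assertion guarding _bh-protected dereferences; struct foo, bh_ptr, and foo_peek below are hypothetical names for illustration.

	#include <linux/rcupdate.h>

	struct foo {
		int val;
	};

	static struct foo __rcu *bh_ptr;	/* hypothetical pointer */

	static int foo_peek(void)
	{
		struct foo *p;

		RCU_LOCKDEP_WARN(!rcu_read_lock_bh_held(),
				 "foo_peek() needs rcu_read_lock_bh() protection");
		p = rcu_dereference_bh(bh_ptr);
		return p ? p->val : -1;
	}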