Commit 49918a54 authored by Paul E. McKenney

rcu: Clean up flavor-related definitions and comments in tree.c

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 679d3f30
@@ -382,12 +382,11 @@ static int rcu_is_cpu_rrupt_from_idle(void)
 }
 
 /*
- * Register a quiescent state for all RCU flavors. If there is an
+ * Register an urgently needed quiescent state. If there is an
  * emergency, invoke rcu_momentary_dyntick_idle() to do a heavy-weight
- * dyntick-idle quiescent state visible to other CPUs (but only for those
- * RCU flavors in desperate need of a quiescent state, which will normally
- * be none of them). Either way, do a lightweight quiescent state for
- * all RCU flavors.
+ * dyntick-idle quiescent state visible to other CPUs, which will in
+ * some cases serve for expedited as well as normal grace periods.
+ * Either way, register a lightweight quiescent state.
  *
  * The barrier() calls are redundant in the common case when this is
  * called externally, but just in case this is called from within this
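
For context, the heavy-weight/lightweight distinction above can be modeled in a few lines of user-space C11. This is an illustrative sketch, not kernel code, and every name in it is invented: the point is that a fully ordered counter bump lets a remote CPU prove, by comparing two snapshots, that this CPU passed through a quiescent state in between.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Stands in for the per-CPU dynticks counter; one global cell suffices here. */
static _Atomic unsigned long dynticks_ctr;

/* Heavy-weight QS: make the state change visible to all other CPUs. */
static void momentary_dyntick_idle(void)
{
	/* seq_cst: every access before the bump is ordered before it. */
	atomic_fetch_add_explicit(&dynticks_ctr, 1, memory_order_seq_cst);
}

/* Remote observer: snapshot now, recheck later. */
static unsigned long qs_snap(void)
{
	return atomic_load(&dynticks_ctr);
}

static bool qs_seen_since(unsigned long snap)
{
	return atomic_load(&dynticks_ctr) != snap;
}

int main(void)
{
	unsigned long snap = qs_snap();

	momentary_dyntick_idle();	/* the emergency path */
	printf("quiescent state observed: %d\n", (int)qs_seen_since(snap));
	return 0;
}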
@@ -564,7 +563,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
 EXPORT_SYMBOL_GPL(rcutorture_get_gp_data);
 
 /*
- * Return the root node of the specified rcu_state structure.
+ * Return the root node of the rcu_state structure.
  */
 static struct rcu_node *rcu_get_root(void)
 {
@@ -949,11 +948,7 @@ void rcu_request_urgent_qs_task(struct task_struct *t)
  * Disable preemption to avoid false positives that could otherwise
  * happen due to the current CPU number being sampled, this task being
  * preempted, its old CPU being taken offline, resuming on some other CPU,
- * then determining that its old CPU is now offline. Because there are
- * multiple flavors of RCU, and because this function can be called in the
- * midst of updating the flavors while a given CPU coming online or going
- * offline, it is necessary to check all flavors. If any of the flavors
- * believe that given CPU is online, it is considered to be online.
+ * then determining that its old CPU is now offline.
  *
  * Disable checking if in an NMI handler because we cannot safely
  * report errors from NMI handlers anyway. In addition, it is OK to use
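
The race the retained sentence compresses is worth spelling out: the task samples its CPU number, is preempted, the sampled CPU is taken offline, and the task resumes elsewhere and wrongly concludes that "its" CPU is gone. A hedged sketch of the resulting pattern follows; the real rcu_lockdep_current_cpu_online() consults RCU's own rcu_node bitmasks rather than cpu_online(), so this body only approximates the shape.

static bool self_cpu_online_sketch(void)
{
	bool ret;

	preempt_disable();			/* CPU number cannot change in here */
	ret = cpu_online(smp_processor_id());	/* sample and test in one region */
	preempt_enable();
	return ret;
}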
@@ -1563,11 +1558,10 @@ static bool rcu_future_gp_cleanup(struct rcu_node *rnp)
 }
 
 /*
- * Awaken the grace-period kthread for the specified flavor of RCU.
- * Don't do a self-awaken, and don't bother awakening when there is
- * nothing for the grace-period kthread to do (as in several CPUs
- * raced to awaken, and we lost), and finally don't try to awaken
- * a kthread that has not yet been created.
+ * Awaken the grace-period kthread. Don't do a self-awaken, and don't
+ * bother awakening when there is nothing for the grace-period kthread
+ * to do (as in several CPUs raced to awaken, and we lost), and finally
+ * don't try to awaken a kthread that has not yet been created.
  */
 static void rcu_gp_kthread_wake(void)
 {
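
Those three guards line up one-to-one in the function body. A simplified sketch, assuming the post-consolidation rcu_state field names and the simple-waitqueue wakeup of this era:

static void rcu_gp_kthread_wake_sketch(void)
{
	if (current == rcu_state.gp_kthread ||	/* no self-awaken */
	    !READ_ONCE(rcu_state.gp_flags) ||	/* nothing to do: lost the race */
	    !rcu_state.gp_kthread)		/* kthread not yet created */
		return;
	swake_up_one(&rcu_state.gp_wq);
}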
@@ -2119,13 +2113,13 @@ static int __noreturn rcu_gp_kthread(void *unused)
 }
 
 /*
- * Report a full set of quiescent states to the specified rcu_state data
- * structure. Invoke rcu_gp_kthread_wake() to awaken the grace-period
- * kthread if another grace period is required. Whether we wake
- * the grace-period kthread or it awakens itself for the next round
- * of quiescent-state forcing, that kthread will clean up after the
- * just-completed grace period. Note that the caller must hold rnp->lock,
- * which is released before return.
+ * Report a full set of quiescent states to the rcu_state data structure.
+ * Invoke rcu_gp_kthread_wake() to awaken the grace-period kthread if
+ * another grace period is required. Whether we wake the grace-period
+ * kthread or it awakens itself for the next round of quiescent-state
+ * forcing, that kthread will clean up after the just-completed grace
+ * period. Note that the caller must hold rnp->lock, which is released
+ * before return.
  */
 static void rcu_report_qs_rsp(unsigned long flags)
 	__releases(rcu_get_root()->lock)
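
The hold-on-entry, release-before-return convention in the last sentence is exactly what the __releases() annotation below the comment tells sparse. A skeleton of the shape, with the real function's bookkeeping elided and only the flag-setting and unlock kept:

static void report_qs_rsp_sketch(unsigned long flags)
	__releases(rcu_get_root()->lock)
{
	struct rcu_node *rnp = rcu_get_root();

	WRITE_ONCE(rcu_state.gp_flags,
		   READ_ONCE(rcu_state.gp_flags) | RCU_GP_FLAG_FQS);
	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);  /* caller's lock dropped */
	rcu_gp_kthread_wake();
}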
@@ -2212,7 +2206,7 @@ static void rcu_report_qs_rnp(unsigned long mask, struct rcu_node *rnp,
 /*
  * Record a quiescent state for all tasks that were previously queued
  * on the specified rcu_node structure and that were blocking the current
- * RCU grace period. The caller must hold the specified rnp->lock with
+ * RCU grace period. The caller must hold the corresponding rnp->lock with
  * irqs disabled, and this lock is released upon return, but irqs remain
  * disabled.
  */
@@ -2714,11 +2708,11 @@ static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused
 }
 
 /*
- * Schedule RCU callback invocation. If the specified type of RCU
- * does not support RCU priority boosting, just do a direct call,
- * otherwise wake up the per-CPU kernel kthread. Note that because we
- * are running on the current CPU with softirqs disabled, the
- * rcu_cpu_kthread_task cannot disappear out from under us.
+ * Schedule RCU callback invocation. If the running implementation of RCU
+ * does not support RCU priority boosting, just do a direct call, otherwise
+ * wake up the per-CPU kernel kthread. Note that because we are running
+ * on the current CPU with softirqs disabled, the rcu_cpu_kthread_task
+ * cannot disappear out from under us.
  */
 static void invoke_rcu_callbacks(struct rcu_data *rdp)
 {
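
A hedged sketch of the decision being described, close to but not guaranteed to match the tree.c body at this commit:

static void invoke_rcu_callbacks_sketch(struct rcu_data *rdp)
{
	if (unlikely(!rcu_scheduler_fully_active))
		return;				/* too early in boot to invoke */
	if (likely(!rcu_state.boost)) {
		rcu_do_batch(rdp);		/* direct call, softirqs disabled */
		return;
	}
	invoke_rcu_callbacks_kthread();		/* defer to the per-CPU kthread */
}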
@@ -2959,11 +2953,10 @@ EXPORT_SYMBOL_GPL(cond_synchronize_rcu);
 
 /*
  * Check to see if there is any immediate RCU-related work to be done by
- * the current CPU, for the specified type of RCU, returning 1 if so and
- * zero otherwise. The checks are in order of increasing expense: checks
- * that can be carried out against CPU-local state are performed first.
- * However, we must check for CPU stalls first, else we might not get
- * a chance.
+ * the current CPU, returning 1 if so and zero otherwise. The checks are
+ * in order of increasing expense: checks that can be carried out against
+ * CPU-local state are performed first. However, we must check for CPU
+ * stalls first, else we might not get a chance.
  */
 static int rcu_pending(void)
 {
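
The ordering rule reads naturally as a skeleton: the stall check runs unconditionally at the top, because any cheaper check that returns 1 early would otherwise skip it, and a stalled CPU is precisely the case in which the later checks may never get another chance. Helper names below follow the tree.c helpers assumed here:

static int rcu_pending_sketch(void)
{
	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);

	check_cpu_stall(rdp);		/* first, else we might not get a chance */

	if (rcu_segcblist_ready_cbs(&rdp->cblist))
		return 1;		/* cheap: purely CPU-local state */

	/* ... progressively more expensive checks follow ... */
	return 0;
}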
@@ -3070,10 +3063,7 @@ static void rcu_barrier_func(void *unused)
 	}
 }
 
-/*
- * Orchestrate the specified type of RCU barrier, waiting for all
- * RCU callbacks of the specified type to complete.
- */
+/* Orchestrate an RCU barrier, waiting for all RCU callbacks to complete. */
 static void _rcu_barrier(void)
 {
 	int cpu;
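
The one-line comment compresses the whole orchestration. A hedged sketch: the initial count of one keeps the completion from firing while callbacks are still being posted, has_cbs() is a hypothetical stand-in for the segcblist emptiness test the real code performs, and rcu_barrier_func() (visible in the hunk above) is what enqueues the callback that does the matching decrement.

static atomic_t barrier_cpu_count;
static struct completion barrier_completion;

static void rcu_barrier_sketch(void)
{
	int cpu;

	atomic_set(&barrier_cpu_count, 1);	/* hold off completion while posting */
	init_completion(&barrier_completion);
	for_each_possible_cpu(cpu) {
		if (!has_cbs(cpu))		/* hypothetical: skip callback-free CPUs */
			continue;
		atomic_inc(&barrier_cpu_count);
		smp_call_function_single(cpu, rcu_barrier_func, NULL, 1);
	}
	if (atomic_dec_and_test(&barrier_cpu_count))	/* drop the initial count */
		complete(&barrier_completion);
	wait_for_completion(&barrier_completion);	/* all barrier CBs have run */
}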
@@ -3393,7 +3383,7 @@ void rcu_report_dead(unsigned int cpu)
 	struct rcu_data *rdp = per_cpu_ptr(&rcu_data, cpu);
 	struct rcu_node *rnp = rdp->mynode;	/* Outgoing CPU's rdp & rnp. */
 
-	/* QS for any half-done expedited RCU-sched GP. */
+	/* QS for any half-done expedited grace period. */
 	preempt_disable();
 	rcu_report_exp_rdp(this_cpu_ptr(&rcu_data));
 	preempt_enable();
@@ -3482,7 +3472,7 @@ static int rcu_pm_notify(struct notifier_block *self,
 }
 
 /*
- * Spawn the kthreads that handle each RCU flavor's grace periods.
+ * Spawn the kthreads that handle RCU's grace periods.
  */
 static int __init rcu_spawn_gp_kthread(void)
 {
@@ -3545,7 +3535,7 @@ void rcu_scheduler_starting(void)
 }
 
 /*
- * Helper function for rcu_init() that initializes one rcu_state structure.
+ * Helper function for rcu_init() that initializes the rcu_state structure.
  */
 static void __init rcu_init_one(void)
 {
@@ -3707,7 +3697,7 @@ static void __init rcu_init_geometry(void)
 
 /*
  * Dump out the structure of the rcu_node combining tree associated
- * with the rcu_state structure referenced by rsp.
+ * with the rcu_state structure.
  */
 static void __init rcu_dump_rcu_node_tree(void)
 {