Commit e0f23060 authored by Paul E. McKenney, committed by Paul E. McKenney

rcu: Update comments to reflect softirqs vs. kthreads

We now have kthreads only for flavors of RCU that support boosting,
so update the now-misleading comments accordingly.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 72fe701b
...@@ -198,7 +198,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = { ...@@ -198,7 +198,7 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
}; };
#endif /* #ifdef CONFIG_NO_HZ */ #endif /* #ifdef CONFIG_NO_HZ */
static int blimit = 10; /* Maximum callbacks per softirq. */ static int blimit = 10; /* Maximum callbacks per rcu_do_batch. */
static int qhimark = 10000; /* If this many pending, ignore blimit. */ static int qhimark = 10000; /* If this many pending, ignore blimit. */
static int qlowmark = 100; /* Once only this many pending, use blimit. */ static int qlowmark = 100; /* Once only this many pending, use blimit. */
...@@ -1261,7 +1261,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) ...@@ -1261,7 +1261,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
local_irq_restore(flags); local_irq_restore(flags);
/* Re-raise the RCU softirq if there are callbacks remaining. */ /* Re-invoke RCU core processing if there are callbacks remaining. */
if (cpu_has_callbacks_ready_to_invoke(rdp)) if (cpu_has_callbacks_ready_to_invoke(rdp))
invoke_rcu_core(); invoke_rcu_core();
} }
...@@ -1269,7 +1269,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp) ...@@ -1269,7 +1269,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
/* /*
* Check to see if this CPU is in a non-context-switch quiescent state * Check to see if this CPU is in a non-context-switch quiescent state
* (user mode or idle loop for rcu, non-softirq execution for rcu_bh). * (user mode or idle loop for rcu, non-softirq execution for rcu_bh).
* Also schedule the RCU softirq handler. * Also schedule RCU core processing.
* *
* This function must be called with hardirqs disabled. It is normally * This function must be called with hardirqs disabled. It is normally
* invoked from the scheduling-clock interrupt. If rcu_pending returns * invoked from the scheduling-clock interrupt. If rcu_pending returns
...@@ -1448,9 +1448,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed) ...@@ -1448,9 +1448,9 @@ static void force_quiescent_state(struct rcu_state *rsp, int relaxed)
#endif /* #else #ifdef CONFIG_SMP */ #endif /* #else #ifdef CONFIG_SMP */
/* /*
* This does the RCU processing work from softirq context for the * This does the RCU core processing work for the specified rcu_state
* specified rcu_state and rcu_data structures. This may be called * and rcu_data structures. This may be called only from the CPU to
* only from the CPU to whom the rdp belongs. * whom the rdp belongs.
*/ */
static void static void
__rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
...@@ -1487,7 +1487,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) ...@@ -1487,7 +1487,7 @@ __rcu_process_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
} }
/* /*
* Do softirq processing for the current CPU. * Do RCU core processing for the current CPU.
*/ */
static void rcu_process_callbacks(struct softirq_action *unused) static void rcu_process_callbacks(struct softirq_action *unused)
{ {
...@@ -1503,10 +1503,11 @@ static void rcu_process_callbacks(struct softirq_action *unused) ...@@ -1503,10 +1503,11 @@ static void rcu_process_callbacks(struct softirq_action *unused)
} }
/* /*
* Wake up the current CPU's kthread. This replaces raise_softirq() * Schedule RCU callback invocation. If the specified type of RCU
* in earlier versions of RCU. Note that because we are running on * does not support RCU priority boosting, just do a direct call,
* the current CPU with interrupts disabled, the rcu_cpu_kthread_task * otherwise wake up the per-CPU kernel kthread. Note that because we
* cannot disappear out from under us. * are running on the current CPU with interrupts disabled, the
* rcu_cpu_kthread_task cannot disappear out from under us.
*/ */
static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp) static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
{ {
......
...@@ -1478,7 +1478,8 @@ static int rcu_cpu_kthread_should_stop(int cpu) ...@@ -1478,7 +1478,8 @@ static int rcu_cpu_kthread_should_stop(int cpu)
/* /*
* Per-CPU kernel thread that invokes RCU callbacks. This replaces the * Per-CPU kernel thread that invokes RCU callbacks. This replaces the
* earlier RCU softirq. * RCU softirq used in flavors and configurations of RCU that do not
* support RCU priority boosting.
*/ */
static int rcu_cpu_kthread(void *arg) static int rcu_cpu_kthread(void *arg)
{ {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment