Commit 965a002b authored by Paul E. McKenney, committed by Paul E. McKenney

rcu: Make TINY_RCU also use softirq for RCU_BOOST=n

This patch #ifdefs TINY_RCU kthreads out of the kernel unless RCU_BOOST=y,
thus eliminating context-switch overhead if RCU priority boosting has
not been configured.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 385680a9
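For orientation before the diff: with RCU_BOOST=n, noticing a quiescent state now only raises RCU_SOFTIRQ, while the rcu_kthread() wakeup path is kept only for RCU_BOOST=y. Below is a rough userspace sketch of that compile-time dispatch split, not kernel code: the pthread worker, the invoke_callbacks()/callback_worker() names, and the USE_BOOST_THREAD macro are hypothetical stand-ins for rcu_kthread(), invoke_rcu_callbacks(), and CONFIG_RCU_BOOST.

	/* Hypothetical stand-in for the pattern in this patch, not kernel code. */
	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static void process_callbacks(void)
	{
		printf("processing ready RCU callbacks\n");
	}

	#ifdef USE_BOOST_THREAD			/* models CONFIG_RCU_BOOST=y */

	static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t work_cv = PTHREAD_COND_INITIALIZER;
	static int have_work;

	/* Wake the worker, the way invoke_rcu_callbacks() wakes rcu_kthread(). */
	static void invoke_callbacks(void)
	{
		pthread_mutex_lock(&work_lock);
		have_work = 1;
		pthread_cond_signal(&work_cv);
		pthread_mutex_unlock(&work_lock);
	}

	/* Worker loop standing in for rcu_kthread(): sleep until work arrives. */
	static void *callback_worker(void *arg)
	{
		(void)arg;
		for (;;) {
			pthread_mutex_lock(&work_lock);
			while (!have_work)
				pthread_cond_wait(&work_cv, &work_lock);
			have_work = 0;
			pthread_mutex_unlock(&work_lock);
			process_callbacks();
		}
		return NULL;
	}

	#else	/* models RCU_BOOST=n: defer as raise_softirq(RCU_SOFTIRQ) would */

	static void invoke_callbacks(void)
	{
		/* The kernel raises RCU_SOFTIRQ here; this sketch just runs the handler. */
		process_callbacks();
	}

	#endif

	int main(void)
	{
	#ifdef USE_BOOST_THREAD
		pthread_t tid;

		pthread_create(&tid, NULL, callback_worker, NULL);
	#endif
		invoke_callbacks();	/* a quiescent state was noticed */
		sleep(1);		/* give the worker, if any, time to run */
		return 0;
	}

Building with -DUSE_BOOST_THREAD -pthread exercises the thread path (a wakeup and a context switch per batch); building without it models the softirq-style direct dispatch that this patch makes the default for RCU_BOOST=n.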
@@ -27,9 +27,13 @@
 
 #include <linux/cache.h>
 
+#ifdef CONFIG_RCU_BOOST
 static inline void rcu_init(void)
 {
 }
+#else /* #ifdef CONFIG_RCU_BOOST */
+void rcu_init(void);
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 static inline void rcu_barrier_bh(void)
 {
...
@@ -43,16 +43,11 @@
 #include "rcu.h"
 
-/* Controls for rcu_kthread() kthread, replacing RCU_SOFTIRQ used previously. */
-static struct task_struct *rcu_kthread_task;
-static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
-static unsigned long have_rcu_kthread_work;
-
 /* Forward declarations for rcutiny_plugin.h. */
 struct rcu_ctrlblk;
-static void invoke_rcu_kthread(void);
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp);
-static int rcu_kthread(void *arg);
+static void invoke_rcu_callbacks(void);
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
+static void rcu_process_callbacks(struct softirq_action *unused);
 static void __call_rcu(struct rcu_head *head,
 		       void (*func)(struct rcu_head *rcu),
 		       struct rcu_ctrlblk *rcp);
@@ -101,16 +96,6 @@ static int rcu_qsctr_help(struct rcu_ctrlblk *rcp)
 	return 0;
 }
 
-/*
- * Wake up rcu_kthread() to process callbacks now eligible for invocation
- * or to boost readers.
- */
-static void invoke_rcu_kthread(void)
-{
-	have_rcu_kthread_work = 1;
-	wake_up(&rcu_kthread_wq);
-}
-
 /*
  * Record an rcu quiescent state.  And an rcu_bh quiescent state while we
  * are at it, given that any rcu quiescent state is also an rcu_bh
@@ -123,7 +108,7 @@ void rcu_sched_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_sched_ctrlblk) +
 	    rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
@@ -136,7 +121,7 @@ void rcu_bh_qs(int cpu)
 	local_irq_save(flags);
 	if (rcu_qsctr_help(&rcu_bh_ctrlblk))
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	local_irq_restore(flags);
 }
@@ -160,7 +145,7 @@ void rcu_check_callbacks(int cpu, int user)
  * Invoke the RCU callbacks on the specified rcu_ctrlkblk structure
  * whose grace period has elapsed.
  */
-static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
+static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 {
 	struct rcu_head *next, *list;
 	unsigned long flags;
@@ -200,36 +185,11 @@ static void rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 	RCU_TRACE(trace_rcu_batch_end(rcp->name, cb_count));
 }
 
-/*
- * This kthread invokes RCU callbacks whose grace periods have
- * elapsed.  It is awakened as needed, and takes the place of the
- * RCU_SOFTIRQ that was used previously for this purpose.
- * This is a kthread, but it is never stopped, at least not until
- * the system goes down.
- */
-static int rcu_kthread(void *arg)
+static void rcu_process_callbacks(struct softirq_action *unused)
 {
-	unsigned long work;
-	unsigned long morework;
-	unsigned long flags;
-
-	for (;;) {
-		wait_event_interruptible(rcu_kthread_wq,
-					 have_rcu_kthread_work != 0);
-		morework = rcu_boost();
-		local_irq_save(flags);
-		work = have_rcu_kthread_work;
-		have_rcu_kthread_work = morework;
-		local_irq_restore(flags);
-		if (work) {
-			rcu_process_callbacks(&rcu_sched_ctrlblk);
-			rcu_process_callbacks(&rcu_bh_ctrlblk);
-			rcu_preempt_process_callbacks();
-		}
-		schedule_timeout_interruptible(1); /* Leave CPU for others. */
-	}
-
-	return 0;  /* Not reached, but needed to shut gcc up. */
+	__rcu_process_callbacks(&rcu_sched_ctrlblk);
+	__rcu_process_callbacks(&rcu_bh_ctrlblk);
+	rcu_preempt_process_callbacks();
 }
 
 /*
@@ -291,17 +251,3 @@ void call_rcu_bh(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 	__call_rcu(head, func, &rcu_bh_ctrlblk);
 }
 EXPORT_SYMBOL_GPL(call_rcu_bh);
-
-/*
- * Spawn the kthread that invokes RCU callbacks.
- */
-static int __init rcu_spawn_kthreads(void)
-{
-	struct sched_param sp;
-
-	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
-	sp.sched_priority = RCU_BOOST_PRIO;
-	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
-	return 0;
-}
-early_initcall(rcu_spawn_kthreads);
@@ -245,6 +245,13 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 
 #include "rtmutex_common.h"
 
+#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
+
+/* Controls for rcu_kthread() kthread. */
+static struct task_struct *rcu_kthread_task;
+static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
+static unsigned long have_rcu_kthread_work;
+
 /*
  * Carry out RCU priority boosting on the task indicated by ->boost_tasks,
  * and advance ->boost_tasks to the next task in the ->blkd_tasks list.
@@ -332,7 +339,7 @@ static int rcu_initiate_boost(void)
 		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
 			rcu_preempt_ctrlblk.boost_tasks =
 				rcu_preempt_ctrlblk.gp_tasks;
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	} else
 		RCU_TRACE(rcu_initiate_boost_trace());
 	return 1;
@@ -350,14 +357,6 @@ static void rcu_preempt_boost_start_gp(void)
 
 #else /* #ifdef CONFIG_RCU_BOOST */
 
-/*
- * If there is no RCU priority boosting, we don't boost.
- */
-static int rcu_boost(void)
-{
-	return 0;
-}
-
 /*
  * If there is no RCU priority boosting, we don't initiate boosting,
  * but we do indicate whether there are blocked readers blocking the
@@ -425,7 +424,7 @@ static void rcu_preempt_cpu_qs(void)
 
 	/* If there are done callbacks, cause them to be invoked. */
 	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 }
 
 /*
@@ -646,7 +645,7 @@ static void rcu_preempt_check_callbacks(void)
 		rcu_preempt_cpu_qs();
 	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
 	    rcu_preempt_ctrlblk.rcb.donetail)
-		invoke_rcu_kthread();
+		invoke_rcu_callbacks();
 	if (rcu_preempt_gp_in_progress() &&
 	    rcu_cpu_blocking_cur_gp() &&
 	    rcu_preempt_running_reader())
@@ -672,7 +671,7 @@ static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
  */
 static void rcu_preempt_process_callbacks(void)
 {
-	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
+	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
 }
 
 /*
@@ -847,15 +846,6 @@ static void show_tiny_preempt_stats(struct seq_file *m)
 
 #endif /* #ifdef CONFIG_RCU_TRACE */
 
-/*
- * Because preemptible RCU does not exist, it is never necessary to
- * boost preempted RCU readers.
- */
-static int rcu_boost(void)
-{
-	return 0;
-}
-
 /*
  * Because preemptible RCU does not exist, it never has any callbacks
  * to check.
@@ -882,6 +872,78 @@ static void rcu_preempt_process_callbacks(void)
 
 #endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */
 
+#ifdef CONFIG_RCU_BOOST
+
+/*
+ * Wake up rcu_kthread() to process callbacks now eligible for invocation
+ * or to boost readers.
+ */
+static void invoke_rcu_callbacks(void)
+{
+	have_rcu_kthread_work = 1;
+	wake_up(&rcu_kthread_wq);
+}
+
+/*
+ * This kthread invokes RCU callbacks whose grace periods have
+ * elapsed.  It is awakened as needed, and takes the place of the
+ * RCU_SOFTIRQ that is used for this purpose when boosting is disabled.
+ * This is a kthread, but it is never stopped, at least not until
+ * the system goes down.
+ */
+static int rcu_kthread(void *arg)
+{
+	unsigned long work;
+	unsigned long morework;
+	unsigned long flags;
+
+	for (;;) {
+		wait_event_interruptible(rcu_kthread_wq,
+					 have_rcu_kthread_work != 0);
+		morework = rcu_boost();
+		local_irq_save(flags);
+		work = have_rcu_kthread_work;
+		have_rcu_kthread_work = morework;
+		local_irq_restore(flags);
+		if (work)
+			rcu_process_callbacks(NULL);
+		schedule_timeout_interruptible(1); /* Leave CPU for others. */
+	}
+
+	return 0;  /* Not reached, but needed to shut gcc up. */
+}
+
+/*
+ * Spawn the kthread that invokes RCU callbacks.
+ */
+static int __init rcu_spawn_kthreads(void)
+{
+	struct sched_param sp;
+
+	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
+	sp.sched_priority = RCU_BOOST_PRIO;
+	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
+	return 0;
+}
+early_initcall(rcu_spawn_kthreads);
+
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Start up softirq processing of callbacks.
+ */
+void invoke_rcu_callbacks(void)
+{
+	raise_softirq(RCU_SOFTIRQ);
+}
+
+void rcu_init(void)
+{
+	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
+}
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
+
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 
 #include <linux/kernel_stat.h>
@@ -897,12 +959,6 @@ void __init rcu_scheduler_starting(void)
 
 #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
 
-#ifdef CONFIG_RCU_BOOST
-#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
-#else /* #ifdef CONFIG_RCU_BOOST */
-#define RCU_BOOST_PRIO 1
-#endif /* #else #ifdef CONFIG_RCU_BOOST */
-
 #ifdef CONFIG_RCU_TRACE
 
 #ifdef CONFIG_RCU_BOOST
...