Commit 45975c7d authored by Paul E. McKenney

rcu: Define RCU-sched API in terms of RCU for Tree RCU PREEMPT builds

Now that RCU-preempt knows about preemption disabling, its implementation
of synchronize_rcu() works for synchronize_sched(), and likewise for the
other RCU-sched update-side API members.  This commit therefore confines
the RCU-sched update-side code to CONFIG_PREEMPT=n builds, and defines
RCU-sched's update-side API members in terms of those of RCU-preempt.

This means that any given build of the Linux kernel has only one
update-side flavor of RCU, namely RCU-preempt for CONFIG_PREEMPT=y builds
and RCU-sched for CONFIG_PREEMPT=n builds.  This in turn means that kernels
built with CONFIG_RCU_NOCB_CPU=y have only one rcuo kthread per CPU.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Andi Kleen <ak@linux.intel.com>
parent 4cf439a2
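Before the diff itself, the shape of the consolidation is worth sketching. This is illustrative only, not the literal patch text: once RCU-preempt's grace periods also wait for preemption-disabled regions, the RCU-sched update-side members on a CONFIG_PREEMPT=y build can become trivial forwarders to their RCU-preempt counterparts.

/* Sketch only: CONFIG_PREEMPT=y mapping of RCU-sched onto RCU-preempt. */
static inline void synchronize_sched(void)
{
	synchronize_rcu();	/* RCU-preempt GP now covers preempt-off regions. */
}

static inline void call_rcu_sched(struct rcu_head *head, rcu_callback_t func)
{
	call_rcu(head, func);	/* One callback path, hence one rcuo kthread per CPU. */
}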
@@ -49,11 +49,11 @@
 /* Exported common interfaces */
-#ifdef CONFIG_PREEMPT_RCU
-void call_rcu(struct rcu_head *head, rcu_callback_t func);
-#else /* #ifdef CONFIG_PREEMPT_RCU */
+#ifdef CONFIG_TINY_RCU
 #define call_rcu call_rcu_sched
-#endif /* #else #ifdef CONFIG_PREEMPT_RCU */
+#else
+void call_rcu(struct rcu_head *head, rcu_callback_t func);
+#endif
 void call_rcu_sched(struct rcu_head *head, rcu_callback_t func);
 void synchronize_sched(void);
@@ -92,11 +92,6 @@ static inline void __rcu_read_unlock(void)
 	preempt_enable();
 }
 
-static inline void synchronize_rcu(void)
-{
-	synchronize_sched();
-}
-
 static inline int rcu_preempt_depth(void)
 {
 	return 0;
@@ -107,7 +102,6 @@ static inline int rcu_preempt_depth(void)
 /* Internal to kernel */
 void rcu_init(void);
 extern int rcu_scheduler_active __read_mostly;
-void rcu_sched_qs(void);
 void rcu_check_callbacks(int user);
 void rcu_report_dead(unsigned int cpu);
 void rcutree_migrate_callbacks(int cpu);
...
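Callers of call_rcu() are unaffected by the #ifdef shuffle above: the prototype is unchanged, and only the build-time definition behind it moves. A typical caller looks the same on every build; in this illustrative sketch, struct foo and fp are hypothetical, with fp already unlinked from all RCU-visible structures:

struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_reclaim(struct rcu_head *rh)
{
	kfree(container_of(rh, struct foo, rh));
}

/* Defer freeing of fp until all pre-existing readers are done. */
call_rcu(&fp->rh, foo_reclaim);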
@@ -36,6 +36,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
 /* Never flag non-existent other CPUs! */
 static inline bool rcu_eqs_special_set(int cpu) { return false; }
 
+static inline void synchronize_rcu(void)
+{
+	synchronize_sched();
+}
+
 static inline unsigned long get_state_synchronize_rcu(void)
 {
 	return 0;
@@ -94,6 +99,8 @@ static inline void kfree_call_rcu(struct rcu_head *head,
 	call_rcu(head, func);
 }
 
+void rcu_sched_qs(void);
+
 static inline void rcu_softirq_qs(void)
 {
 	rcu_sched_qs();
...
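The same single-flavor guarantee covers the blocking form that this header now maps directly onto synchronize_sched(). The classic updater pattern below (gp, gp_lock, and new are hypothetical) exercises the one and only update-side flavor on both TINY and TREE builds:

struct foo *old;

spin_lock(&gp_lock);
old = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
rcu_assign_pointer(gp, new);
spin_unlock(&gp_lock);

synchronize_rcu();	/* Wait for all pre-existing readers to finish. */
kfree(old);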
@@ -45,14 +45,19 @@ static inline void rcu_virt_note_context_switch(int cpu)
 	rcu_note_context_switch(false);
 }
 
+void synchronize_rcu(void);
 static inline void synchronize_rcu_bh(void)
 {
 	synchronize_rcu();
 }
-void synchronize_sched_expedited(void);
 void synchronize_rcu_expedited(void);
 
+static inline void synchronize_sched_expedited(void)
+{
+	synchronize_rcu_expedited();
+}
+
 void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
 
 /**
...
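With synchronize_sched_expedited() reduced to an inline wrapper above, the reader-side consequence deserves spelling out: per the commit message, a preemption-disabled region now counts as a read-side critical section of the single remaining flavor. An illustrative sketch (gp and do_something_with() are hypothetical):

struct foo *p;

preempt_disable();
p = rcu_dereference_sched(gp);	/* Legal: preemption is disabled. */
do_something_with(p);
preempt_enable();

/* After this commit, either of these waits for the region above: */
synchronize_sched();	/* the wrapper */
synchronize_rcu();	/* the underlying grace period */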
This diff is collapsed.
@@ -225,9 +225,6 @@ struct rcu_data {
 	/* 5) _rcu_barrier(), OOM callbacks, and expediting. */
 	struct rcu_head barrier_head;
-#ifdef CONFIG_RCU_FAST_NO_HZ
-	struct rcu_head oom_head;
-#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
 	int exp_dynticks_snap;		/* Double-check need for IPI. */
 
 	/* 6) Callback offloading. */
@@ -433,8 +430,7 @@ DECLARE_PER_CPU(char, rcu_cpu_has_work);
 /* Forward declarations for rcutree_plugin.h */
 static void rcu_bootup_announce(void);
-static void rcu_preempt_qs(void);
-static void rcu_preempt_note_context_switch(bool preempt);
+static void rcu_qs(void);
 static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
@@ -444,9 +440,8 @@ static int rcu_print_task_stall(struct rcu_node *rnp);
 static int rcu_print_task_exp_stall(struct rcu_node *rnp);
 static void rcu_preempt_check_blocked_tasks(struct rcu_state *rsp,
 					    struct rcu_node *rnp);
-static void rcu_preempt_check_callbacks(void);
+static void rcu_flavor_check_callbacks(int user);
 void call_rcu(struct rcu_head *head, rcu_callback_t func);
-static void __init __rcu_init_preempt(void);
 static void dump_blkd_tasks(struct rcu_state *rsp, struct rcu_node *rnp,
 			    int ncheck);
 static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
...
@@ -265,7 +265,7 @@ static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
 	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
 }
 
-/* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
+/* Common code for work-done checking. */
 static bool sync_exp_work_done(struct rcu_state *rsp, unsigned long s)
 {
 	if (rcu_exp_gp_seq_done(rsp, s)) {
@@ -337,45 +337,6 @@ static bool exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
 	return false;
 }
 
-/* Invoked on each online non-idle CPU for expedited quiescent state. */
-static void sync_sched_exp_handler(void *data)
-{
-	struct rcu_data *rdp;
-	struct rcu_node *rnp;
-	struct rcu_state *rsp = data;
-
-	rdp = this_cpu_ptr(rsp->rda);
-	rnp = rdp->mynode;
-	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
-	    __this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
-		return;
-	if (rcu_is_cpu_rrupt_from_idle()) {
-		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(&rcu_sched_data));
-		return;
-	}
-	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
-	/* Store .exp before .rcu_urgent_qs. */
-	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
-	resched_cpu(smp_processor_id());
-}
-
-/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
-static void sync_sched_exp_online_cleanup(int cpu)
-{
-	struct rcu_data *rdp;
-	int ret;
-	struct rcu_node *rnp;
-	struct rcu_state *rsp = &rcu_sched_state;
-
-	rdp = per_cpu_ptr(rsp->rda, cpu);
-	rnp = rdp->mynode;
-	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
-		return;
-	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
-	WARN_ON_ONCE(ret);
-}
-
 /*
  * Select the CPUs within the specified rcu_node that the upcoming
  * expedited grace period needs to wait for.
@@ -691,39 +652,6 @@ static void _synchronize_rcu_expedited(struct rcu_state *rsp,
 	mutex_unlock(&rsp->exp_mutex);
 }
 
-/**
- * synchronize_sched_expedited - Brute-force RCU-sched grace period
- *
- * Wait for an RCU-sched grace period to elapse, but use a "big hammer"
- * approach to force the grace period to end quickly.  This consumes
- * significant time on all CPUs and is unfriendly to real-time workloads,
- * so is thus not recommended for any sort of common-case code.  In fact,
- * if you are using synchronize_sched_expedited() in a loop, please
- * restructure your code to batch your updates, and then use a single
- * synchronize_sched() instead.
- *
- * This implementation can be thought of as an application of sequence
- * locking to expedited grace periods, but using the sequence counter to
- * determine when someone else has already done the work instead of for
- * retrying readers.
- */
-void synchronize_sched_expedited(void)
-{
-	struct rcu_state *rsp = &rcu_sched_state;
-
-	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
-			 lock_is_held(&rcu_lock_map) ||
-			 lock_is_held(&rcu_sched_lock_map),
-			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");
-
-	/* If only one CPU, this is automatically a grace period. */
-	if (rcu_blocking_is_gp())
-		return;
-
-	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
-}
-EXPORT_SYMBOL_GPL(synchronize_sched_expedited);
-
 #ifdef CONFIG_PREEMPT_RCU
 
 /*
@@ -801,6 +729,11 @@ static void sync_rcu_exp_handler(void *info)
 		resched_cpu(rdp->cpu);
 }
 
+/* PREEMPT=y, so no RCU-sched to clean up after. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
+}
+
 /**
  * synchronize_rcu_expedited - Brute-force RCU grace period
  *
@@ -818,6 +751,8 @@ static void sync_rcu_exp_handler(void *info)
  * you are using synchronize_rcu_expedited() in a loop, please restructure
  * your code to batch your updates, and then use a single synchronize_rcu()
  * instead.
+ *
+ * This has the same semantics as (but is more brutal than) synchronize_rcu().
  */
 void synchronize_rcu_expedited(void)
 {
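The batching advice in the kernel-doc above can be made concrete. In this hedged sketch (table[], n, and remove_one() are hypothetical), the second form pays for a single grace period instead of n expedited ones:

int i;

/* Anti-pattern: one expedited grace period per update. */
for (i = 0; i < n; i++) {
	remove_one(&table[i]);
	synchronize_rcu_expedited();	/* Disturbs every CPU, n times. */
}

/* Preferred: batch the updates, then wait once. */
for (i = 0; i < n; i++)
	remove_one(&table[i]);
synchronize_rcu();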
@@ -836,13 +771,79 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
+/* Invoked on each online non-idle CPU for expedited quiescent state. */
+static void sync_sched_exp_handler(void *data)
+{
+	struct rcu_data *rdp;
+	struct rcu_node *rnp;
+	struct rcu_state *rsp = data;
+
+	rdp = this_cpu_ptr(rsp->rda);
+	rnp = rdp->mynode;
+	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask) ||
+	    __this_cpu_read(rcu_data.cpu_no_qs.b.exp))
+		return;
+	if (rcu_is_cpu_rrupt_from_idle()) {
+		rcu_report_exp_rdp(&rcu_state, this_cpu_ptr(&rcu_data));
+		return;
+	}
+	__this_cpu_write(rcu_data.cpu_no_qs.b.exp, true);
+	/* Store .exp before .rcu_urgent_qs. */
+	smp_store_release(this_cpu_ptr(&rcu_dynticks.rcu_urgent_qs), true);
+	resched_cpu(smp_processor_id());
+}
+
+/* Send IPI for expedited cleanup if needed at end of CPU-hotplug operation. */
+static void sync_sched_exp_online_cleanup(int cpu)
+{
+	struct rcu_data *rdp;
+	int ret;
+	struct rcu_node *rnp;
+	struct rcu_state *rsp = &rcu_state;
+
+	rdp = per_cpu_ptr(rsp->rda, cpu);
+	rnp = rdp->mynode;
+	if (!(READ_ONCE(rnp->expmask) & rdp->grpmask))
+		return;
+	ret = smp_call_function_single(cpu, sync_sched_exp_handler, rsp, 0);
+	WARN_ON_ONCE(ret);
+}
+
 /*
- * Wait for an rcu-preempt grace period, but make it happen quickly.
- * But because preemptible RCU does not exist, map to rcu-sched.
+ * Because a context switch is a grace period for RCU-sched, any blocking
+ * grace-period wait automatically implies a grace period if there
+ * is only one CPU online at any point in time during execution of either
+ * synchronize_sched() or synchronize_rcu_bh().  It is OK to occasionally
+ * incorrectly indicate that there are multiple CPUs online when there
+ * was in fact only one the whole time, as this just adds some overhead:
+ * RCU still operates correctly.
  */
+static int rcu_blocking_is_gp(void)
+{
+	int ret;
+
+	might_sleep();  /* Check for RCU read-side critical section. */
+	preempt_disable();
+	ret = num_online_cpus() <= 1;
+	preempt_enable();
+	return ret;
+}
+
+/* PREEMPT=n implementation of synchronize_rcu_expedited(). */
 void synchronize_rcu_expedited(void)
 {
-	synchronize_sched_expedited();
+	struct rcu_state *rsp = &rcu_state;
+
+	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
+			 lock_is_held(&rcu_lock_map) ||
+			 lock_is_held(&rcu_sched_lock_map),
+			 "Illegal synchronize_sched_expedited() in RCU read-side critical section");
+
+	/* If only one CPU, this is automatically a grace period. */
+	if (rcu_blocking_is_gp())
+		return;
+
+	_synchronize_rcu_expedited(rsp, sync_sched_exp_handler);
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
...
This diff is collapsed.