Commit 6ce75a23 authored by Paul E. McKenney's avatar Paul E. McKenney

rcu: Introduce for_each_rcu_flavor() and use it

The arrival of TREE_PREEMPT_RCU some years back included some ugly
code involving either #ifdef or #ifdef'ed wrapper functions to iterate
over all non-SRCU flavors of RCU.  This commit therefore introduces
a for_each_rcu_flavor() iterator over the rcu_state structures for each
flavor of RCU to clean up a bit of the ugliness.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 1bca8cf1
...@@ -84,6 +84,7 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh); ...@@ -84,6 +84,7 @@ struct rcu_state rcu_bh_state = RCU_STATE_INITIALIZER(rcu_bh, call_rcu_bh);
DEFINE_PER_CPU(struct rcu_data, rcu_bh_data); DEFINE_PER_CPU(struct rcu_data, rcu_bh_data);
static struct rcu_state *rcu_state; static struct rcu_state *rcu_state;
LIST_HEAD(rcu_struct_flavors);
/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */ /* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF; static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
...@@ -860,9 +861,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr) ...@@ -860,9 +861,10 @@ static int rcu_panic(struct notifier_block *this, unsigned long ev, void *ptr)
*/ */
void rcu_cpu_stall_reset(void) void rcu_cpu_stall_reset(void)
{ {
rcu_sched_state.jiffies_stall = jiffies + ULONG_MAX / 2; struct rcu_state *rsp;
rcu_bh_state.jiffies_stall = jiffies + ULONG_MAX / 2;
rcu_preempt_stall_reset(); for_each_rcu_flavor(rsp)
rsp->jiffies_stall = jiffies + ULONG_MAX / 2;
} }
static struct notifier_block rcu_panic_block = { static struct notifier_block rcu_panic_block = {
...@@ -1827,10 +1829,11 @@ __rcu_process_callbacks(struct rcu_state *rsp) ...@@ -1827,10 +1829,11 @@ __rcu_process_callbacks(struct rcu_state *rsp)
*/ */
static void rcu_process_callbacks(struct softirq_action *unused) static void rcu_process_callbacks(struct softirq_action *unused)
{ {
struct rcu_state *rsp;
trace_rcu_utilization("Start RCU core"); trace_rcu_utilization("Start RCU core");
__rcu_process_callbacks(&rcu_sched_state); for_each_rcu_flavor(rsp)
__rcu_process_callbacks(&rcu_bh_state); __rcu_process_callbacks(rsp);
rcu_preempt_process_callbacks();
trace_rcu_utilization("End RCU core"); trace_rcu_utilization("End RCU core");
} }
...@@ -2241,9 +2244,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp) ...@@ -2241,9 +2244,12 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
*/ */
static int rcu_pending(int cpu) static int rcu_pending(int cpu)
{ {
return __rcu_pending(&rcu_sched_state, &per_cpu(rcu_sched_data, cpu)) || struct rcu_state *rsp;
__rcu_pending(&rcu_bh_state, &per_cpu(rcu_bh_data, cpu)) ||
rcu_preempt_pending(cpu); for_each_rcu_flavor(rsp)
if (__rcu_pending(rsp, per_cpu_ptr(rsp->rda, cpu)))
return 1;
return 0;
} }
/* /*
...@@ -2253,10 +2259,13 @@ static int rcu_pending(int cpu) ...@@ -2253,10 +2259,13 @@ static int rcu_pending(int cpu)
*/ */
static int rcu_cpu_has_callbacks(int cpu) static int rcu_cpu_has_callbacks(int cpu)
{ {
struct rcu_state *rsp;
/* RCU callbacks either ready or pending? */ /* RCU callbacks either ready or pending? */
return per_cpu(rcu_sched_data, cpu).nxtlist || for_each_rcu_flavor(rsp)
per_cpu(rcu_bh_data, cpu).nxtlist || if (per_cpu_ptr(rsp->rda, cpu)->nxtlist)
rcu_preempt_cpu_has_callbacks(cpu); return 1;
return 0;
} }
/* /*
...@@ -2551,9 +2560,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible) ...@@ -2551,9 +2560,11 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
static void __cpuinit rcu_prepare_cpu(int cpu) static void __cpuinit rcu_prepare_cpu(int cpu)
{ {
rcu_init_percpu_data(cpu, &rcu_sched_state, 0); struct rcu_state *rsp;
rcu_init_percpu_data(cpu, &rcu_bh_state, 0);
rcu_preempt_init_percpu_data(cpu); for_each_rcu_flavor(rsp)
rcu_init_percpu_data(cpu, rsp,
strcmp(rsp->name, "rcu_preempt") == 0);
} }
/* /*
...@@ -2565,6 +2576,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, ...@@ -2565,6 +2576,7 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
long cpu = (long)hcpu; long cpu = (long)hcpu;
struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu); struct rcu_data *rdp = per_cpu_ptr(rcu_state->rda, cpu);
struct rcu_node *rnp = rdp->mynode; struct rcu_node *rnp = rdp->mynode;
struct rcu_state *rsp;
trace_rcu_utilization("Start CPU hotplug"); trace_rcu_utilization("Start CPU hotplug");
switch (action) { switch (action) {
...@@ -2589,18 +2601,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self, ...@@ -2589,18 +2601,16 @@ static int __cpuinit rcu_cpu_notify(struct notifier_block *self,
* touch any data without introducing corruption. We send the * touch any data without introducing corruption. We send the
* dying CPU's callbacks to an arbitrarily chosen online CPU. * dying CPU's callbacks to an arbitrarily chosen online CPU.
*/ */
rcu_cleanup_dying_cpu(&rcu_bh_state); for_each_rcu_flavor(rsp)
rcu_cleanup_dying_cpu(&rcu_sched_state); rcu_cleanup_dying_cpu(rsp);
rcu_preempt_cleanup_dying_cpu();
rcu_cleanup_after_idle(cpu); rcu_cleanup_after_idle(cpu);
break; break;
case CPU_DEAD: case CPU_DEAD:
case CPU_DEAD_FROZEN: case CPU_DEAD_FROZEN:
case CPU_UP_CANCELED: case CPU_UP_CANCELED:
case CPU_UP_CANCELED_FROZEN: case CPU_UP_CANCELED_FROZEN:
rcu_cleanup_dead_cpu(cpu, &rcu_bh_state); for_each_rcu_flavor(rsp)
rcu_cleanup_dead_cpu(cpu, &rcu_sched_state); rcu_cleanup_dead_cpu(cpu, rsp);
rcu_preempt_cleanup_dead_cpu(cpu);
break; break;
default: default:
break; break;
...@@ -2717,6 +2727,7 @@ static void __init rcu_init_one(struct rcu_state *rsp, ...@@ -2717,6 +2727,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
per_cpu_ptr(rsp->rda, i)->mynode = rnp; per_cpu_ptr(rsp->rda, i)->mynode = rnp;
rcu_boot_init_percpu_data(i, rsp); rcu_boot_init_percpu_data(i, rsp);
} }
list_add(&rsp->flavors, &rcu_struct_flavors);
} }
/* /*
......
...@@ -422,8 +422,13 @@ struct rcu_state { ...@@ -422,8 +422,13 @@ struct rcu_state {
unsigned long gp_max; /* Maximum GP duration in */ unsigned long gp_max; /* Maximum GP duration in */
/* jiffies. */ /* jiffies. */
char *name; /* Name of structure. */ char *name; /* Name of structure. */
struct list_head flavors; /* List of RCU flavors. */
}; };
extern struct list_head rcu_struct_flavors;
#define for_each_rcu_flavor(rsp) \
list_for_each_entry((rsp), &rcu_struct_flavors, flavors)
/* Return values for rcu_preempt_offline_tasks(). */ /* Return values for rcu_preempt_offline_tasks(). */
#define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */ #define RCU_OFL_TASKS_NORM_GP 0x1 /* Tasks blocking normal */
...@@ -466,25 +471,18 @@ static void rcu_stop_cpu_kthread(int cpu); ...@@ -466,25 +471,18 @@ static void rcu_stop_cpu_kthread(int cpu);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_print_detail_task_stall(struct rcu_state *rsp); static void rcu_print_detail_task_stall(struct rcu_state *rsp);
static int rcu_print_task_stall(struct rcu_node *rnp); static int rcu_print_task_stall(struct rcu_node *rnp);
static void rcu_preempt_stall_reset(void);
static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp); static void rcu_preempt_check_blocked_tasks(struct rcu_node *rnp);
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
static int rcu_preempt_offline_tasks(struct rcu_state *rsp, static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
struct rcu_node *rnp, struct rcu_node *rnp,
struct rcu_data *rdp); struct rcu_data *rdp);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
static void rcu_preempt_cleanup_dead_cpu(int cpu);
static void rcu_preempt_check_callbacks(int cpu); static void rcu_preempt_check_callbacks(int cpu);
static void rcu_preempt_process_callbacks(void);
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu)); void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu));
#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU)
static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
bool wake); bool wake);
#endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */ #endif /* #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_TREE_PREEMPT_RCU) */
static int rcu_preempt_pending(int cpu);
static int rcu_preempt_cpu_has_callbacks(int cpu);
static void __cpuinit rcu_preempt_init_percpu_data(int cpu);
static void rcu_preempt_cleanup_dying_cpu(void);
static void __init __rcu_init_preempt(void); static void __init __rcu_init_preempt(void);
static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags); static void rcu_initiate_boost(struct rcu_node *rnp, unsigned long flags);
static void rcu_preempt_boost_start_gp(struct rcu_node *rnp); static void rcu_preempt_boost_start_gp(struct rcu_node *rnp);
......
...@@ -544,16 +544,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp) ...@@ -544,16 +544,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
return ndetected; return ndetected;
} }
/*
* Suppress preemptible RCU's CPU stall warnings by pushing the
* time of the next stall-warning message comfortably far into the
* future.
*/
static void rcu_preempt_stall_reset(void)
{
rcu_preempt_state.jiffies_stall = jiffies + ULONG_MAX / 2;
}
/* /*
* Check that the list of blocked tasks for the newly completed grace * Check that the list of blocked tasks for the newly completed grace
* period is in fact empty. It is a serious bug to complete a grace * period is in fact empty. It is a serious bug to complete a grace
...@@ -654,14 +644,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, ...@@ -654,14 +644,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
* Do CPU-offline processing for preemptible RCU.
*/
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
rcu_cleanup_dead_cpu(cpu, &rcu_preempt_state);
}
/* /*
* Check for a quiescent state from the current CPU. When a task blocks, * Check for a quiescent state from the current CPU. When a task blocks,
* the task is recorded in the corresponding CPU's rcu_node structure, * the task is recorded in the corresponding CPU's rcu_node structure,
...@@ -682,14 +664,6 @@ static void rcu_preempt_check_callbacks(int cpu) ...@@ -682,14 +664,6 @@ static void rcu_preempt_check_callbacks(int cpu)
t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS; t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
} }
/*
* Process callbacks for preemptible RCU.
*/
static void rcu_preempt_process_callbacks(void)
{
__rcu_process_callbacks(&rcu_preempt_state);
}
#ifdef CONFIG_RCU_BOOST #ifdef CONFIG_RCU_BOOST
static void rcu_preempt_do_callbacks(void) static void rcu_preempt_do_callbacks(void)
...@@ -921,24 +895,6 @@ void synchronize_rcu_expedited(void) ...@@ -921,24 +895,6 @@ void synchronize_rcu_expedited(void)
} }
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited); EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
/*
* Check to see if there is any immediate preemptible-RCU-related work
* to be done.
*/
static int rcu_preempt_pending(int cpu)
{
return __rcu_pending(&rcu_preempt_state,
&per_cpu(rcu_preempt_data, cpu));
}
/*
* Does preemptible RCU have callbacks on this CPU?
*/
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
return !!per_cpu(rcu_preempt_data, cpu).nxtlist;
}
/** /**
* rcu_barrier - Wait until all in-flight call_rcu() callbacks complete. * rcu_barrier - Wait until all in-flight call_rcu() callbacks complete.
*/ */
...@@ -948,23 +904,6 @@ void rcu_barrier(void) ...@@ -948,23 +904,6 @@ void rcu_barrier(void)
} }
EXPORT_SYMBOL_GPL(rcu_barrier); EXPORT_SYMBOL_GPL(rcu_barrier);
/*
* Initialize preemptible RCU's per-CPU data.
*/
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
rcu_init_percpu_data(cpu, &rcu_preempt_state, 1);
}
/*
* Move preemptible RCU's callbacks from dying CPU to other online CPU
* and record a quiescent state.
*/
static void rcu_preempt_cleanup_dying_cpu(void)
{
rcu_cleanup_dying_cpu(&rcu_preempt_state);
}
/* /*
* Initialize preemptible RCU's state structures. * Initialize preemptible RCU's state structures.
*/ */
...@@ -1049,14 +988,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp) ...@@ -1049,14 +988,6 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
return 0; return 0;
} }
/*
* Because preemptible RCU does not exist, there is no need to suppress
* its CPU stall warnings.
*/
static void rcu_preempt_stall_reset(void)
{
}
/* /*
* Because there is no preemptible RCU, there can be no readers blocked, * Because there is no preemptible RCU, there can be no readers blocked,
* so there is no need to check for blocked tasks. So check only for * so there is no need to check for blocked tasks. So check only for
...@@ -1084,14 +1015,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp, ...@@ -1084,14 +1015,6 @@ static int rcu_preempt_offline_tasks(struct rcu_state *rsp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
* Because preemptible RCU does not exist, it never needs CPU-offline
* processing.
*/
static void rcu_preempt_cleanup_dead_cpu(int cpu)
{
}
/* /*
* Because preemptible RCU does not exist, it never has any callbacks * Because preemptible RCU does not exist, it never has any callbacks
* to check. * to check.
...@@ -1100,14 +1023,6 @@ static void rcu_preempt_check_callbacks(int cpu) ...@@ -1100,14 +1023,6 @@ static void rcu_preempt_check_callbacks(int cpu)
{ {
} }
/*
* Because preemptible RCU does not exist, it never has any callbacks
* to process.
*/
static void rcu_preempt_process_callbacks(void)
{
}
/* /*
* Queue an RCU callback for lazy invocation after a grace period. * Queue an RCU callback for lazy invocation after a grace period.
* This will likely be later named something like "call_rcu_lazy()", * This will likely be later named something like "call_rcu_lazy()",
...@@ -1148,22 +1063,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp, ...@@ -1148,22 +1063,6 @@ static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
#endif /* #ifdef CONFIG_HOTPLUG_CPU */ #endif /* #ifdef CONFIG_HOTPLUG_CPU */
/*
* Because preemptible RCU does not exist, it never has any work to do.
*/
static int rcu_preempt_pending(int cpu)
{
return 0;
}
/*
* Because preemptible RCU does not exist, it never has callbacks
*/
static int rcu_preempt_cpu_has_callbacks(int cpu)
{
return 0;
}
/* /*
* Because preemptible RCU does not exist, rcu_barrier() is just * Because preemptible RCU does not exist, rcu_barrier() is just
* another name for rcu_barrier_sched(). * another name for rcu_barrier_sched().
...@@ -1174,21 +1073,6 @@ void rcu_barrier(void) ...@@ -1174,21 +1073,6 @@ void rcu_barrier(void)
} }
EXPORT_SYMBOL_GPL(rcu_barrier); EXPORT_SYMBOL_GPL(rcu_barrier);
/*
* Because preemptible RCU does not exist, there is no per-CPU
* data to initialize.
*/
static void __cpuinit rcu_preempt_init_percpu_data(int cpu)
{
}
/*
* Because there is no preemptible RCU, there is no cleanup to do.
*/
static void rcu_preempt_cleanup_dying_cpu(void)
{
}
/* /*
* Because preemptible RCU does not exist, it need not be initialized. * Because preemptible RCU does not exist, it need not be initialized.
*/ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment