Commit 8af3a5e7 authored by Paul E. McKenney

rcu: Abstract rcu_cleanup_dead_rnp() from rcu_cleanup_dead_cpu()

This commit abstracts rcu_cleanup_dead_rnp() from rcu_cleanup_dead_cpu()
in preparation for the rework of RCU priority boosting.  This new function
will be invoked from rcu_read_unlock_special() in the reworked scheme,
which is why rcu_cleanup_dead_rnp() assumes that the leaf rcu_node
structure's ->qsmaskinit field has already been updated.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 74e871ac
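
Before the diff itself, here is a minimal self-contained sketch of the walk
that the new rcu_cleanup_dead_rnp() performs. This is not kernel code: the
"node" struct, cleanup_dead_node(), and the two-level tree in main() are
hypothetical stand-ins for the rcu_node hierarchy, and the locking, memory
barriers, and blocked-readers check of the real function are deliberately
omitted.

#include <stdio.h>

/* Toy stand-in for struct rcu_node: only the fields the walk touches. */
struct node {
        struct node *parent;
        unsigned long qsmaskinit;  /* one bit per online child */
        unsigned long grpmask;     /* this node's bit in parent->qsmaskinit */
};

/*
 * Mirror of the rcu_cleanup_dead_rnp() loop: starting from a leaf whose
 * ->qsmaskinit has already been cleared, clear this subtree's bit in each
 * ancestor, stopping at the first ancestor that still has another child
 * online.
 */
static void cleanup_dead_node(struct node *leaf)
{
        unsigned long mask;
        struct node *n = leaf;

        if (n->qsmaskinit)
                return;            /* leaf not empty: safe no-op */
        for (;;) {
                mask = n->grpmask;
                n = n->parent;
                if (!n)
                        break;     /* cleared all the way to the root */
                n->qsmaskinit &= ~mask;
                if (n->qsmaskinit)
                        return;    /* a sibling subtree is still online */
        }
}

int main(void)
{
        /* Two leaves, each owning one bit of the root's mask. */
        struct node root  = { .parent = NULL,  .qsmaskinit = 0x3 };
        struct node leaf0 = { .parent = &root, .qsmaskinit = 0x0, .grpmask = 0x1 };
        struct node leaf1 = { .parent = &root, .qsmaskinit = 0x1, .grpmask = 0x2 };

        cleanup_dead_node(&leaf1); /* leaf1 still has a CPU: no-op */
        cleanup_dead_node(&leaf0); /* leaf0's last CPU is gone */
        printf("root.qsmaskinit = %#lx\n", root.qsmaskinit); /* prints 0x2 */
        return 0;
}

The early return at a still-nonzero ancestor is the key property: only the
last CPU of a subtree pays for propagating the offline event toward the
root, so the walk is usually O(1) and at worst O(tree depth).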
kernel/rcu/tree.c
@@ -2226,6 +2226,46 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 			       TPS("cpuofl"));
 }
 
+/*
+ * All CPUs for the specified rcu_node structure have gone offline,
+ * and all tasks that were preempted within an RCU read-side critical
+ * section while running on one of those CPUs have since exited their RCU
+ * read-side critical section.  Some other CPU is reporting this fact with
+ * the specified rcu_node structure's ->lock held and interrupts disabled.
+ * This function therefore goes up the tree of rcu_node structures,
+ * clearing the corresponding bits in the ->qsmaskinit fields.  Note that
+ * the leaf rcu_node structure's ->qsmaskinit field has already been
+ * updated.
+ *
+ * This function does check that the specified rcu_node structure has
+ * all CPUs offline and no blocked tasks, so it is OK to invoke it
+ * prematurely.  That said, invoking it after the fact will cost you
+ * a needless lock acquisition.  So once it has done its work, don't
+ * invoke it again.
+ */
+static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
+{
+	long mask;
+	struct rcu_node *rnp = rnp_leaf;
+
+	if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+		return;
+	for (;;) {
+		mask = rnp->grpmask;
+		rnp = rnp->parent;
+		if (!rnp)
+			break;
+		raw_spin_lock(&rnp->lock); /* irqs already disabled. */
+		smp_mb__after_unlock_lock(); /* GP memory ordering. */
+		rnp->qsmaskinit &= ~mask;
+		if (rnp->qsmaskinit) {
+			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+			return;
+		}
+		raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
+	}
+}
+
 /*
  * The CPU has been completely removed, and some other CPU is reporting
  * this fact from process context.  Do the remainder of the cleanup,
@@ -2236,7 +2276,6 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
 static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 {
 	unsigned long flags;
-	unsigned long mask;
 	int need_report = 0;
 	struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
 	struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
@@ -2252,24 +2291,14 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
 	rcu_adopt_orphan_cbs(rsp, flags);
 
-	/* Remove the outgoing CPU from the masks in the rcu_node hierarchy. */
-	mask = rdp->grpmask;	/* rnp->grplo is constant. */
-	do {
-		raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
-		smp_mb__after_unlock_lock();
-		rnp->qsmaskinit &= ~mask;
-		if (rnp->qsmaskinit != 0) {
-			if (rnp != rdp->mynode)
-				raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-			break;
-		}
-		if (rnp == rdp->mynode)
-			need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
-		else
-			raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
-		mask = rnp->grpmask;
-		rnp = rnp->parent;
-	} while (rnp != NULL);
+	/* Remove outgoing CPU from mask in the leaf rcu_node structure. */
+	raw_spin_lock(&rnp->lock);	/* irqs already disabled. */
+	smp_mb__after_unlock_lock();	/* Enforce GP memory-order guarantee. */
+	rnp->qsmaskinit &= ~rdp->grpmask;
+	if (rnp->qsmaskinit == 0) {
+		need_report = rcu_preempt_offline_tasks(rsp, rnp, rdp);
+		rcu_cleanup_dead_rnp(rnp);
+	}
 
 	/*
 	 * We still hold the leaf rcu_node structure lock here, and
kernel/rcu/tree.h
@@ -552,6 +552,7 @@ static int rcu_preempt_blocked_readers_cgp(struct rcu_node *rnp);
 #ifdef CONFIG_HOTPLUG_CPU
 static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp,
 				      unsigned long flags);
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp);
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 static void rcu_print_detail_task_stall(struct rcu_state *rsp);
 static int rcu_print_task_stall(struct rcu_node *rnp);
kernel/rcu/tree_plugin.h
@@ -306,6 +306,15 @@ static struct list_head *rcu_next_node_entry(struct task_struct *t,
 	return np;
 }
 
+/*
+ * Return true if the specified rcu_node structure has tasks that were
+ * preempted within an RCU read-side critical section.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+	return !list_empty(&rnp->blkd_tasks);
+}
+
 /*
  * Handle special cases during rcu_read_unlock(), such as needing to
  * notify RCU core processing or task having blocked during the RCU
@@ -967,6 +976,14 @@ static void rcu_report_unblock_qs_rnp(struct rcu_node *rnp, unsigned long flags)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
+/*
+ * Because there is no preemptible RCU, there can be no readers blocked.
+ */
+static bool rcu_preempt_has_tasks(struct rcu_node *rnp)
+{
+	return false;
+}
+
 #endif /* #ifdef CONFIG_HOTPLUG_CPU */
 
 /*
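
Continuing the toy model above (again hypothetical, with locking elided),
the caller-side contract established by the second tree.c hunk can be
sketched as follows: clear the leaf's bit first, then start the upward walk
only if that was the leaf's last CPU.

/*
 * Caller side of the toy model, mirroring the reworked
 * rcu_cleanup_dead_cpu(): the leaf node's mask is updated before the
 * upward walk begins.
 */
static void offline_cpu(struct node *leaf, unsigned long cpu_mask)
{
        leaf->qsmaskinit &= ~cpu_mask;
        if (leaf->qsmaskinit == 0)
                cleanup_dead_node(leaf);
}

This split is what allows the reworked rcu_read_unlock_special() to reuse
the walk later: by the time rcu_cleanup_dead_rnp() runs, the leaf's
->qsmaskinit is already up to date, exactly the assumption stated in the
commit message.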