Commit b8462084 authored by Paul E. McKenney, committed by Paul E. McKenney

rcu: Push lock release to rcu_start_gp()'s callers

If CPUs are to give prior notice of needed grace periods, it will be
necessary to invoke rcu_start_gp() without dropping the root rcu_node
structure's ->lock.  This commit takes a second step in this direction
by moving the release of this lock to rcu_start_gp()'s callers.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent bd9f0686
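
To make the new calling convention concrete, the sketch below shows the before/after division of locking responsibility as a self-contained userspace analogy. It is an illustration only: the pthread mutex stands in for the root rcu_node structure's ->lock, and the names start_gp(), root_lock, and gp_flags are invented for the example rather than taken from the kernel.

/*
 * Userspace analogy of this commit's change (not kernel code).
 *
 * Old convention: start_gp() received the caller's saved flags and
 * released the root lock itself before returning.
 * New convention: the caller holds the lock across the call and
 * releases it afterwards, so start_gp() can later be invoked from
 * contexts that must not drop the lock.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t root_lock = PTHREAD_MUTEX_INITIALIZER;
static int gp_flags;                      /* stand-in for rsp->gp_flags */

/* Caller must hold root_lock; this function never drops it. */
static void start_gp(void)
{
        gp_flags = 1;                     /* analogous to RCU_GP_FLAG_INIT */
}

int main(void)
{
        pthread_mutex_lock(&root_lock);   /* caller acquires the lock ...  */
        start_gp();                       /* ... holds it across the call  */
        pthread_mutex_unlock(&root_lock); /* ... and releases it itself    */
        printf("gp_flags = %d\n", gp_flags);
        return 0;
}

One practical payoff visible in the diff: the old convention forced rcu_nocb_wait_gp() to manufacture a flags value with local_save_flags() purely so that rcu_start_gp() could restore it; with the unlock pushed out to the callers, that workaround and the extra flags1 variable disappear.
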
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1487,16 +1487,14 @@ static int __noreturn rcu_gp_kthread(void *arg)
 /*
  * Start a new RCU grace period if warranted, re-initializing the hierarchy
  * in preparation for detecting the next grace period. The caller must hold
- * the root node's ->lock, which is released before return. Hard irqs must
- * be disabled.
+ * the root node's ->lock and hard irqs must be disabled.
  *
  * Note that it is legal for a dying CPU (which is marked as offline) to
  * invoke this function. This can happen when the dying CPU reports its
  * quiescent state.
  */
 static void
-rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
-        __releases(rcu_get_root(rsp)->lock)
+rcu_start_gp(struct rcu_state *rsp)
 {
         struct rcu_data *rdp = this_cpu_ptr(rsp->rda);
         struct rcu_node *rnp = rcu_get_root(rsp);
@@ -1510,15 +1508,13 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
          */
         rcu_advance_cbs(rsp, rnp, rdp);
 
-        if (!rsp->gp_kthread ||
-            !cpu_needs_another_gp(rsp, rdp)) {
+        if (!rsp->gp_kthread || !cpu_needs_another_gp(rsp, rdp)) {
                 /*
                  * Either we have not yet spawned the grace-period
                  * task, this CPU does not need another grace period,
                  * or a grace period is already in progress.
                  * Either way, don't start a new grace period.
                  */
-                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                 return;
         }
         rsp->gp_flags = RCU_GP_FLAG_INIT;
@@ -1528,15 +1524,14 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 
         /* Wake up rcu_gp_kthread() to start the grace period. */
         wake_up(&rsp->gp_wq);
-        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 }
 
 /*
  * Report a full set of quiescent states to the specified rcu_state
  * data structure. This involves cleaning up after the prior grace
  * period and letting rcu_start_gp() start up the next grace period
- * if one is needed. Note that the caller must hold rnp->lock, as
- * required by rcu_start_gp(), which will release it.
+ * if one is needed. Note that the caller must hold rnp->lock, which
+ * is released before return.
  */
 static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
         __releases(rcu_get_root(rsp)->lock)
@@ -2134,7 +2129,8 @@ __rcu_process_callbacks(struct rcu_state *rsp)
         local_irq_save(flags);
         if (cpu_needs_another_gp(rsp, rdp)) {
                 raw_spin_lock(&rcu_get_root(rsp)->lock); /* irqs disabled. */
-                rcu_start_gp(rsp, flags);  /* releases above lock */
+                rcu_start_gp(rsp);
+                raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
         } else {
                 local_irq_restore(flags);
         }
@@ -2214,11 +2210,11 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 
                 /* Start a new grace period if one not already started. */
                 if (!rcu_gp_in_progress(rsp)) {
-                        unsigned long nestflag;
                         struct rcu_node *rnp_root = rcu_get_root(rsp);
 
-                        raw_spin_lock_irqsave(&rnp_root->lock, nestflag);
-                        rcu_start_gp(rsp, nestflag);  /* rlses rnp_root->lock */
+                        raw_spin_lock(&rnp_root->lock);
+                        rcu_start_gp(rsp);
+                        raw_spin_unlock(&rnp_root->lock);
                 } else {
                         /* Give the grace period a kick. */
                         rdp->blimit = LONG_MAX;
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -2174,7 +2174,6 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
         unsigned long c;
         bool d;
         unsigned long flags;
-        unsigned long flags1;
         struct rcu_node *rnp = rdp->mynode;
         struct rcu_node *rnp_root = rcu_get_root(rdp->rsp);
 
@@ -2236,8 +2235,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
                                     c, rnp->level,
                                     rnp->grplo, rnp->grphi,
                                     "Startedroot");
-                local_save_flags(flags1);
-                rcu_start_gp(rdp->rsp, flags1);  /* Rlses ->lock. */
+                rcu_start_gp(rdp->rsp);
+                raw_spin_unlock(&rnp->lock);
         }
 
         /* Clean up locking and irq state. */