Commit e44e73ca authored by Paul E. McKenney

rcu: Make simple callback acceleration refer to rdp->gp_seq_needed

Now that the rcu_data structure contains ->gp_seq_needed, create an
rcu_accelerate_cbs_unlocked() helper function that locklessly checks to
see if new callbacks' required grace period has already been requested.
If so, update the callback list locally and again locklessly.  (Though
interrupts must be and are disabled to avoid racing with conflicting
updates in interrupt handlers.)

Otherwise, call rcu_accelerate_cbs() as before.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent ff3bb6f4
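
A note on the lockless check described above: it is safe only because grace-period sequence numbers are compared with the kernel's wrap-safe ULONG_CMP_GE() macro rather than a plain ">=", so the "old request still covers the new callbacks" test stays correct even after the ->gp_seq counter wraps. The following is a minimal user-space sketch (not part of the patch) of how that comparison behaves; the macro body matches the kernel's definition, and the sample sequence values are made up for illustration.

/* Illustrative sketch only -- not kernel code.  Sample values are hypothetical. */
#include <limits.h>
#include <stdio.h>

/* Wrap-safe "a >= b" for grace-period sequence numbers. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long gp_seq_needed = 1000;	/* cached rdp->gp_seq_needed */
	unsigned long c;			/* snapshot a la rcu_seq_snap() */

	c = 996;	/* old request already covers c: accelerate locklessly */
	printf("%d\n", ULONG_CMP_GE(gp_seq_needed, c));	/* prints 1 */

	c = 1004;	/* a newer grace period is needed: take rnp->lock instead */
	printf("%d\n", ULONG_CMP_GE(gp_seq_needed, c));	/* prints 0 */

	/* The comparison remains correct across counter wrap. */
	gp_seq_needed = 4;	/* wrapped past ULONG_MAX */
	c = ULONG_MAX - 3;
	printf("%d\n", ULONG_CMP_GE(gp_seq_needed, c));	/* prints 1 */

	return 0;
}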
@@ -1701,6 +1701,34 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	return ret;
 }
 
+/*
+ * Similar to rcu_accelerate_cbs(), but does not require that the leaf
+ * rcu_node structure's ->lock be held.  It consults the cached value
+ * of ->gp_seq_needed in the rcu_data structure, and if that indicates
+ * that a new grace-period request be made, invokes rcu_accelerate_cbs()
+ * while holding the leaf rcu_node structure's ->lock.
+ */
+static void rcu_accelerate_cbs_unlocked(struct rcu_state *rsp,
+					struct rcu_node *rnp,
+					struct rcu_data *rdp)
+{
+	unsigned long c;
+	bool needwake;
+
+	lockdep_assert_irqs_disabled();
+	c = rcu_seq_snap(&rsp->gp_seq);
+	if (!rdp->gpwrap && ULONG_CMP_GE(rdp->gp_seq_needed, c)) {
+		/* Old request still live, so mark recent callbacks. */
+		(void)rcu_segcblist_accelerate(&rdp->cblist, c);
+		return;
+	}
+	raw_spin_lock_rcu_node(rnp); /* irqs already disabled. */
+	needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
+	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
+	if (needwake)
+		rcu_gp_kthread_wake(rsp);
+}
+
 /*
  * Move any callbacks whose grace period has completed to the
  * RCU_DONE_TAIL sublist, then compact the remaining sublists and
@@ -2739,7 +2767,6 @@ static void
 __rcu_process_callbacks(struct rcu_state *rsp)
 {
 	unsigned long flags;
-	bool needwake;
 	struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 	struct rcu_node *rnp = rdp->mynode;
 
@@ -2752,15 +2779,9 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 	if (!rcu_gp_in_progress(rsp) &&
 	    rcu_segcblist_is_enabled(&rdp->cblist)) {
 		local_irq_save(flags);
-		if (rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL)) {
-			local_irq_restore(flags);
-		} else {
-			raw_spin_lock_rcu_node(rnp); /* irqs disabled. */
-			needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-			raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
-			if (needwake)
-				rcu_gp_kthread_wake(rsp);
-		}
+		if (!rcu_segcblist_restempty(&rdp->cblist, RCU_NEXT_READY_TAIL))
+			rcu_accelerate_cbs_unlocked(rsp, rnp, rdp);
+		local_irq_restore(flags);
 	}
 
 	rcu_check_gp_start_stall(rsp, rnp, rdp);
@@ -2818,8 +2839,6 @@ static void invoke_rcu_core(void)
 static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 			    struct rcu_head *head, unsigned long flags)
 {
-	bool needwake;
-
 	/*
 	 * If called from an extended quiescent state, invoke the RCU
 	 * core in order to force a re-evaluation of RCU's idleness.
@@ -2846,13 +2865,7 @@ static void __call_rcu_core(struct rcu_state *rsp, struct rcu_data *rdp,
 	/* Start a new grace period if one not already started. */
 	if (!rcu_gp_in_progress(rsp)) {
-		struct rcu_node *rnp = rdp->mynode;
-
-		raw_spin_lock_rcu_node(rnp);
-		needwake = rcu_accelerate_cbs(rsp, rnp, rdp);
-		raw_spin_unlock_rcu_node(rnp);
-		if (needwake)
-			rcu_gp_kthread_wake(rsp);
+		rcu_accelerate_cbs_unlocked(rsp, rdp->mynode, rdp);
 	} else {
 		/* Give the grace period a kick. */
 		rdp->blimit = LONG_MAX;
...