Commit 41e80595 authored by Paul E. McKenney

rcu: Make rcu_start_future_gp() caller select grace period

The rcu_accelerate_cbs() function selects a grace-period target, which
it uses to have rcu_segcblist_accelerate() assign numbers to recently
queued callbacks.  Then it invokes rcu_start_future_gp(), which selects
a grace-period target again, which is a bit pointless.  This commit
therefore changes rcu_start_future_gp() to take the grace-period target as
a parameter, thus avoiding double selection.  This commit also changes
the name of rcu_start_future_gp() to rcu_start_this_gp() to reflect
this change in functionality, and also makes a similar change to the
name of trace_rcu_future_gp().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Tested-by: Nicholas Piggin <npiggin@gmail.com>
parent d5cd9685
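
For context, the sketch below illustrates the calling pattern this commit adopts: the caller selects the grace-period target once and hands it to every consumer, instead of letting the helper recompute it. This is a minimal standalone C example, not kernel code; the names (fake_node, gp_target, accelerate_cbs, start_this_gp) are hypothetical stand-ins for the RCU structures and functions touched by the diff.

/*
 * Standalone illustration (not kernel code) of "caller selects the
 * grace-period target once and passes it down".  All names are
 * hypothetical stand-ins for the real RCU data structures and APIs.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_node {
	unsigned long completed;	/* last completed grace period */
	bool need_gp;			/* a future grace period was requested */
};

/* Stand-in for rcu_cbs_completed(): the grace period that newly
 * queued callbacks must wait for. */
static unsigned long gp_target(const struct fake_node *np)
{
	return np->completed + 2;
}

/* Stand-in for rcu_segcblist_accelerate(): tag callbacks with target c. */
static bool accelerate_cbs(struct fake_node *np, unsigned long c)
{
	(void)np;
	(void)c;
	return true;	/* pretend some callbacks were newly tagged */
}

/* Stand-in for rcu_start_this_gp(): the target c now arrives as a
 * parameter rather than being recomputed here. */
static bool start_this_gp(struct fake_node *np, unsigned long c)
{
	(void)c;
	if (np->need_gp)
		return false;	/* already requested, nothing to wake */
	np->need_gp = true;
	return true;		/* caller should wake the GP kthread */
}

int main(void)
{
	struct fake_node node = { .completed = 100, .need_gp = false };

	/* Select the target once, then reuse it for both calls. */
	unsigned long c = gp_target(&node);
	bool wake = false;

	if (accelerate_cbs(&node, c))
		wake = start_this_gp(&node, c);

	printf("target=%lu wake=%d\n", c, (int)wake);
	return 0;
}

The corresponding real change is visible in the rcu_accelerate_cbs() hunk below, where rcu_cbs_completed() is now called once and its result is fed to both rcu_segcblist_accelerate() and rcu_start_this_gp().
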
@@ -1659,12 +1659,9 @@ static unsigned long rcu_cbs_completed(struct rcu_state *rsp,
 	return rnp->completed + 2;
 }
 
-/*
- * Trace-event helper function for rcu_start_future_gp() and
- * rcu_nocb_wait_gp().
- */
-static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-				unsigned long c, const char *s)
+/* Trace-event wrapper function for trace_rcu_future_grace_period. */
+static void trace_rcu_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+			      unsigned long c, const char *s)
 {
 	trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
 				      rnp->completed, c, rnp->level,
@@ -1672,33 +1669,27 @@ static void trace_rcu_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 }
 
 /*
- * Start some future grace period, as needed to handle newly arrived
+ * Start the specified grace period, as needed to handle newly arrived
  * callbacks.  The required future grace periods are recorded in each
- * rcu_node structure's ->need_future_gp field.  Returns true if there
+ * rcu_node structure's ->need_future_gp[] field.  Returns true if there
  * is reason to awaken the grace-period kthread.
  *
  * The caller must hold the specified rcu_node structure's ->lock, which
  * is why the caller is responsible for waking the grace-period kthread.
  */
-static bool __maybe_unused
-rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
-		    unsigned long *c_out)
+static bool rcu_start_this_gp(struct rcu_node *rnp, struct rcu_data *rdp,
+			      unsigned long c)
 {
-	unsigned long c;
 	bool ret = false;
 	struct rcu_state *rsp = rdp->rsp;
 	struct rcu_node *rnp_root = rcu_get_root(rsp);
 
 	raw_lockdep_assert_held_rcu_node(rnp);
 
-	/*
-	 * Pick up grace-period number for new callbacks.  If this
-	 * grace period is already marked as needed, return to the caller.
-	 */
-	c = rcu_cbs_completed(rsp, rnp);
-	trace_rcu_future_gp(rnp, rdp, c, TPS("Startleaf"));
+	/* If the specified GP is already known needed, return to caller. */
+	trace_rcu_this_gp(rnp, rdp, c, TPS("Startleaf"));
 	if (need_future_gp_element(rnp, c)) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartleaf"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartleaf"));
 		goto out;
 	}
 
@@ -1710,7 +1701,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 */
 	if (rnp->gpnum != rnp->completed) {
 		need_future_gp_element(rnp, c) = true;
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleaf"));
 		goto out;
 	}
 
@@ -1736,7 +1727,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	 * recorded, trace and leave.
 	 */
 	if (need_future_gp_element(rnp_root, c)) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Prestartedroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Prestartedroot"));
 		goto unlock_out;
 	}
 
@@ -1745,9 +1736,9 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 
 	/* If a grace period is not already in progress, start one. */
 	if (rnp_root->gpnum != rnp_root->completed) {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleafroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedleafroot"));
 	} else {
-		trace_rcu_future_gp(rnp, rdp, c, TPS("Startedroot"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("Startedroot"));
 		if (!rsp->gp_kthread)
 			goto unlock_out; /* No grace-period kthread yet! */
 		WRITE_ONCE(rsp->gp_flags, rsp->gp_flags | RCU_GP_FLAG_INIT);
@@ -1759,8 +1750,6 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
 	if (rnp != rnp_root)
 		raw_spin_unlock_rcu_node(rnp_root);
 out:
-	if (c_out != NULL)
-		*c_out = c;
 	return ret;
 }
 
@@ -1776,8 +1765,8 @@ static bool rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 	need_future_gp_element(rnp, c) = false;
 	needmore = need_any_future_gp(rnp);
-	trace_rcu_future_gp(rnp, rdp, c,
-			    needmore ? TPS("CleanupMore") : TPS("Cleanup"));
+	trace_rcu_this_gp(rnp, rdp, c,
+			  needmore ? TPS("CleanupMore") : TPS("Cleanup"));
 	return needmore;
 }
 
@@ -1812,6 +1801,7 @@ static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 			       struct rcu_data *rdp)
 {
+	unsigned long c;
 	bool ret = false;
 
 	raw_lockdep_assert_held_rcu_node(rnp);
@@ -1830,8 +1820,9 @@ static bool rcu_accelerate_cbs(struct rcu_state *rsp, struct rcu_node *rnp,
 	 * accelerating callback invocation to an earlier grace-period
 	 * number.
 	 */
-	if (rcu_segcblist_accelerate(&rdp->cblist, rcu_cbs_completed(rsp, rnp)))
-		ret = rcu_start_future_gp(rnp, rdp, NULL);
+	c = rcu_cbs_completed(rsp, rnp);
+	if (rcu_segcblist_accelerate(&rdp->cblist, c))
+		ret = rcu_start_this_gp(rnp, rdp, c);
 
 	/* Trace depending on how much we were able to accelerate. */
 	if (rcu_segcblist_restempty(&rdp->cblist, RCU_WAIT_TAIL))
@@ -2174,8 +2165,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
 	/* Check for GP requests since above loop. */
 	rdp = this_cpu_ptr(rsp->rda);
 	if (need_any_future_gp(rnp)) {
-		trace_rcu_future_gp(rnp, rdp, rsp->completed - 1,
-				    TPS("CleanupMore"));
+		trace_rcu_this_gp(rnp, rdp, rsp->completed - 1,
+				  TPS("CleanupMore"));
 		needgp = true;
 	}
 
 	/* Advance CBs to reduce false positives below. */
@@ -2035,7 +2035,8 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	struct rcu_node *rnp = rdp->mynode;
 
 	raw_spin_lock_irqsave_rcu_node(rnp, flags);
-	needwake = rcu_start_future_gp(rnp, rdp, &c);
+	c = rcu_cbs_completed(rdp->rsp, rnp);
+	needwake = rcu_start_this_gp(rnp, rdp, c);
 	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 	if (needwake)
 		rcu_gp_kthread_wake(rdp->rsp);
@@ -2044,7 +2045,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 	 * Wait for the grace period.  Do so interruptibly to avoid messing
 	 * up the load average.
 	 */
-	trace_rcu_future_gp(rnp, rdp, c, TPS("StartWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("StartWait"));
 	for (;;) {
 		swait_event_interruptible(
 			rnp->nocb_gp_wq[c & 0x1],
@@ -2052,9 +2053,9 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
 		if (likely(d))
 			break;
 		WARN_ON(signal_pending(current));
-		trace_rcu_future_gp(rnp, rdp, c, TPS("ResumeWait"));
+		trace_rcu_this_gp(rnp, rdp, c, TPS("ResumeWait"));
 	}
-	trace_rcu_future_gp(rnp, rdp, c, TPS("EndWait"));
+	trace_rcu_this_gp(rnp, rdp, c, TPS("EndWait"));
 	smp_mb(); /* Ensure that CB invocation happens after GP end. */
 }