Commit 8b425aa8 authored by Paul E. McKenney, committed by Paul E. McKenney

rcu: Rename n_nocb_gp_requests to need_future_gp

CPUs going idle need to be able to indicate their need for future grace
periods.  A mechanism for doing this already exists for no-callbacks
CPUs, so the idea is to re-use that mechanism.  This commit therefore
moves the ->n_nocb_gp_requests field of the rcu_node structure out from
under the CONFIG_RCU_NOCB_CPU #ifdef and renames it to ->need_future_gp.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent b8462084
...@@ -194,9 +194,9 @@ struct rcu_node { ...@@ -194,9 +194,9 @@ struct rcu_node {
#ifdef CONFIG_RCU_NOCB_CPU #ifdef CONFIG_RCU_NOCB_CPU
wait_queue_head_t nocb_gp_wq[2]; wait_queue_head_t nocb_gp_wq[2];
/* Place for rcu_nocb_kthread() to wait GP. */ /* Place for rcu_nocb_kthread() to wait GP. */
int n_nocb_gp_requests[2];
/* Counts of upcoming no-CB GP requests. */
#endif /* #ifdef CONFIG_RCU_NOCB_CPU */ #endif /* #ifdef CONFIG_RCU_NOCB_CPU */
int need_future_gp[2];
/* Counts of upcoming no-CB GP requests. */
raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp; raw_spinlock_t fqslock ____cacheline_internodealigned_in_smp;
} ____cacheline_internodealigned_in_smp; } ____cacheline_internodealigned_in_smp;
......
...@@ -2018,7 +2018,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp) ...@@ -2018,7 +2018,7 @@ static int rcu_nocb_needs_gp(struct rcu_state *rsp)
{ {
struct rcu_node *rnp = rcu_get_root(rsp); struct rcu_node *rnp = rcu_get_root(rsp);
return rnp->n_nocb_gp_requests[(ACCESS_ONCE(rnp->completed) + 1) & 0x1]; return rnp->need_future_gp[(ACCESS_ONCE(rnp->completed) + 1) & 0x1];
} }
/* /*
...@@ -2032,8 +2032,8 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ...@@ -2032,8 +2032,8 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
int needmore; int needmore;
wake_up_all(&rnp->nocb_gp_wq[c & 0x1]); wake_up_all(&rnp->nocb_gp_wq[c & 0x1]);
rnp->n_nocb_gp_requests[c & 0x1] = 0; rnp->need_future_gp[c & 0x1] = 0;
needmore = rnp->n_nocb_gp_requests[(c + 1) & 0x1]; needmore = rnp->need_future_gp[(c + 1) & 0x1];
trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed, trace_rcu_future_grace_period(rsp->name, rnp->gpnum, rnp->completed,
c, rnp->level, rnp->grplo, rnp->grphi, c, rnp->level, rnp->grplo, rnp->grphi,
needmore ? "CleanupMore" : "Cleanup"); needmore ? "CleanupMore" : "Cleanup");
...@@ -2041,7 +2041,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ...@@ -2041,7 +2041,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
} }
/* /*
* Set the root rcu_node structure's ->n_nocb_gp_requests field * Set the root rcu_node structure's ->need_future_gp field
* based on the sum of those of all rcu_node structures. This does * based on the sum of those of all rcu_node structures. This does
* double-count the root rcu_node structure's requests, but this * double-count the root rcu_node structure's requests, but this
* is necessary to handle the possibility of a rcu_nocb_kthread() * is necessary to handle the possibility of a rcu_nocb_kthread()
...@@ -2050,7 +2050,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) ...@@ -2050,7 +2050,7 @@ static int rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
*/ */
static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq) static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq)
{ {
rnp->n_nocb_gp_requests[(rnp->completed + 1) & 0x1] += nrq; rnp->need_future_gp[(rnp->completed + 1) & 0x1] += nrq;
} }
static void rcu_init_one_nocb(struct rcu_node *rnp) static void rcu_init_one_nocb(struct rcu_node *rnp)
...@@ -2181,7 +2181,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) ...@@ -2181,7 +2181,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
c = rnp->completed + 2; c = rnp->completed + 2;
/* Count our request for a grace period. */ /* Count our request for a grace period. */
rnp->n_nocb_gp_requests[c & 0x1]++; rnp->need_future_gp[c & 0x1]++;
trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum, trace_rcu_future_grace_period(rdp->rsp->name, rnp->gpnum,
rnp->completed, c, rnp->level, rnp->completed, c, rnp->level,
rnp->grplo, rnp->grphi, "Startleaf"); rnp->grplo, rnp->grphi, "Startleaf");
...@@ -2225,10 +2225,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp) ...@@ -2225,10 +2225,10 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
* Adjust counters accordingly and start the * Adjust counters accordingly and start the
* needed grace period. * needed grace period.
*/ */
rnp->n_nocb_gp_requests[c & 0x1]--; rnp->need_future_gp[c & 0x1]--;
c = rnp_root->completed + 1; c = rnp_root->completed + 1;
rnp->n_nocb_gp_requests[c & 0x1]++; rnp->need_future_gp[c & 0x1]++;
rnp_root->n_nocb_gp_requests[c & 0x1]++; rnp_root->need_future_gp[c & 0x1]++;
trace_rcu_future_grace_period(rdp->rsp->name, trace_rcu_future_grace_period(rdp->rsp->name,
rnp->gpnum, rnp->gpnum,
rnp->completed, rnp->completed,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment