Commit 2bbfc25b authored by Paul E. McKenney

rcu: Drop "wake" parameter from rcu_report_exp_rdp()

The rcu_report_exp_rdp() function is always invoked with its "wake"
argument set to "true", so this commit drops this parameter.  The only
potential call site that would use "false" is in the code driving the
expedited grace period, and that code uses rcu_report_exp_cpu_mult()
instead, which therefore retains its "wake" parameter.
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 82fcecfa
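
For quick reference, the net effect on rcu_report_exp_rdp() itself is sketched below (excerpted from the hunks that follow): the "wake" parameter disappears from the signature, and the wrapper now passes "true" unconditionally to rcu_report_exp_cpu_mult().

/* Before: every remaining caller passed wake == true explicitly. */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
                               bool wake)
{
        WRITE_ONCE(rdp->deferred_qs, false);
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
}

/* After: the parameter is gone and "true" is hard-coded at the one place it matters. */
static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
{
        WRITE_ONCE(rdp->deferred_qs, false);
        rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
}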
@@ -165,8 +165,7 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
 static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
 static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
-static void rcu_report_exp_rdp(struct rcu_state *rsp,
-                               struct rcu_data *rdp, bool wake);
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp);
 static void sync_sched_exp_online_cleanup(int cpu);
 
 /* rcuc/rcub kthread realtime priority */
@@ -239,8 +238,7 @@ void rcu_sched_qs(void)
 	if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
 		return;
 	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
-	rcu_report_exp_rdp(&rcu_sched_state,
-			   this_cpu_ptr(&rcu_sched_data), true);
+	rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(&rcu_sched_data));
 }
 
 void rcu_softirq_qs(void)
@@ -3758,8 +3756,7 @@ void rcu_report_dead(unsigned int cpu)
 	/* QS for any half-done expedited RCU-sched GP. */
 	preempt_disable();
-	rcu_report_exp_rdp(&rcu_sched_state,
-			   this_cpu_ptr(rcu_sched_state.rda), true);
+	rcu_report_exp_rdp(&rcu_sched_state, this_cpu_ptr(rcu_sched_state.rda));
 	preempt_enable();
 	rcu_preempt_deferred_qs(current);
 	for_each_rcu_flavor(rsp)
@@ -259,11 +259,10 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
 /*
  * Report expedited quiescent state for specified rcu_data (CPU).
  */
-static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
-                               bool wake)
+static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	WRITE_ONCE(rdp->deferred_qs, false);
-	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
+	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, true);
 }
 
 /* Common code for synchronize_{rcu,sched}_expedited() work-done checking. */
@@ -352,7 +351,7 @@ static void sync_sched_exp_handler(void *data)
 		return;
 	if (rcu_is_cpu_rrupt_from_idle()) {
 		rcu_report_exp_rdp(&rcu_sched_state,
-				   this_cpu_ptr(&rcu_sched_data), true);
+				   this_cpu_ptr(&rcu_sched_data));
 		return;
 	}
 	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
@@ -750,7 +749,7 @@ static void sync_rcu_exp_handler(void *info)
 	if (!t->rcu_read_lock_nesting) {
 		if (!(preempt_count() & (PREEMPT_MASK | SOFTIRQ_MASK)) ||
 		    rcu_dynticks_curr_cpu_in_eqs()) {
-			rcu_report_exp_rdp(rsp, rdp, true);
+			rcu_report_exp_rdp(rsp, rdp);
 		} else {
 			rdp->deferred_qs = true;
 			resched_cpu(rdp->cpu);
@@ -285,7 +285,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
 	 * still in a quiescent state in any case.)
 	 */
 	if (blkd_state & RCU_EXP_BLKD && rdp->deferred_qs)
-		rcu_report_exp_rdp(rdp->rsp, rdp, true);
+		rcu_report_exp_rdp(rdp->rsp, rdp);
 	else
 		WARN_ON_ONCE(rdp->deferred_qs);
 }
@@ -383,7 +383,7 @@ static void rcu_preempt_note_context_switch(bool preempt)
 	 */
 	rcu_preempt_qs();
 	if (rdp->deferred_qs)
-		rcu_report_exp_rdp(rcu_state_p, rdp, true);
+		rcu_report_exp_rdp(rcu_state_p, rdp);
 }
 
 /*
@@ -508,7 +508,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
 	 * blocked-tasks list below.
 	 */
 	if (rdp->deferred_qs) {
-		rcu_report_exp_rdp(rcu_state_p, rdp, true);
+		rcu_report_exp_rdp(rcu_state_p, rdp);
 		if (!t->rcu_read_unlock_special.s) {
 			local_irq_restore(flags);
 			return;