Commit 9b683874 authored by Paul E. McKenney

rcu: Stop disabling CPU hotplug in synchronize_rcu_expedited()

The fact that tasks could be migrated from leaf to root rcu_node
structures meant that synchronize_rcu_expedited() had to disable
CPU hotplug.  However, tasks now stay put, so this commit removes the
CPU-hotplug disabling from synchronize_rcu_expedited().
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
parent 13bd6494
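
For context, the pattern this commit deletes is the "exclude CPU hotplug, or fall back if a hotplug operation is already in flight" idiom. Below is a minimal sketch of that idiom using only the APIs visible in the diff (try_get_online_cpus(), put_online_cpus(), wait_rcu_gp(), call_rcu()); the wrapper function name is hypothetical, and the elided middle stands for the expedited grace-period machinery.

	/* Hypothetical wrapper illustrating the idiom being removed. */
	static void expedited_gp_excluding_hotplug(void)
	{
		if (!try_get_online_cpus()) {
			/* CPU-hotplug operation in flight; fall back to a normal GP. */
			wait_rcu_gp(call_rcu);
			return;
		}

		/* ... expedited grace-period machinery runs with hotplug excluded ... */

		put_online_cpus();	/* Re-enable CPU-hotplug operations. */
	}

The non-blocking try_get_online_cpus() matters here: blocking on the hotplug lock could stall the expedited path for the full duration of a hotplug operation, so the caller degrades to a normal grace period instead.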
@@ -727,20 +727,6 @@ void synchronize_rcu_expedited(void)
 	snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
 	smp_mb(); /* Above access cannot bleed into critical section. */
 
-	/*
-	 * Block CPU-hotplug operations.  This means that any CPU-hotplug
-	 * operation that finds an rcu_node structure with tasks in the
-	 * process of being boosted will know that all tasks blocking
-	 * this expedited grace period will already be in the process of
-	 * being boosted.  This simplifies the process of moving tasks
-	 * from leaf to root rcu_node structures.
-	 */
-	if (!try_get_online_cpus()) {
-		/* CPU-hotplug operation in flight, fall back to normal GP. */
-		wait_rcu_gp(call_rcu);
-		return;
-	}
-
 	/*
 	 * Acquire lock, falling back to synchronize_rcu() if too many
 	 * lock-acquisition failures.  Of course, if someone does the
@@ -748,22 +734,17 @@ void synchronize_rcu_expedited(void)
 	 */
 	while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
 		if (ULONG_CMP_LT(snap,
-		    READ_ONCE(sync_rcu_preempt_exp_count))) {
-			put_online_cpus();
+		    READ_ONCE(sync_rcu_preempt_exp_count)))
 			goto mb_ret; /* Others did our work for us. */
-		}
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
-			put_online_cpus();
 			wait_rcu_gp(call_rcu);
 			return;
 		}
 	}
-	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
-		put_online_cpus();
+	if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count)))
 		goto unlock_mb_ret; /* Others did our work for us. */
-	}
 
 	/* force all RCU readers onto ->blkd_tasks lists. */
 	synchronize_sched_expedited();
@@ -779,8 +760,6 @@ void synchronize_rcu_expedited(void)
 	rcu_for_each_leaf_node(rsp, rnp)
 		sync_rcu_preempt_exp_init2(rsp, rnp);
 
-	put_online_cpus();
-
 	/* Wait for snapshotted ->blkd_tasks lists to drain. */
 	rnp = rcu_get_root(rsp);
 	wait_event(sync_rcu_preempt_exp_wq,
...
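
One detail worth noting in the retained code: sync_rcu_preempt_exp_count is an unsigned long sequence counter, and ULONG_CMP_LT() compares the snapshot against it in modular arithmetic, so the "others did our work for us" fast path stays correct even if the counter wraps around. Below is a minimal user-space sketch of that comparison idiom; the macro body mirrors the kernel's definition (from include/linux/rcupdate.h), and the test values are illustrative.

	#include <stdio.h>
	#include <limits.h>

	/*
	 * Wraparound-safe "a < b" for unsigned long sequence counters:
	 * if a is modularly behind b, the unsigned difference a - b
	 * wraps around to a value above ULONG_MAX / 2.
	 */
	#define ULONG_CMP_LT(a, b) (ULONG_MAX / 2 < (unsigned long)((a) - (b)))

	int main(void)
	{
		unsigned long snap = ULONG_MAX; /* snapshot taken just before the counter wraps */
		unsigned long cnt  = 1UL;       /* counter has since wrapped past the snapshot */

		/* A plain '<' thinks the wrapped counter went backwards... */
		printf("plain   snap < cnt: %d\n", snap < cnt);              /* prints 0 */
		/* ...while the modular comparison correctly sees it as ahead. */
		printf("modular snap < cnt: %d\n", ULONG_CMP_LT(snap, cnt)); /* prints 1 */
		return 0;
	}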