Commit 9a15193e authored by Uros Bizjak, committed by Andrew Morton

smpboot: use atomic_try_cmpxchg in cpu_wait_death and cpu_report_death

Use atomic_try_cmpxchg instead of the atomic_cmpxchg(*ptr, old, new) == old
pattern in cpu_wait_death and cpu_report_death.  The x86 CMPXCHG instruction
returns success in the ZF flag, so this change saves a compare after the
cmpxchg (and the related move instruction in front of the cmpxchg).  Also,
atomic_try_cmpxchg implicitly assigns the old value of *ptr to "old" when the
cmpxchg fails, enabling further code simplifications.

No functional change intended.

Link: https://lkml.kernel.org/r/20220825145603.5811-1-ubizjak@gmail.com
Signed-off-by: Uros Bizjak <ubizjak@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 5fdfa161
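
For context, here is a minimal user-space sketch (not part of the commit) of the pattern the commit message describes, using C11 <stdatomic.h>.  atomic_compare_exchange_strong() has the same contract as the kernel's atomic_try_cmpxchg(): it returns a success flag and, on failure, writes the value it found back into the "expected" argument.  The helper names (advance_old_style, advance_try_style) and the user-space setting are illustrative assumptions, not kernel code.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int state;

/* Old style: emulate cmpxchg(), which reports the value found in memory,
 * so the caller must compare it against "old" and re-read on every retry. */
static void advance_old_style(int newval)
{
	int old, found;

	do {
		old = atomic_load(&state);
		found = old;
		atomic_compare_exchange_strong(&state, &found, newval);
	} while (found != old);		/* loop until the compare succeeded */
}

/* New style: try_cmpxchg semantics.  The read is hoisted out of the loop
 * because a failed compare-exchange refreshes "old" with the current value. */
static void advance_try_style(int newval)
{
	int old = atomic_load(&state);

	while (!atomic_compare_exchange_strong(&state, &old, newval))
		;	/* "old" already holds the latest value, just retry */
}

int main(void)
{
	advance_old_style(1);
	advance_try_style(2);
	printf("state = %d\n", atomic_load(&state));	/* prints "state = 2" */
	return 0;
}

The same shape change is what lets cpu_report_death() below move its atomic_read() out of the do/while loop and lets cpu_wait_death() drop the explicit "!= oldstate" comparison.
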
kernel/smpboot.c
@@ -433,7 +433,7 @@ bool cpu_wait_death(unsigned int cpu, int seconds)
 
 	/* The outgoing CPU will normally get done quite quickly. */
 	if (atomic_read(&per_cpu(cpu_hotplug_state, cpu)) == CPU_DEAD)
-		goto update_state;
+		goto update_state_early;
 	udelay(5);
 
 	/* But if the outgoing CPU dawdles, wait increasingly long times. */
@@ -444,16 +444,17 @@ bool cpu_wait_death(unsigned int cpu, int seconds)
 			break;
 		sleep_jf = DIV_ROUND_UP(sleep_jf * 11, 10);
 	}
-update_state:
+update_state_early:
 	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
+update_state:
 	if (oldstate == CPU_DEAD) {
 		/* Outgoing CPU died normally, update state. */
 		smp_mb(); /* atomic_read() before update. */
 		atomic_set(&per_cpu(cpu_hotplug_state, cpu), CPU_POST_DEAD);
 	} else {
 		/* Outgoing CPU still hasn't died, set state accordingly. */
-		if (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-				   oldstate, CPU_BROKEN) != oldstate)
+		if (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+					&oldstate, CPU_BROKEN))
 			goto update_state;
 		ret = false;
 	}
@@ -475,14 +476,14 @@ bool cpu_report_death(void)
 	int newstate;
 	int cpu = smp_processor_id();
 
+	oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
 	do {
-		oldstate = atomic_read(&per_cpu(cpu_hotplug_state, cpu));
 		if (oldstate != CPU_BROKEN)
			newstate = CPU_DEAD;
 		else
 			newstate = CPU_DEAD_FROZEN;
-	} while (atomic_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
-				oldstate, newstate) != oldstate);
+	} while (!atomic_try_cmpxchg(&per_cpu(cpu_hotplug_state, cpu),
+				     &oldstate, newstate));
 	return newstate == CPU_DEAD;
 }