Commit 470ada6b authored by Martin Schwidefsky

s390/spinlock: refactor arch_spin_lock_wait[_flags]

Reorder the spinlock wait code to make it more readable.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 939c5ae4
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
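
The reordered loop now reads as three explicit phases: try a compare-and-swap while the lock is free, yield to an owner that the hypervisor is not currently running, and otherwise spin on the lock word for spin_retry rounds before yielding anyway on nested hypervisors (e.g. z/VM under LPAR). The sketch below restates that control flow as a minimal user-space model; it assumes C11 atomics in place of ACCESS_ONCE and _raw_compare_and_swap, and vcpu_is_scheduled(), yield_to(), on_lpar and SPIN_RETRY are hypothetical stand-ins for smp_vcpu_scheduled(), smp_yield_cpu(), MACHINE_IS_LPAR and spin_retry, not kernel API.

	#include <stdatomic.h>
	#include <stdbool.h>

	/* Hypothetical stand-ins for the s390 primitives used above. */
	extern bool vcpu_is_scheduled(unsigned int cpu);
	extern void yield_to(unsigned int cpu);
	extern const bool on_lpar;
	#define SPIN_RETRY 1000

	/*
	 * As in the kernel code, the lock word holds the bitwise
	 * complement of the owning CPU, so ~owner recovers the CPU
	 * number to query or yield to.
	 */
	static void spin_lock_wait_model(atomic_uint *lock, unsigned int cpu)
	{
		unsigned int owner, expected;
		int count;

		for (;;) {
			owner = atomic_load_explicit(lock, memory_order_relaxed);
			/* Phase 1: the lock is free, try to grab it. */
			if (!owner) {
				expected = 0;
				if (atomic_compare_exchange_strong(lock, &expected, cpu))
					return;
				continue;
			}
			/* Phase 2: the owner is not running, yield to it. */
			if (!vcpu_is_scheduled(~owner)) {
				yield_to(~owner);
				continue;
			}
			/* Phase 3: the owner is running, spin a bounded time. */
			count = SPIN_RETRY;
			do {
				owner = atomic_load_explicit(lock, memory_order_relaxed);
			} while (owner && count-- > 0);
			if (!owner)
				continue;
			/* Nested hypervisors: yield even to a "running" owner. */
			if (!on_lpar)
				yield_to(~owner);
		}
	}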
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
 			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
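
Compared to arch_spin_lock_wait, the _flags variant spins with interrupts enabled and closes them only around the actual acquisition attempt, restoring the caller's flags whenever the compare-and-swap fails, so the function returns with interrupts disabled only on success. A minimal sketch of that window, under the same C11-atomics assumption as above; irq_disable() and irq_restore() are hypothetical stand-ins for local_irq_disable() and local_irq_restore().

	#include <stdatomic.h>

	/* Hypothetical stand-ins for the irq primitives used above. */
	extern void irq_disable(void);
	extern void irq_restore(unsigned long flags);

	static int try_acquire_irqsafe(atomic_uint *lock, unsigned int cpu,
				       unsigned long flags)
	{
		unsigned int expected = 0;

		irq_disable();		/* close the irq window... */
		if (atomic_compare_exchange_strong(lock, &expected, cpu))
			return 1;	/* ...and hold it: lock taken, irqs off */
		irq_restore(flags);	/* failed: reopen irqs, keep spinning */
		return 0;
	}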