Commit 470ada6b authored by Martin Schwidefsky

s390/spinlock: refactor arch_spin_lock_wait[_flags]

Reorder the spinlock wait code to make it more readable.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 939c5ae4
@@ -31,23 +31,31 @@ void arch_spin_lock_wait(arch_spinlock_t *lp)
 	int count;
 
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			continue;
 		}
-		owner = lp->lock;
-		if (owner)
-			smp_yield_cpu(~owner);
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait);
@@ -60,27 +68,32 @@ void arch_spin_lock_wait_flags(arch_spinlock_t *lp, unsigned long flags)
 
 	local_irq_restore(flags);
 	while (1) {
-		owner = lp->lock;
-		if (!owner || smp_vcpu_scheduled(~owner)) {
-			count = spin_retry;
-			do {
-				if (arch_spin_is_locked(lp))
-					continue;
-				local_irq_disable();
-				if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-					return;
-				local_irq_restore(flags);
-			} while (count-- > 0);
-			if (MACHINE_IS_LPAR)
-				continue;
+		owner = ACCESS_ONCE(lp->lock);
+		/* Try to get the lock if it is free. */
+		if (!owner) {
+			local_irq_disable();
+			if (_raw_compare_and_swap(&lp->lock, 0, cpu))
+				return;
+			local_irq_restore(flags);
 		}
-		owner = lp->lock;
-		if (owner)
-			smp_yield_cpu(~owner);
-		local_irq_disable();
-		if (_raw_compare_and_swap(&lp->lock, 0, cpu))
-			return;
-		local_irq_restore(flags);
+		/* Check if the lock owner is running. */
+		if (!smp_vcpu_scheduled(~owner)) {
+			smp_yield_cpu(~owner);
+			continue;
+		}
+		/* Loop for a while on the lock value. */
+		count = spin_retry;
+		do {
+			owner = ACCESS_ONCE(lp->lock);
+		} while (owner && count-- > 0);
+		if (!owner)
+			continue;
+		/*
+		 * For multiple layers of hypervisors, e.g. z/VM + LPAR
+		 * yield the CPU if the lock is still unavailable.
+		 */
+		if (!MACHINE_IS_LPAR)
+			smp_yield_cpu(~owner);
 	}
 }
 EXPORT_SYMBOL(arch_spin_lock_wait_flags);
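Note: both functions in this diff are the out-of-line slow path; the inline fast path in the spinlock header performs a single compare-and-swap and only calls into these wait routines on contention. A minimal sketch of that relationship follows, assuming a fast-path helper along the lines of arch_spin_trylock_once (the helper name and exact layout are illustrative assumptions, not part of this commit):

/*
 * Illustrative sketch only: the fast path lives in the spinlock header,
 * is not touched by this patch, and the trylock helper name is assumed.
 */
static inline void arch_spin_lock(arch_spinlock_t *lp)
{
	/* One compare-and-swap attempt; fall back to the wait loop on contention. */
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait(lp);
}

static inline void arch_spin_lock_flags(arch_spinlock_t *lp, unsigned long flags)
{
	if (!arch_spin_trylock_once(lp))
		arch_spin_lock_wait_flags(lp, flags);
}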