Commit 0cbad9c9 authored by Will Deacon, committed by Russell King

ARM: 7854/1: lockref: add support for lockless lockrefs using cmpxchg64

Our spinlocks are only 32-bit (2x16-bit tickets) and, on processors
with 64-bit atomic instructions, cmpxchg64 makes use of the double-word
exclusive accessors.

This patch wires up the cmpxchg-based lockless lockref implementation
for ARM.
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 775ebcc1
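
For context: the generic lockless path packs the 32-bit ticket lock and a 32-bit reference count into a single 64-bit word, so cmpxchg64 can bump the count in one atomic operation, but only while the embedded lock still reads as unlocked. Below is a minimal user-space sketch of that pattern; the type and function names (ticket_lock, lockref_sketch, lockref_get_sketch, is_unlocked) are hypothetical, and GCC's __atomic builtins stand in for the kernel's cmpxchg64. It is a sketch of the idea, not the kernel's lib/lockref.c code.

#include <stdbool.h>
#include <stdint.h>

/* Sketch only: a 32-bit ticket lock (2 x 16-bit owner/next tickets) and a
 * 32-bit count share one 64-bit word, so a double-word compare-and-swap
 * can update both together. */
struct ticket_lock {
	uint16_t owner;
	uint16_t next;
};

struct lockref_sketch {
	union {
		uint64_t lock_count;              /* what the 64-bit CAS operates on */
		struct {
			struct ticket_lock lock;  /* 32 bits */
			uint32_t count;           /* 32 bits */
		};
	};
};

/* A ticket lock is free when the owner ticket has caught up with next. */
static bool is_unlocked(struct ticket_lock l)
{
	return l.owner == l.next;
}

/* Lockless "get": retry a 64-bit compare-and-swap while the embedded lock
 * reads as unlocked; return false so the caller can fall back to taking
 * the spinlock if someone holds it. */
static bool lockref_get_sketch(struct lockref_sketch *lr)
{
	struct lockref_sketch old, new;

	old.lock_count = __atomic_load_n(&lr->lock_count, __ATOMIC_RELAXED);
	while (is_unlocked(old.lock)) {
		new = old;
		new.count++;
		if (__atomic_compare_exchange_n(&lr->lock_count, &old.lock_count,
						new.lock_count, false,
						__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
			return true;    /* count bumped without taking the lock */
		/* failed CAS refreshed old.lock_count; re-check and retry */
	}
	return false;                   /* lock held: use the locked slow path */
}
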
arch/arm/Kconfig
@@ -5,6 +5,7 @@ config ARM
 	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
+	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select BUILDTIME_EXTABLE_SORT if MMU
 	select CLONE_BACKWARDS
arch/arm/include/asm/spinlock.h
@@ -127,10 +127,14 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 	dsb_sev();
 }
 
+static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
+{
+	return lock.tickets.owner == lock.tickets.next;
+}
+
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
-	struct __raw_tickets tickets = ACCESS_ONCE(lock->tickets);
-	return tickets.owner != tickets.next;
+	return !arch_spin_value_unlocked(ACCESS_ONCE(*lock));
 }
 
 static inline int arch_spin_is_contended(arch_spinlock_t *lock)
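
Note that the new arch_spin_value_unlocked() takes the lock by value rather than by pointer, and arch_spin_is_locked() is now expressed in terms of it. The by-value form is what the generic lockless path needs: it tests "unlocked" against the snapshot of the whole lockref it has already loaded (as in the is_unlocked(old.lock) check in the sketch above), instead of re-reading the live lock in memory.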