Commit 087133ac authored by Will Deacon, committed by Ingo Molnar

locking/qrwlock, arm64: Move rwlock implementation over to qrwlocks

Now that the qrwlock can make use of WFE, remove our homebrewed rwlock
code in favour of the generic queued implementation.
Tested-by: Waiman Long <longman@redhat.com>
Tested-by: Jeremy Linton <jeremy.linton@arm.com>
Tested-by: Adam Wallis <awallis@codeaurora.org>
Tested-by: Jan Glauber <jglauber@cavium.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Cc: Jeremy.Linton@arm.com
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1507810851-306-5-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b519b56e
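
For context on the WFE remark in the commit message: the generic queued rwlock's slow paths wait on the lock word with atomic_cond_read_acquire() (enabled earlier in this series), and arm64 implements the underlying smp_cond_load_acquire() with an LDXR/WFE sequence, so a contended CPU idles in a low-power wait-for-event state instead of busy-polling. The following is an illustrative, kernel-style sketch of a reader slow path, not the exact kernel/locking/qrwlock.c source; _QR_BIAS and _QW_LOCKED are the generic qrwlock constants.

/* Illustrative sketch of a queued-rwlock reader slow path. */
static void reader_slowpath_sketch(struct qrwlock *lock)
{
	/* Undo the speculative reader count taken on the fast path. */
	atomic_sub(_QR_BIAS, &lock->cnts);

	/* Queue up behind other contended lockers. */
	arch_spin_lock(&lock->wait_lock);
	atomic_add(_QR_BIAS, &lock->cnts);

	/*
	 * Wait until no writer holds the lock.  On arm64 this lowers to
	 * an LDXR/WFE loop: the CPU sleeps until a store to lock->cnts
	 * clears its exclusive monitor, then re-checks the condition.
	 */
	atomic_cond_read_acquire(&lock->cnts, !(VAL & _QW_LOCKED));

	/* Let the next waiter become queue head. */
	arch_spin_unlock(&lock->wait_lock);
}
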
@@ -22,7 +22,24 @@ config ARM64
 	select ARCH_HAS_STRICT_MODULE_RWX
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG if ACPI_APEI_SEA
+	select ARCH_INLINE_READ_LOCK if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_READ_UNLOCK_IRQRESTORE if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_LOCK_IRQSAVE if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_BH if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQ if !PREEMPT
+	select ARCH_INLINE_WRITE_UNLOCK_IRQRESTORE if !PREEMPT
 	select ARCH_USE_CMPXCHG_LOCKREF
+	select ARCH_USE_QUEUED_RWLOCKS
 	select ARCH_SUPPORTS_MEMORY_FAILURE
 	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_SUPPORTS_NUMA_BALANCING
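
The ARCH_INLINE_* selects added above matter because the generic rwlock entry points otherwise live out of line in kernel/locking/spinlock.c; with the qrwlock fast paths now being small C functions, inlining them at the call site is worthwhile when preemption is disabled. Roughly, the plumbing works as in this simplified sketch of include/linux/rwlock_api_smp.h (the real guards differ slightly and also depend on lock debugging options):

#ifdef CONFIG_INLINE_READ_LOCK			/* derived from ARCH_INLINE_READ_LOCK */
#define _raw_read_lock(lock)	__raw_read_lock(lock)	/* expands inline */
#else
void __lockfunc _raw_read_lock(rwlock_t *lock);		/* out-of-line call */
#endif
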
@@ -16,6 +16,7 @@ generic-y += mcs_spinlock.h
 generic-y += mm-arch-hooks.h
 generic-y += msi.h
 generic-y += preempt.h
+generic-y += qrwlock.h
 generic-y += rwsem.h
 generic-y += segment.h
 generic-y += serial.h

@@ -137,169 +137,7 @@ static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 }
 #define arch_spin_is_contended	arch_spin_is_contended
 
-/*
- * Write lock implementation.
- *
- * Write locks set bit 31. Unlocking, is done by writing 0 since the lock is
- * exclusively held.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- */
-static inline void arch_write_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	sevl\n"
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	mov	%w0, wzr\n"
-	"2:	casa	%w0, %w2, %1\n"
-	"	cbz	%w0, 3f\n"
-	"	ldxr	%w0, %1\n"
-	"	cbz	%w0, 2b\n"
-	"	wfe\n"
-	"	b	1b\n"
-	"3:")
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-}
-
-static inline int arch_write_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldaxr	%w0, %1\n"
-	"	cbnz	%w0, 2f\n"
-	"	stxr	%w0, %w2, %1\n"
-	"	cbnz	%w0, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	mov	%w0, wzr\n"
-	"	casa	%w0, %w2, %1\n"
-	__nops(2))
-	: "=&r" (tmp), "+Q" (rw->lock)
-	: "r" (0x80000000)
-	: "memory");
-
-	return !tmp;
-}
-
-static inline void arch_write_unlock(arch_rwlock_t *rw)
-{
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	"	stlr	wzr, %0",
-	"	swpl	wzr, wzr, %0")
-	: "=Q" (rw->lock) :: "memory");
-}
-
-/* write_can_lock - would write_trylock() succeed? */
-#define arch_write_can_lock(x)		((x)->lock == 0)
-
-/*
- * Read lock implementation.
- *
- * It exclusively loads the lock value, increments it and stores the new value
- * back if positive and the CPU still exclusively owns the location. If the
- * value is negative, the lock is already held.
- *
- * During unlocking there may be multiple active read locks but no write lock.
- *
- * The memory barriers are implicit with the load-acquire and store-release
- * instructions.
- *
- * Note that in UNDEFINED cases, such as unlocking a lock twice, the LL/SC
- * and LSE implementations may exhibit different behaviour (although this
- * will have no effect on lockdep).
- */
-static inline void arch_read_lock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(
-	"	sevl\n"
-	ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	wfe\n"
-	"2:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 1b\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 2b\n"
-	__nops(1),
-	/* LSE atomics */
-	"1:	wfe\n"
-	"2:	ldxr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1b\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w0, %w1, %w0\n"
-	"	cbnz	%w0, 2b")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-}
-
-static inline void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"1:	ldxr	%w0, %2\n"
-	"	sub	%w0, %w0, #1\n"
-	"	stlxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b",
-	/* LSE atomics */
-	"	movn	%w0, #0\n"
-	"	staddl	%w0, %2\n"
-	__nops(2))
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "memory");
-}
-
-static inline int arch_read_trylock(arch_rwlock_t *rw)
-{
-	unsigned int tmp, tmp2;
-
-	asm volatile(ARM64_LSE_ATOMIC_INSN(
-	/* LL/SC */
-	"	mov	%w1, #1\n"
-	"1:	ldaxr	%w0, %2\n"
-	"	add	%w0, %w0, #1\n"
-	"	tbnz	%w0, #31, 2f\n"
-	"	stxr	%w1, %w0, %2\n"
-	"	cbnz	%w1, 1b\n"
-	"2:",
-	/* LSE atomics */
-	"	ldr	%w0, %2\n"
-	"	adds	%w1, %w0, #1\n"
-	"	tbnz	%w1, #31, 1f\n"
-	"	casa	%w0, %w1, %2\n"
-	"	sbc	%w1, %w1, %w0\n"
-	__nops(1)
-	"1:")
-	: "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
-	:
-	: "cc", "memory");
-
-	return !tmp2;
-}
-
-/* read_can_lock - would read_trylock() succeed? */
-#define arch_read_can_lock(x)		((x)->lock < 0x80000000)
+#include <asm/qrwlock.h>
 
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
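
For readers not fluent in the inline asm being deleted, the removed rwlock behaves roughly like the userspace C11 sketch below (illustrative only, hypothetical names; the real code also uses WFE/SEV to idle between attempts and has an LSE-atomics variant): bit 31 marks a writer, the low bits count readers, and nothing queues contended lockers, which is part of what the queued implementation addresses.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/* Userspace analogue of the removed arch_rwlock_t, for illustration. */
typedef struct { _Atomic uint32_t lock; } old_rwlock_t;

#define WRITER_BIT	0x80000000u

static void old_write_lock(old_rwlock_t *rw)
{
	uint32_t expected = 0;

	/* Spin until the word is 0, then claim it by setting bit 31. */
	while (!atomic_compare_exchange_weak_explicit(&rw->lock, &expected,
						      WRITER_BIT,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = 0;
}

static void old_write_unlock(old_rwlock_t *rw)
{
	/* Exclusively held, so unlocking is a release store of 0. */
	atomic_store_explicit(&rw->lock, 0, memory_order_release);
}

static bool old_read_trylock(old_rwlock_t *rw)
{
	uint32_t val = atomic_load_explicit(&rw->lock, memory_order_relaxed);

	do {
		if (val & WRITER_BIT)	/* a writer holds the lock */
			return false;
	} while (!atomic_compare_exchange_weak_explicit(&rw->lock, &val, val + 1,
							memory_order_acquire,
							memory_order_relaxed));
	return true;
}

static void old_read_lock(old_rwlock_t *rw)
{
	while (!old_read_trylock(rw))
		;	/* the asm version idles here with WFE */
}

static void old_read_unlock(old_rwlock_t *rw)
{
	atomic_fetch_sub_explicit(&rw->lock, 1, memory_order_release);
}
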

@@ -36,10 +36,6 @@ typedef struct {
 #define __ARCH_SPIN_LOCK_UNLOCKED	{ 0 , 0 }
 
-typedef struct {
-	volatile unsigned int lock;
-} arch_rwlock_t;
-
-#define __ARCH_RW_LOCK_UNLOCKED		{ 0 }
+#include <asm-generic/qrwlock_types.h>
 
 #endif
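
For reference, the generic header pulled in above defines arch_rwlock_t along roughly these lines (paraphrased from include/asm-generic/qrwlock_types.h of this era): a combined writer/reader count plus an internal spinlock used to queue contended lockers.

typedef struct qrwlock {
	atomic_t	cnts;		/* writer byte + reader count */
	arch_spinlock_t	wait_lock;	/* serialises the waiters' queue */
} arch_rwlock_t;

#define __ARCH_RW_LOCK_UNLOCKED {			\
	.cnts = ATOMIC_INIT(0),				\
	.wait_lock = __ARCH_SPIN_LOCK_UNLOCKED,		\
}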