Commit 59fb586b authored by Will Deacon, committed by Ingo Molnar

locking/qspinlock: Remove unbounded cmpxchg() loop from locking slowpath

The qspinlock locking slowpath utilises a "pending" bit as a simple form
of an embedded test-and-set lock that can avoid the overhead of explicit
queuing in cases where the lock is held but uncontended. This bit is
managed using a cmpxchg() loop which tries to transition the uncontended
lock word from (0,0,0) -> (0,0,1) or (0,0,1) -> (0,1,1).
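
For reference, the (n,p,l) triples used below name the (tail, pending, locked) fields packed into the 32-bit lock word. A rough sketch of the layout, assuming the common _Q_PENDING_BITS == 8 configuration (the exact field widths depend on NR_CPUS and are defined in qspinlock_types.h):

  /*
   *  bits  0- 7: locked byte                      -> "l"
   *  bit      8: pending                          -> "p"
   *              (bits 9-15 unused, so pending can be read and
   *               written as its own byte)
   *  bits 16-31: MCS tail (node index + CPU)      -> "n"
   */
  #define _Q_LOCKED_VAL   (1U << 0)
  #define _Q_PENDING_VAL  (1U << 8)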

Unfortunately, the cmpxchg() loop is unbounded and lockers can be starved
indefinitely if the lock word is seen to oscillate between unlocked
(0,0,0) and locked (0,0,1). This could happen if concurrent lockers are
able to take the lock in the cmpxchg() loop without queuing and pass it
around amongst themselves.

This patch fixes the problem by unconditionally setting _Q_PENDING_VAL
using atomic_fetch_or, and then inspecting the old value to see whether
we need to spin on the current lock owner, or whether we now effectively
hold the lock. The tricky scenario is when concurrent lockers end up
queuing on the lock and the lock becomes available, causing us to see
a lock word of (n,0,0). With pending now set, simply queuing could lead
to deadlock as the head of the queue may not have observed the pending
flag being cleared. Conversely, if the head of the queue did observe
pending being cleared, then it could transition the lock from (n,0,0) ->
(0,0,1) meaning that any attempt to "undo" our setting of the pending
bit could race with a concurrent locker trying to set it.
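
As a purely illustrative aside, the shape of the new fast path can be sketched outside the kernel with C11 atomics. The names here (pending_fastpath, the Q_* constants) and the fetch_add used in place of clear_pending_set_locked() are assumptions made for this sketch only; the real code is in the diff below:

  #include <stdatomic.h>
  #include <stdbool.h>
  #include <stdint.h>

  #define Q_LOCKED_VAL   (1U << 0)       /* locked byte */
  #define Q_PENDING_VAL  (1U << 8)       /* pending bit */
  #define Q_LOCKED_MASK  0xffU

  /*
   * Bounded "trylock || pending" step: a single fetch_or instead of an
   * unbounded cmpxchg() loop. Returns true once the lock is held; returns
   * false if the caller has to queue, undoing a stray pending bit first.
   */
  static bool pending_fastpath(_Atomic uint32_t *lock)
  {
          /* Unconditionally set pending; 'old' tells us what we raced with. */
          uint32_t old = atomic_fetch_or_explicit(lock, Q_PENDING_VAL,
                                                  memory_order_acquire);

          if (old & ~Q_LOCKED_MASK) {
                  /* Tail or pending already set: queue instead. If pending
                   * was clear, we set it above and must undo that first. */
                  if (!(old & Q_PENDING_VAL))
                          atomic_fetch_and_explicit(lock, ~Q_PENDING_VAL,
                                                    memory_order_relaxed);
                  return false;
          }

          /* We own pending; wait for the current owner (if any) to go away. */
          while (atomic_load_explicit(lock, memory_order_acquire) & Q_LOCKED_MASK)
                  ;

          /* *,1,0 -> *,0,1: clear pending, set locked, keep the tail bits
           * (unsigned wrap-around makes this "+LOCKED, -PENDING"). */
          atomic_fetch_add_explicit(lock, Q_LOCKED_VAL - Q_PENDING_VAL,
                                    memory_order_relaxed);
          return true;
  }

The point is that the pending bit is now claimed with a single fetch_or, so a locker either proceeds on the fast path or queues after at most one extra atomic operation, rather than looping on cmpxchg().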

We handle this race by preserving the pending bit when taking the lock
after reaching the head of the queue and leaving the tail entry intact
if we saw pending set, because we know that the tail is going to be
updated shortly.
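
The corresponding decision at the head of the queue can be sketched the same way (reusing the Q_* constants from the sketch above); the fetch_or stands in for the kernel's set_locked() byte store, and a false return means the caller re-reads the lock word and retries, as the kernel's loop does:

  #define Q_PENDING_MASK (0xffU << 8)    /* pending occupies its own byte */
  #define Q_TAIL_MASK    (~0U << 16)     /* MCS tail: CPU number + node index */

  static bool claim_lock_at_queue_head(_Atomic uint32_t *lock,
                                       uint32_t val, uint32_t tail)
  {
          /* Ordering is relaxed in this sketch; in the kernel the needed
           * acquire comes from the earlier wait at the head of the queue. */
          if ((val & Q_TAIL_MASK) != tail || (val & Q_PENDING_MASK)) {
                  /* Someone queued behind us, or pending is set:
                   * *,*,0 -> *,*,1 -- set only the locked byte and leave
                   * tail/pending intact for the concurrent lockers. */
                  atomic_fetch_or_explicit(lock, Q_LOCKED_VAL,
                                           memory_order_relaxed);
                  return true;
          }

          /* Sole waiter and nothing pending: try n,0,0 -> 0,0,1. */
          return atomic_compare_exchange_strong_explicit(lock, &val,
                                                         Q_LOCKED_VAL,
                                                         memory_order_relaxed,
                                                         memory_order_relaxed);
  }
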
Signed-off-by: Will Deacon <will.deacon@arm.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Waiman Long <longman@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: boqun.feng@gmail.com
Cc: linux-arm-kernel@lists.infradead.org
Cc: paulmck@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1524738868-31318-6-git-send-email-will.deacon@arm.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b247be3f
@@ -127,6 +127,17 @@ static inline __pure struct mcs_spinlock *decode_tail(u32 tail)
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
 #if _Q_PENDING_BITS == 8
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(struct qspinlock *lock)
+{
+	WRITE_ONCE(lock->pending, 0);
+}
+
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -162,6 +173,17 @@ static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
 #else /* _Q_PENDING_BITS == 8 */
 
+/**
+ * clear_pending - clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,* -> *,0,*
+ */
+static __always_inline void clear_pending(struct qspinlock *lock)
+{
+	atomic_andnot(_Q_PENDING_VAL, &lock->val);
+}
+
 /**
  * clear_pending_set_locked - take ownership and clear the pending bit.
  * @lock: Pointer to queued spinlock structure
@@ -266,7 +288,7 @@ static __always_inline u32 __pv_wait_head_or_lock(struct qspinlock *lock,
 void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 {
 	struct mcs_spinlock *prev, *next, *node;
-	u32 new, old, tail;
+	u32 old, tail;
 	int idx;
 
 	BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
@@ -289,51 +311,35 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 			       (VAL != _Q_PENDING_VAL) || !cnt--);
 	}
 
-	/*
-	 * trylock || pending
-	 *
-	 * 0,0,0 -> 0,0,1 ; trylock
-	 * 0,0,1 -> 0,1,1 ; pending
-	 */
-	for (;;) {
 	/*
 	 * If we observe any contention; queue.
 	 */
 	if (val & ~_Q_LOCKED_MASK)
 		goto queue;
 
-		new = _Q_LOCKED_VAL;
-		if (val == new)
-			new |= _Q_PENDING_VAL;
-
-		/*
-		 * Acquire semantic is required here as the function may
-		 * return immediately if the lock was free.
-		 */
-		old = atomic_cmpxchg_acquire(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
-
 	/*
-	 * we won the trylock
+	 * trylock || pending
+	 *
+	 * 0,0,0 -> 0,0,1 ; trylock
+	 * 0,0,1 -> 0,1,1 ; pending
 	 */
-	if (new == _Q_LOCKED_VAL)
-		return;
-
+	val = atomic_fetch_or_acquire(_Q_PENDING_VAL, &lock->val);
+	if (!(val & ~_Q_LOCKED_MASK)) {
 		/*
-		 * we're pending, wait for the owner to go away.
+		 * We're pending, wait for the owner to go away.
 		 *
 		 * *,1,1 -> *,1,0
 		 *
 		 * this wait loop must be a load-acquire such that we match the
 		 * store-release that clears the locked bit and create lock
-		 * sequentiality; this is because not all clear_pending_set_locked()
-		 * implementations imply full barriers.
+		 * sequentiality; this is because not all
+		 * clear_pending_set_locked() implementations imply full
+		 * barriers.
		 */
-	smp_cond_load_acquire(&lock->val.counter, !(VAL & _Q_LOCKED_MASK));
+		if (val & _Q_LOCKED_MASK) {
+			smp_cond_load_acquire(&lock->val.counter,
+					      !(VAL & _Q_LOCKED_MASK));
+		}
 
 		/*
 		 * take ownership and clear the pending bit.
@@ -342,6 +348,14 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 		 */
 		clear_pending_set_locked(lock);
 		return;
+	}
+
+	/*
+	 * If pending was clear but there are waiters in the queue, then
+	 * we need to undo our setting of pending before we queue ourselves.
+	 */
+	if (!(val & _Q_PENDING_MASK))
+		clear_pending(lock);
 
 	/*
 	 * End of pending bit optimistic spinning and beginning of MCS
@@ -445,15 +459,15 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 * claim the lock:
 	 *
 	 * n,0,0 -> 0,0,1 : lock, uncontended
-	 * *,0,0 -> *,0,1 : lock, contended
+	 * *,*,0 -> *,*,1 : lock, contended
 	 *
-	 * If the queue head is the only one in the queue (lock value == tail),
-	 * clear the tail code and grab the lock. Otherwise, we only need
-	 * to grab the lock.
+	 * If the queue head is the only one in the queue (lock value == tail)
+	 * and nobody is pending, clear the tail code and grab the lock.
+	 * Otherwise, we only need to grab the lock.
 	 */
 	for (;;) {
 		/* In the PV case we might already have _Q_LOCKED_VAL set */
-		if ((val & _Q_TAIL_MASK) != tail) {
+		if ((val & _Q_TAIL_MASK) != tail || (val & _Q_PENDING_MASK)) {
 			set_locked(lock);
 			break;
 		}
...
@@ -118,11 +118,6 @@ static __always_inline void set_pending(struct qspinlock *lock)
 	WRITE_ONCE(lock->pending, 1);
 }
 
-static __always_inline void clear_pending(struct qspinlock *lock)
-{
-	WRITE_ONCE(lock->pending, 0);
-}
-
 /*
  * The pending bit check in pv_queued_spin_steal_lock() isn't a memory
  * barrier. Therefore, an atomic cmpxchg_acquire() is used to acquire the
...