Commit 1a728dff authored by Waiman Long, committed by Peter Zijlstra

locking/rwsem: Enable reader optimistic lock stealing

If the optimistic spinning queue is empty and the rwsem does not have
the handoff or write-lock bits set, there is no need to call
rwsem_optimistic_spin() to spin on the lock. Instead, the reader can
steal the lock directly, since its reader bias is already in the
count. If it is the first reader in this state, it will also try to
wake up other readers in the wait queue.
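
As a rough standalone illustration of the steal condition (a sketch,
not kernel code: the flag values mirror the bit layout used in
rwsem.c, and the spinners argument stands in for checking
osq_is_locked(&sem->osq)):

  #include <stdbool.h>

  #define RWSEM_WRITER_LOCKED  (1UL << 0)  /* writer holds the lock */
  #define RWSEM_FLAG_HANDOFF   (1UL << 2)  /* handoff to a waiter pending */

  /*
   * The fast path has already added this task's reader bias to the
   * count, so the read lock can be stolen outright when no writer
   * holds the lock, no handoff is pending and nobody is spinning.
   */
  static bool can_steal_read_lock(unsigned long count, bool spinners)
  {
          return !(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF)) &&
                 !spinners;
  }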

With this patch applied, the following were the lock event counts
after rebooting a 2-socket system and running a "make -j96" kernel
rebuild:

  rwsem_opt_rlock=4437
  rwsem_rlock=29
  rwsem_rlock_steal=19

So lock stealing represents about 0.4% (19 out of 4437 + 29 + 19 =
4485) of all the read locks acquired in the slow path.
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Davidlohr Bueso <dbueso@suse.de>
Link: https://lkml.kernel.org/r/20201121041416.12285-4-longman@redhat.com
parent 2f06f702
--- a/kernel/locking/lock_events_list.h
+++ b/kernel/locking/lock_events_list.h
@@ -63,6 +63,7 @@ LOCK_EVENT(rwsem_opt_nospin)	/* # of disabled optspins		*/
 LOCK_EVENT(rwsem_opt_norspin)	/* # of disabled reader-only optspins	*/
 LOCK_EVENT(rwsem_opt_rlock2)	/* # of opt-acquired 2ndary read locks	*/
 LOCK_EVENT(rwsem_rlock)		/* # of read locks acquired		*/
+LOCK_EVENT(rwsem_rlock_steal)	/* # of read locks by lock stealing	*/
 LOCK_EVENT(rwsem_rlock_fast)	/* # of fast read locks acquired	*/
 LOCK_EVENT(rwsem_rlock_fail)	/* # of failed read lock acquisitions	*/
 LOCK_EVENT(rwsem_rlock_handoff)	/* # of read lock handoffs		*/
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -976,6 +976,12 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
 	}
 	return false;
 }
+
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return !osq_is_locked(&sem->osq);
+}
+
 #else
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem,
 					   unsigned long nonspinnable)
@@ -996,6 +1002,11 @@ static inline bool rwsem_reader_phase_trylock(struct rw_semaphore *sem,
 	return false;
 }
 
+static inline bool rwsem_no_spinners(struct rw_semaphore *sem)
+{
+	return false;
+}
+
 static inline int
 rwsem_spin_on_owner(struct rw_semaphore *sem, unsigned long nonspinnable)
 {
@@ -1026,6 +1037,22 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
 	    !(count & RWSEM_WRITER_LOCKED))
 		goto queue;
 
+	/*
+	 * Reader optimistic lock stealing
+	 *
+	 * We can take the read lock directly without doing
+	 * rwsem_optimistic_spin() if the conditions are right.
+	 * Also wake up other readers if it is the first reader.
+	 */
+	if (!(count & (RWSEM_WRITER_LOCKED | RWSEM_FLAG_HANDOFF)) &&
+	    rwsem_no_spinners(sem)) {
+		rwsem_set_reader_owned(sem);
+		lockevent_inc(rwsem_rlock_steal);
+		if (rcnt == 1)
+			goto wake_readers;
+		return sem;
+	}
+
 	/*
 	 * Save the current read-owner of rwsem, if available, and the
 	 * reader nonspinnable bit.
@@ -1048,6 +1075,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, int state)
 	 * Wake up other readers in the wait list if the front
 	 * waiter is a reader.
 	 */
+wake_readers:
 	if ((atomic_long_read(&sem->count) & RWSEM_FLAG_WAITERS)) {
 		raw_spin_lock_irq(&sem->wait_lock);
 		if (!list_empty(&sem->wait_list))
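For context on the rcnt == 1 test in the stolen path: rcnt is
computed near the top of rwsem_down_read_slowpath() by the parent
commit, roughly as follows (RWSEM_READER_SHIFT is the bit position
where the reader count starts in sem->count):

  /*
   * Reader count portion of sem->count; the low bits hold flags
   * such as RWSEM_WRITER_LOCKED and RWSEM_FLAG_HANDOFF.
   */
  long rcnt = (count >> RWSEM_READER_SHIFT);

Because the fast path has already added this task's reader bias to
the count, rcnt == 1 means it is the only, and therefore first,
reader; it then branches to the new wake_readers label to wake any
readers queued behind it instead of returning right away.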