Commit f5e29a26 authored by Linus Torvalds

Merge tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A set of updates for the RT specific reader/writer locking base code:

   - Make the fast path reader ordering guarantees correct.

   - Code reshuffling to make the fix simpler"

[ This plays ugly games with atomic_add_return_release() because we
  don't have a plain atomic_add_release(), and should really be cleaned
  up, I think    - Linus ]

* tag 'locking-urgent-2021-09-19' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rwbase: Take care of ordering guarantee for fastpath reader
  locking/rwbase: Extract __rwbase_write_trylock()
  locking/rwbase: Properly match set_and_save_state() to restore_state()
parents 62453a46 81121524
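
For context, here is a minimal user-space sketch of the ordering the series establishes, written with C11 atomics as a stand-in for the kernel's atomic_t API. The names (counter, data, write_unlock, read_trylock) are illustrative only and the single-step counter ignores the kernel's READER_BIAS arithmetic; the real primitives are the ones in the diff below.

/*
 * Illustrative sketch only: C11 atomics standing in for the kernel's
 * atomic_t API. A negative counter means "no writer"; the kernel keeps
 * the reader count negative via a large READER_BIAS, which this
 * single-step model ignores.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int counter = 0;		/* 0: a writer currently owns the lock */
static int data;

/*
 * Writer unlock: the operation that makes the lock look free again must
 * have RELEASE semantics so it publishes the write-side critical section.
 * The kernel has no plain atomic_add_release(), hence the bracketed note
 * about (void)atomic_add_return_release() in __rwbase_write_unlock().
 */
static void write_unlock(void)
{
	data = 42;			/* write-side critical section */
	(void)atomic_fetch_sub_explicit(&counter, 1, memory_order_release);
}

/*
 * Reader fast path: the successful compare-and-swap is the ACQUIRE that
 * pairs with the writer's release above, mirroring atomic_try_cmpxchg()
 * in rwbase_read_trylock().
 */
static bool read_trylock(void)
{
	int c = atomic_load_explicit(&counter, memory_order_relaxed);

	while (c < 0) {
		if (atomic_compare_exchange_weak_explicit(&counter, &c, c + 1,
							  memory_order_acquire,
							  memory_order_relaxed))
			return true;	/* guaranteed to observe data == 42 */
	}
	return false;			/* a writer owns or wants the lock */
}

int main(void)
{
	write_unlock();			/* writer leaves, publishes data */
	if (read_trylock())
		assert(data == 42);	/* holds because of the release/acquire pair */
	return 0;
}

As the bracketed note says, a dedicated atomic_add_release() would make the write-unlock path read more naturally; the cast-to-void of atomic_add_return_release() is only there because the atomic_t API lacks one.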
@@ -41,6 +41,12 @@
  * The risk of writer starvation is there, but the pathological use cases
  * which trigger it are not necessarily the typical RT workloads.
  *
+ * Fast-path orderings:
+ * The lock/unlock of readers can run in fast paths: lock and unlock are only
+ * atomic ops, and there is no inner lock to provide ACQUIRE and RELEASE
+ * semantics of rwbase_rt. Atomic ops should thus provide _acquire()
+ * and _release() (or stronger).
+ *
  * Common code shared between RT rw_semaphore and rwlock
  */
 
@@ -53,6 +59,7 @@ static __always_inline int rwbase_read_trylock(struct rwbase_rt *rwb)
 	 * set.
 	 */
 	for (r = atomic_read(&rwb->readers); r < 0;) {
+		/* Fully-ordered if cmpxchg() succeeds, provides ACQUIRE */
 		if (likely(atomic_try_cmpxchg(&rwb->readers, &r, r + 1)))
 			return 1;
 	}
@@ -162,6 +169,8 @@ static __always_inline void rwbase_read_unlock(struct rwbase_rt *rwb,
 	/*
 	 * rwb->readers can only hit 0 when a writer is waiting for the
 	 * active readers to leave the critical section.
+	 *
+	 * dec_and_test() is fully ordered, provides RELEASE.
 	 */
 	if (unlikely(atomic_dec_and_test(&rwb->readers)))
 		__rwbase_read_unlock(rwb, state);
@@ -172,7 +181,11 @@ static inline void __rwbase_write_unlock(struct rwbase_rt *rwb, int bias,
 {
 	struct rt_mutex_base *rtm = &rwb->rtmutex;
 
-	atomic_add(READER_BIAS - bias, &rwb->readers);
+	/*
+	 * _release() is needed in case that reader is in fast path, pairing
+	 * with atomic_try_cmpxchg() in rwbase_read_trylock(), provides RELEASE
+	 */
+	(void)atomic_add_return_release(READER_BIAS - bias, &rwb->readers);
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	rwbase_rtmutex_unlock(rtm);
 }
@@ -196,6 +209,23 @@ static inline void rwbase_write_downgrade(struct rwbase_rt *rwb)
 	__rwbase_write_unlock(rwb, WRITER_BIAS - 1, flags);
 }
 
+static inline bool __rwbase_write_trylock(struct rwbase_rt *rwb)
+{
+	/* Can do without CAS because we're serialized by wait_lock. */
+	lockdep_assert_held(&rwb->rtmutex.wait_lock);
+
+	/*
+	 * _acquire is needed in case the reader is in the fast path, pairing
+	 * with rwbase_read_unlock(), provides ACQUIRE.
+	 */
+	if (!atomic_read_acquire(&rwb->readers)) {
+		atomic_set(&rwb->readers, WRITER_BIAS);
+		return 1;
+	}
+
+	return 0;
+}
+
 static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 				     unsigned int state)
 {
@@ -210,34 +240,30 @@ static int __sched rwbase_write_lock(struct rwbase_rt *rwb,
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	/*
-	 * set_current_state() for rw_semaphore
-	 * current_save_and_set_rtlock_wait_state() for rwlock
-	 */
-	rwbase_set_and_save_current_state(state);
-
-	/* Block until all readers have left the critical section. */
-	for (; atomic_read(&rwb->readers);) {
+	if (__rwbase_write_trylock(rwb))
+		goto out_unlock;
+
+	rwbase_set_and_save_current_state(state);
+	for (;;) {
 		/* Optimized out for rwlocks */
 		if (rwbase_signal_pending_state(state, current)) {
-			__set_current_state(TASK_RUNNING);
+			rwbase_restore_current_state();
 			__rwbase_write_unlock(rwb, 0, flags);
 			return -EINTR;
 		}
+
+		if (__rwbase_write_trylock(rwb))
+			break;
+
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
-
-		/*
-		 * Schedule and wait for the readers to leave the critical
-		 * section. The last reader leaving it wakes the waiter.
-		 */
-		if (atomic_read(&rwb->readers) != 0)
-			rwbase_schedule();
+		rwbase_schedule();
+		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
+
 		set_current_state(state);
-		raw_spin_lock_irqsave(&rtm->wait_lock, flags);
 	}
 
-	atomic_set(&rwb->readers, WRITER_BIAS);
 	rwbase_restore_current_state();
+out_unlock:
 	raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 	return 0;
 }
@@ -253,8 +279,7 @@ static inline int rwbase_write_trylock(struct rwbase_rt *rwb)
 	atomic_sub(READER_BIAS, &rwb->readers);
 
 	raw_spin_lock_irqsave(&rtm->wait_lock, flags);
-	if (!atomic_read(&rwb->readers)) {
-		atomic_set(&rwb->readers, WRITER_BIAS);
+	if (__rwbase_write_trylock(rwb)) {
 		raw_spin_unlock_irqrestore(&rtm->wait_lock, flags);
 		return 1;
 	}
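
The other half of the pairing runs in the opposite direction: the last reader's fully ordered atomic_dec_and_test() acts as the release, and the writer's atomic_read_acquire() in __rwbase_write_trylock() is the matching acquire. A rough user-space sketch of that direction, again with C11 atomics and illustrative names (active_readers, reader_result), not the kernel code:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int active_readers = 1;	/* one reader still in the section */
static int reader_result;

/*
 * Reader unlock: the kernel's atomic_dec_and_test() is fully ordered, so
 * it already provides the RELEASE for everything the reader did before
 * leaving; memory_order_acq_rel on the RMW models that here.
 */
static void read_unlock(void)
{
	reader_result = 7;		/* read-side critical section */
	if (atomic_fetch_sub_explicit(&active_readers, 1,
				      memory_order_acq_rel) == 1) {
		/* last reader gone; the kernel would wake the waiting writer */
	}
}

/*
 * Writer side, serialized by wait_lock in the kernel: the acquire load
 * pairs with the reader's release above, so once the count reads zero the
 * writer is guaranteed to observe reader_result == 7.
 */
static bool write_trylock(void)
{
	if (atomic_load_explicit(&active_readers, memory_order_acquire) == 0) {
		/* claim the lock; kernel: atomic_set(&readers, WRITER_BIAS) */
		return true;
	}
	return false;
}

int main(void)
{
	read_unlock();			/* the last reader leaves */
	return write_trylock() ? 0 : 1;
}

Because the real check runs under rtm->wait_lock, a plain atomic_read_acquire() followed by atomic_set() is enough and no CAS is needed, as the "serialized by wait_lock" comment in __rwbase_write_trylock() notes.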