Commit 3f524553 authored by Waiman Long, committed by Ingo Molnar

locking/rwsem: Disable preemption in all down_read*() and up_read() code paths

Commit:

  91d2a812 ("locking/rwsem: Make handoff writer optimistically spin on owner")

... assumes that when the owner field is changed to NULL, the lock will
become free soon. But commit:

  48dfb5d2 ("locking/rwsem: Disable preemption while trying for rwsem lock")

... disabled preemption only when acquiring an rwsem for write.

However, preemption has not yet been disabled when acquiring a read lock
on an rwsem. So a reader can add RWSEM_READER_BIAS to the count without
setting the owner field to signal a reader, then get preempted by an RT
task, which spins in the writer slowpath because the owner remains NULL,
leading to a live lock.
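
Illustratively, the interleaving looks like this (a sketch of the
scenario above, not a trace from the original report):

  reader task                         RT task (writer)
  -----------                         ----------------
  down_read()
    adds RWSEM_READER_BIAS to count
  <preempted before setting owner>
                                      down_write()
                                        handoff writer spins on owner
                                        owner == NULL, count still busy
                                        => keeps spinning; the preempted
                                           reader never runs again on
                                           this CPU, so the lock is
                                           never released: live lock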

One easy way to fix this problem is to disable preemption in all the
down_read*() and up_read() code paths, as implemented in this patch.
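
For illustration only, here is a minimal user-space sketch of the
bracketing pattern the patch applies (stub preempt calls, a simplified
lock word, and hypothetical sketch_* names; this is not the kernel
implementation):

  /*
   * Sketch only: the READER_BIAS update and the owner store sit in
   * one non-preemptible section, so a spinning writer can no longer
   * observe "count busy, owner == NULL" indefinitely. The preempt
   * calls are no-op stand-ins for the real kernel primitives.
   */
  #include <stdatomic.h>
  #include <stdio.h>

  #define READER_BIAS 256L

  struct sketch_rwsem {
          atomic_long count;      /* readers add READER_BIAS */
          atomic_long owner;      /* 0 == no owner recorded */
  };

  static void preempt_disable_stub(void) { }  /* kernel: preempt_disable() */
  static void preempt_enable_stub(void)  { }  /* kernel: preempt_enable() */

  static void sketch_down_read(struct sketch_rwsem *sem, long self)
  {
          preempt_disable_stub();
          atomic_fetch_add(&sem->count, READER_BIAS); /* take the bias */
          atomic_store(&sem->owner, self);            /* signal a reader */
          preempt_enable_stub();                      /* window closed */
  }

  static void sketch_up_read(struct sketch_rwsem *sem)
  {
          preempt_disable_stub();
          atomic_store(&sem->owner, 0);
          atomic_fetch_sub(&sem->count, READER_BIAS);
          preempt_enable_stub();
  }

  int main(void)
  {
          struct sketch_rwsem sem = { 0, 0 };

          sketch_down_read(&sem, 1);
          printf("held: count=%ld owner=%ld\n",
                 atomic_load(&sem.count), atomic_load(&sem.owner));
          sketch_up_read(&sem);
          printf("free: count=%ld owner=%ld\n",
                 atomic_load(&sem.count), atomic_load(&sem.owner));
          return 0;
  }

Any C11 compiler builds this; in the kernel it is the real
preempt_disable()/preempt_enable() pair that provides the guarantee.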

Fixes: 91d2a812 ("locking/rwsem: Make handoff writer optimistically spin on owner")
Reported-by: Mukesh Ojha <quic_mojha@quicinc.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126003628.365092-3-longman@redhat.com
parent b613c7f3
@@ -1091,7 +1091,7 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
 			/* Ordered by sem->wait_lock against rwsem_mark_wake(). */
 			break;
 		}
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_reader);
 	}
 
@@ -1253,14 +1253,20 @@ static struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem)
  */
 static inline int __down_read_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
 	long count;
 
+	preempt_disable();
 	if (!rwsem_read_trylock(sem, &count)) {
-		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state)))
-			return -EINTR;
+		if (IS_ERR(rwsem_down_read_slowpath(sem, count, state))) {
+			ret = -EINTR;
+			goto out;
+		}
 		DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 	}
-	return 0;
+out:
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_read(struct rw_semaphore *sem)
@@ -1280,19 +1286,23 @@ static inline int __down_read_killable(struct rw_semaphore *sem)
 
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
+	int ret = 0;
 	long tmp;
 
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 
+	preempt_disable();
 	tmp = atomic_long_read(&sem->count);
 	while (!(tmp & RWSEM_READ_FAILED_MASK)) {
 		if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp,
 						    tmp + RWSEM_READER_BIAS)) {
 			rwsem_set_reader_owned(sem);
-			return 1;
+			ret = 1;
+			break;
 		}
 	}
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 /*
@@ -1334,6 +1344,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
 	DEBUG_RWSEMS_WARN_ON(!is_rwsem_reader_owned(sem), sem);
 
+	preempt_disable();
 	rwsem_clear_reader_owned(sem);
 	tmp = atomic_long_add_return_release(-RWSEM_READER_BIAS, &sem->count);
 	DEBUG_RWSEMS_WARN_ON(tmp < 0, sem);
@@ -1342,6 +1353,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 		clear_nonspinnable(sem);
 		rwsem_wake(sem);
 	}
+	preempt_enable();
 }
 
 /*
@@ -1661,6 +1673,12 @@ void down_read_non_owner(struct rw_semaphore *sem)
 {
 	might_sleep();
 	__down_read(sem);
+	/*
+	 * The owner value for a reader-owned lock is mostly for debugging
+	 * purpose only and is not critical to the correct functioning of
+	 * rwsem. So it is perfectly fine to set it in a preempt-enabled
+	 * context here.
+	 */
 	__rwsem_set_reader_owned(sem, NULL);
 }
 EXPORT_SYMBOL(down_read_non_owner);