Commit 1d61659c authored by Waiman Long, committed by Ingo Molnar

locking/rwsem: Disable preemption in all down_write*() and up_write() code paths

The previous patch disabled preemption in all the down_read() and
up_read() code paths. For symmetry, this patch extends commit:

  48dfb5d2 ("locking/rwsem: Disable preemption while trying for rwsem lock")

... so that preemption is also disabled in all the down_write() and
up_write() code paths, including downgrade_write().
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Waiman Long <longman@redhat.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20230126003628.365092-4-longman@redhat.com
parent 3f524553
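For orientation, here is a minimal usage sketch of the writer-side API this patch affects. The structure and its fields are hypothetical; down_write() and up_write() are the standard kernel calls whose internals now run with preemption disabled:

#include <linux/rwsem.h>

struct my_config {			/* hypothetical container */
	struct rw_semaphore lock;
	int generation;
};

static void config_bump(struct my_config *cfg)
{
	down_write(&cfg->lock);		/* fast path and slowpath now both
					 * execute with preemption disabled */
	cfg->generation++;		/* exclusive writer section */
	up_write(&cfg->lock);		/* release and any waiter wakeup also
					 * run under preempt_disable() */
}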
@@ -256,16 +256,13 @@ static inline bool rwsem_read_trylock(struct rw_semaphore *sem, long *cntp)
 static inline bool rwsem_write_trylock(struct rw_semaphore *sem)
 {
 	long tmp = RWSEM_UNLOCKED_VALUE;
-	bool ret = false;
 
-	preempt_disable();
 	if (atomic_long_try_cmpxchg_acquire(&sem->count, &tmp, RWSEM_WRITER_LOCKED)) {
 		rwsem_set_owner(sem);
-		ret = true;
+		return true;
 	}
-	preempt_enable();
-	return ret;
+	return false;
 }
 
 /*
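With the preempt_disable()/preempt_enable() pair moved out to the callers, the trylock collapses to a single acquire-ordered compare-and-swap. As a rough userspace analogy (not kernel code; the names below are illustrative only), the fast path looks like this in C11 atomics:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define RWSEM_UNLOCKED_VALUE	0L
#define RWSEM_WRITER_LOCKED	1L

static atomic_long count = RWSEM_UNLOCKED_VALUE;

/* Succeeds only when no reader or writer holds the lock: one CAS from
 * "unlocked" to "writer locked" with acquire ordering on success. */
static bool write_trylock(void)
{
	long expected = RWSEM_UNLOCKED_VALUE;

	return atomic_compare_exchange_strong_explicit(
			&count, &expected, RWSEM_WRITER_LOCKED,
			memory_order_acquire, memory_order_relaxed);
}

int main(void)
{
	printf("first trylock:  %d\n", write_trylock());	/* 1: acquired */
	printf("second trylock: %d\n", write_trylock());	/* 0: held */
	return 0;
}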
@@ -716,7 +713,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 		return false;
 	}
 
-	preempt_disable();
 	/*
 	 * Disabling preemption is equivalent to an RCU read-side critical
 	 * section, thus the task_struct structure won't go away.
@@ -728,7 +724,6 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 	if ((flags & RWSEM_NONSPINNABLE) ||
 	    (owner && !(flags & RWSEM_READER_OWNED) && !owner_on_cpu(owner)))
 		ret = false;
-	preempt_enable();
 
 	lockevent_cond_inc(rwsem_opt_fail, !ret);
 	return ret;
@@ -828,8 +823,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	int loop = 0;
 	u64 rspin_threshold = 0;
 
-	preempt_disable();
-
 	/* sem->wait_lock should not be held when doing optimistic spinning */
 	if (!osq_lock(&sem->osq))
 		goto done;
@@ -937,7 +930,6 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 	}
 	osq_unlock(&sem->osq);
 done:
-	preempt_enable();
 	lockevent_cond_inc(rwsem_opt_fail, !taken);
 	return taken;
 }
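These removals are safe because rwsem_can_spin_on_owner() and rwsem_optimistic_spin() are now reached only from paths that already hold preempt_disable(), so the inner pairs were redundant nesting (the preempt count just bounced between 1 and 2). Schematically, with a hypothetical helper standing in for the real spin loop:

/* Before: redundant nesting, preempt count briefly 2 while spinning. */
preempt_disable();		/* caller, e.g. the write slowpath */
preempt_disable();		/* callee, now removed */
spin_for_owner();		/* hypothetical stand-in for the spin loop */
preempt_enable();
preempt_enable();

/* After: the callee relies on the caller's preemption-disabled region. */
preempt_disable();		/* caller */
spin_for_owner();
preempt_enable();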
@@ -1178,15 +1170,12 @@ rwsem_down_write_slowpath(struct rw_semaphore *sem, int state)
 		if (waiter.handoff_set) {
 			enum owner_state owner_state;
 
-			preempt_disable();
 			owner_state = rwsem_spin_on_owner(sem);
-			preempt_enable();
-
 			if (owner_state == OWNER_NULL)
 				goto trylock_again;
 		}
 
-		schedule();
+		schedule_preempt_disabled();
 		lockevent_inc(rwsem_sleep_writer);
 		set_current_state(state);
 trylock_again:
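Because the writer slowpath now runs with preemption disabled throughout, a plain schedule() would sleep with a nonzero preempt count; schedule_preempt_disabled() is the stock helper that re-enables preemption, schedules, and disables it again on wakeup. A rough sketch of the wait-loop shape it supports (the condition helper is hypothetical):

/* Preemption is already disabled when we get here. */
set_current_state(TASK_UNINTERRUPTIBLE);
while (!write_lock_available(sem)) {	/* hypothetical condition */
	schedule_preempt_disabled();	/* enable, sleep, re-disable */
	set_current_state(TASK_UNINTERRUPTIBLE);
}
__set_current_state(TASK_RUNNING);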
@@ -1310,12 +1299,15 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline int __down_write_common(struct rw_semaphore *sem, int state)
 {
+	int ret = 0;
+
+	preempt_disable();
 	if (unlikely(!rwsem_write_trylock(sem))) {
 		if (IS_ERR(rwsem_down_write_slowpath(sem, state)))
-			return -EINTR;
+			ret = -EINTR;
 	}
-
-	return 0;
+	preempt_enable();
+	return ret;
 }
 
 static inline void __down_write(struct rw_semaphore *sem)
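Note the refactor: the early return -EINTR becomes a single exit through ret, so one preempt_disable()/preempt_enable() pair brackets both the trylock fast path and the slowpath. At the API level the killable variant still surfaces the error as -EINTR; a hedged caller sketch (the function and structure are hypothetical, the rwsem calls are the standard API):

static int config_bump_killable(struct my_config *cfg)
{
	if (down_write_killable(&cfg->lock))
		return -EINTR;		/* fatal signal while waiting */

	cfg->generation++;		/* exclusive writer section */
	up_write(&cfg->lock);
	return 0;
}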
@@ -1330,8 +1322,14 @@ static inline int __down_write_killable(struct rw_semaphore *sem)
 
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
+	int ret;
+
+	preempt_disable();
 	DEBUG_RWSEMS_WARN_ON(sem->magic != sem, sem);
-	return rwsem_write_trylock(sem);
+	ret = rwsem_write_trylock(sem);
+	preempt_enable();
+
+	return ret;
 }
 
 /*
@@ -1374,9 +1372,9 @@ static inline void __up_write(struct rw_semaphore *sem)
 	preempt_disable();
 	rwsem_clear_owner(sem);
 	tmp = atomic_long_fetch_add_release(-RWSEM_WRITER_LOCKED, &sem->count);
-	preempt_enable();
 	if (unlikely(tmp & RWSEM_FLAG_WAITERS))
 		rwsem_wake(sem);
+	preempt_enable();
 }
 
 /*
@@ -1394,11 +1392,13 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 	 * write side. As such, rely on RELEASE semantics.
 	 */
 	DEBUG_RWSEMS_WARN_ON(rwsem_owner(sem) != current, sem);
+	preempt_disable();
 	tmp = atomic_long_fetch_add_release(
 		-RWSEM_WRITER_LOCKED+RWSEM_READER_BIAS, &sem->count);
 	rwsem_set_reader_owned(sem);
 	if (tmp & RWSEM_FLAG_WAITERS)
 		rwsem_downgrade_wake(sem);
+	preempt_enable();
 }
 
 #else /* !CONFIG_PREEMPT_RT */
...
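downgrade_write() converts a held write lock into a read lock without ever dropping the semaphore, so no other writer can slip in between; with this patch the count update and any reader wakeup also run preemption-disabled. A hedged usage sketch (the structure and consume() helper are hypothetical):

static void config_publish_then_read(struct my_config *cfg)
{
	down_write(&cfg->lock);
	cfg->generation++;		/* exclusive update */
	downgrade_write(&cfg->lock);	/* atomically become a reader */
	consume(cfg);			/* shared section; other readers OK */
	up_read(&cfg->lock);
}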