Commit a5b1a017 authored by Linus Torvalds

Merge tag 'locking-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - Micro-optimize local_xchg() and the rtmutex code on x86

 - Fix percpu-rwsem contention tracepoints

 - Simplify debugging Kconfig dependencies

 - Update/clarify the documentation of atomic primitives

 - Misc cleanups

* tag 'locking-core-2024-03-11' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rtmutex: Use try_cmpxchg_relaxed() in mark_rt_mutex_waiters()
  locking/x86: Implement local_xchg() using CMPXCHG without the LOCK prefix
  locking/percpu-rwsem: Trigger contention tracepoints only if contended
  locking/rwsem: Make DEBUG_RWSEMS and PREEMPT_RT mutually exclusive
  locking/rwsem: Clarify that RWSEM_READER_OWNED is just a hint
  locking/mutex: Simplify <linux/mutex.h>
  locking/qspinlock: Fix 'wait_early' set but not used warning
  locking/atomic: scripts: Clarify ordering of conditional atomics
parents b0402403 ce3576eb
......@@ -131,8 +131,20 @@ static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
(typeof(l->a.counter) *) old, new);
}
-/* Always has a lock prefix */
-#define local_xchg(l, n)	(xchg(&((l)->a.counter), (n)))
+/*
+ * Implement local_xchg using CMPXCHG instruction without the LOCK prefix.
+ * XCHG is expensive due to the implied LOCK prefix.  The processor
+ * cannot prefetch cachelines if XCHG is used.
+ */
+static __always_inline long
+local_xchg(local_t *l, long n)
+{
+	long c = local_read(l);
+
+	do { } while (!local_try_cmpxchg(l, &c, n));
+
+	return c;
+}
/**
* local_add_unless - add unless the number is already a given value
......
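The new local_xchg() is the standard trick of emulating an unconditional exchange with a compare-and-exchange loop: on failure, the CAS hands back the current value, so the loop body can stay empty. A minimal sketch of the same idea in user-space C11 (not the kernel API; sketch_local_xchg is a hypothetical name):

	#include <stdatomic.h>

	/*
	 * Exchange emulated as a CAS loop. On x86, XCHG on memory always
	 * asserts LOCK; CMPXCHG without LOCK is cheaper when the variable
	 * is CPU-local, which is exactly the local_t contract.
	 */
	static long sketch_local_xchg(_Atomic long *v, long new)
	{
		long c = atomic_load_explicit(v, memory_order_relaxed);

		/* On failure, the current value is written back into c. */
		while (!atomic_compare_exchange_weak_explicit(v, &c, new,
				memory_order_relaxed, memory_order_relaxed))
			;
		return c;
	}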
......@@ -2005,6 +2005,7 @@ raw_atomic_xchg_relaxed(atomic_t *v, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_cmpxchg() elsewhere.
*
......@@ -2033,6 +2034,7 @@ raw_atomic_cmpxchg(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_cmpxchg_acquire() elsewhere.
*
......@@ -2061,6 +2063,7 @@ raw_atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_cmpxchg_release() elsewhere.
*
......@@ -2088,6 +2091,7 @@ raw_atomic_cmpxchg_release(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_cmpxchg_relaxed() elsewhere.
*
......@@ -2112,7 +2116,8 @@ raw_atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg() elsewhere.
*
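In practice, the clause added above is what lets try_cmpxchg() loops drop the explicit reload that cmpxchg() loops need: a failed attempt refreshes @old in place. A sketch of the two styles, assuming only <linux/atomic.h> (the function names are illustrative):

	/* cmpxchg() style: the caller re-reads on every failure. */
	static void inc_with_cmpxchg(atomic_t *v)
	{
		int old = atomic_read(v);
		int cur;

		for (;;) {
			cur = atomic_cmpxchg(v, old, old + 1);
			if (cur == old)
				break;
			old = cur;	/* manual reload */
		}
	}

	/* try_cmpxchg() style: a failed attempt updates 'old' in place. */
	static void inc_with_try_cmpxchg(atomic_t *v)
	{
		int old = atomic_read(v);

		do {
			/* empty: 'old' already holds the latest value */
		} while (!atomic_try_cmpxchg(v, &old, old + 1));
	}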
......@@ -2145,7 +2150,8 @@ raw_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_acquire() elsewhere.
*
......@@ -2178,7 +2184,8 @@ raw_atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_release() elsewhere.
*
......@@ -2210,7 +2217,8 @@ raw_atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_try_cmpxchg_relaxed() elsewhere.
*
......@@ -2403,6 +2411,7 @@ raw_atomic_add_negative_relaxed(int i, atomic_t *v)
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_fetch_add_unless() elsewhere.
*
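The "Otherwise" clause matches the early exit in the generic fallback, which looks roughly like this; when the comparison hits @u the loop breaks before any store is attempted:

	static __always_inline int
	raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
	{
		int c = raw_atomic_read(v);

		do {
			if (unlikely(c == u))
				break;		/* @v stays untouched */
		} while (!raw_atomic_try_cmpxchg(v, &c, c + a));

		return c;
	}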
......@@ -2432,6 +2441,7 @@ raw_atomic_fetch_add_unless(atomic_t *v, int a, int u)
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_add_unless() elsewhere.
*
......@@ -2452,6 +2462,7 @@ raw_atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer to atomic_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_inc_not_zero() elsewhere.
*
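The conditional update is what makes the classic lookup-and-get refcount pattern safe: once the count has reached zero the object may already be on its way to being freed, and taking a fresh reference would resurrect it. A sketch (struct obj and obj_get() are hypothetical):

	struct obj {
		atomic_t refcnt;
		/* ... */
	};

	static struct obj *obj_get(struct obj *o)
	{
		/* Fails, and must fail, once refcnt has hit zero. */
		if (!o || !atomic_inc_not_zero(&o->refcnt))
			return NULL;
		return o;
	}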
......@@ -2472,6 +2483,7 @@ raw_atomic_inc_not_zero(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_inc_unless_negative() elsewhere.
*
......@@ -2499,6 +2511,7 @@ raw_atomic_inc_unless_negative(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_dec_unless_positive() elsewhere.
*
......@@ -2526,6 +2539,7 @@ raw_atomic_dec_unless_positive(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_dec_if_positive() elsewhere.
*
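dec_if_positive() is the counting-semaphore-shaped member of this family: it returns the decremented value, but only stores it when the result stays non-negative. A hypothetical consumer sketch:

	/* Returns true if one unit of budget was consumed. */
	static bool consume_budget(atomic_t *budget)
	{
		/*
		 * If *budget <= 0 nothing is written and the (negative)
		 * would-be result is returned, so the check below fails.
		 */
		return atomic_dec_if_positive(budget) >= 0;
	}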
......@@ -4117,6 +4131,7 @@ raw_atomic64_xchg_relaxed(atomic64_t *v, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_cmpxchg() elsewhere.
*
......@@ -4145,6 +4160,7 @@ raw_atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_cmpxchg_acquire() elsewhere.
*
......@@ -4173,6 +4189,7 @@ raw_atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_cmpxchg_release() elsewhere.
*
......@@ -4200,6 +4217,7 @@ raw_atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_cmpxchg_relaxed() elsewhere.
*
......@@ -4224,7 +4242,8 @@ raw_atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg() elsewhere.
*
......@@ -4257,7 +4276,8 @@ raw_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_acquire() elsewhere.
*
......@@ -4290,7 +4310,8 @@ raw_atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_release() elsewhere.
*
......@@ -4322,7 +4343,8 @@ raw_atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_try_cmpxchg_relaxed() elsewhere.
*
......@@ -4515,6 +4537,7 @@ raw_atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_fetch_add_unless() elsewhere.
*
......@@ -4544,6 +4567,7 @@ raw_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_add_unless() elsewhere.
*
......@@ -4564,6 +4588,7 @@ raw_atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* @v: pointer to atomic64_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_inc_not_zero() elsewhere.
*
......@@ -4584,6 +4609,7 @@ raw_atomic64_inc_not_zero(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_inc_unless_negative() elsewhere.
*
......@@ -4611,6 +4637,7 @@ raw_atomic64_inc_unless_negative(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_dec_unless_positive() elsewhere.
*
......@@ -4638,6 +4665,7 @@ raw_atomic64_dec_unless_positive(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic64_dec_if_positive() elsewhere.
*
......@@ -4662,4 +4690,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// eec048affea735b8464f58e6d96992101f8f85f1
+// 14850c0b0db20c62fdc78ccd1d42b98b88d76331
......@@ -1182,6 +1182,7 @@ atomic_xchg_relaxed(atomic_t *v, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg() there.
*
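For context on the raw/instrumented split these comments keep referring to: each instrumented wrapper is generated as sanitizer hooks followed by a call to the raw operation, roughly in this shape (details vary by configuration and kernel version):

	static __always_inline int
	atomic_cmpxchg(atomic_t *v, int old, int new)
	{
		kcsan_mb();					/* model the full barrier for KCSAN */
		instrument_atomic_read_write(v, sizeof(*v));	/* KASAN/KCSAN checks */
		return raw_atomic_cmpxchg(v, old, new);
	}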
......@@ -1202,6 +1203,7 @@ atomic_cmpxchg(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_acquire() there.
*
......@@ -1221,6 +1223,7 @@ atomic_cmpxchg_acquire(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_release() there.
*
......@@ -1241,6 +1244,7 @@ atomic_cmpxchg_release(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_cmpxchg_relaxed() there.
*
......@@ -1260,7 +1264,8 @@ atomic_cmpxchg_relaxed(atomic_t *v, int old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg() there.
*
......@@ -1282,7 +1287,8 @@ atomic_try_cmpxchg(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_acquire() there.
*
......@@ -1303,7 +1309,8 @@ atomic_try_cmpxchg_acquire(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_release() there.
*
......@@ -1325,7 +1332,8 @@ atomic_try_cmpxchg_release(atomic_t *v, int *old, int new)
* @new: int value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_try_cmpxchg_relaxed() there.
*
......@@ -1475,6 +1483,7 @@ atomic_add_negative_relaxed(int i, atomic_t *v)
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_fetch_add_unless() there.
*
......@@ -1495,6 +1504,7 @@ atomic_fetch_add_unless(atomic_t *v, int a, int u)
* @u: int value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_add_unless() there.
*
......@@ -1513,6 +1523,7 @@ atomic_add_unless(atomic_t *v, int a, int u)
* @v: pointer to atomic_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_not_zero() there.
*
......@@ -1531,6 +1542,7 @@ atomic_inc_not_zero(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_inc_unless_negative() there.
*
......@@ -1549,6 +1561,7 @@ atomic_inc_unless_negative(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_unless_positive() there.
*
......@@ -1567,6 +1580,7 @@ atomic_dec_unless_positive(atomic_t *v)
* @v: pointer to atomic_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_dec_if_positive() there.
*
......@@ -2746,6 +2760,7 @@ atomic64_xchg_relaxed(atomic64_t *v, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg() there.
*
......@@ -2766,6 +2781,7 @@ atomic64_cmpxchg(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_acquire() there.
*
......@@ -2785,6 +2801,7 @@ atomic64_cmpxchg_acquire(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_release() there.
*
......@@ -2805,6 +2822,7 @@ atomic64_cmpxchg_release(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_cmpxchg_relaxed() there.
*
......@@ -2824,7 +2842,8 @@ atomic64_cmpxchg_relaxed(atomic64_t *v, s64 old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg() there.
*
......@@ -2846,7 +2865,8 @@ atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_acquire() there.
*
......@@ -2867,7 +2887,8 @@ atomic64_try_cmpxchg_acquire(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_release() there.
*
......@@ -2889,7 +2910,8 @@ atomic64_try_cmpxchg_release(atomic64_t *v, s64 *old, s64 new)
* @new: s64 value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_try_cmpxchg_relaxed() there.
*
......@@ -3039,6 +3061,7 @@ atomic64_add_negative_relaxed(s64 i, atomic64_t *v)
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_fetch_add_unless() there.
*
......@@ -3059,6 +3082,7 @@ atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
* @u: s64 value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_add_unless() there.
*
......@@ -3077,6 +3101,7 @@ atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
* @v: pointer to atomic64_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_not_zero() there.
*
......@@ -3095,6 +3120,7 @@ atomic64_inc_not_zero(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_inc_unless_negative() there.
*
......@@ -3113,6 +3139,7 @@ atomic64_inc_unless_negative(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_unless_positive() there.
*
......@@ -3131,6 +3158,7 @@ atomic64_dec_unless_positive(atomic64_t *v)
* @v: pointer to atomic64_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic64_dec_if_positive() there.
*
......@@ -4310,6 +4338,7 @@ atomic_long_xchg_relaxed(atomic_long_t *v, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg() there.
*
......@@ -4330,6 +4359,7 @@ atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_acquire() there.
*
......@@ -4349,6 +4379,7 @@ atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_release() there.
*
......@@ -4369,6 +4400,7 @@ atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_cmpxchg_relaxed() there.
*
......@@ -4388,7 +4420,8 @@ atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg() there.
*
......@@ -4410,7 +4443,8 @@ atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_acquire() there.
*
......@@ -4431,7 +4465,8 @@ atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_release() there.
*
......@@ -4453,7 +4488,8 @@ atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_try_cmpxchg_relaxed() there.
*
......@@ -4603,6 +4639,7 @@ atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_fetch_add_unless() there.
*
......@@ -4623,6 +4660,7 @@ atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_add_unless() there.
*
......@@ -4641,6 +4679,7 @@ atomic_long_add_unless(atomic_long_t *v, long a, long u)
* @v: pointer to atomic_long_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_not_zero() there.
*
......@@ -4659,6 +4698,7 @@ atomic_long_inc_not_zero(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_inc_unless_negative() there.
*
......@@ -4677,6 +4717,7 @@ atomic_long_inc_unless_negative(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_unless_positive() there.
*
......@@ -4695,6 +4736,7 @@ atomic_long_dec_unless_positive(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Unsafe to use in noinstr code; use raw_atomic_long_dec_if_positive() there.
*
......@@ -5008,4 +5050,4 @@ atomic_long_dec_if_positive(atomic_long_t *v)
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 2cc4bc990fef44d3836ec108f11b610f3f438184
+// ce5b65e0f1f8a276268b667194581d24bed219d4
......@@ -1352,6 +1352,7 @@ raw_atomic_long_xchg_relaxed(atomic_long_t *v, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg() elsewhere.
*
......@@ -1374,6 +1375,7 @@ raw_atomic_long_cmpxchg(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_acquire() elsewhere.
*
......@@ -1396,6 +1398,7 @@ raw_atomic_long_cmpxchg_acquire(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_release() elsewhere.
*
......@@ -1418,6 +1421,7 @@ raw_atomic_long_cmpxchg_release(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_cmpxchg_relaxed() elsewhere.
*
......@@ -1440,7 +1444,8 @@ raw_atomic_long_cmpxchg_relaxed(atomic_long_t *v, long old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with full ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg() elsewhere.
*
......@@ -1463,7 +1468,8 @@ raw_atomic_long_try_cmpxchg(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with acquire ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_acquire() elsewhere.
*
......@@ -1486,7 +1492,8 @@ raw_atomic_long_try_cmpxchg_acquire(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with release ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_release() elsewhere.
*
......@@ -1509,7 +1516,8 @@ raw_atomic_long_try_cmpxchg_release(atomic_long_t *v, long *old, long new)
* @new: long value to assign
*
* If (@v == @old), atomically updates @v to @new with relaxed ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_try_cmpxchg_relaxed() elsewhere.
*
......@@ -1677,6 +1685,7 @@ raw_atomic_long_add_negative_relaxed(long i, atomic_long_t *v)
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_fetch_add_unless() elsewhere.
*
......@@ -1699,6 +1708,7 @@ raw_atomic_long_fetch_add_unless(atomic_long_t *v, long a, long u)
* @u: long value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_add_unless() elsewhere.
*
......@@ -1719,6 +1729,7 @@ raw_atomic_long_add_unless(atomic_long_t *v, long a, long u)
* @v: pointer to atomic_long_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_inc_not_zero() elsewhere.
*
......@@ -1739,6 +1750,7 @@ raw_atomic_long_inc_not_zero(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_inc_unless_negative() elsewhere.
*
......@@ -1759,6 +1771,7 @@ raw_atomic_long_inc_unless_negative(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_dec_unless_positive() elsewhere.
*
......@@ -1779,6 +1792,7 @@ raw_atomic_long_dec_unless_positive(atomic_long_t *v)
* @v: pointer to atomic_long_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with full ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* Safe to use in noinstr code; prefer atomic_long_dec_if_positive() elsewhere.
*
......@@ -1795,4 +1809,4 @@ raw_atomic_long_dec_if_positive(atomic_long_t *v)
}
#endif /* _LINUX_ATOMIC_LONG_H */
-// 4ef23f98c73cff96d239896175fd26b10b88899e
+// 1c4a26fc77f345342953770ebe3c4d08e7ce2f9a
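The atomic_long_t kerneldoc mirrors the atomic_t/atomic64_t text above because the type is just an alias for whichever fixed-size atomic matches the machine word; abridged, the top of the generated header reads:

	#ifdef CONFIG_64BIT
	typedef atomic64_t atomic_long_t;
	#define ATOMIC_LONG_INIT(i)	ATOMIC64_INIT(i)
	#else
	typedef atomic_t atomic_long_t;
	#define ATOMIC_LONG_INIT(i)	ATOMIC_INIT(i)
	#endif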
......@@ -32,11 +32,9 @@
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
-#ifndef CONFIG_PREEMPT_RT
#ifdef CONFIG_DEBUG_MUTEXES
-#define __DEBUG_MUTEX_INITIALIZER(lockname)			\
+# define __DEBUG_MUTEX_INITIALIZER(lockname)			\
, .magic = &lockname
extern void mutex_destroy(struct mutex *lock);
......@@ -49,6 +47,7 @@ static inline void mutex_destroy(struct mutex *lock) {}
#endif
+#ifndef CONFIG_PREEMPT_RT
/**
* mutex_init - initialize the mutex
* @mutex: the mutex to be initialized
......@@ -101,9 +100,6 @@ extern bool mutex_is_locked(struct mutex *lock);
extern void __mutex_rt_init(struct mutex *lock, const char *name,
struct lock_class_key *key);
extern int mutex_trylock(struct mutex *lock);
-static inline void mutex_destroy(struct mutex *lock) { }
-#define mutex_is_locked(l)	rt_mutex_base_is_locked(&(l)->rtmutex)
......
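With the reshuffle, mutex_destroy() and mutex_is_locked() are declared once for both the PREEMPT_RT and !PREEMPT_RT implementations, so callers look the same on either build. A usage sketch (struct foo is hypothetical):

	struct foo {
		struct mutex lock;
	};

	static void foo_init(struct foo *f)
	{
		mutex_init(&f->lock);
	}

	static void foo_exit(struct foo *f)
	{
		WARN_ON(mutex_is_locked(&f->lock));
		mutex_destroy(&f->lock);	/* no-op unless CONFIG_DEBUG_MUTEXES */
	}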
......@@ -223,9 +223,10 @@ static bool readers_active_check(struct percpu_rw_semaphore *sem)
void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
{
+	bool contended = false;
might_sleep();
rwsem_acquire(&sem->dep_map, 0, 0, _RET_IP_);
-	trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
/* Notify readers to take the slow path. */
rcu_sync_enter(&sem->rss);
......@@ -234,8 +235,11 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
* Try set sem->block; this provides writer-writer exclusion.
* Having sem->block set makes new readers block.
*/
-	if (!__percpu_down_write_trylock(sem))
+	if (!__percpu_down_write_trylock(sem)) {
+		trace_contention_begin(sem, LCB_F_PERCPU | LCB_F_WRITE);
		percpu_rwsem_wait(sem, /* .reader = */ false);
+		contended = true;
+	}
/* smp_mb() implied by __percpu_down_write_trylock() on success -- D matches A */
......@@ -247,7 +251,8 @@ void __sched percpu_down_write(struct percpu_rw_semaphore *sem)
/* Wait for all active readers to complete. */
rcuwait_wait_event(&sem->writer, readers_active_check(sem), TASK_UNINTERRUPTIBLE);
-	trace_contention_end(sem, 0);
+	if (contended)
+		trace_contention_end(sem, 0);
}
EXPORT_SYMBOL_GPL(percpu_down_write);
......
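The net effect is that uncontended writers no longer emit a begin/end pair; the tracepoints now bracket only the real slow path. For reference, a typical percpu-rwsem user looks like this (foo_sem is a hypothetical example):

	static DEFINE_STATIC_PERCPU_RWSEM(foo_sem);

	static void foo_read_path(void)
	{
		percpu_down_read(&foo_sem);	/* cheap per-CPU fast path */
		/* ... read-side critical section ... */
		percpu_up_read(&foo_sem);
	}

	static void foo_write_path(void)
	{
		percpu_down_write(&foo_sem);	/* traces only if it must wait */
		/* ... write-side critical section ... */
		percpu_up_write(&foo_sem);
	}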
......@@ -294,8 +294,8 @@ static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
{
struct pv_node *pn = (struct pv_node *)node;
struct pv_node *pp = (struct pv_node *)prev;
+	bool __maybe_unused wait_early;
	int loop;
-	bool wait_early;
for (;;) {
for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
......
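The warning this fixes shows up in configurations where the only consumer of wait_early (here assumed to be the optional lock-event accounting) is compiled out; __maybe_unused documents that the dead store is intentional. A condensed illustration with a hypothetical stats hook:

	#ifdef CONFIG_FOO_STATS
	# define foo_count_event(ev)	foo_account_event(ev)
	#else
	# define foo_count_event(ev)	do { } while (0)	/* 'ev' never read */
	#endif

	static void example(bool contended)
	{
		bool __maybe_unused hit_slow_path = false;

		if (contended)
			hit_slow_path = true;

		/* With CONFIG_FOO_STATS=n this expands to nothing, and the
		 * variable would otherwise be "set but not used". */
		foo_count_event(hit_slow_path);
	}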
......@@ -237,12 +237,13 @@ static __always_inline bool rt_mutex_cmpxchg_release(struct rt_mutex_base *lock,
*/
static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
{
-	unsigned long owner, *p = (unsigned long *) &lock->owner;
+	unsigned long *p = (unsigned long *) &lock->owner;
+	unsigned long owner, new;
+	owner = READ_ONCE(*p);
	do {
-		owner = *p;
-	} while (cmpxchg_relaxed(p, owner,
-				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+		new = owner | RT_MUTEX_HAS_WAITERS;
+	} while (!try_cmpxchg_relaxed(p, &owner, new));
/*
* The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
......
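The gain is that CMPXCHG already returns the current memory value on failure, and try_cmpxchg() feeds that value back through its old-value pointer, so the loop no longer re-reads *p on every iteration. The same shape as a generic helper (set_bits_relaxed is hypothetical):

	static __always_inline void
	set_bits_relaxed(unsigned long *p, unsigned long bits)
	{
		unsigned long old = READ_ONCE(*p);

		do {
			/* on failure, 'old' was refreshed by the CMPXCHG */
		} while (!try_cmpxchg_relaxed(p, &old, old | bits));
	}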
......@@ -35,7 +35,7 @@
/*
* The least significant 2 bits of the owner value has the following
* meanings when set.
- * - Bit 0: RWSEM_READER_OWNED - The rwsem is owned by readers
+ * - Bit 0: RWSEM_READER_OWNED - rwsem may be owned by readers (just a hint)
* - Bit 1: RWSEM_NONSPINNABLE - Cannot spin on a reader-owned lock
*
* When the rwsem is reader-owned and a spinning writer has timed out,
......@@ -1002,8 +1002,8 @@ rwsem_down_read_slowpath(struct rw_semaphore *sem, long count, unsigned int stat
/*
* To prevent a constant stream of readers from starving a sleeping
-	 * waiter, don't attempt optimistic lock stealing if the lock is
-	 * currently owned by readers.
+	 * writer, don't attempt optimistic lock stealing if the lock is
+	 * very likely owned by readers.
*/
if ((atomic_long_read(&sem->owner) & RWSEM_READER_OWNED) &&
(rcnt > 1) && !(count & RWSEM_WRITER_LOCKED))
......
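The hint is inherently approximate because readers do not clear the owner field on unlock (doing so on every read unlock would cost more than the hint is worth), so any test of the bit is advisory, which is exactly how the slowpath above treats it. Stated as a hypothetical helper:

	/* Advisory only: may keep reporting 'true' after the readers left. */
	static inline bool rwsem_maybe_reader_owned(struct rw_semaphore *sem)
	{
		return atomic_long_read(&sem->owner) & RWSEM_READER_OWNED;
	}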
......@@ -1303,7 +1303,7 @@ config PROVE_LOCKING
select DEBUG_SPINLOCK
select DEBUG_MUTEXES if !PREEMPT_RT
select DEBUG_RT_MUTEXES if RT_MUTEXES
-	select DEBUG_RWSEMS
+	select DEBUG_RWSEMS if !PREEMPT_RT
select DEBUG_WW_MUTEX_SLOWPATH
select DEBUG_LOCK_ALLOC
select PREEMPT_COUNT if !ARCH_NO_PREEMPT
......@@ -1426,7 +1426,7 @@ config DEBUG_WW_MUTEX_SLOWPATH
config DEBUG_RWSEMS
bool "RW Semaphore debugging: basic checks"
-	depends on DEBUG_KERNEL
+	depends on DEBUG_KERNEL && !PREEMPT_RT
help
This debugging feature allows mismatched rw semaphore locks
and unlocks to be detected and reported.
......
......@@ -10,6 +10,7 @@ cat <<EOF
* @u: ${int} value to compare with
*
* If (@v != @u), atomically updates @v to (@v + @a) with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -6,6 +6,7 @@ cat <<EOF
* @new: ${int} value to assign
*
* If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -4,6 +4,7 @@ cat <<EOF
* @v: pointer to ${atomic}_t
*
* If (@v > 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -4,6 +4,7 @@ cat <<EOF
* @v: pointer to ${atomic}_t
*
* If (@v <= 0), atomically updates @v to (@v - 1) with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -4,6 +4,7 @@ cat <<EOF
* @v: pointer to ${atomic}_t
*
* If (@v != 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -4,6 +4,7 @@ cat <<EOF
* @v: pointer to ${atomic}_t
*
* If (@v >= 0), atomically updates @v to (@v + 1) with ${desc_order} ordering.
+ * Otherwise, @v is not modified and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......
......@@ -6,7 +6,8 @@ cat <<EOF
* @new: ${int} value to assign
*
* If (@v == @old), atomically updates @v to @new with ${desc_order} ordering.
- * Otherwise, updates @old to the current value of @v.
+ * Otherwise, @v is not modified, @old is updated to the current value of @v,
+ * and relaxed ordering is provided.
*
* ${desc_noinstr}
*
......