Commit e857b6fc authored by Linus Torvalds

Merge tag 'locking-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Thomas Gleixner:
 "A moderate set of locking updates:

   - A few extensions to the rwsem API and support for opportunistic
     spinning and lock stealing

   - lockdep selftest improvements

   - Documentation updates

   - Cleanups and small fixes all over the place"

* tag 'locking-core-2020-12-14' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (21 commits)
  seqlock: kernel-doc: Specify when preemption is automatically altered
  seqlock: Prefix internal seqcount_t-only macros with a "do_"
  Documentation: seqlock: s/LOCKTYPE/LOCKNAME/g
  locking/rwsem: Remove reader optimistic spinning
  locking/rwsem: Enable reader optimistic lock stealing
  locking/rwsem: Prevent potential lock starvation
  locking/rwsem: Pass the current atomic count to rwsem_down_read_slowpath()
  locking/rwsem: Fold __down_{read,write}*()
  locking/rwsem: Introduce rwsem_write_trylock()
  locking/rwsem: Better collate rwsem_read_trylock()
  rwsem: Implement down_read_interruptible
  rwsem: Implement down_read_killable_nested
  refcount: Fix a kernel-doc markup
  completion: Drop init_completion define
  atomic: Update MAINTAINERS
  atomic: Delete obsolete documentation
  seqlock: Rename __seqprop() users
  lockdep/selftest: Add spin_nest_lock test
  lockdep/selftests: Fix PROVE_RAW_LOCK_NESTING
  seqlock: avoid -Wshadow warnings
  ...
parents 8c1dccc8 cb262935
@@ -89,7 +89,7 @@ Read path::
 .. _seqcount_locktype_t:
-Sequence counters with associated locks (``seqcount_LOCKTYPE_t``)
+Sequence counters with associated locks (``seqcount_LOCKNAME_t``)
 -----------------------------------------------------------------
 As discussed at :ref:`seqcount_t`, sequence count write side critical
@@ -115,27 +115,26 @@ The following sequence counters with associated locks are defined:
 - ``seqcount_mutex_t``
 - ``seqcount_ww_mutex_t``
-The plain seqcount read and write APIs branch out to the specific
-seqcount_LOCKTYPE_t implementation at compile-time. This avoids kernel
-API explosion per each new seqcount LOCKTYPE.
+The sequence counter read and write APIs can take either a plain
+seqcount_t or any of the seqcount_LOCKNAME_t variants above.
-Initialization (replace "LOCKTYPE" with one of the supported locks)::
+Initialization (replace "LOCKNAME" with one of the supported locks)::
     /* dynamic */
-    seqcount_LOCKTYPE_t foo_seqcount;
-    seqcount_LOCKTYPE_init(&foo_seqcount, &lock);
+    seqcount_LOCKNAME_t foo_seqcount;
+    seqcount_LOCKNAME_init(&foo_seqcount, &lock);
     /* static */
-    static seqcount_LOCKTYPE_t foo_seqcount =
-        SEQCNT_LOCKTYPE_ZERO(foo_seqcount, &lock);
+    static seqcount_LOCKNAME_t foo_seqcount =
+        SEQCNT_LOCKNAME_ZERO(foo_seqcount, &lock);
     /* C99 struct init */
     struct {
-        .seq = SEQCNT_LOCKTYPE_ZERO(foo.seq, &lock),
+        .seq = SEQCNT_LOCKNAME_ZERO(foo.seq, &lock),
     } foo;
 Write path: same as in :ref:`seqcount_t`, while running from a context
-with the associated LOCKTYPE lock acquired.
+with the associated write serialization lock acquired.
 Read path: same as in :ref:`seqcount_t`.
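As a concrete, hedged illustration of the documentation text above (not part of the patch): picking spinlock as the LOCKNAME, with hypothetical foo_lock/foo_seqcount/foo_data names, the write and read paths look roughly like this::

    #include <linux/seqlock.h>
    #include <linux/spinlock.h>

    /* Hypothetical example data, protected by foo_lock + foo_seqcount. */
    static DEFINE_SPINLOCK(foo_lock);
    static seqcount_spinlock_t foo_seqcount =
            SEQCNT_SPINLOCK_ZERO(foo_seqcount, &foo_lock);
    static int foo_data;

    /* Write path: runs with the associated spinlock held. */
    static void foo_update(int val)
    {
            spin_lock(&foo_lock);
            write_seqcount_begin(&foo_seqcount);
            foo_data = val;
            write_seqcount_end(&foo_seqcount);
            spin_unlock(&foo_lock);
    }

    /* Read path: identical to a plain seqcount_t read side. */
    static int foo_read(void)
    {
            unsigned int seq;
            int val;

            do {
                    seq = read_seqcount_begin(&foo_seqcount);
                    val = foo_data;
            } while (read_seqcount_retry(&foo_seqcount, seq));

            return val;
    }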
@@ -2982,6 +2982,8 @@ L: linux-kernel@vger.kernel.org
 S: Maintained
 F: arch/*/include/asm/atomic*.h
 F: include/*/atomic*.h
+F: include/linux/refcount.h
+F: Documentation/atomic_*.txt
 F: scripts/atomic/
 ATTO EXPRESSSAS SAS/SATA RAID SCSI DRIVER
@@ -28,8 +28,7 @@ struct completion {
         struct swait_queue_head wait;
 };
-#define init_completion_map(x, m) __init_completion(x)
-#define init_completion(x) __init_completion(x)
+#define init_completion_map(x, m) init_completion(x)
 static inline void complete_acquire(struct completion *x) {}
 static inline void complete_release(struct completion *x) {}
@@ -82,7 +81,7 @@ static inline void complete_release(struct completion *x) {}
  * This inline function will initialize a dynamically created completion
  * structure.
  */
-static inline void __init_completion(struct completion *x)
+static inline void init_completion(struct completion *x)
 {
         x->done = 0;
         init_swait_queue_head(&x->wait);
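Hedged usage sketch (not from the patch): with init_completion() now being the inline initializer itself rather than a macro, a dynamically embedded completion is set up and used as below; struct foo and the foo_* helpers are hypothetical names:

    #include <linux/completion.h>

    /* Hypothetical object embedding a completion. */
    struct foo {
            struct completion done;
    };

    static void foo_init(struct foo *f)
    {
            init_completion(&f->done);      /* direct call, no macro indirection */
    }

    static void foo_wait(struct foo *f)
    {
            wait_for_completion(&f->done);  /* sleeps until foo_finish() runs */
    }

    static void foo_finish(struct foo *f)
    {
            complete(&f->done);             /* wakes one waiter */
    }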
@@ -101,7 +101,7 @@
 struct mutex;
 /**
- * struct refcount_t - variant of atomic_t specialized for reference counts
+ * typedef refcount_t - variant of atomic_t specialized for reference counts
  * @refs: atomic_t counter field
  *
  * The counter saturates at REFCOUNT_SATURATED and will not move once
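A hedged sketch of the refcount_t life cycle this kernel-doc describes; struct foo and the foo_* helpers are hypothetical names:

    #include <linux/refcount.h>
    #include <linux/slab.h>

    /* Hypothetical reference-counted object. */
    struct foo {
            refcount_t refs;
    };

    static struct foo *foo_alloc(void)
    {
            struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

            if (f)
                    refcount_set(&f->refs, 1);      /* initial reference */
            return f;
    }

    static void foo_get(struct foo *f)
    {
            refcount_inc(&f->refs);                 /* saturates instead of overflowing */
    }

    static void foo_put(struct foo *f)
    {
            if (refcount_dec_and_test(&f->refs))    /* true when the last reference drops */
                    kfree(f);
    }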
@@ -123,6 +123,7 @@ static inline int rwsem_is_contended(struct rw_semaphore *sem)
  * lock for reading
  */
 extern void down_read(struct rw_semaphore *sem);
+extern int __must_check down_read_interruptible(struct rw_semaphore *sem);
 extern int __must_check down_read_killable(struct rw_semaphore *sem);
 /*
@@ -171,6 +172,7 @@ extern void downgrade_write(struct rw_semaphore *sem);
  * See Documentation/locking/lockdep-design.rst for more details.)
  */
 extern void down_read_nested(struct rw_semaphore *sem, int subclass);
+extern int __must_check down_read_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void down_write_nested(struct rw_semaphore *sem, int subclass);
 extern int down_write_killable_nested(struct rw_semaphore *sem, int subclass);
 extern void _down_write_nest_lock(struct rw_semaphore *sem, struct lockdep_map *nest_lock);
@@ -191,6 +193,7 @@ extern void down_read_non_owner(struct rw_semaphore *sem);
 extern void up_read_non_owner(struct rw_semaphore *sem);
 #else
 # define down_read_nested(sem, subclass) down_read(sem)
+# define down_read_killable_nested(sem, subclass) down_read_killable(sem)
 # define down_write_nest_lock(sem, nest_lock) down_write(sem)
 # define down_write_nested(sem, subclass) down_write(sem)
 # define down_write_killable_nested(sem, subclass) down_write_killable(sem)
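Hedged usage sketch for the two new entry points declared above (not from the patch; foo_rwsem, struct foo and the helpers are hypothetical names):

    #include <linux/lockdep.h>
    #include <linux/rwsem.h>

    static DECLARE_RWSEM(foo_rwsem);

    static int foo_read_config(void)
    {
            int ret;

            /* Returns -EINTR if a signal arrives while waiting for the lock. */
            ret = down_read_interruptible(&foo_rwsem);
            if (ret)
                    return ret;
            /* ... read-side critical section ... */
            up_read(&foo_rwsem);
            return 0;
    }

    /* Hypothetical object with a per-instance rwsem. */
    struct foo {
            struct rw_semaphore lock;
    };

    /*
     * Read-lock two objects of the same lock class; the second acquisition
     * carries a nesting annotation so lockdep does not report a false
     * self-deadlock. Both calls return -EINTR on a fatal signal.
     */
    static int foo_lock_pair(struct foo *a, struct foo *b)
    {
            int ret;

            ret = down_read_killable(&a->lock);
            if (ret)
                    return ret;
            ret = down_read_killable_nested(&b->lock, SINGLE_DEPTH_NESTING);
            if (ret)
                    up_read(&a->lock);
            return ret;
    }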
@@ -310,8 +310,6 @@ static inline bool should_fail_futex(bool fshared)
 #ifdef CONFIG_COMPAT
 static void compat_exit_robust_list(struct task_struct *curr);
-#else
-static inline void compat_exit_robust_list(struct task_struct *curr) { }
 #endif
 /*
@@ -56,13 +56,11 @@ LOCK_EVENT(rwsem_sleep_reader) /* # of reader sleeps */
 LOCK_EVENT(rwsem_sleep_writer) /* # of writer sleeps */
 LOCK_EVENT(rwsem_wake_reader) /* # of reader wakeups */
 LOCK_EVENT(rwsem_wake_writer) /* # of writer wakeups */
-LOCK_EVENT(rwsem_opt_rlock) /* # of opt-acquired read locks */
-LOCK_EVENT(rwsem_opt_wlock) /* # of opt-acquired write locks */
+LOCK_EVENT(rwsem_opt_lock) /* # of opt-acquired write locks */
 LOCK_EVENT(rwsem_opt_fail) /* # of failed optspins */
 LOCK_EVENT(rwsem_opt_nospin) /* # of disabled optspins */
-LOCK_EVENT(rwsem_opt_norspin) /* # of disabled reader-only optspins */
-LOCK_EVENT(rwsem_opt_rlock2) /* # of opt-acquired 2ndary read locks */
 LOCK_EVENT(rwsem_rlock) /* # of read locks acquired */
+LOCK_EVENT(rwsem_rlock_steal) /* # of read locks by lock stealing */
 LOCK_EVENT(rwsem_rlock_fast) /* # of fast read locks acquired */
 LOCK_EVENT(rwsem_rlock_fail) /* # of failed read lock acquisitions */
 LOCK_EVENT(rwsem_rlock_handoff) /* # of read lock handoffs */
@@ -58,10 +58,10 @@ static struct ww_mutex o, o2, o3;
  * Normal standalone locks, for the circular and irq-context
  * dependency tests:
  */
-static DEFINE_RAW_SPINLOCK(lock_A);
-static DEFINE_RAW_SPINLOCK(lock_B);
-static DEFINE_RAW_SPINLOCK(lock_C);
-static DEFINE_RAW_SPINLOCK(lock_D);
+static DEFINE_SPINLOCK(lock_A);
+static DEFINE_SPINLOCK(lock_B);
+static DEFINE_SPINLOCK(lock_C);
+static DEFINE_SPINLOCK(lock_D);
 static DEFINE_RWLOCK(rwlock_A);
 static DEFINE_RWLOCK(rwlock_B);
@@ -93,12 +93,12 @@ static DEFINE_RT_MUTEX(rtmutex_D);
  * but X* and Y* are different classes. We do this so that
  * we do not trigger a real lockup:
  */
-static DEFINE_RAW_SPINLOCK(lock_X1);
-static DEFINE_RAW_SPINLOCK(lock_X2);
-static DEFINE_RAW_SPINLOCK(lock_Y1);
-static DEFINE_RAW_SPINLOCK(lock_Y2);
-static DEFINE_RAW_SPINLOCK(lock_Z1);
-static DEFINE_RAW_SPINLOCK(lock_Z2);
+static DEFINE_SPINLOCK(lock_X1);
+static DEFINE_SPINLOCK(lock_X2);
+static DEFINE_SPINLOCK(lock_Y1);
+static DEFINE_SPINLOCK(lock_Y2);
+static DEFINE_SPINLOCK(lock_Z1);
+static DEFINE_SPINLOCK(lock_Z2);
 static DEFINE_RWLOCK(rwlock_X1);
 static DEFINE_RWLOCK(rwlock_X2);
@@ -138,10 +138,10 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
  */
 #define INIT_CLASS_FUNC(class) \
 static noinline void \
-init_class_##class(raw_spinlock_t *lock, rwlock_t *rwlock, \
+init_class_##class(spinlock_t *lock, rwlock_t *rwlock, \
         struct mutex *mutex, struct rw_semaphore *rwsem)\
 { \
-        raw_spin_lock_init(lock); \
+        spin_lock_init(lock); \
         rwlock_init(rwlock); \
         mutex_init(mutex); \
         init_rwsem(rwsem); \
@@ -210,10 +210,10 @@ static void init_shared_classes(void)
  * Shortcuts for lock/unlock API variants, to keep
  * the testcases compact:
  */
-#define L(x) raw_spin_lock(&lock_##x)
-#define U(x) raw_spin_unlock(&lock_##x)
+#define L(x) spin_lock(&lock_##x)
+#define U(x) spin_unlock(&lock_##x)
 #define LU(x) L(x); U(x)
-#define SI(x) raw_spin_lock_init(&lock_##x)
+#define SI(x) spin_lock_init(&lock_##x)
 #define WL(x) write_lock(&rwlock_##x)
 #define WU(x) write_unlock(&rwlock_##x)
@@ -1341,7 +1341,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 #define I2(x) \
         do { \
-                raw_spin_lock_init(&lock_##x); \
+                spin_lock_init(&lock_##x); \
                 rwlock_init(&rwlock_##x); \
                 mutex_init(&mutex_##x); \
                 init_rwsem(&rwsem_##x); \
@@ -2005,10 +2005,23 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
 static void ww_test_spin_nest_unlocked(void)
 {
-        raw_spin_lock_nest_lock(&lock_A, &o.base);
+        spin_lock_nest_lock(&lock_A, &o.base);
         U(A);
 }
+/* This is not a deadlock, because we have X1 to serialize Y1 and Y2 */
+static void ww_test_spin_nest_lock(void)
+{
+        spin_lock(&lock_X1);
+        spin_lock_nest_lock(&lock_Y1, &lock_X1);
+        spin_lock(&lock_A);
+        spin_lock_nest_lock(&lock_Y2, &lock_X1);
+        spin_unlock(&lock_A);
+        spin_unlock(&lock_Y2);
+        spin_unlock(&lock_Y1);
+        spin_unlock(&lock_X1);
+}
 static void ww_test_unneeded_slow(void)
 {
         WWAI(&t);
@@ -2226,6 +2239,10 @@ static void ww_tests(void)
         dotest(ww_test_spin_nest_unlocked, FAILURE, LOCKTYPE_WW);
         pr_cont("\n");
+        print_testname("spinlock nest test");
+        dotest(ww_test_spin_nest_lock, SUCCESS, LOCKTYPE_WW);
+        pr_cont("\n");
         printk(" -----------------------------------------------------\n");
         printk(" |block | try |context|\n");
         printk(" -----------------------------------------------------\n");