Commit 0ff0edb5 authored by Linus Torvalds

Merge tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:

 - rtmutex cleanup & spring cleaning pass that removes ~400 lines of
   code

 - Futex simplifications & cleanups

 - Add debugging to the CSD code, to help track down a tenacious race
   (or hw problem)

 - Add lockdep_assert_not_held(), to allow code to require a lock to not
   be held, and propagate this into the ath10k driver

 - Misc LKMM documentation updates

 - Misc KCSAN updates: cleanups & documentation updates

 - Misc fixes and cleanups

 - Fix locktorture bugs with ww_mutexes

* tag 'locking-core-2021-04-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (44 commits)
  kcsan: Fix printk format string
  static_call: Relax static_call_update() function argument type
  static_call: Fix unused variable warn w/o MODULE
  locking/rtmutex: Clean up signal handling in __rt_mutex_slowlock()
  locking/rtmutex: Restrict the trylock WARN_ON() to debug
  locking/rtmutex: Fix misleading comment in rt_mutex_postunlock()
  locking/rtmutex: Consolidate the fast/slowpath invocation
  locking/rtmutex: Make text section and inlining consistent
  locking/rtmutex: Move debug functions as inlines into common header
  locking/rtmutex: Decrapify __rt_mutex_init()
  locking/rtmutex: Remove pointless CONFIG_RT_MUTEXES=n stubs
  locking/rtmutex: Inline chainwalk depth check
  locking/rtmutex: Move rt_mutex_debug_task_free() to rtmutex.c
  locking/rtmutex: Remove empty and unused debug stubs
  locking/rtmutex: Consolidate rt_mutex_init()
  locking/rtmutex: Remove output from deadlock detector
  locking/rtmutex: Remove rtmutex deadlock tester leftovers
  locking/rtmutex: Remove rt_mutex_timed_lock()
  MAINTAINERS: Add myself as futex reviewer
  locking/mutex: Remove repeated declaration
  ...
parents 9a45da92 f4abe996
...@@ -782,6 +782,16 @@ ...@@ -782,6 +782,16 @@
cs89x0_media= [HW,NET] cs89x0_media= [HW,NET]
Format: { rj45 | aui | bnc } Format: { rj45 | aui | bnc }
csdlock_debug= [KNL] Enable debug add-ons of cross-CPU function call
handling. When switched on, additional debug data is
printed to the console in case a hanging CPU is
detected, and that CPU is pinged again in order to try
to resolve the hang situation.
0: disable csdlock debugging (default)
1: enable basic csdlock debugging (minor impact)
ext: enable extended csdlock debugging (more impact,
but more data)
dasd= [HW,NET] dasd= [HW,NET]
See header of drivers/s390/block/dasd_devmap.c. See header of drivers/s390/block/dasd_devmap.c.
......
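Usage sketch (hypothetical command line, not part of the documentation hunk above): appending

        csdlock_debug=ext

to the kernel boot command line enables the extended debug variant described above; csdlock_debug=1 selects the basic variant, and omitting the parameter leaves CSD lock debugging off (the default).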
.. SPDX-License-Identifier: GPL-2.0
.. Copyright (C) 2019, Google LLC.
The Kernel Concurrency Sanitizer (KCSAN) The Kernel Concurrency Sanitizer (KCSAN)
======================================== ========================================
......
...@@ -7452,6 +7452,7 @@ M: Thomas Gleixner <tglx@linutronix.de> ...@@ -7452,6 +7452,7 @@ M: Thomas Gleixner <tglx@linutronix.de>
M: Ingo Molnar <mingo@redhat.com> M: Ingo Molnar <mingo@redhat.com>
R: Peter Zijlstra <peterz@infradead.org> R: Peter Zijlstra <peterz@infradead.org>
R: Darren Hart <dvhart@infradead.org> R: Darren Hart <dvhart@infradead.org>
R: Davidlohr Bueso <dave@stgolabs.net>
L: linux-kernel@vger.kernel.org L: linux-kernel@vger.kernel.org
S: Maintained S: Maintained
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git locking/core
......
...@@ -22,7 +22,7 @@ ...@@ -22,7 +22,7 @@
* assembler to insert a extra (16-bit) IT instruction, depending on the * assembler to insert a extra (16-bit) IT instruction, depending on the
* presence or absence of neighbouring conditional instructions. * presence or absence of neighbouring conditional instructions.
* *
* To avoid this unpredictableness, an approprite IT is inserted explicitly: * To avoid this unpredictability, an appropriate IT is inserted explicitly:
* the assembler won't change IT instructions which are explicitly present * the assembler won't change IT instructions which are explicitly present
* in the input. * in the input.
*/ */
......
...@@ -14,7 +14,7 @@ ...@@ -14,7 +14,7 @@
#include <linux/stringify.h> #include <linux/stringify.h>
#include <linux/types.h> #include <linux/types.h>
static __always_inline bool arch_static_branch(struct static_key *key, bool branch) static __always_inline bool arch_static_branch(struct static_key * const key, const bool branch)
{ {
asm_volatile_goto("1:" asm_volatile_goto("1:"
".byte " __stringify(BYTES_NOP5) "\n\t" ".byte " __stringify(BYTES_NOP5) "\n\t"
...@@ -30,7 +30,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran ...@@ -30,7 +30,7 @@ static __always_inline bool arch_static_branch(struct static_key *key, bool bran
return true; return true;
} }
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch) static __always_inline bool arch_static_branch_jump(struct static_key * const key, const bool branch)
{ {
asm_volatile_goto("1:" asm_volatile_goto("1:"
".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t" ".byte 0xe9\n\t .long %l[l_yes] - 2f\n\t"
......
...@@ -4727,6 +4727,8 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, ...@@ -4727,6 +4727,8 @@ static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
/* Must not be called with conf_mutex held as workers can use that also. */ /* Must not be called with conf_mutex held as workers can use that also. */
void ath10k_drain_tx(struct ath10k *ar) void ath10k_drain_tx(struct ath10k *ar)
{ {
lockdep_assert_not_held(&ar->conf_mutex);
/* make sure rcu-protected mac80211 tx path itself is drained */ /* make sure rcu-protected mac80211 tx path itself is drained */
synchronize_net(); synchronize_net();
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/*
* KCSAN access checks and modifiers. These can be used to explicitly check
* uninstrumented accesses, or change KCSAN checking behaviour of accesses.
*
* Copyright (C) 2019, Google LLC.
*/
#ifndef _LINUX_KCSAN_CHECKS_H #ifndef _LINUX_KCSAN_CHECKS_H
#define _LINUX_KCSAN_CHECKS_H #define _LINUX_KCSAN_CHECKS_H
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/*
* The Kernel Concurrency Sanitizer (KCSAN) infrastructure. Public interface and
* data structures to set up runtime. See kcsan-checks.h for explicit checks and
* modifiers. For more info please see Documentation/dev-tools/kcsan.rst.
*
* Copyright (C) 2019, Google LLC.
*/
#ifndef _LINUX_KCSAN_H #ifndef _LINUX_KCSAN_H
#define _LINUX_KCSAN_H #define _LINUX_KCSAN_H
......
...@@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task); ...@@ -155,7 +155,7 @@ extern void lockdep_set_selftest_task(struct task_struct *task);
extern void lockdep_init_task(struct task_struct *task); extern void lockdep_init_task(struct task_struct *task);
/* /*
* Split the recrursion counter in two to readily detect 'off' vs recursion. * Split the recursion counter in two to readily detect 'off' vs recursion.
*/ */
#define LOCKDEP_RECURSION_BITS 16 #define LOCKDEP_RECURSION_BITS 16
#define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS) #define LOCKDEP_OFF (1U << LOCKDEP_RECURSION_BITS)
...@@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass, ...@@ -268,6 +268,11 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
extern void lock_release(struct lockdep_map *lock, unsigned long ip); extern void lock_release(struct lockdep_map *lock, unsigned long ip);
/* lock_is_held_type() returns */
#define LOCK_STATE_UNKNOWN -1
#define LOCK_STATE_NOT_HELD 0
#define LOCK_STATE_HELD 1
/* /*
* Same "read" as for lock_acquire(), except -1 means any. * Same "read" as for lock_acquire(), except -1 means any.
*/ */
...@@ -302,7 +307,13 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie); ...@@ -302,7 +307,13 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
#define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0) #define lockdep_depth(tsk) (debug_locks ? (tsk)->lockdep_depth : 0)
#define lockdep_assert_held(l) do { \ #define lockdep_assert_held(l) do { \
WARN_ON(debug_locks && !lockdep_is_held(l)); \ WARN_ON(debug_locks && \
lockdep_is_held(l) == LOCK_STATE_NOT_HELD); \
} while (0)
#define lockdep_assert_not_held(l) do { \
WARN_ON(debug_locks && \
lockdep_is_held(l) == LOCK_STATE_HELD); \
} while (0) } while (0)
#define lockdep_assert_held_write(l) do { \ #define lockdep_assert_held_write(l) do { \
...@@ -397,6 +408,7 @@ extern int lockdep_is_held(const void *); ...@@ -397,6 +408,7 @@ extern int lockdep_is_held(const void *);
#define lockdep_is_held_type(l, r) (1) #define lockdep_is_held_type(l, r) (1)
#define lockdep_assert_held(l) do { (void)(l); } while (0) #define lockdep_assert_held(l) do { (void)(l); } while (0)
#define lockdep_assert_not_held(l) do { (void)(l); } while (0)
#define lockdep_assert_held_write(l) do { (void)(l); } while (0) #define lockdep_assert_held_write(l) do { (void)(l); } while (0)
#define lockdep_assert_held_read(l) do { (void)(l); } while (0) #define lockdep_assert_held_read(l) do { (void)(l); } while (0)
#define lockdep_assert_held_once(l) do { (void)(l); } while (0) #define lockdep_assert_held_once(l) do { (void)(l); } while (0)
......
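Usage sketch for the new assertion (hypothetical driver code modelled on the ath10k hunk above; foo_dev and its members are made up):

#include <linux/lockdep.h>
#include <linux/mutex.h>
#include <linux/workqueue.h>

struct foo_dev {
        struct mutex lock;              /* also taken by the worker */
        struct work_struct work;
        int state;
};

/* Workers take dev->lock themselves, so callers must not hold it here. */
static void foo_drain(struct foo_dev *dev)
{
        lockdep_assert_not_held(&dev->lock);
        flush_work(&dev->work);
}

/* The existing counterpart: this path requires the lock to be held. */
static void foo_set_state(struct foo_dev *dev, int state)
{
        lockdep_assert_held(&dev->lock);
        dev->state = state;
}

Because lock_is_held_type() now returns LOCK_STATE_UNKNOWN when lockdep is disabled at runtime, neither assertion fires a false warning in that case: the macros only warn on an explicit LOCK_STATE_NOT_HELD or LOCK_STATE_HELD result.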
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
#include <linux/osq_lock.h> #include <linux/osq_lock.h>
#include <linux/debug_locks.h> #include <linux/debug_locks.h>
struct ww_class;
struct ww_acquire_ctx; struct ww_acquire_ctx;
/* /*
...@@ -65,9 +66,6 @@ struct mutex { ...@@ -65,9 +66,6 @@ struct mutex {
#endif #endif
}; };
struct ww_class;
struct ww_acquire_ctx;
struct ww_mutex { struct ww_mutex {
struct mutex base; struct mutex base;
struct ww_acquire_ctx *ctx; struct ww_acquire_ctx *ctx;
......
...@@ -31,12 +31,6 @@ struct rt_mutex { ...@@ -31,12 +31,6 @@ struct rt_mutex {
raw_spinlock_t wait_lock; raw_spinlock_t wait_lock;
struct rb_root_cached waiters; struct rb_root_cached waiters;
struct task_struct *owner; struct task_struct *owner;
#ifdef CONFIG_DEBUG_RT_MUTEXES
int save_state;
const char *name, *file;
int line;
void *magic;
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
struct lockdep_map dep_map; struct lockdep_map dep_map;
#endif #endif
...@@ -46,35 +40,17 @@ struct rt_mutex_waiter; ...@@ -46,35 +40,17 @@ struct rt_mutex_waiter;
struct hrtimer_sleeper; struct hrtimer_sleeper;
#ifdef CONFIG_DEBUG_RT_MUTEXES #ifdef CONFIG_DEBUG_RT_MUTEXES
extern int rt_mutex_debug_check_no_locks_freed(const void *from, extern void rt_mutex_debug_task_free(struct task_struct *tsk);
unsigned long len);
extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
#else #else
static inline int rt_mutex_debug_check_no_locks_freed(const void *from, static inline void rt_mutex_debug_task_free(struct task_struct *tsk) { }
unsigned long len)
{
return 0;
}
# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
#endif #endif
#ifdef CONFIG_DEBUG_RT_MUTEXES #define rt_mutex_init(mutex) \
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
, .name = #mutexname, .file = __FILE__, .line = __LINE__
# define rt_mutex_init(mutex) \
do { \ do { \
static struct lock_class_key __key; \ static struct lock_class_key __key; \
__rt_mutex_init(mutex, __func__, &__key); \ __rt_mutex_init(mutex, __func__, &__key); \
} while (0) } while (0)
extern void rt_mutex_debug_task_free(struct task_struct *tsk);
#else
# define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
# define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL, NULL)
# define rt_mutex_debug_task_free(t) do { } while (0)
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
#define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \ #define __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname) \
, .dep_map = { .name = #mutexname } , .dep_map = { .name = #mutexname }
...@@ -86,7 +62,6 @@ do { \ ...@@ -86,7 +62,6 @@ do { \
{ .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \ { .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(mutexname.wait_lock) \
, .waiters = RB_ROOT_CACHED \ , .waiters = RB_ROOT_CACHED \
, .owner = NULL \ , .owner = NULL \
__DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
__DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)} __DEP_MAP_RT_MUTEX_INITIALIZER(mutexname)}
#define DEFINE_RT_MUTEX(mutexname) \ #define DEFINE_RT_MUTEX(mutexname) \
...@@ -104,7 +79,6 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock) ...@@ -104,7 +79,6 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
} }
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key); extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);
#ifdef CONFIG_DEBUG_LOCK_ALLOC #ifdef CONFIG_DEBUG_LOCK_ALLOC
extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass); extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
...@@ -115,9 +89,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock); ...@@ -115,9 +89,6 @@ extern void rt_mutex_lock(struct rt_mutex *lock);
#endif #endif
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock); extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
extern int rt_mutex_trylock(struct rt_mutex *lock); extern int rt_mutex_trylock(struct rt_mutex *lock);
extern void rt_mutex_unlock(struct rt_mutex *lock); extern void rt_mutex_unlock(struct rt_mutex *lock);
......
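Initialization sketch after the consolidation (hypothetical names; with the debug-only fields gone, the same forms work with and without CONFIG_DEBUG_RT_MUTEXES):

#include <linux/rtmutex.h>

static DEFINE_RT_MUTEX(foo_global_lock);        /* static definition */

struct foo_ctx {
        struct rt_mutex lock;
};

static void foo_ctx_init(struct foo_ctx *ctx)
{
        rt_mutex_init(&ctx->lock);              /* runtime initialization */
}

static void foo_do_work(struct foo_ctx *ctx)
{
        rt_mutex_lock(&ctx->lock);
        /* ... critical section ... */
        rt_mutex_unlock(&ctx->lock);
}

Note that the header no longer declares rt_mutex_timed_lock(), so only the plain, interruptible and trylock acquisition variants remain.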
...@@ -110,7 +110,7 @@ do { \ ...@@ -110,7 +110,7 @@ do { \
/* /*
* This is the same regardless of which rwsem implementation that is being used. * This is the same regardless of which rwsem implementation that is being used.
* It is just a heuristic meant to be called by somebody alreadying holding the * It is just a heuristic meant to be called by somebody already holding the
* rwsem to see if somebody from an incompatible type is wanting access to the * rwsem to see if somebody from an incompatible type is wanting access to the
* lock. * lock.
*/ */
......
...@@ -118,9 +118,9 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool ...@@ -118,9 +118,9 @@ extern void arch_static_call_transform(void *site, void *tramp, void *func, bool
#define static_call_update(name, func) \ #define static_call_update(name, func) \
({ \ ({ \
BUILD_BUG_ON(!__same_type(*(func), STATIC_CALL_TRAMP(name))); \ typeof(&STATIC_CALL_TRAMP(name)) __F = (func); \
__static_call_update(&STATIC_CALL_KEY(name), \ __static_call_update(&STATIC_CALL_KEY(name), \
STATIC_CALL_TRAMP_ADDR(name), func); \ STATIC_CALL_TRAMP_ADDR(name), __F); \
}) })
#define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func)) #define static_call_query(name) (READ_ONCE(STATIC_CALL_KEY(name).func))
......
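Illustration of the relaxed type check (hypothetical key and functions): static_call_update() now type-checks by assigning the argument to a typeof(&trampoline) variable instead of BUILD_BUG_ON(!__same_type(...)), so anything assignment-compatible with the trampoline's function-pointer type is accepted. That is what lets the sched_dynamic_update() hunk further down pass NULL and (void *)&__static_call_return0 without spelling out the typeof casts.

#include <linux/static_call.h>

static int foo_default(int x)
{
        return 0;
}

static int foo_fast(int x)
{
        return x;
}

DEFINE_STATIC_CALL(foo_hook, foo_default);

static void foo_select_impl(bool fast)
{
        /* Both functions are assignment-compatible with the trampoline type. */
        static_call_update(foo_hook, fast ? foo_fast : foo_default);
}

static int foo_invoke(int x)
{
        return static_call(foo_hook)(x);
}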
...@@ -48,39 +48,26 @@ struct ww_acquire_ctx { ...@@ -48,39 +48,26 @@ struct ww_acquire_ctx {
#endif #endif
}; };
#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class) \
, .ww_class = class
#else
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
#endif
#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \ #define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \ { .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \ , .acquire_name = #ww_class "_acquire" \
, .mutex_name = #ww_class "_mutex" \ , .mutex_name = #ww_class "_mutex" \
, .is_wait_die = _is_wait_die } , .is_wait_die = _is_wait_die }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
#define DEFINE_WD_CLASS(classname) \ #define DEFINE_WD_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1) struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
#define DEFINE_WW_CLASS(classname) \ #define DEFINE_WW_CLASS(classname) \
struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0) struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
/** /**
* ww_mutex_init - initialize the w/w mutex * ww_mutex_init - initialize the w/w mutex
* @lock: the mutex to be initialized * @lock: the mutex to be initialized
* @ww_class: the w/w class the mutex should belong to * @ww_class: the w/w class the mutex should belong to
* *
* Initialize the w/w mutex to unlocked state and associate it with the given * Initialize the w/w mutex to unlocked state and associate it with the given
* class. * class. Static define macro for w/w mutex is not provided and this function
* is the only way to properly initialize the w/w mutex.
* *
* It is not allowed to initialize an already locked mutex. * It is not allowed to initialize an already locked mutex.
*/ */
......
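With __WW_MUTEX_INITIALIZER()/DEFINE_WW_MUTEX() removed, runtime initialization is the only supported form; a minimal sketch (hypothetical names):

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(foo_ww_class);   /* classes can still be defined statically */

struct foo_buf {
        struct ww_mutex lock;
        void *data;
};

static void foo_buf_init(struct foo_buf *buf)
{
        ww_mutex_init(&buf->lock, &foo_ww_class);
        buf->data = NULL;
}

The acquire-context locking protocol itself (ww_acquire_init()/ww_mutex_lock() with -EDEADLK backoff) is unchanged; only the static mutex initializer goes away.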
...@@ -981,6 +981,7 @@ static inline void exit_pi_state_list(struct task_struct *curr) { } ...@@ -981,6 +981,7 @@ static inline void exit_pi_state_list(struct task_struct *curr) { }
* p->pi_lock: * p->pi_lock:
* *
* p->pi_state_list -> pi_state->list, relation * p->pi_state_list -> pi_state->list, relation
* pi_mutex->owner -> pi_state->owner, relation
* *
* pi_state->refcount: * pi_state->refcount:
* *
...@@ -1494,13 +1495,14 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q) ...@@ -1494,13 +1495,14 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state) static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_state)
{ {
u32 curval, newval; u32 curval, newval;
struct rt_mutex_waiter *top_waiter;
struct task_struct *new_owner; struct task_struct *new_owner;
bool postunlock = false; bool postunlock = false;
DEFINE_WAKE_Q(wake_q); DEFINE_WAKE_Q(wake_q);
int ret = 0; int ret = 0;
new_owner = rt_mutex_next_owner(&pi_state->pi_mutex); top_waiter = rt_mutex_top_waiter(&pi_state->pi_mutex);
if (WARN_ON_ONCE(!new_owner)) { if (WARN_ON_ONCE(!top_waiter)) {
/* /*
* As per the comment in futex_unlock_pi() this should not happen. * As per the comment in futex_unlock_pi() this should not happen.
* *
...@@ -1513,6 +1515,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_ ...@@ -1513,6 +1515,8 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
goto out_unlock; goto out_unlock;
} }
new_owner = top_waiter->task;
/* /*
* We pass it to the next owner. The WAITERS bit is always kept * We pass it to the next owner. The WAITERS bit is always kept
* enabled while there is PI state around. We cleanup the owner * enabled while there is PI state around. We cleanup the owner
...@@ -2315,19 +2319,15 @@ static int unqueue_me(struct futex_q *q) ...@@ -2315,19 +2319,15 @@ static int unqueue_me(struct futex_q *q)
/* /*
* PI futexes can not be requeued and must remove themself from the * PI futexes can not be requeued and must remove themself from the
* hash bucket. The hash bucket lock (i.e. lock_ptr) is held on entry * hash bucket. The hash bucket lock (i.e. lock_ptr) is held.
* and dropped here.
*/ */
static void unqueue_me_pi(struct futex_q *q) static void unqueue_me_pi(struct futex_q *q)
__releases(q->lock_ptr)
{ {
__unqueue_futex(q); __unqueue_futex(q);
BUG_ON(!q->pi_state); BUG_ON(!q->pi_state);
put_pi_state(q->pi_state); put_pi_state(q->pi_state);
q->pi_state = NULL; q->pi_state = NULL;
spin_unlock(q->lock_ptr);
} }
static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, static int __fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q,
...@@ -2909,8 +2909,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags, ...@@ -2909,8 +2909,8 @@ static int futex_lock_pi(u32 __user *uaddr, unsigned int flags,
if (res) if (res)
ret = (res < 0) ? res : 0; ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock */
unqueue_me_pi(&q); unqueue_me_pi(&q);
spin_unlock(q.lock_ptr);
goto out; goto out;
out_unlock_put_key: out_unlock_put_key:
...@@ -3237,15 +3237,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, ...@@ -3237,15 +3237,14 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* reference count. * reference count.
*/ */
/* Check if the requeue code acquired the second futex for us. */
if (!q.rt_waiter) {
/* /*
* Got the lock. We might not be the anticipated owner if we * Check if the requeue code acquired the second futex for us and do
* did a lock-steal - fix up the PI-state in that case. * any pertinent fixup.
*/ */
if (!q.rt_waiter) {
if (q.pi_state && (q.pi_state->owner != current)) { if (q.pi_state && (q.pi_state->owner != current)) {
spin_lock(q.lock_ptr); spin_lock(q.lock_ptr);
ret = fixup_pi_state_owner(uaddr2, &q, current); ret = fixup_owner(uaddr2, &q, true);
/* /*
* Drop the reference to the pi state which * Drop the reference to the pi state which
* the requeue_pi() code acquired for us. * the requeue_pi() code acquired for us.
...@@ -3287,8 +3286,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags, ...@@ -3287,8 +3286,8 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
if (res) if (res)
ret = (res < 0) ? res : 0; ret = (res < 0) ? res : 0;
/* Unqueue and drop the lock. */
unqueue_me_pi(&q); unqueue_me_pi(&q);
spin_unlock(q.lock_ptr);
} }
if (ret == -EINTR) { if (ret == -EINTR) {
......
...@@ -13,5 +13,5 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \ ...@@ -13,5 +13,5 @@ CFLAGS_core.o := $(call cc-option,-fno-conserve-stack) \
obj-y := core.o debugfs.o report.o obj-y := core.o debugfs.o report.o
obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o obj-$(CONFIG_KCSAN_SELFTEST) += selftest.o
CFLAGS_kcsan-test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer CFLAGS_kcsan_test.o := $(CFLAGS_KCSAN) -g -fno-omit-frame-pointer
obj-$(CONFIG_KCSAN_TEST) += kcsan-test.o obj-$(CONFIG_KCSAN_KUNIT_TEST) += kcsan_test.o
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/*
* Rules for implicitly atomic memory accesses.
*
* Copyright (C) 2019, Google LLC.
*/
#ifndef _KERNEL_KCSAN_ATOMIC_H #ifndef _KERNEL_KCSAN_ATOMIC_H
#define _KERNEL_KCSAN_ATOMIC_H #define _KERNEL_KCSAN_ATOMIC_H
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/*
* KCSAN core runtime.
*
* Copyright (C) 2019, Google LLC.
*/
#define pr_fmt(fmt) "kcsan: " fmt #define pr_fmt(fmt) "kcsan: " fmt
...@@ -639,8 +644,6 @@ void __init kcsan_init(void) ...@@ -639,8 +644,6 @@ void __init kcsan_init(void)
BUG_ON(!in_task()); BUG_ON(!in_task());
kcsan_debugfs_init();
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles(); per_cpu(kcsan_rand_state, cpu) = (u32)get_cycles();
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/*
* KCSAN debugfs interface.
*
* Copyright (C) 2019, Google LLC.
*/
#define pr_fmt(fmt) "kcsan: " fmt #define pr_fmt(fmt) "kcsan: " fmt
...@@ -261,7 +266,9 @@ static const struct file_operations debugfs_ops = ...@@ -261,7 +266,9 @@ static const struct file_operations debugfs_ops =
.release = single_release .release = single_release
}; };
void __init kcsan_debugfs_init(void) static void __init kcsan_debugfs_init(void)
{ {
debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops); debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
} }
late_initcall(kcsan_debugfs_init);
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/*
* KCSAN watchpoint encoding.
*
* Copyright (C) 2019, Google LLC.
*/
#ifndef _KERNEL_KCSAN_ENCODING_H #ifndef _KERNEL_KCSAN_ENCODING_H
#define _KERNEL_KCSAN_ENCODING_H #define _KERNEL_KCSAN_ENCODING_H
......
/* SPDX-License-Identifier: GPL-2.0 */ /* SPDX-License-Identifier: GPL-2.0 */
/* /*
* The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please * The Kernel Concurrency Sanitizer (KCSAN) infrastructure. For more info please
* see Documentation/dev-tools/kcsan.rst. * see Documentation/dev-tools/kcsan.rst.
*
* Copyright (C) 2019, Google LLC.
*/ */
#ifndef _KERNEL_KCSAN_KCSAN_H #ifndef _KERNEL_KCSAN_KCSAN_H
...@@ -30,11 +31,6 @@ extern bool kcsan_enabled; ...@@ -30,11 +31,6 @@ extern bool kcsan_enabled;
void kcsan_save_irqtrace(struct task_struct *task); void kcsan_save_irqtrace(struct task_struct *task);
void kcsan_restore_irqtrace(struct task_struct *task); void kcsan_restore_irqtrace(struct task_struct *task);
/*
* Initialize debugfs file.
*/
void kcsan_debugfs_init(void);
/* /*
* Statistics counters displayed via debugfs; should only be modified in * Statistics counters displayed via debugfs; should only be modified in
* slow-paths. * slow-paths.
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
* Author: Marco Elver <elver@google.com> * Author: Marco Elver <elver@google.com>
*/ */
#define pr_fmt(fmt) "kcsan_test: " fmt
#include <kunit/test.h> #include <kunit/test.h>
#include <linux/jiffies.h> #include <linux/jiffies.h>
#include <linux/kcsan-checks.h> #include <linux/kcsan-checks.h>
...@@ -951,22 +953,53 @@ static void test_atomic_builtins(struct kunit *test) ...@@ -951,22 +953,53 @@ static void test_atomic_builtins(struct kunit *test)
} }
/* /*
* Each test case is run with different numbers of threads. Until KUnit supports * Generate thread counts for all test cases. Values generated are in interval
* passing arguments for each test case, we encode #threads in the test case * [2, 5] followed by exponentially increasing thread counts from 8 to 32.
* name (read by get_num_threads()). [The '-' was chosen as a stylistic
* preference to separate test name and #threads.]
* *
* The thread counts are chosen to cover potentially interesting boundaries and * The thread counts are chosen to cover potentially interesting boundaries and
* corner cases (range 2-5), and then stress the system with larger counts. * corner cases (2 to 5), and then stress the system with larger counts.
*/
static const void *nthreads_gen_params(const void *prev, char *desc)
{
long nthreads = (long)prev;
if (nthreads < 0 || nthreads >= 32)
nthreads = 0; /* stop */
else if (!nthreads)
nthreads = 2; /* initial value */
else if (nthreads < 5)
nthreads++;
else if (nthreads == 5)
nthreads = 8;
else
nthreads *= 2;
if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) {
/*
* Without any preemption, keep 2 CPUs free for other tasks, one
* of which is the main test case function checking for
* completion or failure.
*/ */
#define KCSAN_KUNIT_CASE(test_name) \ const long min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0;
{ .run_case = test_name, .name = #test_name "-02" }, \ const long min_required_cpus = 2 + min_unused_cpus;
{ .run_case = test_name, .name = #test_name "-03" }, \
{ .run_case = test_name, .name = #test_name "-04" }, \
{ .run_case = test_name, .name = #test_name "-05" }, \
{ .run_case = test_name, .name = #test_name "-08" }, \
{ .run_case = test_name, .name = #test_name "-16" }
if (num_online_cpus() < min_required_cpus) {
pr_err_once("Too few online CPUs (%u < %ld) for test\n",
num_online_cpus(), min_required_cpus);
nthreads = 0;
} else if (nthreads >= num_online_cpus() - min_unused_cpus) {
/* Use negative value to indicate last param. */
nthreads = -(num_online_cpus() - min_unused_cpus);
pr_warn_once("Limiting number of threads to %ld (only %d online CPUs)\n",
-nthreads, num_online_cpus());
}
}
snprintf(desc, KUNIT_PARAM_DESC_SIZE, "threads=%ld", abs(nthreads));
return (void *)nthreads;
}
#define KCSAN_KUNIT_CASE(test_name) KUNIT_CASE_PARAM(test_name, nthreads_gen_params)
static struct kunit_case kcsan_test_cases[] = { static struct kunit_case kcsan_test_cases[] = {
KCSAN_KUNIT_CASE(test_basic), KCSAN_KUNIT_CASE(test_basic),
KCSAN_KUNIT_CASE(test_concurrent_races), KCSAN_KUNIT_CASE(test_concurrent_races),
...@@ -996,24 +1029,6 @@ static struct kunit_case kcsan_test_cases[] = { ...@@ -996,24 +1029,6 @@ static struct kunit_case kcsan_test_cases[] = {
/* ===== End test cases ===== */ /* ===== End test cases ===== */
/* Get number of threads encoded in test name. */
static bool __no_kcsan
get_num_threads(const char *test, int *nthreads)
{
int len = strlen(test);
if (WARN_ON(len < 3))
return false;
*nthreads = test[len - 1] - '0';
*nthreads += (test[len - 2] - '0') * 10;
if (WARN_ON(*nthreads < 0))
return false;
return true;
}
/* Concurrent accesses from interrupts. */ /* Concurrent accesses from interrupts. */
__no_kcsan __no_kcsan
static void access_thread_timer(struct timer_list *timer) static void access_thread_timer(struct timer_list *timer)
...@@ -1076,9 +1091,6 @@ static int test_init(struct kunit *test) ...@@ -1076,9 +1091,6 @@ static int test_init(struct kunit *test)
if (!torture_init_begin((char *)test->name, 1)) if (!torture_init_begin((char *)test->name, 1))
return -EBUSY; return -EBUSY;
if (!get_num_threads(test->name, &nthreads))
goto err;
if (WARN_ON(threads)) if (WARN_ON(threads))
goto err; goto err;
...@@ -1087,39 +1099,19 @@ static int test_init(struct kunit *test) ...@@ -1087,39 +1099,19 @@ static int test_init(struct kunit *test)
goto err; goto err;
} }
if (!IS_ENABLED(CONFIG_PREEMPT) || !IS_ENABLED(CONFIG_KCSAN_INTERRUPT_WATCHER)) { nthreads = abs((long)test->param_value);
/* if (WARN_ON(!nthreads))
* Without any preemption, keep 2 CPUs free for other tasks, one
* of which is the main test case function checking for
* completion or failure.
*/
const int min_unused_cpus = IS_ENABLED(CONFIG_PREEMPT_NONE) ? 2 : 0;
const int min_required_cpus = 2 + min_unused_cpus;
if (num_online_cpus() < min_required_cpus) {
pr_err("%s: too few online CPUs (%u < %d) for test",
test->name, num_online_cpus(), min_required_cpus);
goto err; goto err;
} else if (nthreads > num_online_cpus() - min_unused_cpus) {
nthreads = num_online_cpus() - min_unused_cpus;
pr_warn("%s: limiting number of threads to %d\n",
test->name, nthreads);
}
}
if (nthreads) { threads = kcalloc(nthreads + 1, sizeof(struct task_struct *), GFP_KERNEL);
threads = kcalloc(nthreads + 1, sizeof(struct task_struct *),
GFP_KERNEL);
if (WARN_ON(!threads)) if (WARN_ON(!threads))
goto err; goto err;
threads[nthreads] = NULL; threads[nthreads] = NULL;
for (i = 0; i < nthreads; ++i) { for (i = 0; i < nthreads; ++i) {
if (torture_create_kthread(access_thread, NULL, if (torture_create_kthread(access_thread, NULL, threads[i]))
threads[i]))
goto err; goto err;
} }
}
torture_init_end(); torture_init_end();
...@@ -1156,7 +1148,7 @@ static void test_exit(struct kunit *test) ...@@ -1156,7 +1148,7 @@ static void test_exit(struct kunit *test)
} }
static struct kunit_suite kcsan_test_suite = { static struct kunit_suite kcsan_test_suite = {
.name = "kcsan-test", .name = "kcsan",
.test_cases = kcsan_test_cases, .test_cases = kcsan_test_cases,
.init = test_init, .init = test_init,
.exit = test_exit, .exit = test_exit,
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/*
* KCSAN reporting.
*
* Copyright (C) 2019, Google LLC.
*/
#include <linux/debug_locks.h> #include <linux/debug_locks.h>
#include <linux/delay.h> #include <linux/delay.h>
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
/*
* KCSAN short boot-time selftests.
*
* Copyright (C) 2019, Google LLC.
*/
#define pr_fmt(fmt) "kcsan: " fmt #define pr_fmt(fmt) "kcsan: " fmt
......
...@@ -12,7 +12,6 @@ ifdef CONFIG_FUNCTION_TRACER ...@@ -12,7 +12,6 @@ ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_lockdep.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_lockdep_proc.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE) CFLAGS_REMOVE_mutex-debug.o = $(CC_FLAGS_FTRACE)
CFLAGS_REMOVE_rtmutex-debug.o = $(CC_FLAGS_FTRACE)
endif endif
obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o obj-$(CONFIG_DEBUG_IRQFLAGS) += irqflag-debug.o
...@@ -26,7 +25,6 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o ...@@ -26,7 +25,6 @@ obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
obj-$(CONFIG_PROVE_LOCKING) += spinlock.o obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
obj-$(CONFIG_RT_MUTEXES) += rtmutex.o obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
......
...@@ -54,6 +54,7 @@ ...@@ -54,6 +54,7 @@
#include <linux/nmi.h> #include <linux/nmi.h>
#include <linux/rcupdate.h> #include <linux/rcupdate.h>
#include <linux/kprobes.h> #include <linux/kprobes.h>
#include <linux/lockdep.h>
#include <asm/sections.h> #include <asm/sections.h>
...@@ -1747,7 +1748,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry, ...@@ -1747,7 +1748,7 @@ static enum bfs_result __bfs(struct lock_list *source_entry,
/* /*
* Step 4: if not match, expand the path by adding the * Step 4: if not match, expand the path by adding the
* forward or backwards dependencis in the search * forward or backwards dependencies in the search
* *
*/ */
first = true; first = true;
...@@ -1916,7 +1917,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth, ...@@ -1916,7 +1917,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth,
* -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the * -> B is -(ER)-> or -(EN)->, then we don't need to add A -> B into the
* dependency graph, as any strong path ..-> A -> B ->.. we can get with * dependency graph, as any strong path ..-> A -> B ->.. we can get with
* having dependency A -> B, we could already get a equivalent path ..-> A -> * having dependency A -> B, we could already get a equivalent path ..-> A ->
* .. -> B -> .. with A -> .. -> B. Therefore A -> B is reduntant. * .. -> B -> .. with A -> .. -> B. Therefore A -> B is redundant.
* *
* We need to make sure both the start and the end of A -> .. -> B is not * We need to make sure both the start and the end of A -> .. -> B is not
* weaker than A -> B. For the start part, please see the comment in * weaker than A -> B. For the start part, please see the comment in
...@@ -5253,13 +5254,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read) ...@@ -5253,13 +5254,13 @@ int __lock_is_held(const struct lockdep_map *lock, int read)
if (match_held_lock(hlock, lock)) { if (match_held_lock(hlock, lock)) {
if (read == -1 || hlock->read == read) if (read == -1 || hlock->read == read)
return 1; return LOCK_STATE_HELD;
return 0; return LOCK_STATE_NOT_HELD;
} }
} }
return 0; return LOCK_STATE_NOT_HELD;
} }
static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock) static struct pin_cookie __lock_pin_lock(struct lockdep_map *lock)
...@@ -5538,10 +5539,14 @@ EXPORT_SYMBOL_GPL(lock_release); ...@@ -5538,10 +5539,14 @@ EXPORT_SYMBOL_GPL(lock_release);
noinstr int lock_is_held_type(const struct lockdep_map *lock, int read) noinstr int lock_is_held_type(const struct lockdep_map *lock, int read)
{ {
unsigned long flags; unsigned long flags;
int ret = 0; int ret = LOCK_STATE_NOT_HELD;
/*
* Avoid false negative lockdep_assert_held() and
* lockdep_assert_not_held().
*/
if (unlikely(!lockdep_enabled())) if (unlikely(!lockdep_enabled()))
return 1; /* avoid false negative lockdep_assert_held() */ return LOCK_STATE_UNKNOWN;
raw_local_irq_save(flags); raw_local_irq_save(flags);
check_flags(flags); check_flags(flags);
......
...@@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v) ...@@ -348,7 +348,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
debug_locks); debug_locks);
/* /*
* Zappped classes and lockdep data buffers reuse statistics. * Zapped classes and lockdep data buffers reuse statistics.
*/ */
seq_puts(m, "\n"); seq_puts(m, "\n");
seq_printf(m, " zapped classes: %11lu\n", seq_printf(m, " zapped classes: %11lu\n",
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
* The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock * The MCS lock (proposed by Mellor-Crummey and Scott) is a simple spin-lock
* with the desirable properties of being fair, and with each cpu trying * with the desirable properties of being fair, and with each cpu trying
* to acquire the lock spinning on a local variable. * to acquire the lock spinning on a local variable.
* It avoids expensive cache bouncings that common test-and-set spin-lock * It avoids expensive cache bounces that common test-and-set spin-lock
* implementations incur. * implementations incur.
*/ */
#ifndef __LINUX_MCS_SPINLOCK_H #ifndef __LINUX_MCS_SPINLOCK_H
......
...@@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner) ...@@ -92,7 +92,7 @@ static inline unsigned long __owner_flags(unsigned long owner)
} }
/* /*
* Trylock variant that retuns the owning task on failure. * Trylock variant that returns the owning task on failure.
*/ */
static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock) static inline struct task_struct *__mutex_trylock_or_owner(struct mutex *lock)
{ {
...@@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter, ...@@ -207,7 +207,7 @@ __mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
/* /*
* Give up ownership to a specific task, when @task = NULL, this is equivalent * Give up ownership to a specific task, when @task = NULL, this is equivalent
* to a regular unlock. Sets PICKUP on a handoff, clears HANDOF, preserves * to a regular unlock. Sets PICKUP on a handoff, clears HANDOFF, preserves
* WAITERS. Provides RELEASE semantics like a regular unlock, the * WAITERS. Provides RELEASE semantics like a regular unlock, the
* __mutex_trylock() provides a matching ACQUIRE semantics for the handoff. * __mutex_trylock() provides a matching ACQUIRE semantics for the handoff.
*/ */
......
...@@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) ...@@ -135,7 +135,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
*/ */
/* /*
* Wait to acquire the lock or cancelation. Note that need_resched() * Wait to acquire the lock or cancellation. Note that need_resched()
* will come with an IPI, which will wake smp_cond_load_relaxed() if it * will come with an IPI, which will wake smp_cond_load_relaxed() if it
* is implemented with a monitor-wait. vcpu_is_preempted() relies on * is implemented with a monitor-wait. vcpu_is_preempted() relies on
* polling, be careful. * polling, be careful.
...@@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock) ...@@ -164,7 +164,7 @@ bool osq_lock(struct optimistic_spin_queue *lock)
/* /*
* We can only fail the cmpxchg() racing against an unlock(), * We can only fail the cmpxchg() racing against an unlock(),
* in which case we should observe @node->locked becomming * in which case we should observe @node->locked becoming
* true. * true.
*/ */
if (smp_load_acquire(&node->locked)) if (smp_load_acquire(&node->locked))
......
// SPDX-License-Identifier: GPL-2.0
/*
* RT-Mutexes: blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* This code is based on the rt.c implementation in the preempt-rt tree.
* Portions of said code are
*
* Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
* Copyright (C) 2006 Esben Nielsen
* Copyright (C) 2006 Kihon Technologies Inc.,
* Steven Rostedt <rostedt@goodmis.org>
*
* See rt.c in preempt-rt for proper credits and further information
*/
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/sched/debug.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/syscalls.h>
#include <linux/interrupt.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debug_locks.h>
#include "rtmutex_common.h"
static void printk_task(struct task_struct *p)
{
if (p)
printk("%16s:%5d [%p, %3d]", p->comm, task_pid_nr(p), p, p->prio);
else
printk("<none>");
}
static void printk_lock(struct rt_mutex *lock, int print_owner)
{
if (lock->name)
printk(" [%p] {%s}\n",
lock, lock->name);
else
printk(" [%p] {%s:%d}\n",
lock, lock->file, lock->line);
if (print_owner && rt_mutex_owner(lock)) {
printk(".. ->owner: %p\n", lock->owner);
printk(".. held by: ");
printk_task(rt_mutex_owner(lock));
printk("\n");
}
}
void rt_mutex_debug_task_free(struct task_struct *task)
{
DEBUG_LOCKS_WARN_ON(!RB_EMPTY_ROOT(&task->pi_waiters.rb_root));
DEBUG_LOCKS_WARN_ON(task->pi_blocked_on);
}
/*
* We fill out the fields in the waiter to store the information about
* the deadlock. We print when we return. act_waiter can be NULL in
* case of a remove waiter operation.
*/
void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *act_waiter,
struct rt_mutex *lock)
{
struct task_struct *task;
if (!debug_locks || chwalk == RT_MUTEX_FULL_CHAINWALK || !act_waiter)
return;
task = rt_mutex_owner(act_waiter->lock);
if (task && task != current) {
act_waiter->deadlock_task_pid = get_pid(task_pid(task));
act_waiter->deadlock_lock = lock;
}
}
void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
{
struct task_struct *task;
if (!waiter->deadlock_lock || !debug_locks)
return;
rcu_read_lock();
task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
if (!task) {
rcu_read_unlock();
return;
}
if (!debug_locks_off()) {
rcu_read_unlock();
return;
}
pr_warn("\n");
pr_warn("============================================\n");
pr_warn("WARNING: circular locking deadlock detected!\n");
pr_warn("%s\n", print_tainted());
pr_warn("--------------------------------------------\n");
printk("%s/%d is deadlocking current task %s/%d\n\n",
task->comm, task_pid_nr(task),
current->comm, task_pid_nr(current));
printk("\n1) %s/%d is trying to acquire this lock:\n",
current->comm, task_pid_nr(current));
printk_lock(waiter->lock, 1);
printk("\n2) %s/%d is blocked on this lock:\n",
task->comm, task_pid_nr(task));
printk_lock(waiter->deadlock_lock, 1);
debug_show_held_locks(current);
debug_show_held_locks(task);
printk("\n%s/%d's [blocked] stackdump:\n\n",
task->comm, task_pid_nr(task));
show_stack(task, NULL, KERN_DEFAULT);
printk("\n%s/%d's [current] stackdump:\n\n",
current->comm, task_pid_nr(current));
dump_stack();
debug_show_all_locks();
rcu_read_unlock();
printk("[ turning off deadlock detection."
"Please report this trace. ]\n\n");
}
void debug_rt_mutex_lock(struct rt_mutex *lock)
{
}
void debug_rt_mutex_unlock(struct rt_mutex *lock)
{
DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}
void
debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
{
}
void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}
void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
memset(waiter, 0x11, sizeof(*waiter));
waiter->deadlock_task_pid = NULL;
}
void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
put_pid(waiter->deadlock_task_pid);
memset(waiter, 0x22, sizeof(*waiter));
}
void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key)
{
/*
* Make sure we are not reinitializing a held lock:
*/
debug_check_no_locks_freed((void *)lock, sizeof(*lock));
lock->name = name;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
lockdep_init_map(&lock->dep_map, name, key, 0);
#endif
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* RT-Mutexes: blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* This file contains macros used solely by rtmutex.c. Debug version.
*/
extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void debug_rt_mutex_lock(struct rt_mutex *lock);
extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
struct task_struct *powner);
extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
extern void debug_rt_mutex_deadlock(enum rtmutex_chainwalk chwalk,
struct rt_mutex_waiter *waiter,
struct rt_mutex *lock);
extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
# define debug_rt_mutex_reset_waiter(w) \
do { (w)->deadlock_lock = NULL; } while (0)
static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
enum rtmutex_chainwalk walk)
{
return (waiter != NULL);
}
static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
{
debug_rt_mutex_print_deadlock(w);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* RT-Mutexes: blocking mutual exclusion locks with PI support
*
* started by Ingo Molnar and Thomas Gleixner:
*
* Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
* Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
*
* This file contains macros used solely by rtmutex.c.
* Non-debug version.
*/
#define rt_mutex_deadlock_check(l) (0)
#define debug_rt_mutex_init_waiter(w) do { } while (0)
#define debug_rt_mutex_free_waiter(w) do { } while (0)
#define debug_rt_mutex_lock(l) do { } while (0)
#define debug_rt_mutex_proxy_lock(l,p) do { } while (0)
#define debug_rt_mutex_proxy_unlock(l) do { } while (0)
#define debug_rt_mutex_unlock(l) do { } while (0)
#define debug_rt_mutex_init(m, n, k) do { } while (0)
#define debug_rt_mutex_deadlock(d, a ,l) do { } while (0)
#define debug_rt_mutex_print_deadlock(w) do { } while (0)
#define debug_rt_mutex_reset_waiter(w) do { } while (0)
static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
{
WARN(1, "rtmutex deadlock detected\n");
}
static inline bool debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *w,
enum rtmutex_chainwalk walk)
{
return walk == RT_MUTEX_FULL_CHAINWALK;
}
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#ifndef __KERNEL_RTMUTEX_COMMON_H #ifndef __KERNEL_RTMUTEX_COMMON_H
#define __KERNEL_RTMUTEX_COMMON_H #define __KERNEL_RTMUTEX_COMMON_H
#include <linux/debug_locks.h>
#include <linux/rtmutex.h> #include <linux/rtmutex.h>
#include <linux/sched/wake_q.h> #include <linux/sched/wake_q.h>
...@@ -23,34 +24,30 @@ ...@@ -23,34 +24,30 @@
* @tree_entry: pi node to enqueue into the mutex waiters tree * @tree_entry: pi node to enqueue into the mutex waiters tree
* @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree * @pi_tree_entry: pi node to enqueue into the mutex owner waiters tree
* @task: task reference to the blocked task * @task: task reference to the blocked task
* @lock: Pointer to the rt_mutex on which the waiter blocks
* @prio: Priority of the waiter
* @deadline: Deadline of the waiter if applicable
*/ */
struct rt_mutex_waiter { struct rt_mutex_waiter {
struct rb_node tree_entry; struct rb_node tree_entry;
struct rb_node pi_tree_entry; struct rb_node pi_tree_entry;
struct task_struct *task; struct task_struct *task;
struct rt_mutex *lock; struct rt_mutex *lock;
#ifdef CONFIG_DEBUG_RT_MUTEXES
unsigned long ip;
struct pid *deadlock_task_pid;
struct rt_mutex *deadlock_lock;
#endif
int prio; int prio;
u64 deadline; u64 deadline;
}; };
/* /*
* Various helpers to access the waiters-tree: * Must be guarded because this header is included from rcu/tree_plugin.h
* unconditionally.
*/ */
#ifdef CONFIG_RT_MUTEXES #ifdef CONFIG_RT_MUTEXES
static inline int rt_mutex_has_waiters(struct rt_mutex *lock) static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{ {
return !RB_EMPTY_ROOT(&lock->waiters.rb_root); return !RB_EMPTY_ROOT(&lock->waiters.rb_root);
} }
static inline struct rt_mutex_waiter * static inline struct rt_mutex_waiter *rt_mutex_top_waiter(struct rt_mutex *lock)
rt_mutex_top_waiter(struct rt_mutex *lock)
{ {
struct rb_node *leftmost = rb_first_cached(&lock->waiters); struct rb_node *leftmost = rb_first_cached(&lock->waiters);
struct rt_mutex_waiter *w = NULL; struct rt_mutex_waiter *w = NULL;
...@@ -67,42 +64,12 @@ static inline int task_has_pi_waiters(struct task_struct *p) ...@@ -67,42 +64,12 @@ static inline int task_has_pi_waiters(struct task_struct *p)
return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root); return !RB_EMPTY_ROOT(&p->pi_waiters.rb_root);
} }
static inline struct rt_mutex_waiter * static inline struct rt_mutex_waiter *task_top_pi_waiter(struct task_struct *p)
task_top_pi_waiter(struct task_struct *p)
{
return rb_entry(p->pi_waiters.rb_leftmost,
struct rt_mutex_waiter, pi_tree_entry);
}
#else
static inline int rt_mutex_has_waiters(struct rt_mutex *lock)
{
return false;
}
static inline struct rt_mutex_waiter *
rt_mutex_top_waiter(struct rt_mutex *lock)
{ {
return NULL; return rb_entry(p->pi_waiters.rb_leftmost, struct rt_mutex_waiter,
} pi_tree_entry);
static inline int task_has_pi_waiters(struct task_struct *p)
{
return false;
} }
static inline struct rt_mutex_waiter *
task_top_pi_waiter(struct task_struct *p)
{
return NULL;
}
#endif
/*
* lock->owner state tracking:
*/
#define RT_MUTEX_HAS_WAITERS 1UL #define RT_MUTEX_HAS_WAITERS 1UL
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
...@@ -111,6 +78,13 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock) ...@@ -111,6 +78,13 @@ static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS); return (struct task_struct *) (owner & ~RT_MUTEX_HAS_WAITERS);
} }
#else /* CONFIG_RT_MUTEXES */
/* Used in rcu/tree_plugin.h */
static inline struct task_struct *rt_mutex_owner(struct rt_mutex *lock)
{
return NULL;
}
#endif /* !CONFIG_RT_MUTEXES */
/* /*
* Constants for rt mutex functions which have a selectable deadlock * Constants for rt mutex functions which have a selectable deadlock
...@@ -127,10 +101,16 @@ enum rtmutex_chainwalk { ...@@ -127,10 +101,16 @@ enum rtmutex_chainwalk {
RT_MUTEX_FULL_CHAINWALK, RT_MUTEX_FULL_CHAINWALK,
}; };
static inline void __rt_mutex_basic_init(struct rt_mutex *lock)
{
lock->owner = NULL;
raw_spin_lock_init(&lock->wait_lock);
lock->waiters = RB_ROOT_CACHED;
}
/* /*
* PI-futex support (proxy locking functions, etc.): * PI-futex support (proxy locking functions, etc.):
*/ */
extern struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock);
extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock, extern void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
struct task_struct *proxy_owner); struct task_struct *proxy_owner);
extern void rt_mutex_proxy_unlock(struct rt_mutex *lock); extern void rt_mutex_proxy_unlock(struct rt_mutex *lock);
...@@ -156,10 +136,29 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock, ...@@ -156,10 +136,29 @@ extern bool __rt_mutex_futex_unlock(struct rt_mutex *lock,
extern void rt_mutex_postunlock(struct wake_q_head *wake_q); extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
#ifdef CONFIG_DEBUG_RT_MUTEXES /* Debug functions */
# include "rtmutex-debug.h" static inline void debug_rt_mutex_unlock(struct rt_mutex *lock)
#else {
# include "rtmutex.h" if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
#endif DEBUG_LOCKS_WARN_ON(rt_mutex_owner(lock) != current);
}
static inline void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
{
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
DEBUG_LOCKS_WARN_ON(!rt_mutex_owner(lock));
}
static inline void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
memset(waiter, 0x11, sizeof(*waiter));
}
static inline void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
{
if (IS_ENABLED(CONFIG_DEBUG_RT_MUTEXES))
memset(waiter, 0x22, sizeof(*waiter));
}
#endif #endif
...@@ -632,7 +632,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) ...@@ -632,7 +632,7 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
} }
/* /*
* The rwsem_spin_on_owner() function returns the folowing 4 values * The rwsem_spin_on_owner() function returns the following 4 values
* depending on the lock owner state. * depending on the lock owner state.
* OWNER_NULL : owner is currently NULL * OWNER_NULL : owner is currently NULL
* OWNER_WRITER: when owner changes and is a writer * OWNER_WRITER: when owner changes and is a writer
...@@ -819,7 +819,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem) ...@@ -819,7 +819,7 @@ static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
* we try to get it. The new owner may be a spinnable * we try to get it. The new owner may be a spinnable
* writer. * writer.
* *
* To take advantage of two scenarios listed agove, the RT * To take advantage of two scenarios listed above, the RT
* task is made to retry one more time to see if it can * task is made to retry one more time to see if it can
* acquire the lock or continue spinning on the new owning * acquire the lock or continue spinning on the new owning
* writer. Of course, if the time lag is long enough or the * writer. Of course, if the time lag is long enough or the
......
...@@ -58,10 +58,10 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state); ...@@ -58,10 +58,10 @@ EXPORT_PER_CPU_SYMBOL(__mmiowb_state);
/* /*
* We build the __lock_function inlines here. They are too large for * We build the __lock_function inlines here. They are too large for
* inlining all over the place, but here is only one user per function * inlining all over the place, but here is only one user per function
* which embedds them into the calling _lock_function below. * which embeds them into the calling _lock_function below.
* *
* This could be a long-held lock. We both prepare to spin for a long * This could be a long-held lock. We both prepare to spin for a long
* time (making _this_ CPU preemptable if possible), and we also signal * time (making _this_ CPU preemptible if possible), and we also signal
* towards that other CPU that it should break the lock ASAP. * towards that other CPU that it should break the lock ASAP.
*/ */
#define BUILD_LOCK_OPS(op, locktype) \ #define BUILD_LOCK_OPS(op, locktype) \
......
...@@ -5396,25 +5396,25 @@ static void sched_dynamic_update(int mode) ...@@ -5396,25 +5396,25 @@ static void sched_dynamic_update(int mode)
switch (mode) { switch (mode) {
case preempt_dynamic_none: case preempt_dynamic_none:
static_call_update(cond_resched, __cond_resched); static_call_update(cond_resched, __cond_resched);
static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); static_call_update(might_resched, (void *)&__static_call_return0);
static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); static_call_update(preempt_schedule, NULL);
static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); static_call_update(preempt_schedule_notrace, NULL);
static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); static_call_update(irqentry_exit_cond_resched, NULL);
pr_info("Dynamic Preempt: none\n"); pr_info("Dynamic Preempt: none\n");
break; break;
case preempt_dynamic_voluntary: case preempt_dynamic_voluntary:
static_call_update(cond_resched, __cond_resched); static_call_update(cond_resched, __cond_resched);
static_call_update(might_resched, __cond_resched); static_call_update(might_resched, __cond_resched);
static_call_update(preempt_schedule, (typeof(&preempt_schedule)) NULL); static_call_update(preempt_schedule, NULL);
static_call_update(preempt_schedule_notrace, (typeof(&preempt_schedule_notrace)) NULL); static_call_update(preempt_schedule_notrace, NULL);
static_call_update(irqentry_exit_cond_resched, (typeof(&irqentry_exit_cond_resched)) NULL); static_call_update(irqentry_exit_cond_resched, NULL);
pr_info("Dynamic Preempt: voluntary\n"); pr_info("Dynamic Preempt: voluntary\n");
break; break;
case preempt_dynamic_full: case preempt_dynamic_full:
static_call_update(cond_resched, (typeof(&__cond_resched)) __static_call_return0); static_call_update(cond_resched, (void *)&__static_call_return0);
static_call_update(might_resched, (typeof(&__cond_resched)) __static_call_return0); static_call_update(might_resched, (void *)&__static_call_return0);
static_call_update(preempt_schedule, __preempt_schedule_func); static_call_update(preempt_schedule, __preempt_schedule_func);
static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func); static_call_update(preempt_schedule_notrace, __preempt_schedule_notrace_func);
static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched); static_call_update(irqentry_exit_cond_resched, irqentry_exit_cond_resched);
......
...@@ -165,13 +165,13 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func) ...@@ -165,13 +165,13 @@ void __static_call_update(struct static_call_key *key, void *tramp, void *func)
stop = __stop_static_call_sites; stop = __stop_static_call_sites;
#ifdef CONFIG_MODULES
if (mod) { if (mod) {
#ifdef CONFIG_MODULES
stop = mod->static_call_sites + stop = mod->static_call_sites +
mod->num_static_call_sites; mod->num_static_call_sites;
init = mod->state == MODULE_STATE_COMING; init = mod->state == MODULE_STATE_COMING;
}
#endif #endif
}
for (site = site_mod->sites; for (site = site_mod->sites;
site < stop && static_call_key(site) == key; site++) { site < stop && static_call_key(site) == key; site++) {
......
...@@ -69,8 +69,9 @@ config KCSAN_SELFTEST ...@@ -69,8 +69,9 @@ config KCSAN_SELFTEST
panic. Recommended to be enabled, ensuring critical functionality panic. Recommended to be enabled, ensuring critical functionality
works as intended. works as intended.
config KCSAN_TEST config KCSAN_KUNIT_TEST
tristate "KCSAN test for integrated runtime behaviour" tristate "KCSAN test for integrated runtime behaviour" if !KUNIT_ALL_TESTS
default KUNIT_ALL_TESTS
depends on TRACEPOINTS && KUNIT depends on TRACEPOINTS && KUNIT
select TORTURE_TEST select TORTURE_TEST
help help
......
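To build the renamed test, an illustrative config fragment (the exact dependency set is the one shown in the Kconfig hunk above):

        CONFIG_KUNIT=y
        CONFIG_KCSAN=y
        CONFIG_KCSAN_KUNIT_TEST=y

With CONFIG_KUNIT_ALL_TESTS=y the option now defaults to enabled, so it no longer has to be selected by hand; the resulting KUnit suite is named "kcsan" (see the kcsan_test.c hunk above) and each test case runs once per generated thread count.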
...@@ -189,7 +189,6 @@ Additional information may be found in these files: ...@@ -189,7 +189,6 @@ Additional information may be found in these files:
Documentation/atomic_t.txt Documentation/atomic_t.txt
Documentation/atomic_bitops.txt Documentation/atomic_bitops.txt
Documentation/core-api/atomic_ops.rst
Documentation/core-api/refcount-vs-atomic.rst Documentation/core-api/refcount-vs-atomic.rst
Reading code using these primitives is often also quite helpful. Reading code using these primitives is often also quite helpful.
......