Commit d8f14b84 authored by Linus Torvalds

Merge tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull debugobjects fixes from Thomas Gleixner:
 "Two fixes for debugobjects:

   - Prevent the allocation path from waking up kswapd.

      That's a long-standing issue caused by the GFP_ATOMIC allocation
      flag. As debugobjects can be invoked from pretty much any context,
      waking kswapd can end up in arbitrary lock chains versus the
      waitqueue lock (see the flag sketch below the commit header).

   - Correct the explicit lockdep wait-type violation in
     debug_object_fill_pool()"

* tag 'core-debugobjects-2023-05-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  debugobjects: Don't wake up kswapd from fill_pool()
  debugobjects,locking: Annotate debug_object_fill_pool() wait type violation
parents 9bd5386c eb799279
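
For context on the first fix: GFP_ATOMIC is not a single flag but, per include/linux/gfp_types.h, the combination (__GFP_HIGH | __GFP_KSWAPD_RECLAIM). A minimal sketch of the flag arithmetic behind the fill_pool() change; the helper below is illustrative and not part of the patch:

#include <linux/gfp.h>

/*
 * GFP_ATOMIC == (__GFP_HIGH | __GFP_KSWAPD_RECLAIM), so the old
 * fill_pool() allocation implicitly asked the allocator to wake kswapd
 * whenever the low watermark was hit.  Spelling out __GFP_HIGH keeps the
 * high-priority access to reserves but drops the wakeup.  __GFP_NORETRY
 * could be dropped too: without __GFP_DIRECT_RECLAIM it never had an
 * effect on this allocation.
 */
static inline gfp_t debug_objs_gfp(void)	/* illustrative helper only */
{
	return __GFP_HIGH | __GFP_NOWARN;	/* no __GFP_KSWAPD_RECLAIM */
}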
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -344,6 +344,16 @@ extern void lock_unpin_lock(struct lockdep_map *lock, struct pin_cookie);
 #define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
 #define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
 
+/*
+ * Must use lock_map_acquire_try() with override maps to avoid
+ * lockdep thinking they participate in the block chain.
+ */
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map _name = {			\
+		.name = #_name "-wait-type-override",	\
+		.wait_type_inner = _wait_type,		\
+		.lock_type = LD_LOCK_WAIT_OVERRIDE, }
+
 #else /* !CONFIG_LOCKDEP */
 
 static inline void lockdep_init_task(struct task_struct *task)
@@ -432,6 +442,9 @@ extern int lockdep_is_held(const void *);
 #define lockdep_repin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 #define lockdep_unpin_lock(l, c)	do { (void)(l); (void)(c); } while (0)
 
+#define DEFINE_WAIT_OVERRIDE_MAP(_name, _wait_type)	\
+	struct lockdep_map __maybe_unused _name = {}
+
 #endif /* !LOCKDEP */
 
 enum xhlock_context_t {
@@ -556,6 +569,7 @@ do {						\
 #define rwsem_release(l, i)			lock_release(l, i)
 
 #define lock_map_acquire(l)			lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
+#define lock_map_acquire_try(l)			lock_acquire_exclusive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_acquire_read(l)		lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
 #define lock_map_acquire_tryread(l)		lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
 #define lock_map_release(l)			lock_release(l, _THIS_IP_)
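Taken together, the lockdep.h hunks above add an annotation-only map type plus the exclusive trylock needed to use it. A condensed usage sketch, with hypothetical names (the real user is the debugobjects hunk at the end of this diff):

/* Declare a map whose only job is to raise the permitted wait type. */
static DEFINE_WAIT_OVERRIDE_MAP(my_override_map, LD_WAIT_SLEEP);

static void guarded_section(void)		/* hypothetical function */
{
	/*
	 * Trylock-acquire: lockdep records the map for wait-type checking
	 * but, being a trylock, it is not linked into the block chain.
	 */
	lock_map_acquire_try(&my_override_map);
	/* ... code whose inner spinlock_t usage is known-safe here ... */
	lock_map_release(&my_override_map);
}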
--- a/include/linux/lockdep_types.h
+++ b/include/linux/lockdep_types.h
@@ -33,6 +33,7 @@ enum lockdep_wait_type {
 
 enum lockdep_lock_type {
 	LD_LOCK_NORMAL = 0,	/* normal, catch all */
 	LD_LOCK_PERCPU,		/* percpu */
+	LD_LOCK_WAIT_OVERRIDE,	/* annotation */
 	LD_LOCK_MAX,
 };
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2263,6 +2263,9 @@ static inline bool usage_match(struct lock_list *entry, void *mask)
 
 static inline bool usage_skip(struct lock_list *entry, void *mask)
 {
+	if (entry->class->lock_type == LD_LOCK_NORMAL)
+		return false;
+
 	/*
 	 * Skip local_lock() for irq inversion detection.
 	 *
@@ -2289,14 +2292,16 @@ static inline bool usage_skip(struct lock_list *entry, void *mask)
 	 * As a result, we will skip local_lock(), when we search for irq
 	 * inversion bugs.
 	 */
-	if (entry->class->lock_type == LD_LOCK_PERCPU) {
-		if (DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
-			return false;
+	if (entry->class->lock_type == LD_LOCK_PERCPU &&
+	    DEBUG_LOCKS_WARN_ON(entry->class->wait_type_inner < LD_WAIT_CONFIG))
+		return false;
 
-		return true;
-	}
+	/*
+	 * Skip WAIT_OVERRIDE for irq inversion detection -- it's not actually
+	 * a lock and only used to override the wait_type.
+	 */
 
-	return false;
+	return true;
 }
 
 /*
@@ -4768,7 +4773,8 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 
 	for (; depth < curr->lockdep_depth; depth++) {
 		struct held_lock *prev = curr->held_locks + depth;
-		u8 prev_inner = hlock_class(prev)->wait_type_inner;
+		struct lock_class *class = hlock_class(prev);
+		u8 prev_inner = class->wait_type_inner;
 
 		if (prev_inner) {
 			/*
@@ -4778,6 +4784,14 @@ static int check_wait_context(struct task_struct *curr, struct held_lock *next)
 			 * Also due to trylocks.
 			 */
 			curr_inner = min(curr_inner, prev_inner);
+
+			/*
+			 * Allow override for annotations -- this is typically
+			 * only valid/needed for code that only exists when
+			 * CONFIG_PREEMPT_RT=n.
+			 */
+			if (unlikely(class->lock_type == LD_LOCK_WAIT_OVERRIDE))
+				curr_inner = prev_inner;
 		}
 	}
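The check_wait_context() hunk is what makes the annotation effective: curr_inner normally only ratchets downward through min(), but a held LD_LOCK_WAIT_OVERRIDE entry resets it to the override's wait type for every lock acquired after it. A hypothetical held-lock sequence (lock names assumed for illustration):

raw_spin_lock(&foo->lock);	/* curr_inner clamps to LD_WAIT_SPIN */
lock_map_acquire_try(&ovr);	/* LD_LOCK_WAIT_OVERRIDE at LD_WAIT_SLEEP:
				 * curr_inner is reset to LD_WAIT_SLEEP */
spin_lock(&bar->lock);		/* inner LD_WAIT_CONFIG <= LD_WAIT_SLEEP: OK.
				 * Without the override, LD_WAIT_CONFIG >
				 * LD_WAIT_SPIN would trigger
				 * print_lock_invalid_wait_context(). */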
--- a/lib/debugobjects.c
+++ b/lib/debugobjects.c
@@ -126,7 +126,7 @@ static const char *obj_states[ODEBUG_STATE_MAX] = {
 
 static void fill_pool(void)
 {
-	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
+	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 	struct debug_obj *obj;
 	unsigned long flags;
 
@@ -591,10 +591,21 @@ static void debug_objects_fill_pool(void)
 {
 	/*
 	 * On RT enabled kernels the pool refill must happen in preemptible
-	 * context:
+	 * context -- for !RT kernels we rely on the fact that spinlock_t and
+	 * raw_spinlock_t are basically the same type and this lock-type
+	 * inversion works just fine.
 	 */
-	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible())
+	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
+		/*
+		 * Annotate away the spinlock_t inside raw_spinlock_t warning
+		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
+		 * the preemptible() condition above.
+		 */
+		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
+		lock_map_acquire_try(&fill_pool_map);
 		fill_pool();
+		lock_map_release(&fill_pool_map);
+	}
 }
 
 static void
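Putting the debugobjects hunks together: the override wraps only the pool refill, so the annotation is exactly as narrow as the known-benign violation. A sketch of the resulting call path; the outer caller is a hypothetical debug_object_activate() user holding a raw spinlock, not something named by the patch:

/*
 * caller with a raw_spinlock_t held        (hypothetical, e.g. timer code)
 *   debug_object_activate()
 *     debug_objects_fill_pool()
 *       lock_map_acquire_try(&fill_pool_map)   wait-type raised to LD_WAIT_SLEEP
 *       fill_pool()
 *         kmem_cache_zalloc(obj_cache, __GFP_HIGH | __GFP_NOWARN)
 *             may take a spinlock_t internally; fine on !PREEMPT_RT, and now
 *             annotated so lockdep's RT wait-type check stays quiet
 *       lock_map_release(&fill_pool_map)
 */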