Commit 1b1d2fb4 authored by Colin Cross, committed by Rafael J. Wysocki

lockdep: remove task argument from debug_check_no_locks_held

The only existing caller of debug_check_no_locks_held() passes
'current' as the task, and the freezer needs to call
debug_check_no_locks_held() from a context that has no task pointer
at hand, so remove the argument.  The function already assumes the
current task is the relevant one: it dumps the current stack trace
as part of the warning.
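
For illustration, a freezer-side caller enabled by this change could
look like the sketch below.  The placement (try_to_freeze() in
include/linux/freezer.h) is hypothetical, and try_to_freeze_unsafe()
comes from the related freezer rework in this series; neither is
introduced by this commit:

/*
 * Sketch of a freezer-side caller (hypothetical, not part of this
 * commit): with the task argument gone, the held-locks check needs
 * no task_struct pointer from the caller.
 */
static inline bool try_to_freeze(void)
{
	might_sleep();
	debug_check_no_locks_held();	/* operates on 'current' internally */
	return try_to_freeze_unsafe();
}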

This was originally part of commit 6aa97070 ("lockdep: check that
no locks held at freeze time"), which was reverted in commit
dbf520a9.

Original-author: Mandeep Singh Baines <msb@chromium.org>
Acked-by: Pavel Machek <pavel@ucw.cz>
Acked-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Colin Cross <ccross@android.com>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 5853cc2a
--- a/include/linux/debug_locks.h
+++ b/include/linux/debug_locks.h
@@ -51,7 +51,7 @@ struct task_struct;
 extern void debug_show_all_locks(void);
 extern void debug_show_held_locks(struct task_struct *task);
 extern void debug_check_no_locks_freed(const void *from, unsigned long len);
-extern void debug_check_no_locks_held(struct task_struct *task);
+extern void debug_check_no_locks_held(void);
 #else
 static inline void debug_show_all_locks(void)
 {
@@ -67,7 +67,7 @@ debug_check_no_locks_freed(const void *from, unsigned long len)
 }
 
 static inline void
-debug_check_no_locks_held(struct task_struct *task)
+debug_check_no_locks_held(void)
 {
 }
 #endif
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -835,7 +835,7 @@ void do_exit(long code)
 	/*
 	 * Make sure we are holding no locks:
 	 */
-	debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held();
 	/*
 	 * We can do this unlocked here. The futex code uses this flag
 	 * just to verify whether the pi state cleanup has been done
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -4090,7 +4090,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
-static void print_held_locks_bug(struct task_struct *curr)
+static void print_held_locks_bug(void)
 {
 	if (!debug_locks_off())
 		return;
@@ -4099,22 +4099,21 @@ static void print_held_locks_bug(struct task_struct *curr)
 
 	printk("\n");
 	printk("=====================================\n");
-	printk("[ BUG: lock held at task exit time! ]\n");
+	printk("[ BUG: %s/%d still has locks held! ]\n",
+	       current->comm, task_pid_nr(current));
 	print_kernel_ident();
 	printk("-------------------------------------\n");
-	printk("%s/%d is exiting with locks still held!\n",
-	       curr->comm, task_pid_nr(curr));
-	lockdep_print_held_locks(curr);
-
+	lockdep_print_held_locks(current);
 	printk("\nstack backtrace:\n");
 	dump_stack();
 }
 
-void debug_check_no_locks_held(struct task_struct *task)
+void debug_check_no_locks_held(void)
 {
-	if (unlikely(task->lockdep_depth > 0))
-		print_held_locks_bug(task);
+	if (unlikely(current->lockdep_depth > 0))
+		print_held_locks_bug();
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_held);
 
 void debug_show_all_locks(void)
 {
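
For reference, the rewritten print_held_locks_bug() names the
offending task in the banner instead of asserting that it is exiting.
A rough skeleton of the resulting splat, assembled from the printk()
calls above, with the runtime-generated parts shown as placeholders:

=====================================
[ BUG: <comm>/<pid> still has locks held! ]
<kernel ident line from print_kernel_ident()>
-------------------------------------
<held locks from lockdep_print_held_locks(current)>

stack backtrace:
<dump_stack() output>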