Commit c35aea39 authored by Tejun Heo

workqueue: Update lock debugging code

These changes are in preparation of BH workqueue which will execute work
items from BH context.

- Update lock and RCU depth checks in process_one_work() so that it
  remembers and checks against the starting depths and prints out the depth
  changes.

- Factor out lockdep annotations in the flush paths into
  touch_{wq|work}_lockdep_map(). The work->lockdep_map touching is moved
  from __flush_work() to its callee - start_flush_work(). This brings it
  closer to the wq counterpart and will allow testing the associated wq's
  flags which will be needed to support BH workqueues. This is not expected
  to cause any functional changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Tested-by: Allen Pais <allen.lkml@gmail.com>
parent d412ace1
...@@ -2965,6 +2965,7 @@ __acquires(&pool->lock) ...@@ -2965,6 +2965,7 @@ __acquires(&pool->lock)
struct pool_workqueue *pwq = get_work_pwq(work); struct pool_workqueue *pwq = get_work_pwq(work);
struct worker_pool *pool = worker->pool; struct worker_pool *pool = worker->pool;
unsigned long work_data; unsigned long work_data;
int lockdep_start_depth, rcu_start_depth;
#ifdef CONFIG_LOCKDEP #ifdef CONFIG_LOCKDEP
/* /*
* It is permissible to free the struct work_struct from * It is permissible to free the struct work_struct from
...@@ -3027,6 +3028,8 @@ __acquires(&pool->lock) ...@@ -3027,6 +3028,8 @@ __acquires(&pool->lock)
pwq->stats[PWQ_STAT_STARTED]++; pwq->stats[PWQ_STAT_STARTED]++;
raw_spin_unlock_irq(&pool->lock); raw_spin_unlock_irq(&pool->lock);
rcu_start_depth = rcu_preempt_depth();
lockdep_start_depth = lockdep_depth(current);
lock_map_acquire(&pwq->wq->lockdep_map); lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_acquire(&lockdep_map); lock_map_acquire(&lockdep_map);
/* /*
...@@ -3062,12 +3065,15 @@ __acquires(&pool->lock) ...@@ -3062,12 +3065,15 @@ __acquires(&pool->lock)
lock_map_release(&lockdep_map); lock_map_release(&lockdep_map);
lock_map_release(&pwq->wq->lockdep_map); lock_map_release(&pwq->wq->lockdep_map);
if (unlikely(in_atomic() || lockdep_depth(current) > 0 || if (unlikely((worker->task && in_atomic()) ||
rcu_preempt_depth() > 0)) { lockdep_depth(current) != lockdep_start_depth ||
pr_err("BUG: workqueue leaked lock or atomic: %s/0x%08x/%d/%d\n" rcu_preempt_depth() != rcu_start_depth)) {
" last function: %ps\n", pr_err("BUG: workqueue leaked atomic, lock or RCU: %s[%d]\n"
current->comm, preempt_count(), rcu_preempt_depth(), " preempt=0x%08x lock=%d->%d RCU=%d->%d workfn=%ps\n",
task_pid_nr(current), worker->current_func); current->comm, task_pid_nr(current), preempt_count(),
lockdep_start_depth, lockdep_depth(current),
rcu_start_depth, rcu_preempt_depth(),
worker->current_func);
debug_show_held_locks(current); debug_show_held_locks(current);
dump_stack(); dump_stack();
} }
...@@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq, ...@@ -3549,6 +3555,19 @@ static bool flush_workqueue_prep_pwqs(struct workqueue_struct *wq,
return wait; return wait;
} }
/*
 * touch_wq_lockdep_map - record a lockdep dependency on @wq's lockdep map
 * @wq: workqueue whose lockdep_map is touched
 *
 * Acquire and immediately release @wq->lockdep_map.  This performs no real
 * locking; it only tells lockdep that the current context depends on @wq so
 * that flush-vs-work deadlocks (e.g. flushing a workqueue from one of its
 * own work items) are reported.  Factored out of __flush_workqueue() and
 * start_flush_work() by this commit.
 */
static void touch_wq_lockdep_map(struct workqueue_struct *wq)
{
lock_map_acquire(&wq->lockdep_map);
lock_map_release(&wq->lockdep_map);
}
/*
 * touch_work_lockdep_map - record a lockdep dependency on @work's lockdep map
 * @work: work item whose lockdep_map is touched
 * @wq: workqueue @work is associated with
 *
 * Acquire and immediately release @work->lockdep_map so lockdep learns that
 * the flushing context depends on @work completing.  @wq is currently unused
 * in this body; per the commit message it is passed so this helper can later
 * test the workqueue's flags when BH workqueue support is added.
 */
static void touch_work_lockdep_map(struct work_struct *work,
struct workqueue_struct *wq)
{
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
}
/** /**
* __flush_workqueue - ensure that any scheduled work has run to completion. * __flush_workqueue - ensure that any scheduled work has run to completion.
* @wq: workqueue to flush * @wq: workqueue to flush
...@@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq) ...@@ -3568,8 +3587,7 @@ void __flush_workqueue(struct workqueue_struct *wq)
if (WARN_ON(!wq_online)) if (WARN_ON(!wq_online))
return; return;
lock_map_acquire(&wq->lockdep_map); touch_wq_lockdep_map(wq);
lock_map_release(&wq->lockdep_map);
mutex_lock(&wq->mutex); mutex_lock(&wq->mutex);
...@@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, ...@@ -3768,6 +3786,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
struct worker *worker = NULL; struct worker *worker = NULL;
struct worker_pool *pool; struct worker_pool *pool;
struct pool_workqueue *pwq; struct pool_workqueue *pwq;
struct workqueue_struct *wq;
might_sleep(); might_sleep();
...@@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, ...@@ -3791,11 +3810,14 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
pwq = worker->current_pwq; pwq = worker->current_pwq;
} }
check_flush_dependency(pwq->wq, work); wq = pwq->wq;
check_flush_dependency(wq, work);
insert_wq_barrier(pwq, barr, work, worker); insert_wq_barrier(pwq, barr, work, worker);
raw_spin_unlock_irq(&pool->lock); raw_spin_unlock_irq(&pool->lock);
touch_work_lockdep_map(work, wq);
/* /*
* Force a lock recursion deadlock when using flush_work() inside a * Force a lock recursion deadlock when using flush_work() inside a
* single-threaded or rescuer equipped workqueue. * single-threaded or rescuer equipped workqueue.
...@@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr, ...@@ -3805,11 +3827,9 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
* workqueues the deadlock happens when the rescuer stalls, blocking * workqueues the deadlock happens when the rescuer stalls, blocking
* forward progress. * forward progress.
*/ */
if (!from_cancel && if (!from_cancel && (wq->saved_max_active == 1 || wq->rescuer))
(pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) { touch_wq_lockdep_map(wq);
lock_map_acquire(&pwq->wq->lockdep_map);
lock_map_release(&pwq->wq->lockdep_map);
}
rcu_read_unlock(); rcu_read_unlock();
return true; return true;
already_gone: already_gone:
...@@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel) ...@@ -3828,9 +3848,6 @@ static bool __flush_work(struct work_struct *work, bool from_cancel)
if (WARN_ON(!work->func)) if (WARN_ON(!work->func))
return false; return false;
lock_map_acquire(&work->lockdep_map);
lock_map_release(&work->lockdep_map);
if (start_flush_work(work, &barr, from_cancel)) { if (start_flush_work(work, &barr, from_cancel)) {
wait_for_completion(&barr.done); wait_for_completion(&barr.done);
destroy_work_on_stack(&barr.work); destroy_work_on_stack(&barr.work);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment