Commit 9022ada8 authored by Linus Torvalds

Merge branch 'for-4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq

Pull workqueue updates from Tejun Heo:
 "Over the lockdep cross-release churn, workqueue lost some of the
  existing annotations. Johannes Berg restored it and also improved
  them"

* 'for-4.19' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/wq:
  workqueue: re-add lockdep dependencies for flushing
  workqueue: skip lockdep wq dependency in cancel_work_sync()
parents 18b8bfdf 87915adc
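
The annotations discussed below use lockdep's "virtual lock" trick: the
flushing side does a lock_map_acquire()/lock_map_release() pair on the
workqueue's (or work item's) lockdep_map, which records a dependency from
every lock the flusher currently holds to the work being waited on, without
ever blocking. A minimal sketch of the deadlock pattern this catches
(my_mutex, my_wq, my_work_fn and teardown are hypothetical names; the
mutex and workqueue calls are the real APIs):

	#include <linux/mutex.h>
	#include <linux/workqueue.h>

	static DEFINE_MUTEX(my_mutex);
	static struct workqueue_struct *my_wq;	/* created elsewhere */

	static void my_work_fn(struct work_struct *work)
	{
		mutex_lock(&my_mutex);	/* the work depends on my_mutex */
		/* ... */
		mutex_unlock(&my_mutex);
	}

	static void teardown(void)
	{
		mutex_lock(&my_mutex);
		/*
		 * If my_work_fn is pending this hangs forever, since the
		 * work can never take my_mutex. With the restored
		 * annotation, flush_workqueue() "acquires" my_wq's
		 * lockdep_map here and lockdep reports the inversion even
		 * on runs that happen not to hang.
		 */
		flush_workqueue(my_wq);
		mutex_unlock(&my_mutex);
	}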
@@ -2652,6 +2652,9 @@ void flush_workqueue(struct workqueue_struct *wq)
 	if (WARN_ON(!wq_online))
 		return;
 
+	lock_map_acquire(&wq->lockdep_map);
+	lock_map_release(&wq->lockdep_map);
+
 	mutex_lock(&wq->mutex);
 
 	/*
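
The pair added above never blocks; it only teaches lockdep that the flusher
waits on the whole workqueue. The matching edge comes from the worker side,
which runs each callback with both the workqueue's and the work's maps
held. Roughly, paraphrased from process_one_work() in kernel/workqueue.c
with unrelated detail omitted:

	lock_map_acquire(&pwq->wq->lockdep_map);  /* wq map held ... */
	lock_map_acquire(&lockdep_map);           /* ... then the work's map */
	worker->current_func(work);               /* run the callback */
	lock_map_release(&lockdep_map);
	lock_map_release(&pwq->wq->lockdep_map);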
@@ -2843,7 +2846,8 @@ void drain_workqueue(struct workqueue_struct *wq)
 }
 EXPORT_SYMBOL_GPL(drain_workqueue);
 
-static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
+static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr,
+			     bool from_cancel)
 {
 	struct worker *worker = NULL;
 	struct worker_pool *pool;
@@ -2885,7 +2889,8 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	 * workqueues the deadlock happens when the rescuer stalls, blocking
 	 * forward progress.
 	 */
-	if (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer) {
+	if (!from_cancel &&
+	    (pwq->wq->saved_max_active == 1 || pwq->wq->rescuer)) {
 		lock_map_acquire(&pwq->wq->lockdep_map);
 		lock_map_release(&pwq->wq->lockdep_map);
 	}
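
The new !from_cancel check keeps cancel_work_sync() from taking this
wq-level dependency. Cancellation waits for at most the single executing
instance and steals the work if it is merely pending, so it may
legitimately be called from another work item on the same ordered
workqueue, where flush_work() really would deadlock. A hypothetical
pattern the skip keeps quiet (ordered_wq, work_a_fn and work_b are
illustrative names):

	static struct workqueue_struct *ordered_wq;	/* max_active == 1 */
	static struct work_struct work_b;

	static void work_a_fn(struct work_struct *work)
	{
		/*
		 * Safe: on an ordered workqueue work_b cannot be running
		 * while we are, and if it is queued behind us it is simply
		 * pulled off the pending list, so nothing waits on the
		 * whole queue.
		 */
		cancel_work_sync(&work_b);
	}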
@@ -2896,6 +2901,27 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
 	return false;
 }
 
+static bool __flush_work(struct work_struct *work, bool from_cancel)
+{
+	struct wq_barrier barr;
+
+	if (WARN_ON(!wq_online))
+		return false;
+
+	if (!from_cancel) {
+		lock_map_acquire(&work->lockdep_map);
+		lock_map_release(&work->lockdep_map);
+	}
+
+	if (start_flush_work(work, &barr, from_cancel)) {
+		wait_for_completion(&barr.done);
+		destroy_work_on_stack(&barr.work);
+		return true;
+	} else {
+		return false;
+	}
+}
+
 /**
  * flush_work - wait for a work to finish executing the last queueing instance
  * @work: the work to flush
@@ -2909,18 +2935,7 @@ static bool start_flush_work(struct work_struct *work, struct wq_barrier *barr)
  */
 bool flush_work(struct work_struct *work)
 {
-	struct wq_barrier barr;
-
-	if (WARN_ON(!wq_online))
-		return false;
-
-	if (start_flush_work(work, &barr)) {
-		wait_for_completion(&barr.done);
-		destroy_work_on_stack(&barr.work);
-		return true;
-	} else {
-		return false;
-	}
+	return __flush_work(work, false);
 }
 EXPORT_SYMBOL_GPL(flush_work);
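
Note that the normal path still annotates the work item itself:
__flush_work(work, false) acquires and releases work->lockdep_map, so a
work that flushes itself is reported immediately. A hypothetical example
(self_work_fn is an illustrative name):

	static void self_work_fn(struct work_struct *work)
	{
		/*
		 * The worker already "holds" this work's lockdep_map while
		 * the callback runs, so lockdep flags the recursive acquire
		 * before the flush can hang.
		 */
		flush_work(work);
	}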
@@ -2986,7 +3001,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork)
 	 * isn't executing.
 	 */
 	if (wq_online)
-		flush_work(work);
+		__flush_work(work, true);
 
 	clear_work_data(work);
...