Commit fd1a5b04 authored by Byungchul Park, committed by Ingo Molnar

workqueue: Remove now redundant lock acquisitions wrt. workqueue flushes

The workqueue code added manual lock acquisition annotations to catch
deadlocks.

After the lockdep crossrelease feature was introduced, some of those became
redundant, since wait_for_completion() already does the acquisition and
tracking.
since wait_for_completion() already does the acquisition and tracking.

Remove the duplicate annotations.
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: amir73il@gmail.com
Cc: axboe@kernel.dk
Cc: darrick.wong@oracle.com
Cc: david@fromorbit.com
Cc: hch@infradead.org
Cc: idryomov@gmail.com
Cc: johan@kernel.org
Cc: johannes.berg@intel.com
Cc: kernel-team@lge.com
Cc: linux-block@vger.kernel.org
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-mm@kvack.org
Cc: linux-xfs@vger.kernel.org
Cc: oleg@redhat.com
Cc: tj@kernel.org
Link: http://lkml.kernel.org/r/1508921765-15396-9-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent a7967bc3
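
Concretely, the pattern being removed looks roughly like the sketch below
(illustrative only, not code from the patch; wq and done are placeholder
names):

	/* Before crossrelease: flush paths manually told lockdep that
	 * flushing "acquires" the flushed object's lock class.
	 */
	lock_map_acquire(&wq->lockdep_map);
	lock_map_release(&wq->lockdep_map);

	/* With CONFIG_LOCKDEP_CROSSRELEASE, the completion itself carries
	 * a lockdep map and wait_for_completion() records the dependency,
	 * which makes the manual acquire/release pair above redundant.
	 */
	wait_for_completion(&done);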
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -218,7 +218,7 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
 									\
 		__init_work((_work), _onstack);				\
 		(_work)->data = (atomic_long_t) WORK_DATA_INIT();	\
-		lockdep_init_map(&(_work)->lockdep_map, #_work, &__key, 0); \
+		lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
 		INIT_LIST_HEAD(&(_work)->entry);			\
 		(_work)->func = (_func);				\
 	} while (0)
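
The hunk above only changes the name string passed to lockdep_init_map();
behaviour is unchanged. A rough illustration (my_work and my_work_fn are
made-up names), keeping in mind that #_work stringifies the macro argument:

	struct work_struct my_work;

	INIT_WORK(&my_work, my_work_fn);
	/* lockdep class name was "(&my_work)"; it is now
	 * "(work_completion)(&my_work)", so completion-wait classes stay
	 * visually distinct from ordinary locks in lockdep reports. */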
@@ -398,7 +398,7 @@ __alloc_workqueue_key(const char *fmt, unsigned int flags, int max_active,
 	static struct lock_class_key __key;				\
 	const char *__lock_name;					\
 									\
-	__lock_name = #fmt#args;					\
+	__lock_name = "(wq_completion)"#fmt#args;			\
 									\
 	__alloc_workqueue_key((fmt), (flags), (max_active),		\
 			      &__key, __lock_name, ##args);		\
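
The same applies on the workqueue side: #fmt#args stringifies the
alloc_workqueue() arguments (quotes included), so again only the reported
name changes. Hypothetical usage:

	struct workqueue_struct *my_wq;

	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 0);
	/* wait-side lockdep class name: (wq_completion)"my_wq"
	 * rather than the bare "my_wq". */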
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2497,15 +2497,8 @@ static void insert_wq_barrier(struct pool_workqueue *pwq,
 	INIT_WORK_ONSTACK(&barr->work, wq_barrier_func);
 	__set_bit(WORK_STRUCT_PENDING_BIT, work_data_bits(&barr->work));
-	/*
-	 * Explicitly init the crosslock for wq_barrier::done, make its lock
-	 * key a subkey of the corresponding work. As a result we won't
-	 * build a dependency between wq_barrier::done and unrelated work.
-	 */
-	lockdep_init_map_crosslock((struct lockdep_map *)&barr->done.map,
-				   "(complete)wq_barr::done",
-				   target->lockdep_map.key, 1);
-	__init_completion(&barr->done);
+	init_completion_map(&barr->done, &target->lockdep_map);
 	barr->task = current;
 
 	/*
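
init_completion_map() collapses the open-coded crosslock setup into a
single call: the barrier completion reuses the flushed work's lockdep map,
so waiting on the barrier is attributed to that work's class and no
dependency is built against unrelated work. A minimal usage sketch
(some_work is a placeholder; work_struct::lockdep_map exists only with
lockdep enabled):

	struct completion done;

	/* tie the completion's lockdep tracking to an existing map */
	init_completion_map(&done, &some_work.lockdep_map);

	/* later: this wait is recorded against some_work's lock class */
	wait_for_completion(&done);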
@@ -2611,16 +2604,13 @@ void flush_workqueue(struct workqueue_struct *wq)
 	struct wq_flusher this_flusher = {
 		.list = LIST_HEAD_INIT(this_flusher.list),
 		.flush_color = -1,
-		.done = COMPLETION_INITIALIZER_ONSTACK(this_flusher.done),
+		.done = COMPLETION_INITIALIZER_ONSTACK_MAP(this_flusher.done, wq->lockdep_map),
 	};
 	int next_color;
 
 	if (WARN_ON(!wq_online))
 		return;
 
-	lock_map_acquire(&wq->lockdep_map);
-	lock_map_release(&wq->lockdep_map);
-
 	mutex_lock(&wq->mutex);
 
 	/*
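
COMPLETION_INITIALIZER_ONSTACK_MAP() hands the on-stack flusher completion
the workqueue's map up front, so the wait_for_completion(&this_flusher.done)
later in flush_workqueue() supplies the dependency that the deleted
acquire/release pair used to annotate by hand. That keeps, for instance,
the classic self-flush deadlock detectable (sketch, made-up names):

	static struct workqueue_struct *my_wq;

	static void my_work_fn(struct work_struct *work)
	{
		/* Deadlock: waits for the workqueue that is currently
		 * running this work item. Lockdep can still flag it,
		 * now via the completion's (wq_completion) class. */
		flush_workqueue(my_wq);
	}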
@@ -2883,9 +2873,6 @@ bool flush_work(struct work_struct *work)
 	if (WARN_ON(!wq_online))
 		return false;
 
-	lock_map_acquire(&work->lockdep_map);
-	lock_map_release(&work->lockdep_map);
-
 	if (start_flush_work(work, &barr)) {
 		wait_for_completion(&barr.done);
 		destroy_work_on_stack(&barr.work);