Commit 200b1874 authored by Nicolai Hähnle, committed by Ingo Molnar

locking/ww_mutex: Notify waiters that have to back off while adding tasks to wait list

While adding our task as a waiter, detect if another task should back off
because of us.

With this patch, we establish the invariant that the wait list contains
at most one (sleeping) waiter with ww_ctx->acquired > 0, and this waiter
will be the first waiter with a context.

Since only waiters with ww_ctx->acquired > 0 have to back off, this allows
us to be much more economical with wakeups.
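
A minimal caller-side sketch of the acquire/back-off pattern that these wakeups serve (not part of this patch; demo_ww_class, lock_a and lock_b are hypothetical names, and error handling is reduced to the -EDEADLK case):

#include <linux/ww_mutex.h>

/* Hypothetical names; lock_a/lock_b are assumed ww_mutex_init()ed elsewhere. */
static DEFINE_WW_CLASS(demo_ww_class);
static struct ww_mutex lock_a, lock_b;

static void lock_both_demo(void)
{
	struct ww_acquire_ctx ctx;

	ww_acquire_init(&ctx, &demo_ww_class);

	/* First lock: ctx.acquired is still 0, so we simply wait if contended. */
	ww_mutex_lock(&lock_a, &ctx);

	/*
	 * Second lock: ctx.acquired > 0.  If an older context owns or is
	 * queued ahead of us on lock_b, we get -EDEADLK and must back off.
	 */
	if (ww_mutex_lock(&lock_b, &ctx) == -EDEADLK) {
		ww_mutex_unlock(&lock_a);		/* release everything we hold */
		ww_mutex_lock_slow(&lock_b, &ctx);	/* then sleep for the contended lock */
		ww_mutex_lock(&lock_a, &ctx);		/* real code would loop on -EDEADLK here */
	}

	ww_acquire_done(&ctx);
	/* ... critical section with both locks held ... */
	ww_mutex_unlock(&lock_b);
	ww_mutex_unlock(&lock_a);
	ww_acquire_fini(&ctx);
}

The patch is about the back-off branch above: a waiter that already holds locks (acquired > 0) must notice promptly when an older waiter is queued ahead of it, rather than waiting for the next unlock.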
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-8-git-send-email-nhaehnle@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6baa5c60
@@ -596,23 +596,34 @@ void __sched ww_mutex_unlock(struct ww_mutex *lock)
 EXPORT_SYMBOL(ww_mutex_unlock);
 
 static inline int __sched
-__ww_mutex_lock_check_stamp(struct mutex *lock, struct ww_acquire_ctx *ctx)
+__ww_mutex_lock_check_stamp(struct mutex *lock, struct mutex_waiter *waiter,
+			    struct ww_acquire_ctx *ctx)
 {
 	struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
 	struct ww_acquire_ctx *hold_ctx = READ_ONCE(ww->ctx);
+	struct mutex_waiter *cur;
 
-	if (!hold_ctx)
-		return 0;
+	if (hold_ctx && __ww_ctx_stamp_after(ctx, hold_ctx))
+		goto deadlock;
 
-	if (__ww_ctx_stamp_after(ctx, hold_ctx)) {
-#ifdef CONFIG_DEBUG_MUTEXES
-		DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
-		ctx->contending_lock = ww;
-#endif
-		return -EDEADLK;
+	/*
+	 * If there is a waiter in front of us that has a context, then its
+	 * stamp is earlier than ours and we must back off.
+	 */
+	cur = waiter;
+	list_for_each_entry_continue_reverse(cur, &lock->wait_list, list) {
+		if (cur->ww_ctx)
+			goto deadlock;
 	}
 
 	return 0;
+
+deadlock:
+#ifdef CONFIG_DEBUG_MUTEXES
+	DEBUG_LOCKS_WARN_ON(ctx->contending_lock);
+	ctx->contending_lock = ww;
+#endif
+	return -EDEADLK;
 }
 
 static inline int __sched
@@ -655,6 +666,15 @@ __ww_mutex_add_waiter(struct mutex_waiter *waiter,
 		}
 
 		pos = &cur->list;
+
+		/*
+		 * Wake up the waiter so that it gets a chance to back
+		 * off.
+		 */
+		if (cur->ww_ctx->acquired > 0) {
+			debug_mutex_wake_waiter(lock, cur);
+			wake_up_process(cur->task);
+		}
 	}
 
 	list_add_tail(&waiter->list, pos);
@@ -746,7 +766,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 
 		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
+			ret = __ww_mutex_lock_check_stamp(lock, &waiter, ww_ctx);
 			if (ret)
 				goto err;
 		}
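
For context, since the second hunk shows only the tail of the insertion loop, here is a simplified sketch of what the waiter-add scan does under this scheme (not the verbatim kernel code; the -EDEADLK early-out for the new waiter and the debug hooks are elided):

static void add_waiter_sketch(struct mutex *lock, struct mutex_waiter *waiter,
			      struct ww_acquire_ctx *ww_ctx)
{
	struct mutex_waiter *cur;
	struct list_head *pos = &lock->wait_list;

	/* Scan backwards for the first waiter whose stamp is older than ours. */
	list_for_each_entry_reverse(cur, &lock->wait_list, list) {
		if (!cur->ww_ctx)
			continue;			/* context-less waiters keep their place */

		if (__ww_ctx_stamp_after(ww_ctx, cur->ww_ctx))
			break;				/* cur is older: we queue behind it */

		pos = &cur->list;			/* we will queue in front of cur ... */

		if (cur->ww_ctx->acquired > 0)		/* ... so if cur already holds locks, */
			wake_up_process(cur->task);	/* wake it now so it can back off */
	}

	list_add_tail(&waiter->list, pos);
}

Because every new waiter that overtakes an existing context-bearing waiter wakes it immediately, at most the first waiter with a context can remain asleep with acquired > 0, which is the invariant stated in the commit message.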