Commit ea9e0fb8 authored by Nicolai Hähnle, committed by Ingo Molnar

locking/ww_mutex: Set use_ww_ctx even when locking without a context

We will add a new field to struct mutex_waiter. This field must be
initialized for all waiters if any waiter uses the use_ww_ctx path.
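
For context, struct mutex_waiter is currently this small (layout as in
include/linux/mutex.h at this point in the series; the comment only marks
where the new field will go, since the message does not name it):

	struct mutex_waiter {
		struct list_head	list;
		struct task_struct	*task;
		/* The follow-up patch adds its new per-waiter field here.
		 * It must be initialized for every waiter on the list once
		 * any waiter takes the use_ww_ctx path, which is what
		 * creates the trade-off described below. */
	#ifdef CONFIG_DEBUG_MUTEXES
		void			*magic;
	#endif
	};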

So there is a trade-off: either keep ww_mutex locking without a context
on the faster non-use_ww_ctx path, at the cost of adding the field
initialization to all mutex locks (including non-ww_mutexes), or avoid
that cost for non-ww_mutex locks, at the cost of extra checks on the
use_ww_ctx path.

We take the latter choice.  It may be worth eliminating the users of
ww_mutex_lock(lock, NULL), but there are a lot of them.
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-5-git-send-email-nhaehnle@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 3822da3e
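
The "extra checks" above show up throughout the diff below as one repeated
guard pattern. A minimal sketch of why this pattern is cheap (the helper
name needs_wound_wait_check is hypothetical, not a kernel function):

	#include <linux/types.h>
	#include <linux/ww_mutex.h>

	/* use_ww_ctx is a compile-time constant at every call site of
	 * __mutex_lock_common(), so for plain mutexes the compiler drops
	 * this branch entirely; only ww_mutex callers pay the runtime
	 * ww_ctx NULL test, and NULL-context callers simply skip the
	 * wound/wait handling. */
	static inline bool needs_wound_wait_check(bool use_ww_ctx,
						  struct ww_acquire_ctx *ww_ctx)
	{
		return use_ww_ctx && ww_ctx && ww_ctx->acquired > 0;
	}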
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -222,11 +222,7 @@ extern int __must_check __ww_mutex_lock_interruptible(struct ww_mutex *lock,
  */
 static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock(lock, ctx);
-
-	mutex_lock(&lock->base);
-	return 0;
+	return __ww_mutex_lock(lock, ctx);
 }
 
 /**
@@ -262,10 +258,7 @@ static inline int ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 static inline int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
 							   struct ww_acquire_ctx *ctx)
 {
-	if (ctx)
-		return __ww_mutex_lock_interruptible(lock, ctx);
-	else
-		return mutex_lock_interruptible(&lock->base);
+	return __ww_mutex_lock_interruptible(lock, ctx);
 }
 
 /**
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -469,7 +469,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
 	for (;;) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			struct ww_mutex *ww;
 
 			ww = container_of(lock, struct ww_mutex, base);
@@ -629,8 +629,9 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	struct ww_mutex *ww;
 	int ret;
 
-	if (use_ww_ctx) {
-		ww = container_of(lock, struct ww_mutex, base);
+	ww = container_of(lock, struct ww_mutex, base);
+
+	if (use_ww_ctx && ww_ctx) {
 		if (unlikely(ww_ctx == READ_ONCE(ww->ctx)))
 			return -EALREADY;
 	}
@@ -642,7 +643,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	    mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
 		/* got the lock, yay! */
 		lock_acquired(&lock->dep_map, ip);
-		if (use_ww_ctx)
+		if (use_ww_ctx && ww_ctx)
 			ww_mutex_set_context_fastpath(ww, ww_ctx);
 		preempt_enable();
 		return 0;
@@ -688,7 +689,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 				goto err;
 		}
 
-		if (use_ww_ctx && ww_ctx->acquired > 0) {
+		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
 			ret = __ww_mutex_lock_check_stamp(lock, ww_ctx);
 			if (ret)
 				goto err;
@@ -728,7 +729,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	/* got the lock - cleanup and rejoice! */
 	lock_acquired(&lock->dep_map, ip);
-	if (use_ww_ctx)
+	if (use_ww_ctx && ww_ctx)
 		ww_mutex_set_context_slowpath(ww, ww_ctx);
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
@@ -816,8 +817,9 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_UNINTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
-	if (!ret && ctx->acquired > 1)
+				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				  ctx, 1);
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -831,9 +833,10 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 	ret = __mutex_lock_common(&lock->base, TASK_INTERRUPTIBLE,
-				  0, &ctx->dep_map, _RET_IP_, ctx, 1);
-	if (!ret && ctx->acquired > 1)
+				  0, ctx ? &ctx->dep_map : NULL, _RET_IP_,
+				  ctx, 1);
+	if (!ret && ctx && ctx->acquired > 1)
 		return ww_mutex_deadlock_injection(lock, ctx);
 
 	return ret;
@@ -1021,7 +1024,8 @@ __ww_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
 
@@ -1035,7 +1039,8 @@ __ww_mutex_lock_interruptible(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 	might_sleep();
 
 	if (__mutex_trylock_fast(&lock->base)) {
-		ww_mutex_set_context_fastpath(lock, ctx);
+		if (ctx)
+			ww_mutex_set_context_fastpath(lock, ctx);
 		return 0;
 	}
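
For reference, a usage sketch contrasting the two calling styles the commit
message distinguishes. The class, struct, and functions below (my_class,
my_obj, touch_one, touch_pair) are invented for illustration; the ww_* calls
are the real API:

	#include <linux/kernel.h>
	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(my_class);	/* illustrative lock class */

	struct my_obj {				/* illustrative object */
		struct ww_mutex lock;
	};

	/* Style 1: one of the many ww_mutex_lock(lock, NULL) call sites the
	 * message mentions. After this patch it goes through
	 * __ww_mutex_lock() with ctx == NULL instead of plain mutex_lock(),
	 * so no wound/wait deadlock avoidance is in play. */
	static void touch_one(struct my_obj *obj)
	{
		ww_mutex_lock(&obj->lock, NULL);
		/* ... modify obj ... */
		ww_mutex_unlock(&obj->lock);
	}

	/* Style 2: the contextful pattern, which gets -EALREADY/-EDEADLK
	 * handling. On -EDEADLK we were "wounded": back off, sleep on the
	 * contended lock, and retry in the new order. */
	static void touch_pair(struct my_obj *a, struct my_obj *b)
	{
		struct ww_acquire_ctx ctx;
		int ret;

		ww_acquire_init(&ctx, &my_class);

		ww_mutex_lock(&a->lock, &ctx);	/* first lock cannot deadlock */
		ret = ww_mutex_lock(&b->lock, &ctx);
		while (ret == -EDEADLK) {
			ww_mutex_unlock(&a->lock);
			ww_mutex_lock_slow(&b->lock, &ctx);
			swap(a, b);		/* retry the other lock next */
			ret = ww_mutex_lock(&b->lock, &ctx);
		}
		ww_acquire_done(&ctx);

		/* ... modify both objects ... */

		ww_mutex_unlock(&a->lock);
		ww_mutex_unlock(&b->lock);
		ww_acquire_fini(&ctx);
	}

After this patch the NULL-context style still works; it simply routes
through the use_ww_ctx path with a few extra NULL checks instead of plain
mutex_lock(), which is the cost the commit message accepts.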