Commit 25f13b40 authored by Nicolai Hähnle, committed by Ingo Molnar

locking/ww_mutex: Re-check ww->ctx in the inner optimistic spin loop

In the following scenario, thread #1 should back off its attempt to lock
ww1 and unlock ww2 (assuming the acquire context stamps are ordered
accordingly).

    Thread #0               Thread #1
    ---------               ---------
                            successfully lock ww2
    set ww1->base.owner
                            attempt to lock ww1
                            confirm ww1->ctx == NULL
                            enter mutex_spin_on_owner
    set ww1->ctx

What was likely to happen previously is:

    attempt to lock ww2
    refuse to spin because
      ww2->ctx != NULL
    schedule()
                            detect thread #0 is off CPU
                            stop optimistic spin
                            return -EDEADLK
                            unlock ww2
                            wakeup thread #0
    lock ww2

Now, we are more likely to see:

                            detect ww1->ctx != NULL
                            stop optimistic spin
                            return -EDEADLK
                            unlock ww2
    successfully lock ww2

... because thread #1 will stop its optimistic spin as soon as possible.

The whole scenario is quite unlikely, since it requires thread #1 to get
between thread #0 setting the owner and setting the ctx. But since we're
idling here anyway, the additional check is basically free.
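
For reference, the back-off that thread #1 performs above is the standard
wait-die dance of the ww_mutex API. Below is a minimal sketch, assuming the
documented ww_acquire_init()/ww_mutex_lock()/ww_mutex_lock_slow() entry
points; lock_pair() and ww_class are hypothetical names, and error handling
beyond -EDEADLK is elided:

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(ww_class);	/* hypothetical lock class */

    static void lock_pair(struct ww_mutex *ww1, struct ww_mutex *ww2)
    {
    	struct ww_acquire_ctx ctx;

    	ww_acquire_init(&ctx, &ww_class);

    	ww_mutex_lock(ww2, &ctx);		/* "successfully lock ww2" */

    	if (ww_mutex_lock(ww1, &ctx) == -EDEADLK) {
    		/*
    		 * A context with an older stamp owns ww1: back off by
    		 * releasing everything we hold, then sleep-wait for ww1.
    		 */
    		ww_mutex_unlock(ww2);
    		ww_mutex_lock_slow(ww1, &ctx);	/* cannot return -EDEADLK */
    		ww_mutex_lock(ww2, &ctx);	/* real code loops on -EDEADLK */
    	}
    	ww_acquire_done(&ctx);

    	/* ... critical section covering both locks ... */

    	ww_mutex_unlock(ww1);
    	ww_mutex_unlock(ww2);
    	ww_acquire_fini(&ctx);
    }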

Found by inspection.
Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Maarten Lankhorst <dev@mblankhorst.nl>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dri-devel@lists.freedesktop.org
Link: http://lkml.kernel.org/r/1482346000-9927-10-git-send-email-nhaehnle@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
@@ -372,11 +372,14 @@ ww_mutex_set_context_slowpath(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 
 /*
- * Look out! "owner" is an entirely speculative pointer
- * access and not reliable.
+ * Look out! "owner" is an entirely speculative pointer access and not
+ * reliable.
+ *
+ * "noinline" so that this function shows up on perf profiles.
  */
 static noinline
-bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
+bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
+			 struct ww_acquire_ctx *ww_ctx)
 {
 	bool ret = true;
 
@@ -399,6 +402,28 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
 			break;
 		}
 
+		if (ww_ctx && ww_ctx->acquired > 0) {
+			struct ww_mutex *ww;
+
+			ww = container_of(lock, struct ww_mutex, base);
+
+			/*
+			 * If ww->ctx is set the contents are undefined, only
+			 * by acquiring wait_lock there is a guarantee that
+			 * they are not invalid when reading.
+			 *
+			 * As such, when deadlock detection needs to be
+			 * performed the optimistic spinning cannot be done.
+			 *
+			 * Check this in every inner iteration because we may
+			 * be racing against another thread's ww_mutex_lock.
+			 */
+			if (READ_ONCE(ww->ctx)) {
+				ret = false;
+				break;
+			}
+		}
+
 		cpu_relax();
 	}
 	rcu_read_unlock();
@@ -484,22 +509,6 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 	for (;;) {
 		struct task_struct *owner;
 
-		if (use_ww_ctx && ww_ctx && ww_ctx->acquired > 0) {
-			struct ww_mutex *ww;
-
-			ww = container_of(lock, struct ww_mutex, base);
-
-			/*
-			 * If ww->ctx is set the contents are undefined, only
-			 * by acquiring wait_lock there is a guarantee that
-			 * they are not invalid when reading.
-			 *
-			 * As such, when deadlock detection needs to be
-			 * performed the optimistic spinning cannot be done.
-			 */
-			if (READ_ONCE(ww->ctx))
-				goto fail_unlock;
-		}
-
 		/* Try to acquire the mutex... */
 		owner = __mutex_trylock_or_owner(lock);
 		if (!owner)
@@ -509,7 +518,7 @@ mutex_optimistic_spin(struct mutex *lock, struct ww_acquire_ctx *ww_ctx,
 		 * There's an owner, wait for it to either
 		 * release the lock or go to sleep.
 		 */
-		if (!mutex_spin_on_owner(lock, owner))
+		if (!mutex_spin_on_owner(lock, owner, ww_ctx))
 			goto fail_unlock;
 
 		/*
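
Aside: the container_of() in both hunks recovers the wrapper object from the
embedded plain mutex. A simplified view of the layout it relies on, based on
include/linux/ww_mutex.h with the debug-only field omitted; note the spinner
may only use ww->ctx as a NULL/non-NULL hint via READ_ONCE(), since the
pointed-to contents are unstable without wait_lock:

    struct ww_mutex {
    	struct mutex		base;	/* the mutex being optimistically spun on */
    	struct ww_acquire_ctx	*ctx;	/* non-NULL once owned under an acquire context */
    };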