Commit 4bd19084 authored by Davidlohr Bueso, committed by Ingo Molnar

locking/mutex: Introduce ww_mutex_set_context_slowpath()

... which is equivalent to the fastpath counterpart.
This mainly allows moving some WW-specific code out
of the generic mutex paths.
Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1420573509-24774-4-git-send-email-dave@stgolabs.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent e42f678a
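Context, not part of the commit: both the fastpath and the new slowpath helper publish the winning task's ww_acquire_ctx and wake any sleeping waiters so they can recheck whether they must back off. Below is a minimal, hypothetical sketch of the caller-side pattern that relies on this wake-up, using the documented ww_mutex API (ww_acquire_init/ww_mutex_lock/ww_mutex_lock_slow/ww_acquire_done/ww_acquire_fini). The names example_ww_class, lock_pair, a and b are illustrative only.

#include <linux/ww_mutex.h>

static DEFINE_WW_CLASS(example_ww_class);

/*
 * Hypothetical sketch: take two ww_mutexes of example_ww_class in a
 * deadlock-safe way.  A transaction that loses a conflict gets
 * -EDEADLK, drops everything it holds, and waits for the contended
 * lock with ww_mutex_lock_slow(); it is woken to recheck when the
 * winner publishes its context via the fastpath/slowpath helpers.
 */
static void lock_pair(struct ww_mutex *a, struct ww_mutex *b)
{
	struct ww_acquire_ctx ctx;
	struct ww_mutex *contended = NULL;
	int ret;

	ww_acquire_init(&ctx, &example_ww_class);
retry:
	if (contended) {
		/* We backed off earlier: sleep until the older transaction
		 * releases the contended lock, then keep holding it. */
		ww_mutex_lock_slow(contended, &ctx);
	}

	if (contended != a) {
		ret = ww_mutex_lock(a, &ctx);
		if (ret == -EDEADLK) {
			if (contended)
				ww_mutex_unlock(contended);
			contended = a;
			goto retry;
		}
	}

	if (contended != b) {
		ret = ww_mutex_lock(b, &ctx);
		if (ret == -EDEADLK) {
			ww_mutex_unlock(a);
			contended = b;
			goto retry;
		}
	}

	ww_acquire_done(&ctx);

	/* ... both locks held: do the work ... */

	ww_mutex_unlock(b);
	ww_mutex_unlock(a);
	ww_acquire_fini(&ctx);
}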
kernel/locking/mutex.c
@@ -147,7 +147,7 @@ static __always_inline void ww_mutex_lock_acquired(struct ww_mutex *ww,
 }
 
 /*
- * after acquiring lock with fastpath or when we lost out in contested
+ * After acquiring lock with fastpath or when we lost out in contested
  * slowpath, set ctx and wake up any waiters so they can recheck.
  *
  * This function is never called when CONFIG_DEBUG_LOCK_ALLOC is set,
@@ -191,6 +191,30 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
 	spin_unlock_mutex(&lock->base.wait_lock, flags);
 }
 
+/*
+ * After acquiring lock in the slowpath set ctx and wake up any
+ * waiters so they can recheck.
+ *
+ * Callers must hold the mutex wait_lock.
+ */
+static __always_inline void
+ww_mutex_set_context_slowpath(struct ww_mutex *lock,
+			      struct ww_acquire_ctx *ctx)
+{
+	struct mutex_waiter *cur;
+
+	ww_mutex_lock_acquired(lock, ctx);
+	lock->ctx = ctx;
+
+	/*
+	 * Give any possible sleeping processes the chance to wake up,
+	 * so they can recheck if they have to back off.
+	 */
+	list_for_each_entry(cur, &lock->base.wait_list, list) {
+		debug_mutex_wake_waiter(&lock->base, cur);
+		wake_up_process(cur->task);
+	}
+}
+
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
 static inline bool owner_running(struct mutex *lock, struct task_struct *owner)
@@ -576,23 +600,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 	if (use_ww_ctx) {
 		struct ww_mutex *ww = container_of(lock, struct ww_mutex, base);
-		struct mutex_waiter *cur;
-
-		/*
-		 * This branch gets optimized out for the common case,
-		 * and is only important for ww_mutex_lock.
-		 */
-		ww_mutex_lock_acquired(ww, ww_ctx);
-		ww->ctx = ww_ctx;
-
-		/*
-		 * Give any possible sleeping processes the chance to wake up,
-		 * so they can recheck if they have to back off.
-		 */
-		list_for_each_entry(cur, &lock->wait_list, list) {
-			debug_mutex_wake_waiter(lock, cur);
-			wake_up_process(cur->task);
-		}
+		ww_mutex_set_context_slowpath(ww, ww_ctx);
 	}
 
 	spin_unlock_mutex(&lock->wait_lock, flags);
...