Commit a2629483 authored by Linus Torvalds

Merge branch 'sched-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Thomas Gleixner:
 "These locking updates depend on the alreay merged sched/core branch:

   - Lockless top waiter wakeup for rtmutex (Davidlohr)

   - Reduce hash bucket lock contention for PI futexes (Sebastian)

   - Documentation update (Davidlohr)"

* 'sched-locking-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/rtmutex: Update stale plist comments
  futex: Lower the lock contention on the HB lock during wake up
  locking/rtmutex: Implement lockless top-waiter wakeup
parents e3d8238d 9f40a51a
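
For orientation before the diff: the common thread of these patches is the deferred-wakeup (wake queue) pattern, where the task to be woken is only queued while the contended lock is held, and the actual wakeup is issued after that lock has been dropped, so the woken task neither spins on nor immediately blocks against a lock its waker still owns. The sketch below is a self-contained user-space analogue of that pattern, not kernel code; the struct and helper names only loosely mirror the kernel's WAKE_Q()/wake_q_add()/wake_up_q() helpers used in the hunks that follow.

/*
 * User-space sketch of the deferred-wakeup pattern (pthreads, not kernel
 * code). Build with: gcc -pthread wakeq.c
 */
#include <pthread.h>
#include <stdio.h>

struct waiter {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        int woken;
        struct waiter *next_wake;       /* link on the pending-wakeup list */
};

struct wake_q {
        struct waiter *head;
};

/* Called with the contended lock held: only record the wakeup. */
static void wake_q_add(struct wake_q *q, struct waiter *w)
{
        w->next_wake = q->head;
        q->head = w;
}

/* Called after the contended lock has been dropped: issue the wakeups. */
static void wake_up_q(struct wake_q *q)
{
        struct waiter *w = q->head;

        q->head = NULL;
        while (w) {
                struct waiter *next = w->next_wake;     /* grab before waking */

                pthread_mutex_lock(&w->lock);
                w->woken = 1;
                pthread_cond_signal(&w->cond);
                pthread_mutex_unlock(&w->lock);
                w = next;
        }
}

static pthread_mutex_t hb_lock = PTHREAD_MUTEX_INITIALIZER;  /* stand-in for the hash-bucket lock */
static struct waiter w0 = {
        .lock = PTHREAD_MUTEX_INITIALIZER,
        .cond = PTHREAD_COND_INITIALIZER,
};

static void *waiter_fn(void *arg)
{
        struct waiter *w = arg;

        pthread_mutex_lock(&w->lock);
        while (!w->woken)
                pthread_cond_wait(&w->cond, &w->lock);
        pthread_mutex_unlock(&w->lock);
        puts("waiter: woken after hb_lock was released");
        return NULL;
}

int main(void)
{
        struct wake_q q = { .head = NULL };
        pthread_t t;

        pthread_create(&t, NULL, waiter_fn, &w0);

        pthread_mutex_lock(&hb_lock);
        wake_q_add(&q, &w0);            /* record the top waiter under the lock */
        pthread_mutex_unlock(&hb_lock); /* drop the lock first ...              */
        wake_up_q(&q);                  /* ... then perform the wakeup          */

        pthread_join(t, NULL);
        return 0;
}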
@@ -1117,11 +1117,14 @@ static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
         q->lock_ptr = NULL;
 }
 
-static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
+static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this,
+                         struct futex_hash_bucket *hb)
 {
         struct task_struct *new_owner;
         struct futex_pi_state *pi_state = this->pi_state;
         u32 uninitialized_var(curval), newval;
+        WAKE_Q(wake_q);
+        bool deboost;
         int ret = 0;
 
         if (!pi_state)
@@ -1173,7 +1176,19 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
         raw_spin_unlock_irq(&new_owner->pi_lock);
 
         raw_spin_unlock(&pi_state->pi_mutex.wait_lock);
-        rt_mutex_unlock(&pi_state->pi_mutex);
+
+        deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
+
+        /*
+         * First unlock HB so the waiter does not spin on it once he got woken
+         * up. Second wake up the waiter before the priority is adjusted. If we
+         * deboost first (and lose our higher priority), then the task might get
+         * scheduled away before the wake up can take place.
+         */
+        spin_unlock(&hb->lock);
+        wake_up_q(&wake_q);
+        if (deboost)
+                rt_mutex_adjust_prio(current);
 
         return 0;
 }
@@ -2410,13 +2425,23 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
          */
         match = futex_top_waiter(hb, &key);
         if (match) {
-                ret = wake_futex_pi(uaddr, uval, match);
+                ret = wake_futex_pi(uaddr, uval, match, hb);
+                /*
+                 * In case of success wake_futex_pi dropped the hash
+                 * bucket lock.
+                 */
+                if (!ret)
+                        goto out_putkey;
                 /*
                  * The atomic access to the futex value generated a
                  * pagefault, so retry the user-access and the wakeup:
                  */
                 if (ret == -EFAULT)
                         goto pi_faulted;
+                /*
+                 * wake_futex_pi has detected invalid state. Tell user
+                 * space.
+                 */
                 goto out_unlock;
         }
@@ -2437,6 +2462,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags)
 out_unlock:
         spin_unlock(&hb->lock);
+out_putkey:
         put_futex_key(&key);
         return ret;
...
@@ -300,7 +300,7 @@ static void __rt_mutex_adjust_prio(struct task_struct *task)
  * of task. We do not use the spin_xx_mutex() variants here as we are
  * outside of the debug path.)
  */
-static void rt_mutex_adjust_prio(struct task_struct *task)
+void rt_mutex_adjust_prio(struct task_struct *task)
 {
         unsigned long flags;
@@ -624,7 +624,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
          */
         prerequeue_top_waiter = rt_mutex_top_waiter(lock);
 
-        /* [7] Requeue the waiter in the lock waiter list. */
+        /* [7] Requeue the waiter in the lock waiter tree. */
         rt_mutex_dequeue(lock, waiter);
         waiter->prio = task->prio;
         rt_mutex_enqueue(lock, waiter);
@@ -662,7 +662,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 /*
                  * The waiter became the new top (highest priority)
                  * waiter on the lock. Replace the previous top waiter
-                 * in the owner tasks pi waiters list with this waiter
+                 * in the owner tasks pi waiters tree with this waiter
                  * and adjust the priority of the owner.
                  */
                 rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
@@ -673,7 +673,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                 /*
                  * The waiter was the top waiter on the lock, but is
                  * no longer the top prority waiter. Replace waiter in
-                 * the owner tasks pi waiters list with the new top
+                 * the owner tasks pi waiters tree with the new top
                  * (highest priority) waiter and adjust the priority
                  * of the owner.
                  * The new top waiter is stored in @waiter so that
@@ -747,7 +747,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
  *
  * @lock:   The lock to be acquired.
  * @task:   The task which wants to acquire the lock
- * @waiter: The waiter that is queued to the lock's wait list if the
+ * @waiter: The waiter that is queued to the lock's wait tree if the
  *          callsite called task_blocked_on_lock(), otherwise NULL
  */
 static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
@@ -782,7 +782,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         /*
          * If @waiter != NULL, @task has already enqueued the waiter
-         * into @lock waiter list. If @waiter == NULL then this is a
+         * into @lock waiter tree. If @waiter == NULL then this is a
          * trylock attempt.
          */
         if (waiter) {
@@ -795,7 +795,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                 /*
                  * We can acquire the lock. Remove the waiter from the
-                 * lock waiters list.
+                 * lock waiters tree.
                  */
                 rt_mutex_dequeue(lock, waiter);
@@ -827,7 +827,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
                          * No waiters. Take the lock without the
                          * pi_lock dance.@task->pi_blocked_on is NULL
                          * and we have no waiters to enqueue in @task
-                         * pi waiters list.
+                         * pi waiters tree.
                          */
                         goto takeit;
                 }
@@ -844,7 +844,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
         /*
          * Finish the lock acquisition. @task is the new owner. If
          * other waiters exist we have to insert the highest priority
-         * waiter into @task->pi_waiters list.
+         * waiter into @task->pi_waiters tree.
          */
         if (rt_mutex_has_waiters(lock))
                 rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
@@ -955,14 +955,13 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 }
 
 /*
- * Wake up the next waiter on the lock.
- *
- * Remove the top waiter from the current tasks pi waiter list and
- * wake it up.
+ * Remove the top waiter from the current tasks pi waiter tree and
+ * queue it up.
  *
  * Called with lock->wait_lock held.
  */
-static void wakeup_next_waiter(struct rt_mutex *lock)
+static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
+                                    struct rt_mutex *lock)
 {
         struct rt_mutex_waiter *waiter;
         unsigned long flags;
@@ -991,12 +990,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
-        /*
-         * It's safe to dereference waiter as it cannot go away as
-         * long as we hold lock->wait_lock. The waiter task needs to
-         * acquire it in order to dequeue the waiter.
-         */
-        wake_up_process(waiter->task);
+        wake_q_add(wake_q, waiter->task);
 }
 
 /*
@@ -1250,10 +1244,11 @@ static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
 }
 
 /*
- * Slow path to release a rt-mutex:
+ * Slow path to release a rt-mutex.
+ * Return whether the current task needs to undo a potential priority boosting.
  */
-static void __sched
-rt_mutex_slowunlock(struct rt_mutex *lock)
+static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
+                                        struct wake_q_head *wake_q)
 {
         raw_spin_lock(&lock->wait_lock);
@@ -1295,7 +1290,7 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
         while (!rt_mutex_has_waiters(lock)) {
                 /* Drops lock->wait_lock ! */
                 if (unlock_rt_mutex_safe(lock) == true)
-                        return;
+                        return false;
                 /* Relock the rtmutex and try again */
                 raw_spin_lock(&lock->wait_lock);
         }
@@ -1303,13 +1298,15 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
         /*
          * The wakeup next waiter path does not suffer from the above
          * race. See the comments there.
+         *
+         * Queue the next waiter for wakeup once we release the wait_lock.
          */
-        wakeup_next_waiter(lock);
+        mark_wakeup_next_waiter(wake_q, lock);
 
         raw_spin_unlock(&lock->wait_lock);
 
-        /* Undo pi boosting if necessary: */
-        rt_mutex_adjust_prio(current);
+        /* check PI boosting */
+        return true;
 }
 
 /*
@@ -1360,12 +1357,23 @@ rt_mutex_fasttrylock(struct rt_mutex *lock,
 
 static inline void
 rt_mutex_fastunlock(struct rt_mutex *lock,
-                    void (*slowfn)(struct rt_mutex *lock))
+                    bool (*slowfn)(struct rt_mutex *lock,
+                                   struct wake_q_head *wqh))
 {
-        if (likely(rt_mutex_cmpxchg(lock, current, NULL)))
+        WAKE_Q(wake_q);
+
+        if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
                 rt_mutex_deadlock_account_unlock(current);
-        else
-                slowfn(lock);
+
+        } else {
+                bool deboost = slowfn(lock, &wake_q);
+
+                wake_up_q(&wake_q);
+
+                /* Undo pi boosting if necessary: */
+                if (deboost)
+                        rt_mutex_adjust_prio(current);
+        }
 }
 
 /**
@@ -1466,6 +1474,23 @@ void __sched rt_mutex_unlock(struct rt_mutex *lock)
 }
 EXPORT_SYMBOL_GPL(rt_mutex_unlock);
 
+/**
+ * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
+ * @lock: the rt_mutex to be unlocked
+ *
+ * Returns: true/false indicating whether priority adjustment is
+ * required or not.
+ */
+bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
+                                   struct wake_q_head *wqh)
+{
+        if (likely(rt_mutex_cmpxchg(lock, current, NULL))) {
+                rt_mutex_deadlock_account_unlock(current);
+                return false;
+        }
+        return rt_mutex_slowunlock(lock, wqh);
+}
+
 /**
  * rt_mutex_destroy - mark a mutex unusable
  * @lock: the mutex to be destroyed
...
@@ -131,6 +131,9 @@ extern int rt_mutex_finish_proxy_lock(struct rt_mutex *lock,
                                       struct hrtimer_sleeper *to,
                                       struct rt_mutex_waiter *waiter);
 extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
+extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
+                                  struct wake_q_head *wqh);
+extern void rt_mutex_adjust_prio(struct task_struct *task);
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
 # include "rtmutex-debug.h"
...
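
Condensed from the wake_futex_pi() and futex_unlock_pi() hunks above, the unlock path on the futex side now runs roughly as follows (a kernel-context fragment for illustration only; the error handling of the real functions is omitted):

        WAKE_Q(wake_q);
        bool deboost;

        /* Queue the top waiter instead of waking it while locks are held. */
        deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);

        /* Drop the hash-bucket lock so the woken waiter cannot spin on it. */
        spin_unlock(&hb->lock);

        /* Wake the queued waiter first ... */
        wake_up_q(&wake_q);

        /*
         * ... and only then undo a possible priority boost, so the current
         * task is not scheduled away before the wakeup has been issued.
         */
        if (deboost)
                rt_mutex_adjust_prio(current);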