/*
 * RT-Mutexes: simple blocking mutual exclusion locks with PI support
 *
 * started by Ingo Molnar and Thomas Gleixner.
 *
 *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *  Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
 *  Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
 *  Copyright (C) 2006 Esben Nielsen
 *
 *  See Documentation/locking/rt-mutex-design.txt for details.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/sched/signal.h>
#include <linux/sched/rt.h>
#include <linux/sched/deadline.h>
#include <linux/sched/wake_q.h>
#include <linux/sched/debug.h>
#include <linux/timer.h>

#include "rtmutex_common.h"

/*
 * lock->owner state tracking:
 *
 * lock->owner holds the task_struct pointer of the owner. Bit 0
 * is used to keep track of the "lock has waiters" state.
 *
 * owner	bit0
 * NULL		0	lock is free (fast acquire possible)
 * NULL		1	lock is free and has waiters and the top waiter
 *				is going to take the lock*
 * taskpointer	0	lock is held (fast release possible)
 * taskpointer	1	lock is held and has waiters**
 *
 * The fast atomic compare exchange based acquire and release is only
 * possible when bit 0 of lock->owner is 0.
 *
 * (*) It also can be a transitional state when grabbing the lock
 * with ->wait_lock held. To prevent any fast path cmpxchg to the lock,
 * we need to set bit 0 before looking at the lock, and the owner may
 * be NULL in this small time, hence this can be a transitional state.
 *
 * (**) There is a small time when bit 0 is set but there are no
 * waiters. This can happen when grabbing the lock in the slow path.
 * To prevent a cmpxchg of the owner releasing the lock, we need to
 * set this bit before looking at the lock.
 */
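
/*
 * Illustrative sketch: rt_mutex_set_owner() below packs the owner task
 * pointer and bit 0 into a single word, roughly as
 *
 *	lock->owner = (struct task_struct *)
 *			((unsigned long)owner_task | RT_MUTEX_HAS_WAITERS);
 *
 * and readers such as rt_mutex_owner() mask RT_MUTEX_HAS_WAITERS off
 * again before dereferencing the pointer. owner_task here is only a
 * placeholder name for the would-be owner.
 */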

static void
rt_mutex_set_owner(struct rt_mutex *lock, struct task_struct *owner)
{
	unsigned long val = (unsigned long)owner;

	if (rt_mutex_has_waiters(lock))
		val |= RT_MUTEX_HAS_WAITERS;

	lock->owner = (struct task_struct *)val;
}

static inline void clear_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
}

static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	if (rt_mutex_has_waiters(lock))
		return;

	/*
	 * The rbtree has no waiters enqueued, now make sure that the
	 * lock->owner still has the waiters bit set, otherwise the
	 * following can happen:
	 *
	 * CPU 0	CPU 1		CPU2
	 * l->owner=T1
	 *		rt_mutex_lock(l)
	 *		lock(l->lock)
	 *		l->owner = T1 | HAS_WAITERS;
	 *		enqueue(T2)
	 *		boost()
	 *		  unlock(l->lock)
	 *		block()
	 *
	 *				rt_mutex_lock(l)
	 *				lock(l->lock)
	 *				l->owner = T1 | HAS_WAITERS;
	 *				enqueue(T3)
	 *				boost()
	 *				  unlock(l->lock)
	 *				block()
	 *		signal(->T2)	signal(->T3)
	 *		lock(l->lock)
	 *		dequeue(T2)
	 *		deboost()
	 *		  unlock(l->lock)
	 *				lock(l->lock)
	 *				dequeue(T3)
	 *				 ==> wait list is empty
	 *				deboost()
	 *				 unlock(l->lock)
	 *		lock(l->lock)
	 *		fixup_rt_mutex_waiters()
	 *		  if (wait_list_empty(l) {
	 *		    owner = l->owner & ~HAS_WAITERS;
	 *		    l->owner = owner
	 *		      ==> l->owner = T1
	 *		  }
	 *				lock(l->lock)
	 * rt_mutex_unlock(l)		fixup_rt_mutex_waiters()
	 *				  if (wait_list_empty(l) {
	 *				    owner = l->owner & ~HAS_WAITERS;
	 * cmpxchg(l->owner, T1, NULL)
	 *  ===> Success (l->owner = NULL)
	 *
	 *				    l->owner = owner
	 *				      ==> l->owner = T1
	 *				  }
	 *
	 * With the check for the waiter bit in place T3 on CPU2 will not
	 * overwrite. All tasks fiddling with the waiters bit are
	 * serialized by l->lock, so nothing else can modify the waiters
	 * bit. If the bit is set then nothing can change l->owner either
	 * so the simple RMW is safe. The cmpxchg() will simply fail if it
	 * happens in the middle of the RMW because the waiters bit is
	 * still set.
	 */
	owner = READ_ONCE(*p);
	if (owner & RT_MUTEX_HAS_WAITERS)
		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
}

/*
 * We can speed up the acquire/release, if there's no debugging state to be
 * set up.
 */
#ifndef CONFIG_DEBUG_RT_MUTEXES
# define rt_mutex_cmpxchg_relaxed(l,c,n) (cmpxchg_relaxed(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_acquire(l,c,n) (cmpxchg_acquire(&l->owner, c, n) == c)
# define rt_mutex_cmpxchg_release(l,c,n) (cmpxchg_release(&l->owner, c, n) == c)

/*
 * Callers must hold the ->wait_lock -- which is the whole purpose as we force
 * all future threads that attempt to [Rmw] the lock to the slowpath. As such
 * relaxed semantics suffice.
 */
static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	unsigned long owner, *p = (unsigned long *) &lock->owner;

	do {
		owner = *p;
	} while (cmpxchg_relaxed(p, owner,
				 owner | RT_MUTEX_HAS_WAITERS) != owner);
}

/*
 * Safe fastpath aware unlock:
 * 1) Clear the waiters bit
 * 2) Drop lock->wait_lock
 * 3) Try to unlock the lock with cmpxchg
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	struct task_struct *owner = rt_mutex_owner(lock);

	clear_rt_mutex_waiters(lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	/*
	 * If a new waiter comes in between the unlock and the cmpxchg
	 * we have two situations:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 * cmpxchg(p, owner, 0) == owner
	 *					mark_rt_mutex_waiters(lock);
	 *					acquire(lock);
	 * or:
	 *
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					mark_rt_mutex_waiters(lock);
	 *
	 * cmpxchg(p, owner, 0) != owner
	 *					enqueue_waiter();
	 *					unlock(wait_lock);
	 * lock(wait_lock);
	 * wake waiter();
	 * unlock(wait_lock);
	 *					lock(wait_lock);
	 *					acquire(lock);
	 */
	return rt_mutex_cmpxchg_release(lock, owner, NULL);
}

#else
# define rt_mutex_cmpxchg_relaxed(l,c,n)	(0)
# define rt_mutex_cmpxchg_acquire(l,c,n)	(0)
# define rt_mutex_cmpxchg_release(l,c,n)	(0)

static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
{
	lock->owner = (struct task_struct *)
			((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
}

/*
 * Simple slow path only version: lock->owner is protected by lock->wait_lock.
 */
static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock,
					unsigned long flags)
	__releases(lock->wait_lock)
{
	lock->owner = NULL;
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
	return true;
}
#endif

/*
 * Only use with rt_mutex_waiter_{less,equal}()
 */
#define task_to_waiter(p)	\
	&(struct rt_mutex_waiter){ .prio = (p)->prio, .deadline = (p)->dl.deadline }

static inline int
rt_mutex_waiter_less(struct rt_mutex_waiter *left,
		     struct rt_mutex_waiter *right)
{
	if (left->prio < right->prio)
		return 1;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 1 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return dl_time_before(left->deadline, right->deadline);

	return 0;
}

static inline int
rt_mutex_waiter_equal(struct rt_mutex_waiter *left,
		      struct rt_mutex_waiter *right)
{
	if (left->prio != right->prio)
		return 0;

	/*
	 * If both waiters have dl_prio(), we check the deadlines of the
	 * associated tasks.
	 * If left waiter has a dl_prio(), and we didn't return 0 above,
	 * then right waiter has a dl_prio() too.
	 */
	if (dl_prio(left->prio))
		return left->deadline == right->deadline;

	return 1;
}
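
/*
 * Example (illustrative): a waiter with prio 10 orders before a waiter with
 * prio 20, because a lower number means a higher priority in the kernel
 * view. Two SCHED_DEADLINE waiters both carry the dl prio (-1), so
 * rt_mutex_waiter_less() falls back to comparing their ->deadline values,
 * earlier deadline first.
 */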

static void
rt_mutex_enqueue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &lock->waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		lock->waiters_leftmost = &waiter->tree_entry;

	rb_link_node(&waiter->tree_entry, parent, link);
	rb_insert_color(&waiter->tree_entry, &lock->waiters);
}

static void
rt_mutex_dequeue(struct rt_mutex *lock, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->tree_entry))
		return;

	if (lock->waiters_leftmost == &waiter->tree_entry)
		lock->waiters_leftmost = rb_next(&waiter->tree_entry);

	rb_erase(&waiter->tree_entry, &lock->waiters);
	RB_CLEAR_NODE(&waiter->tree_entry);
}

static void
rt_mutex_enqueue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	struct rb_node **link = &task->pi_waiters.rb_node;
	struct rb_node *parent = NULL;
	struct rt_mutex_waiter *entry;
	int leftmost = 1;

	while (*link) {
		parent = *link;
		entry = rb_entry(parent, struct rt_mutex_waiter, pi_tree_entry);
		if (rt_mutex_waiter_less(waiter, entry)) {
			link = &parent->rb_left;
		} else {
			link = &parent->rb_right;
			leftmost = 0;
		}
	}

	if (leftmost)
		task->pi_waiters_leftmost = &waiter->pi_tree_entry;

	rb_link_node(&waiter->pi_tree_entry, parent, link);
	rb_insert_color(&waiter->pi_tree_entry, &task->pi_waiters);
}

static void
rt_mutex_dequeue_pi(struct task_struct *task, struct rt_mutex_waiter *waiter)
{
	if (RB_EMPTY_NODE(&waiter->pi_tree_entry))
		return;

	if (task->pi_waiters_leftmost == &waiter->pi_tree_entry)
		task->pi_waiters_leftmost = rb_next(&waiter->pi_tree_entry);

	rb_erase(&waiter->pi_tree_entry, &task->pi_waiters);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
}

static void rt_mutex_adjust_prio(struct task_struct *p)
{
	struct task_struct *pi_task = NULL;

	lockdep_assert_held(&p->pi_lock);

	if (task_has_pi_waiters(p))
		pi_task = task_top_pi_waiter(p)->task;

	rt_mutex_setprio(p, pi_task);
}

/*
 * Deadlock detection is conditional:
 *
 * If CONFIG_DEBUG_RT_MUTEXES=n, deadlock detection is only conducted
 * if the detect argument is == RT_MUTEX_FULL_CHAINWALK.
 *
 * If CONFIG_DEBUG_RT_MUTEXES=y, deadlock detection is always
 * conducted independent of the detect argument.
 *
 * If the waiter argument is NULL this indicates the deboost path and
 * deadlock detection is disabled independent of the detect argument
 * and the config settings.
 */
static bool rt_mutex_cond_detect_deadlock(struct rt_mutex_waiter *waiter,
					  enum rtmutex_chainwalk chwalk)
{
	/*
	 * This is just a wrapper function for the following call,
	 * because debug_rt_mutex_detect_deadlock() smells like a magic
	 * debug feature and I wanted to keep the cond function in the
	 * main source file along with the comments instead of having
	 * two of the same in the headers.
	 */
	return debug_rt_mutex_detect_deadlock(waiter, chwalk);
}

/*
 * Max number of times we'll walk the boosting chain:
 */
int max_lock_depth = 1024;
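
/*
 * This limit is normally tunable at runtime via the kernel.max_lock_depth
 * sysctl (assuming the usual sysctl wiring), so an administrator can raise
 * it if a legitimate lock chain ever exceeds 1024 steps.
 */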

static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
{
	return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
}

/*
 * Adjust the priority chain. Also used for deadlock detection.
 * Decreases task's usage by one - may thus free the task.
 *
 * @task:	the task owning the mutex (owner) for which a chain walk is
 *		probably needed
 * @chwalk:	do we have to carry out deadlock detection?
 * @orig_lock:	the mutex (can be NULL if we are walking the chain to recheck
 *		things for a task that has just got its priority adjusted, and
 *		is waiting on a mutex)
 * @next_lock:	the mutex on which the owner of @orig_lock was blocked before
 *		we dropped its pi_lock. Is never dereferenced, only used for
 *		comparison to detect lock chain changes.
 * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
 *		its priority to the mutex owner (can be NULL in the case
 *		depicted above or if the top waiter is gone away and we are
 *		actually deboosting the owner)
 * @top_task:	the current top waiter
 *
 * Returns 0 or -EDEADLK.
 *
 * Chain walk basics and protection scope
 *
 * [R] refcount on task
 * [P] task->pi_lock held
 * [L] rtmutex->wait_lock held
 *
 * Step	Description				Protected by
 *	function arguments:
 *	@task					[R]
 *	@orig_lock if != NULL			@top_task is blocked on it
 *	@next_lock				Unprotected. Cannot be
 *						dereferenced. Only used for
 *						comparison.
 *	@orig_waiter if != NULL			@top_task is blocked on it
 *	@top_task				current, or in case of proxy
 *						locking protected by calling
 *						code
 *	again:
 *	  loop_sanity_check();
 *	retry:
 * [1]	  lock(task->pi_lock);			[R] acquire [P]
 * [2]	  waiter = task->pi_blocked_on;		[P]
 * [3]	  check_exit_conditions_1();		[P]
 * [4]	  lock = waiter->lock;			[P]
 * [5]	  if (!try_lock(lock->wait_lock)) {	[P] try to acquire [L]
 *	    unlock(task->pi_lock);		release [P]
 *	    goto retry;
 *	  }
 * [6]	  check_exit_conditions_2();		[P] + [L]
 * [7]	  requeue_lock_waiter(lock, waiter);	[P] + [L]
 * [8]	  unlock(task->pi_lock);		release [P]
 *	  put_task_struct(task);		release [R]
 * [9]	  check_exit_conditions_3();		[L]
 * [10]	  task = owner(lock);			[L]
 *	  get_task_struct(task);		[L] acquire [R]
 *	  lock(task->pi_lock);			[L] acquire [P]
 * [11]	  requeue_pi_waiter(tsk, waiters(lock));[P] + [L]
 * [12]	  check_exit_conditions_4();		[P] + [L]
 * [13]	  unlock(task->pi_lock);		release [P]
 *	  unlock(lock->wait_lock);		release [L]
 *	  goto again;
 */
static int rt_mutex_adjust_prio_chain(struct task_struct *task,
				      enum rtmutex_chainwalk chwalk,
				      struct rt_mutex *orig_lock,
				      struct rt_mutex *next_lock,
				      struct rt_mutex_waiter *orig_waiter,
				      struct task_struct *top_task)
{
	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
	struct rt_mutex_waiter *prerequeue_top_waiter;
	int ret = 0, depth = 0;
	struct rt_mutex *lock;
	bool detect_deadlock;
	bool requeue = true;

	detect_deadlock = rt_mutex_cond_detect_deadlock(orig_waiter, chwalk);

	/*
	 * The (de)boosting is a step by step approach with a lot of
	 * pitfalls. We want this to be preemptible and we want to hold a
	 * maximum of two locks per step. So we have to check
	 * carefully whether things change under us.
	 */
 again:
	/*
	 * We limit the lock chain length for each invocation.
	 */
	if (++depth > max_lock_depth) {
		static int prev_max;

		/*
		 * Print this only once. If the admin changes the limit,
		 * print a new message when reaching the limit again.
		 */
		if (prev_max != max_lock_depth) {
			prev_max = max_lock_depth;
			printk(KERN_WARNING "Maximum lock depth %d reached "
			       "task: %s (%d)\n", max_lock_depth,
			       top_task->comm, task_pid_nr(top_task));
		}
		put_task_struct(task);

		return -EDEADLK;
	}

	/*
	 * We are fully preemptible here and only hold the refcount on
	 * @task. So everything can have changed under us since the
	 * caller or our own code below (goto retry/again) dropped all
	 * locks.
	 */
 retry:
	/*
	 * [1] Task cannot go away as we did a get_task() before !
	 */
	raw_spin_lock_irq(&task->pi_lock);

	/*
	 * [2] Get the waiter on which @task is blocked on.
	 */
	waiter = task->pi_blocked_on;

	/*
	 * [3] check_exit_conditions_1() protected by task->pi_lock.
	 */

	/*
	 * Check whether the end of the boosting chain has been
	 * reached or the state of the chain has changed while we
	 * dropped the locks.
	 */
	if (!waiter)
		goto out_unlock_pi;

	/*
	 * Check the orig_waiter state. After we dropped the locks,
	 * the previous owner of the lock might have released the lock.
	 */
	if (orig_waiter && !rt_mutex_owner(orig_lock))
		goto out_unlock_pi;

	/*
	 * We dropped all locks after taking a refcount on @task, so
	 * the task might have moved on in the lock chain or even left
	 * the chain completely and blocks now on an unrelated lock or
	 * on @orig_lock.
	 *
	 * We stored the lock on which @task was blocked in @next_lock,
	 * so we can detect the chain change.
	 */
	if (next_lock != waiter->lock)
		goto out_unlock_pi;

	/*
	 * Drop out, when the task has no waiters. Note,
	 * top_waiter can be NULL, when we are in the deboosting
	 * mode!
	 */
	if (top_waiter) {
		if (!task_has_pi_waiters(task))
			goto out_unlock_pi;
		/*
		 * If deadlock detection is off, we stop here if we
		 * are not the top pi waiter of the task. If deadlock
		 * detection is enabled we continue, but stop the
		 * requeueing in the chain walk.
		 */
		if (top_waiter != task_top_pi_waiter(task)) {
			if (!detect_deadlock)
				goto out_unlock_pi;
			else
				requeue = false;
		}
	}

	/*
	 * If the waiter priority is the same as the task priority
	 * then there is no further priority adjustment necessary.  If
	 * deadlock detection is off, we stop the chain walk. If its
	 * enabled we continue, but stop the requeueing in the chain
	 * walk.
	 */
	if (rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		if (!detect_deadlock)
			goto out_unlock_pi;
		else
			requeue = false;
	}

	/*
	 * [4] Get the next lock
	 */
	lock = waiter->lock;
	/*
	 * [5] We need to trylock here as we are holding task->pi_lock,
	 * which is the reverse lock order versus the other rtmutex
	 * operations.
	 */
	if (!raw_spin_trylock(&lock->wait_lock)) {
		raw_spin_unlock_irq(&task->pi_lock);
		cpu_relax();
		goto retry;
	}

	/*
	 * [6] check_exit_conditions_2() protected by task->pi_lock and
	 * lock->wait_lock.
	 *
	 * Deadlock detection. If the lock is the same as the original
	 * lock which caused us to walk the lock chain or if the
	 * current lock is owned by the task which initiated the chain
	 * walk, we detected a deadlock.
	 */
	if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
		debug_rt_mutex_deadlock(chwalk, orig_waiter, lock);
		raw_spin_unlock(&lock->wait_lock);
		ret = -EDEADLK;
		goto out_unlock_pi;
	}

	/*
	 * If we just follow the lock chain for deadlock detection, no
	 * need to do all the requeue operations. To avoid a truckload
	 * of conditionals around the various places below, just do the
	 * minimum chain walk checks.
	 */
	if (!requeue) {
		/*
		 * No requeue[7] here. Just release @task [8]
		 */
		raw_spin_unlock(&task->pi_lock);
		put_task_struct(task);

		/*
		 * [9] check_exit_conditions_3 protected by lock->wait_lock.
		 * If there is no owner of the lock, end of chain.
		 */
		if (!rt_mutex_owner(lock)) {
			raw_spin_unlock_irq(&lock->wait_lock);
			return 0;
		}

		/* [10] Grab the next task, i.e. owner of @lock */
		task = rt_mutex_owner(lock);
		get_task_struct(task);
		raw_spin_lock(&task->pi_lock);

		/*
		 * No requeue [11] here. We just do deadlock detection.
		 *
		 * [12] Store whether owner is blocked
		 * itself. Decision is made after dropping the locks
		 */
		next_lock = task_blocked_on_lock(task);
		/*
		 * Get the top waiter for the next iteration
		 */
		top_waiter = rt_mutex_top_waiter(lock);

		/* [13] Drop locks */
		raw_spin_unlock(&task->pi_lock);
		raw_spin_unlock_irq(&lock->wait_lock);

		/* If owner is not blocked, end of chain. */
		if (!next_lock)
			goto out_put_task;
		goto again;
	}

	/*
	 * Store the current top waiter before doing the requeue
	 * operation on @lock. We need it for the boost/deboost
	 * decision below.
	 */
	prerequeue_top_waiter = rt_mutex_top_waiter(lock);

	/* [7] Requeue the waiter in the lock waiter tree. */
	rt_mutex_dequeue(lock, waiter);

	/*
	 * Update the waiter prio fields now that we're dequeued.
	 *
	 * These values can have changed through either:
	 *
	 *   sys_sched_set_scheduler() / sys_sched_setattr()
	 *
	 * or
	 *
	 *   DL CBS enforcement advancing the effective deadline.
	 *
	 * Even though pi_waiters also uses these fields, and that tree is only
	 * updated in [11], we can do this here, since we hold [L], which
	 * serializes all pi_waiters access and rb_erase() does not care about
	 * the values of the node being removed.
	 */
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	rt_mutex_enqueue(lock, waiter);

	/* [8] Release the task */
	raw_spin_unlock(&task->pi_lock);
	put_task_struct(task);

	/*
	 * [9] check_exit_conditions_3 protected by lock->wait_lock.
	 *
	 * We must abort the chain walk if there is no lock owner even
	 * in the dead lock detection case, as we have nothing to
	 * follow here. This is the end of the chain we are walking.
	 */
	if (!rt_mutex_owner(lock)) {
		/*
		 * If the requeue [7] above changed the top waiter,
		 * then we need to wake the new top waiter up to try
		 * to get the lock.
		 */
		if (prerequeue_top_waiter != rt_mutex_top_waiter(lock))
			wake_up_process(rt_mutex_top_waiter(lock)->task);
		raw_spin_unlock_irq(&lock->wait_lock);
		return 0;
	}

	/* [10] Grab the next task, i.e. the owner of @lock */
	task = rt_mutex_owner(lock);
	get_task_struct(task);
	raw_spin_lock(&task->pi_lock);

	/* [11] requeue the pi waiters if necessary */
	if (waiter == rt_mutex_top_waiter(lock)) {
		/*
		 * The waiter became the new top (highest priority)
		 * waiter on the lock. Replace the previous top waiter
		 * in the owner tasks pi waiters tree with this waiter
		 * and adjust the priority of the owner.
		 */
		rt_mutex_dequeue_pi(task, prerequeue_top_waiter);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);

	} else if (prerequeue_top_waiter == waiter) {
		/*
		 * The waiter was the top waiter on the lock, but is
		 * no longer the top priority waiter. Replace waiter in
		 * the owner tasks pi waiters tree with the new top
		 * (highest priority) waiter and adjust the priority
		 * of the owner.
		 * The new top waiter is stored in @waiter so that
		 * @waiter == @top_waiter evaluates to true below and
		 * we continue to deboost the rest of the chain.
		 */
		rt_mutex_dequeue_pi(task, waiter);
		waiter = rt_mutex_top_waiter(lock);
		rt_mutex_enqueue_pi(task, waiter);
		rt_mutex_adjust_prio(task);
	} else {
		/*
		 * Nothing changed. No need to do any priority
		 * adjustment.
		 */
	}

	/*
	 * [12] check_exit_conditions_4() protected by task->pi_lock
	 * and lock->wait_lock. The actual decisions are made after we
	 * dropped the locks.
	 *
	 * Check whether the task which owns the current lock is pi
	 * blocked itself. If yes we store a pointer to the lock for
	 * the lock chain change detection above. After we dropped
	 * task->pi_lock next_lock cannot be dereferenced anymore.
	 */
	next_lock = task_blocked_on_lock(task);
	/*
	 * Store the top waiter of @lock for the end of chain walk
	 * decision below.
	 */
	top_waiter = rt_mutex_top_waiter(lock);

	/* [13] Drop the locks */
	raw_spin_unlock(&task->pi_lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	/*
	 * Make the actual exit decisions [12], based on the stored
	 * values.
	 *
	 * We reached the end of the lock chain. Stop right here. No
	 * point to go back just to figure that out.
	 */
	if (!next_lock)
		goto out_put_task;

	/*
	 * If the current waiter is not the top waiter on the lock,
	 * then we can stop the chain walk here if we are not in full
	 * deadlock detection mode.
	 */
	if (!detect_deadlock && waiter != top_waiter)
		goto out_put_task;

	goto again;

 out_unlock_pi:
	raw_spin_unlock_irq(&task->pi_lock);
 out_put_task:
	put_task_struct(task);

	return ret;
}

/*
 * Try to take an rt-mutex
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 *
 * @lock:   The lock to be acquired.
 * @task:   The task which wants to acquire the lock
 * @waiter: The waiter that is queued to the lock's wait tree if the
 *	    callsite called task_blocked_on_lock(), otherwise NULL
 */
static int try_to_take_rt_mutex(struct rt_mutex *lock, struct task_struct *task,
				struct rt_mutex_waiter *waiter)
{
	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Before testing whether we can acquire @lock, we set the
	 * RT_MUTEX_HAS_WAITERS bit in @lock->owner. This forces all
	 * other tasks which try to modify @lock into the slow path
	 * and they serialize on @lock->wait_lock.
	 *
	 * The RT_MUTEX_HAS_WAITERS bit can have a transitional state
	 * as explained at the top of this file if and only if:
	 *
	 * - There is a lock owner. The caller must fixup the
	 *   transient state if it does a trylock or leaves the lock
	 *   function due to a signal or timeout.
	 *
	 * - @task acquires the lock and there are no other
	 *   waiters. This is undone in rt_mutex_set_owner(@task) at
	 *   the end of this function.
	 */
	mark_rt_mutex_waiters(lock);

	/*
	 * If @lock has an owner, give up.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * If @waiter != NULL, @task has already enqueued the waiter
	 * into @lock waiter tree. If @waiter == NULL then this is a
	 * trylock attempt.
	 */
	if (waiter) {
		/*
		 * If waiter is not the highest priority waiter of
		 * @lock, give up.
		 */
		if (waiter != rt_mutex_top_waiter(lock))
			return 0;

		/*
		 * We can acquire the lock. Remove the waiter from the
		 * lock waiters tree.
		 */
		rt_mutex_dequeue(lock, waiter);

	} else {
		/*
		 * If the lock has waiters already we check whether @task is
		 * eligible to take over the lock.
		 *
		 * If there are no other waiters, @task can acquire
		 * the lock.  @task->pi_blocked_on is NULL, so it does
		 * not need to be dequeued.
		 */
		if (rt_mutex_has_waiters(lock)) {
			/*
			 * If @task->prio is greater than or equal to
			 * the top waiter priority (kernel view),
			 * @task lost.
			 */
			if (!rt_mutex_waiter_less(task_to_waiter(task),
						  rt_mutex_top_waiter(lock)))
				return 0;

			/*
			 * The current top waiter stays enqueued. We
			 * don't have to change anything in the lock
			 * waiters order.
			 */
		} else {
			/*
			 * No waiters. Take the lock without the
			 * pi_lock dance. @task->pi_blocked_on is NULL
			 * and we have no waiters to enqueue in @task
			 * pi waiters tree.
			 */
			goto takeit;
		}
	}

	/*
	 * Clear @task->pi_blocked_on. Requires protection by
	 * @task->pi_lock. Redundant operation for the @waiter == NULL
	 * case, but conditionals are more expensive than a redundant
	 * store.
	 */
	raw_spin_lock(&task->pi_lock);
	task->pi_blocked_on = NULL;
	/*
	 * Finish the lock acquisition. @task is the new owner. If
	 * other waiters exist we have to insert the highest priority
	 * waiter into @task->pi_waiters tree.
	 */
	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(task, rt_mutex_top_waiter(lock));
	raw_spin_unlock(&task->pi_lock);

takeit:
	/* We got the lock. */
	debug_rt_mutex_lock(lock);

	/*
	 * This either preserves the RT_MUTEX_HAS_WAITERS bit if there
	 * are still waiters or clears it.
	 */
	rt_mutex_set_owner(lock, task);

	return 1;
}

/*
 * Task blocks on lock.
 *
 * Prepare waiter and propagate pi chain
 *
 * This must be called with lock->wait_lock held and interrupts disabled
 */
static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
				   struct rt_mutex_waiter *waiter,
				   struct task_struct *task,
				   enum rtmutex_chainwalk chwalk)
{
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex_waiter *top_waiter = waiter;
	struct rt_mutex *next_lock;
	int chain_walk = 0, res;

	lockdep_assert_held(&lock->wait_lock);

	/*
	 * Early deadlock detection. We really don't want the task to
	 * enqueue on itself just to untangle the mess later. It's not
	 * only an optimization. We drop the locks, so another waiter
	 * can come in before the chain walk detects the deadlock. So
	 * the other will detect the deadlock and return -EDEADLOCK,
	 * which is wrong, as the other waiter is not in a deadlock
	 * situation.
	 */
	if (owner == task)
		return -EDEADLK;

	raw_spin_lock(&task->pi_lock);
	rt_mutex_adjust_prio(task);
	waiter->task = task;
	waiter->lock = lock;
	waiter->prio = task->prio;
	waiter->deadline = task->dl.deadline;

	/* Get the top priority waiter on the lock */
	if (rt_mutex_has_waiters(lock))
		top_waiter = rt_mutex_top_waiter(lock);
	rt_mutex_enqueue(lock, waiter);

	task->pi_blocked_on = waiter;

	raw_spin_unlock(&task->pi_lock);

	if (!owner)
		return 0;

	raw_spin_lock(&owner->pi_lock);
	if (waiter == rt_mutex_top_waiter(lock)) {
		rt_mutex_dequeue_pi(owner, top_waiter);
		rt_mutex_enqueue_pi(owner, waiter);

		rt_mutex_adjust_prio(owner);
		if (owner->pi_blocked_on)
			chain_walk = 1;
	} else if (rt_mutex_cond_detect_deadlock(waiter, chwalk)) {
		chain_walk = 1;
	}

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);
	/*
	 * Even if full deadlock detection is on, if the owner is not
	 * blocked itself, we can avoid finding this out in the chain
	 * walk.
	 */
	if (!chain_walk || !next_lock)
		return 0;

	/*
	 * The owner can't disappear while holding a lock,
	 * so the owner struct is protected by wait_lock.
	 * Gets dropped in rt_mutex_adjust_prio_chain()!
	 */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	res = rt_mutex_adjust_prio_chain(owner, chwalk, lock,
					 next_lock, waiter, task);

	raw_spin_lock_irq(&lock->wait_lock);

	return res;
}

/*
 * Remove the top waiter from the current tasks pi waiter tree and
 * queue it up.
 *
 * Called with lock->wait_lock held and interrupts disabled.
 */
static void mark_wakeup_next_waiter(struct wake_q_head *wake_q,
				    struct rt_mutex *lock)
{
	struct rt_mutex_waiter *waiter;

	raw_spin_lock(&current->pi_lock);

	waiter = rt_mutex_top_waiter(lock);

	/*
	 * Remove it from current->pi_waiters and deboost.
	 *
	 * We must in fact deboost here in order to ensure we call
	 * rt_mutex_setprio() to update p->pi_top_task before the
	 * task unblocks.
	 */
	rt_mutex_dequeue_pi(current, waiter);
	rt_mutex_adjust_prio(current);

	/*
	 * As we are waking up the top waiter, and the waiter stays
	 * queued on the lock until it gets the lock, this lock
	 * obviously has waiters. Just set the bit here and this has
	 * the added benefit of forcing all new tasks into the
	 * slow path making sure no task of lower priority than
	 * the top waiter can steal this lock.
	 */
	lock->owner = (void *) RT_MUTEX_HAS_WAITERS;

	/*
	 * We deboosted before waking the top waiter task such that we don't
	 * run two tasks with the 'same' priority (and ensure the
	 * p->pi_top_task pointer points to a blocked task). This however can
	 * lead to priority inversion if we would get preempted after the
	 * deboost but before waking our donor task, hence the preempt_disable()
	 * before unlock.
	 *
	 * Pairs with preempt_enable() in rt_mutex_postunlock();
	 */
	preempt_disable();
	wake_q_add(wake_q, waiter->task);
	raw_spin_unlock(&current->pi_lock);
}

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. The
 * caller must have just failed to try_to_take_rt_mutex().
 */
static void remove_waiter(struct rt_mutex *lock,
			  struct rt_mutex_waiter *waiter)
{
	bool is_top_waiter = (waiter == rt_mutex_top_waiter(lock));
	struct task_struct *owner = rt_mutex_owner(lock);
	struct rt_mutex *next_lock;

	lockdep_assert_held(&lock->wait_lock);

	raw_spin_lock(&current->pi_lock);
	rt_mutex_dequeue(lock, waiter);
	current->pi_blocked_on = NULL;
	raw_spin_unlock(&current->pi_lock);

	/*
	 * Only update priority if the waiter was the highest priority
	 * waiter of the lock and there is an owner to update.
	 */
	if (!owner || !is_top_waiter)
		return;

	raw_spin_lock(&owner->pi_lock);

	rt_mutex_dequeue_pi(owner, waiter);

	if (rt_mutex_has_waiters(lock))
		rt_mutex_enqueue_pi(owner, rt_mutex_top_waiter(lock));

	rt_mutex_adjust_prio(owner);

	/* Store the lock on which owner is blocked or NULL */
	next_lock = task_blocked_on_lock(owner);

	raw_spin_unlock(&owner->pi_lock);

	/*
	 * Don't walk the chain, if the owner task is not blocked
	 * itself.
	 */
	if (!next_lock)
		return;

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(owner);

	raw_spin_unlock_irq(&lock->wait_lock);

	rt_mutex_adjust_prio_chain(owner, RT_MUTEX_MIN_CHAINWALK, lock,
				   next_lock, NULL, current);

	raw_spin_lock_irq(&lock->wait_lock);
}

/*
 * Recheck the pi chain, in case we got a priority setting
 *
 * Called from sched_setscheduler
 */
void rt_mutex_adjust_pi(struct task_struct *task)
{
	struct rt_mutex_waiter *waiter;
	struct rt_mutex *next_lock;
	unsigned long flags;

	raw_spin_lock_irqsave(&task->pi_lock, flags);

	waiter = task->pi_blocked_on;
	if (!waiter || rt_mutex_waiter_equal(waiter, task_to_waiter(task))) {
		raw_spin_unlock_irqrestore(&task->pi_lock, flags);
		return;
	}
	next_lock = waiter->lock;
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	/* gets dropped in rt_mutex_adjust_prio_chain()! */
	get_task_struct(task);

	rt_mutex_adjust_prio_chain(task, RT_MUTEX_MIN_CHAINWALK, NULL,
				   next_lock, NULL, task);
}

void rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
{
	debug_rt_mutex_init_waiter(waiter);
	RB_CLEAR_NODE(&waiter->pi_tree_entry);
	RB_CLEAR_NODE(&waiter->tree_entry);
	waiter->task = NULL;
}

/**
 * __rt_mutex_slowlock() - Perform the wait-wake-try-to-take loop
 * @lock:		 the rt_mutex to take
 * @state:		 the state the task should block in (TASK_INTERRUPTIBLE
 *			 or TASK_UNINTERRUPTIBLE)
 * @timeout:		 the pre-initialized and started timer, or NULL for none
 * @waiter:		 the pre-initialized rt_mutex_waiter
 *
 * Must be called with lock->wait_lock held and interrupts disabled
 */
static int __sched
__rt_mutex_slowlock(struct rt_mutex *lock, int state,
		    struct hrtimer_sleeper *timeout,
		    struct rt_mutex_waiter *waiter)
{
	int ret = 0;

	for (;;) {
		/* Try to acquire the lock: */
		if (try_to_take_rt_mutex(lock, current, waiter))
			break;

		/*
		 * TASK_INTERRUPTIBLE checks for signals and
		 * timeout. Ignored otherwise.
		 */
		if (likely(state == TASK_INTERRUPTIBLE)) {
			/* Signal pending? */
			if (signal_pending(current))
				ret = -EINTR;
			if (timeout && !timeout->task)
				ret = -ETIMEDOUT;
			if (ret)
				break;
		}

		raw_spin_unlock_irq(&lock->wait_lock);

		debug_rt_mutex_print_deadlock(waiter);

		schedule();

		raw_spin_lock_irq(&lock->wait_lock);
		set_current_state(state);
	}

	__set_current_state(TASK_RUNNING);
	return ret;
}

static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
				     struct rt_mutex_waiter *w)
{
	/*
	 * If the result is not -EDEADLOCK or the caller requested
	 * deadlock detection, nothing to do here.
	 */
	if (res != -EDEADLOCK || detect_deadlock)
		return;

	/*
	 * Yell loudly and stop the task right here.
	 */
	rt_mutex_print_deadlock(w);
	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
}

/*
 * Slow path lock function:
 */
static int __sched
rt_mutex_slowlock(struct rt_mutex *lock, int state,
		  struct hrtimer_sleeper *timeout,
		  enum rtmutex_chainwalk chwalk)
{
	struct rt_mutex_waiter waiter;
	unsigned long flags;
	int ret = 0;

	rt_mutex_init_waiter(&waiter);

	/*
	 * Technically we could use raw_spin_[un]lock_irq() here, but this can
	 * be called in early boot if the cmpxchg() fast path is disabled
	 * (debug, no architecture support). In this case we will acquire the
	 * rtmutex with lock->wait_lock held. But we cannot unconditionally
	 * enable interrupts in that early boot case. So we need to use the
	 * irqsave/restore variants.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	/* Try to acquire the lock again: */
	if (try_to_take_rt_mutex(lock, current, NULL)) {
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
		return 0;
	}

	set_current_state(state);

	/* Setup the timer, when timeout != NULL */
	if (unlikely(timeout))
		hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);

	ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);

	if (likely(!ret))
		/* sleep on the mutex */
		ret = __rt_mutex_slowlock(lock, state, timeout, &waiter);

	if (unlikely(ret)) {
		__set_current_state(TASK_RUNNING);
		if (rt_mutex_has_waiters(lock))
			remove_waiter(lock, &waiter);
		rt_mutex_handle_deadlock(ret, chwalk, &waiter);
	}

	/*
	 * try_to_take_rt_mutex() sets the waiter bit
	 * unconditionally. We might have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	/* Remove pending timer: */
	if (unlikely(timeout))
		hrtimer_cancel(&timeout->timer);

	debug_rt_mutex_free_waiter(&waiter);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static inline int rt_mutex_slowtrylock(struct rt_mutex *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

/*
 * Slow path to release a rt-mutex.
 *
 * Return whether the current task needs to call rt_mutex_postunlock().
 */
static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
					struct wake_q_head *wake_q)
{
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return false;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(wake_q, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return true; /* call rt_mutex_postunlock() */
}

/*
 * debug aware fast / slowpath lock,trylock,unlock
 *
 * The atomic acquire/release ops are compiled away, when either the
 * architecture does not support cmpxchg or when debugging is enabled.
 */
static inline int
rt_mutex_fastlock(struct rt_mutex *lock, int state,
		  int (*slowfn)(struct rt_mutex *lock, int state,
				struct hrtimer_sleeper *timeout,
				enum rtmutex_chainwalk chwalk))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, NULL, RT_MUTEX_MIN_CHAINWALK);
}

static inline int
rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
			struct hrtimer_sleeper *timeout,
			enum rtmutex_chainwalk chwalk,
			int (*slowfn)(struct rt_mutex *lock, int state,
				      struct hrtimer_sleeper *timeout,
				      enum rtmutex_chainwalk chwalk))
{
	if (chwalk == RT_MUTEX_MIN_CHAINWALK &&
	    likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 0;

	return slowfn(lock, state, timeout, chwalk);
}

static inline int
rt_mutex_fasttrylock(struct rt_mutex *lock,
		     int (*slowfn)(struct rt_mutex *lock))
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return slowfn(lock);
}

/*
 * Performs the wakeup of the top-waiter and re-enables preemption.
 */
void rt_mutex_postunlock(struct wake_q_head *wake_q)
{
	wake_up_q(wake_q);

	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
	preempt_enable();
}

static inline void
rt_mutex_fastunlock(struct rt_mutex *lock,
		    bool (*slowfn)(struct rt_mutex *lock,
				   struct wake_q_head *wqh))
{
	DEFINE_WAKE_Q(wake_q);

	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	if (slowfn(lock, &wake_q))
		rt_mutex_postunlock(&wake_q);
}

/**
 * rt_mutex_lock - lock a rt_mutex
 *
 * @lock: the rt_mutex to be locked
 */
void __sched rt_mutex_lock(struct rt_mutex *lock)
{
	might_sleep();

	rt_mutex_fastlock(lock, TASK_UNINTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock);
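
/*
 * Usage sketch (illustrative only): a typical caller pairs rt_mutex_lock()
 * with rt_mutex_unlock() around a sleepable critical section, e.g.
 *
 *	static DEFINE_RT_MUTEX(example_lock);	// hypothetical lock
 *
 *	rt_mutex_lock(&example_lock);
 *	... critical section, may sleep ...
 *	rt_mutex_unlock(&example_lock);
 *
 * With debugging disabled both calls reduce to a single cmpxchg of
 * lock->owner in the uncontended case.
 */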

/**
 * rt_mutex_lock_interruptible - lock a rt_mutex interruptible
 *
 * @lock:		the rt_mutex to be locked
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 */
int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
{
	might_sleep();

	return rt_mutex_fastlock(lock, TASK_INTERRUPTIBLE, rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);

/*
 * Futex variant, must not use fastpath.
 */
int __sched rt_mutex_futex_trylock(struct rt_mutex *lock)
{
	return rt_mutex_slowtrylock(lock);
}

/**
 * rt_mutex_timed_lock - lock a rt_mutex interruptible
 *			the timeout structure is provided
 *			by the caller
 *
 * @lock:		the rt_mutex to be locked
 * @timeout:		timeout structure or NULL (no timeout)
 *
 * Returns:
 *  0		on success
 * -EINTR	when interrupted by a signal
 * -ETIMEDOUT	when the timeout expired
 */
int
rt_mutex_timed_lock(struct rt_mutex *lock, struct hrtimer_sleeper *timeout)
{
	might_sleep();

	return rt_mutex_timed_fastlock(lock, TASK_INTERRUPTIBLE, timeout,
				       RT_MUTEX_MIN_CHAINWALK,
				       rt_mutex_slowlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
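
/*
 * Usage sketch (illustrative, simplified): callers are expected to prepare
 * the hrtimer_sleeper themselves and only set the expiry time; the timer is
 * started inside rt_mutex_slowlock(). Roughly:
 *
 *	struct hrtimer_sleeper timeout;		// hypothetical local
 *
 *	hrtimer_init_on_stack(&timeout.timer, CLOCK_MONOTONIC,
 *			      HRTIMER_MODE_ABS);
 *	hrtimer_init_sleeper(&timeout, current);
 *	hrtimer_set_expires(&timeout.timer, deadline);
 *
 *	ret = rt_mutex_timed_lock(lock, &timeout);
 *
 *	destroy_hrtimer_on_stack(&timeout.timer);
 */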

/**
 * rt_mutex_trylock - try to lock a rt_mutex
 *
 * @lock:	the rt_mutex to be locked
 *
 * This function can only be called in thread context. It's safe to
 * call it from atomic regions, but not from hard interrupt or soft
 * interrupt context.
 *
 * Returns 1 on success and 0 on contention
 */
int __sched rt_mutex_trylock(struct rt_mutex *lock)
{
	if (WARN_ON_ONCE(in_irq() || in_nmi() || in_serving_softirq()))
		return 0;

	return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
}
EXPORT_SYMBOL_GPL(rt_mutex_trylock);

/**
 * rt_mutex_unlock - unlock a rt_mutex
 *
 * @lock: the rt_mutex to be unlocked
 */
void __sched rt_mutex_unlock(struct rt_mutex *lock)
{
	rt_mutex_fastunlock(lock, rt_mutex_slowunlock);
}
EXPORT_SYMBOL_GPL(rt_mutex_unlock);

/**
 * Futex variant: since futex variants do not use the fast-path, it can be
 * simple and does not need to retry.
 */
bool __sched __rt_mutex_futex_unlock(struct rt_mutex *lock,
				    struct wake_q_head *wake_q)
{
	lockdep_assert_held(&lock->wait_lock);

	debug_rt_mutex_unlock(lock);

	if (!rt_mutex_has_waiters(lock)) {
		lock->owner = NULL;
		return false; /* done */
	}

	/*
	 * We've already deboosted, mark_wakeup_next_waiter() will
	 * retain preempt_disabled when we drop the wait_lock, to
	 * avoid inversion prior to the wakeup.  preempt_disable()
	 * therein pairs with rt_mutex_postunlock().
	 */
	mark_wakeup_next_waiter(wake_q, lock);

	return true; /* call postunlock() */
}

void __sched rt_mutex_futex_unlock(struct rt_mutex *lock)
{
	DEFINE_WAKE_Q(wake_q);
	bool postunlock;

	raw_spin_lock_irq(&lock->wait_lock);
	postunlock = __rt_mutex_futex_unlock(lock, &wake_q);
	raw_spin_unlock_irq(&lock->wait_lock);

	if (postunlock)
		rt_mutex_postunlock(&wake_q);
}

/**
 * rt_mutex_destroy - mark a mutex unusable
 * @lock: the mutex to be destroyed
 *
 * This function marks the mutex uninitialized, and any subsequent
 * use of the mutex is forbidden. The mutex must not be locked when
 * this function is called.
 */
void rt_mutex_destroy(struct rt_mutex *lock)
{
	WARN_ON(rt_mutex_is_locked(lock));
#ifdef CONFIG_DEBUG_RT_MUTEXES
	lock->magic = NULL;
#endif
}

EXPORT_SYMBOL_GPL(rt_mutex_destroy);

/**
 * __rt_mutex_init - initialize the rt lock
 *
 * @lock: the rt lock to be initialized
 *
 * Initialize the rt lock to unlocked state.
 *
 * Initializing of a locked rt lock is not allowed
 */
void __rt_mutex_init(struct rt_mutex *lock, const char *name)
{
	lock->owner = NULL;
	raw_spin_lock_init(&lock->wait_lock);
	lock->waiters = RB_ROOT;
	lock->waiters_leftmost = NULL;

	debug_rt_mutex_init(lock, name);
}
EXPORT_SYMBOL_GPL(__rt_mutex_init);
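
/*
 * Initialization sketch (illustrative only): most users go through the
 * rt_mutex_init() wrapper from <linux/rtmutex.h> rather than calling
 * __rt_mutex_init() directly:
 *
 *	struct rt_mutex example_mutex;		// hypothetical lock
 *
 *	rt_mutex_init(&example_mutex);
 *
 * which typically passes the variable name as the debug name of the lock.
 */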

/**
 * rt_mutex_init_proxy_locked - initialize and lock a rt_mutex on behalf of a
 *				proxy owner
 *
 * @lock:	the rt_mutex to be locked
 * @proxy_owner:the task to set as owner
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This initializes the rtmutex and
 * assigns it to @proxy_owner. Concurrent operations on the rtmutex are not
 * possible at this point because the pi_state which contains the rtmutex
 * is not yet visible to other tasks.
 */
void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
				struct task_struct *proxy_owner)
{
	__rt_mutex_init(lock, NULL);
	debug_rt_mutex_proxy_lock(lock, proxy_owner);
	rt_mutex_set_owner(lock, proxy_owner);
}

/**
 * rt_mutex_proxy_unlock - release a lock on behalf of owner
 *
 * @lock:	the rt_mutex to be released
 *
 * No locking. Caller has to do serializing itself
 *
 * Special API call for PI-futex support. This merrily cleans up the rtmutex
 * (debugging) state. Concurrent operations on this rt_mutex are not
 * possible because it belongs to the pi_state which is about to be freed
 * and it is no longer visible to other tasks.
 */
void rt_mutex_proxy_unlock(struct rt_mutex *lock,
			   struct task_struct *proxy_owner)
{
	debug_rt_mutex_proxy_unlock(lock);
	rt_mutex_set_owner(lock, NULL);
}

int __rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	if (try_to_take_rt_mutex(lock, task, NULL))
		return 1;

	/* We enforce deadlock detection for futexes */
	ret = task_blocks_on_rt_mutex(lock, waiter, task,
				      RT_MUTEX_FULL_CHAINWALK);

	if (ret && !rt_mutex_owner(lock)) {
		/*
		 * Reset the return value. We might have
		 * returned with -EDEADLK and the owner
		 * released the lock while we were walking the
		 * pi chain.  Let the waiter sort it out.
		 */
		ret = 0;
	}

	if (unlikely(ret))
		remove_waiter(lock, waiter);

	debug_rt_mutex_print_deadlock(waiter);

	return ret;
}

/**
 * rt_mutex_start_proxy_lock() - Start lock acquisition for another task
 * @lock:		the rt_mutex to take
 * @waiter:		the pre-initialized rt_mutex_waiter
 * @task:		the task to prepare
 *
 * Returns:
 *  0 - task blocked on lock
 *  1 - acquired the lock for task, caller should wake it up
 * <0 - error
 *
 * Special API call for FUTEX_REQUEUE_PI support.
 */
int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
			      struct rt_mutex_waiter *waiter,
			      struct task_struct *task)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	ret = __rt_mutex_start_proxy_lock(lock, waiter, task);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_next_owner - return the next owner of the lock
 *
 * @lock: the rt lock query
 *
 * Returns the next owner of the lock or NULL
 *
 * Caller has to serialize against other accessors to the lock
 * itself.
 *
 * Special API call for PI-futex support
 */
struct task_struct *rt_mutex_next_owner(struct rt_mutex *lock)
{
	if (!rt_mutex_has_waiters(lock))
		return NULL;

	return rt_mutex_top_waiter(lock)->task;
}

/**
 * rt_mutex_wait_proxy_lock() - Wait for lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @to:			the timeout, null if none. hrtimer should already have
 *			been started.
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Wait for the lock acquisition started on our behalf by
 * rt_mutex_start_proxy_lock(). Upon failure, the caller must call
 * rt_mutex_cleanup_proxy_lock().
 *
 * Returns:
 *  0 - success
 * <0 - error, one of -EINTR, -ETIMEDOUT
 *
 * Special API call for PI-futex support
 */
int rt_mutex_wait_proxy_lock(struct rt_mutex *lock,
			       struct hrtimer_sleeper *to,
			       struct rt_mutex_waiter *waiter)
{
	int ret;

	raw_spin_lock_irq(&lock->wait_lock);
	/* sleep on the mutex */
	set_current_state(TASK_INTERRUPTIBLE);
	ret = __rt_mutex_slowlock(lock, TASK_INTERRUPTIBLE, to, waiter);
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);
	raw_spin_unlock_irq(&lock->wait_lock);

	return ret;
}

/**
 * rt_mutex_cleanup_proxy_lock() - Cleanup failed lock acquisition
 * @lock:		the rt_mutex we were woken on
 * @waiter:		the pre-initialized rt_mutex_waiter
 *
 * Attempt to clean up after a failed rt_mutex_wait_proxy_lock().
 *
 * Unless we acquired the lock; we're still enqueued on the wait-list and can
 * in fact still be granted ownership until we're removed. Therefore we can
 * find we are in fact the owner and must disregard the
 * rt_mutex_wait_proxy_lock() failure.
 *
 * Returns:
 *  true  - did the cleanup, we are done.
 *  false - we acquired the lock after rt_mutex_wait_proxy_lock() returned,
 *          caller should disregard its return value.
 *
 * Special API call for PI-futex support
 */
bool rt_mutex_cleanup_proxy_lock(struct rt_mutex *lock,
				 struct rt_mutex_waiter *waiter)
{
	bool cleanup = false;

	raw_spin_lock_irq(&lock->wait_lock);
	/*
	 * Do an unconditional try-lock, this deals with the lock stealing
	 * state where __rt_mutex_futex_unlock() -> mark_wakeup_next_waiter()
	 * sets a NULL owner.
	 *
	 * We're not interested in the return value, because the subsequent
	 * test on rt_mutex_owner() will infer that. If the trylock succeeded,
	 * we will own the lock and it will have removed the waiter. If we
	 * failed the trylock, we're still not owner and we need to remove
	 * ourselves.
	 */
	try_to_take_rt_mutex(lock, current, waiter);
	/*
	 * Unless we're the owner; we're still enqueued on the wait_list.
	 * So check if we became owner, if not, take us off the wait_list.
	 */
	if (rt_mutex_owner(lock) != current) {
		remove_waiter(lock, waiter);
		cleanup = true;
	}
	/*
	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
	 * have to fix that up.
	 */
	fixup_rt_mutex_waiters(lock);

	raw_spin_unlock_irq(&lock->wait_lock);

	return cleanup;
}