Commit 2180f214 authored by Linus Torvalds

Merge branch 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Ingo Molnar:
 "Just a handful of changes in this cycle: an ARM64 performance
  optimization, a comment fix and a debug output fix"

* 'locking-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/osq: Use optimized spinning loop for arm64
  locking/qspinlock: Fix inaccessible URL of MCS lock paper
  locking/lockdep: Fix lockdep_stats indentation problem
parents 634cd4b6 f5bfdc8e
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -11,4 +11,13 @@
 /* See include/linux/spinlock.h */
 #define smp_mb__after_spinlock()	smp_mb()
 
+/*
+ * Changing this will break osq_lock() thanks to the call inside
+ * smp_cond_load_relaxed().
+ *
+ * See:
+ * https://lore.kernel.org/lkml/20200110100612.GC2827@hirez.programming.kicks-ass.net
+ */
+#define vcpu_is_preempted(cpu)	false
+
 #endif /* __ASM_SPINLOCK_H */
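
The comment above matters because the condition expression passed to smp_cond_load_relaxed() is re-evaluated on every spin iteration. A simplified sketch of the generic fallback in include/asm-generic/barrier.h (not part of this merge, shown only for context):

	#define smp_cond_load_relaxed(ptr, cond_expr) ({	\
		typeof(ptr) __PTR = (ptr);			\
		typeof(*ptr) VAL;				\
		for (;;) {					\
			/* cond_expr runs on every pass */	\
			VAL = READ_ONCE(*__PTR);		\
			if (cond_expr)				\
				break;				\
			cpu_relax();				\
		}						\
		VAL;						\
	})

With vcpu_is_preempted() defined as a constant false, the compiler can fold it out of cond_expr, leaving a condition that arm64 can service with its event-based wait rather than active polling.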
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -286,9 +286,9 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, " stack-trace entries:           %11lu [max: %lu]\n",
 			nr_stack_trace_entries, MAX_STACK_TRACE_ENTRIES);
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
-	seq_printf(m, " number of stack traces:        %llu\n",
+	seq_printf(m, " number of stack traces:        %11llu\n",
 		   lockdep_stack_trace_count());
-	seq_printf(m, " number of stack hash chains:   %llu\n",
+	seq_printf(m, " number of stack hash chains:   %11llu\n",
 		   lockdep_stack_hash_count());
 #endif
 	seq_printf(m, " combined max dependencies:     %11u\n",
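
The effect of adding the field width (illustrated with made-up counts): without it, the two counters printed under CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING sit flush left after their labels,

	 number of stack traces:        4321
	 number of stack hash chains:   987

while with %11llu they right-align in the same column as the surrounding %11lu/%11u fields of /proc/lockdep_stats:

	 number of stack traces:               4321
	 number of stack hash chains:           987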
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -134,20 +134,17 @@ bool osq_lock(struct optimistic_spin_queue *lock)
 	 * cmpxchg in an attempt to undo our queueing.
 	 */
 
-	while (!READ_ONCE(node->locked)) {
-		/*
-		 * If we need to reschedule bail... so we can block.
-		 * Use vcpu_is_preempted() to avoid waiting for a preempted
-		 * lock holder:
-		 */
-		if (need_resched() || vcpu_is_preempted(node_cpu(node->prev)))
-			goto unqueue;
-
-		cpu_relax();
-	}
-	return true;
+	/*
+	 * Wait to acquire the lock or cancelation. Note that need_resched()
+	 * will come with an IPI, which will wake smp_cond_load_relaxed() if it
+	 * is implemented with a monitor-wait. vcpu_is_preempted() relies on
+	 * polling, be careful.
+	 */
+	if (smp_cond_load_relaxed(&node->locked, VAL || need_resched() ||
+				  vcpu_is_preempted(node_cpu(node->prev))))
+		return true;
 
-unqueue:
+	/* unqueue */
 	/*
 	 * Step - A -- stabilize @prev
 	 *
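
The "monitor-wait" remark in the new comment is the point of the patch on arm64: there, smp_cond_load_relaxed() is built on an exclusive-monitor load plus WFE, so the waiter sleeps until the watched cacheline is written or an event such as the need_resched() IPI arrives. A simplified sketch of the arm64 flavour (see arch/arm64/include/asm/barrier.h; details elided here):

	#define smp_cond_load_relaxed(ptr, cond_expr)		\
	({							\
		typeof(ptr) __PTR = (ptr);			\
		typeof(*ptr) VAL;				\
		for (;;) {					\
			VAL = READ_ONCE(*__PTR);		\
			if (cond_expr)				\
				break;				\
			/* park in WFE until *ptr changes	\
			 * or an event/interrupt fires */	\
			__cmpwait_relaxed(__PTR, VAL);		\
		}						\
		VAL;						\
	})

Because the wakeup is event-driven rather than polled, anything in cond_expr that itself needs active polling (a non-trivial vcpu_is_preempted(), for instance) could stall here, which is exactly what the comment warns about.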
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -31,14 +31,15 @@
 /*
  * The basic principle of a queue-based spinlock can best be understood
  * by studying a classic queue-based spinlock implementation called the
- * MCS lock. The paper below provides a good description for this kind
- * of lock.
+ * MCS lock. A copy of the original MCS lock paper ("Algorithms for Scalable
+ * Synchronization on Shared-Memory Multiprocessors by Mellor-Crummey and
+ * Scott") is available at
  *
- * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+ * https://bugzilla.kernel.org/show_bug.cgi?id=206115
  *
- * This queued spinlock implementation is based on the MCS lock, however to make
- * it fit the 4 bytes we assume spinlock_t to be, and preserve its existing
- * API, we must modify it somehow.
+ * This queued spinlock implementation is based on the MCS lock, however to
+ * make it fit the 4 bytes we assume spinlock_t to be, and preserve its
+ * existing API, we must modify it somehow.
  *
  * In particular; where the traditional MCS lock consists of a tail pointer
  * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
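
For readers who cannot reach the paper, a minimal MCS lock sketch (illustrative only, kernel-style primitives, memory-ordering details elided; this is not the qspinlock code itself):

	struct mcs_node {
		struct mcs_node *next;
		int locked;
	};

	static void mcs_lock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *prev;

		node->next = NULL;
		node->locked = 0;

		/* Atomically swing the tail to us; xchg() returns the old tail. */
		prev = xchg(tail, node);
		if (prev) {
			/* Publish ourselves to the predecessor... */
			WRITE_ONCE(prev->next, node);
			/* ...and spin on our *own* cacheline, not a shared word. */
			while (!READ_ONCE(node->locked))
				cpu_relax();
		}
	}

	static void mcs_unlock(struct mcs_node **tail, struct mcs_node *node)
	{
		struct mcs_node *next = READ_ONCE(node->next);

		if (!next) {
			/* No visible successor: try to reset an empty queue. */
			if (cmpxchg(tail, node, NULL) == node)
				return;
			/* Lost the race: a successor is mid-enqueue, wait for it. */
			while (!(next = READ_ONCE(node->next)))
				cpu_relax();
		}
		/* Hand the lock to the next waiter. */
		WRITE_ONCE(next->locked, 1);
	}

The tail pointer plus per-CPU queue node is what the comment above says qspinlock must compress into the 4 bytes assumed for spinlock_t.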