Commit 1f8083c6 authored by Linus Torvalds

Merge branch 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Ingo Molnar:
 "Misc fixes: lockstat fix, futex fix on !MMU systems, big endian fix
  for qrwlocks and a race fix for pvqspinlocks"

* 'locking-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/pvqspinlock: Fix a bug in qstat_read()
  locking/pvqspinlock: Fix double hash race
  locking/qrwlock: Fix write unlock bug on big endian systems
  futex: Assume all mappings are private on !MMU systems
parents 25db6918 c2ace36b
@@ -25,7 +25,20 @@
 #include <asm-generic/qrwlock_types.h>
 
 /*
- * Writer states & reader shift and bias
+ * Writer states & reader shift and bias.
+ *
+ *       | +0 | +1 | +2 | +3 |
+ *   ----+----+----+----+----+
+ *    LE | 78 | 56 | 34 | 12 | 0x12345678
+ *   ----+----+----+----+----+
+ *       | wr |      rd      |
+ *       +----+----+----+----+
+ *
+ *   ----+----+----+----+----+
+ *    BE | 12 | 34 | 56 | 78 | 0x12345678
+ *   ----+----+----+----+----+
+ *       |      rd      | wr |
+ *       +----+----+----+----+
 */
 #define _QW_WAITING	1	/* A writer is waiting */
 #define _QW_LOCKED	0xff	/* A writer holds the lock */
@@ -133,13 +146,23 @@ static inline void queued_read_unlock(struct qrwlock *lock)
 	(void)atomic_sub_return_release(_QR_BIAS, &lock->cnts);
 }
 
+/**
+ * __qrwlock_write_byte - retrieve the write byte address of a queue rwlock
+ * @lock : Pointer to queue rwlock structure
+ * Return: the write byte address of a queue rwlock
+ */
+static inline u8 *__qrwlock_write_byte(struct qrwlock *lock)
+{
+	return (u8 *)lock + 3 * IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN);
+}
+
 /**
  * queued_write_unlock - release write lock of a queue rwlock
  * @lock : Pointer to queue rwlock structure
  */
 static inline void queued_write_unlock(struct qrwlock *lock)
 {
-	smp_store_release((u8 *)&lock->cnts, 0);
+	smp_store_release(__qrwlock_write_byte(lock), 0);
 }
 
 /*
...
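The qrwlock keeps the writer state in the least significant byte of the 32-bit count word, so the old byte-0 store in queued_write_unlock() only releases the lock on little-endian machines; on big endian it touches a reader-count byte and leaves the writer byte set. A minimal userspace sketch of the effect (standard C stdint types stand in for the kernel's u8/u32, and a runtime probe stands in for IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN); this is not the kernel code itself):

#include <stdio.h>
#include <stdint.h>

#define _QW_LOCKED	0xffu	/* writer-held value, as in the header above */

/* Runtime endianness probe standing in for IS_BUILTIN(CONFIG_CPU_BIG_ENDIAN). */
static int is_big_endian(void)
{
	const uint32_t probe = 1;
	return *(const uint8_t *)&probe == 0;
}

/* Mirrors the new helper: the writer byte is byte 0 on LE, byte 3 on BE. */
static uint8_t *write_byte(uint32_t *cnts)
{
	return (uint8_t *)cnts + 3 * is_big_endian();
}

int main(void)
{
	uint32_t cnts = _QW_LOCKED;	/* writer holds the lock, no readers */

	*write_byte(&cnts) = 0;		/* the fixed unlock store */
	printf("fixed unlock: 0x%08x\n", (unsigned)cnts);	/* 0 on LE and BE */

	cnts = _QW_LOCKED;
	*(uint8_t *)&cnts = 0;		/* the old byte-0 store */
	printf("old unlock:   0x%08x\n", (unsigned)cnts);	/* stays 0xff on BE */
	return 0;
}

On a little-endian build both stores clear the lock word; on big endian the old store hits the high byte of the reader count instead, so the lock byte stays 0xff and the lock is never released.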
@@ -179,7 +179,15 @@ int __read_mostly futex_cmpxchg_enabled;
  * Futex flags used to encode options to functions and preserve them across
  * restarts.
  */
-#define FLAGS_SHARED		0x01
+#ifdef CONFIG_MMU
+# define FLAGS_SHARED		0x01
+#else
+/*
+ * NOMMU does not have per process address space. Let the compiler optimize
+ * code away.
+ */
+# define FLAGS_SHARED		0x00
+#endif
 #define FLAGS_CLOCKRT		0x02
 #define FLAGS_HAS_TIMEOUT	0x04
@@ -405,6 +413,16 @@ static void get_futex_key_refs(union futex_key *key)
 	if (!key->both.ptr)
 		return;
 
+	/*
+	 * On MMU less systems futexes are always "private" as there is no per
+	 * process address space. We need the smp wmb nevertheless - yes,
+	 * arch/blackfin has MMU less SMP ...
+	 */
+	if (!IS_ENABLED(CONFIG_MMU)) {
+		smp_mb(); /* explicit smp_mb(); (B) */
+		return;
+	}
+
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
 		ihold(key->shared.inode); /* implies smp_mb(); (B) */
@@ -436,6 +454,9 @@ static void drop_futex_key_refs(union futex_key *key)
 		return;
 	}
 
+	if (!IS_ENABLED(CONFIG_MMU))
+		return;
+
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
 		iput(key->shared.inode);
...
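The futex hunks above make the !CONFIG_MMU case explicit: without per-process address spaces every futex is effectively private, so FLAGS_SHARED is defined to 0x00 and the shared-key reference paths return early. Because the flag is then a compile-time constant zero, any branch testing it folds away; a rough sketch of that effect (the helper name below is invented for illustration, not a kernel function):

#include <stdio.h>

/* Sketch only: FLAGS_SHARED as defined for !CONFIG_MMU. Any branch guarded
 * by (flags & FLAGS_SHARED) folds to if (0), so the compiler can discard the
 * shared-futex handling entirely on NOMMU builds. */
#define FLAGS_SHARED	0x00
#define FLAGS_CLOCKRT	0x02

static int key_needs_shared_refs(unsigned int flags)
{
	if (flags & FLAGS_SHARED)	/* constant false when FLAGS_SHARED == 0 */
		return 1;		/* shared-mapping path: dead code */
	return 0;			/* private futexes are all that exist */
}

int main(void)
{
	printf("shared refs needed: %d\n", key_needs_shared_refs(FLAGS_CLOCKRT));
	return 0;
}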
@@ -450,7 +450,7 @@ pv_wait_head_or_lock(struct qspinlock *lock, struct mcs_spinlock *node)
 				goto gotlock;
 			}
 		}
-		WRITE_ONCE(pn->state, vcpu_halted);
+		WRITE_ONCE(pn->state, vcpu_hashed);
 		qstat_inc(qstat_pv_wait_head, true);
 		qstat_inc(qstat_pv_wait_again, waitcnt);
 		pv_wait(&l->locked, _Q_SLOW_VAL);
...
@@ -153,7 +153,6 @@ static ssize_t qstat_read(struct file *file, char __user *user_buf,
 	 */
 	if ((counter == qstat_pv_latency_kick) ||
 	    (counter == qstat_pv_latency_wake)) {
-		stat = 0;
 		if (kicks)
 			stat = DIV_ROUND_CLOSEST_ULL(stat, kicks);
 	}
...
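The qstat_read() hunk above removes the line that zeroed stat immediately before it was divided by the kick count, which made the latency-kick and latency-wake statistics always read back 0 instead of the average latency. A small standalone illustration (DIV_ROUND_CLOSEST_ULL is re-declared here as a plain round-to-nearest division rather than pulled from kernel headers):

#include <stdio.h>

/* Stand-in for the kernel macro: divide, rounding to the nearest integer. */
#define DIV_ROUND_CLOSEST_ULL(x, d)	(((x) + (d) / 2) / (d))

int main(void)
{
	unsigned long long stat = 123456;	/* accumulated latency sum */
	unsigned long long kicks = 100;		/* number of vCPU kicks */
	unsigned long long old_avg, new_avg;

	/* Old code path: stat was zeroed right before the division. */
	old_avg = 0;
	if (kicks)
		old_avg = DIV_ROUND_CLOSEST_ULL(old_avg, kicks);

	/* Fixed code path: the accumulated value is what gets averaged. */
	new_avg = stat;
	if (kicks)
		new_avg = DIV_ROUND_CLOSEST_ULL(new_avg, kicks);

	printf("old: %llu  fixed: %llu\n", old_avg, new_avg);	/* old: 0, fixed: 1235 */
	return 0;
}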