Commit daadb3bd authored by Linus Torvalds

Merge tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking updates from Borislav Petkov:
 "Lots of cleanups and preparation. Highlights:

   - futex: Cleanup and remove runtime futex_cmpxchg detection

   - rtmutex: Some fixes for the PREEMPT_RT locking infrastructure

   - kcsan: Share owner_on_cpu() between mutex, rtmutex and rwsem and
     annotate the racy owner->on_cpu access *once*.

   - atomic64: Dead-Code-Elimination"

[ Description above by Peter Zijlstra ]

* tag 'locking_core_for_v5.17_rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/atomic: atomic64: Remove unusable atomic ops
  futex: Fix additional regressions
  locking: Allow to include asm/spinlock_types.h from linux/spinlock_types_raw.h
  x86/mm: Include spinlock_t definition in pgtable.
  locking: Mark racy reads of owner->on_cpu
  locking: Make owner_on_cpu() into <linux/sched.h>
  lockdep/selftests: Adapt ww-tests for PREEMPT_RT
  lockdep/selftests: Skip the softirq related tests on PREEMPT_RT
  lockdep/selftests: Unbalanced migrate_disable() & rcu_read_lock().
  lockdep/selftests: Avoid using local_lock_{acquire|release}().
  lockdep: Remove softirq accounting on PREEMPT_RT.
  locking/rtmutex: Add rt_mutex_lock_nest_lock() and rt_mutex_lock_killable().
  locking/rtmutex: Squash self-deadlock check for ww_rt_mutex.
  locking: Remove rt_rwlock_is_contended().
  sched: Trigger warning if ->migration_disabled counter underflows.
  futex: Fix sparc32/m68k/nds32 build regression
  futex: Remove futex_cmpxchg detection
  futex: Ensure futex_atomic_cmpxchg_inatomic() is present
  kernel/locking: Use a pointer in ww_mutex_trylock().
parents 6ae71436 f16cc980
@@ -2,7 +2,7 @@
 #ifndef _ALPHA_SPINLOCK_TYPES_H
 #define _ALPHA_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -32,7 +32,6 @@ config ARC
 	select HAVE_ARCH_TRANSPARENT_HUGEPAGE if ARC_MMU_V4
 	select HAVE_DEBUG_STACKOVERFLOW
 	select HAVE_DEBUG_KMEMLEAK
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_IOREMAP_PROT
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZMA
...
@@ -93,7 +93,6 @@ config ARM
 	select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL
 	select HAVE_FUNCTION_GRAPH_TRACER if !THUMB2_KERNEL && !CC_IS_CLANG
 	select HAVE_FUNCTION_TRACER if !XIP_KERNEL && !(THUMB2_KERNEL && CC_IS_CLANG)
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS && (CPU_V6 || CPU_V6K || CPU_V7)
 	select HAVE_IRQ_TIME_ACCOUNTING
...
@@ -2,7 +2,7 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -196,7 +196,6 @@ config ARM64
 	select HAVE_REGS_AND_STACK_ACCESS_API
 	select HAVE_POSIX_CPU_TIMERS_TASK_WORK
 	select HAVE_FUNCTION_ARG_ACCESS_API
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select MMU_GATHER_RCU_TABLE_FREE
 	select HAVE_RSEQ
 	select HAVE_STACKPROTECTOR
...
@@ -5,7 +5,7 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
 # error "please don't include this file directly"
 #endif
...
@@ -52,7 +52,6 @@ config CSKY
 	select HAVE_FUNCTION_TRACER
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_ERROR_INJECTION
-	select HAVE_FUTEX_CMPXCHG if FUTEX && SMP
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
...
@@ -3,7 +3,7 @@
 #ifndef __ASM_CSKY_SPINLOCK_TYPES_H
 #define __ASM_CSKY_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -8,7 +8,7 @@
 #ifndef _ASM_SPINLOCK_TYPES_H
 #define _ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -2,7 +2,7 @@
 #ifndef _ASM_IA64_SPINLOCK_TYPES_H
 #define _ASM_IA64_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -21,7 +21,6 @@ config M68K
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_DEBUG_BUGVERBOSE
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS if !CPU_HAS_NO_UNALIGNED
-	select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
 	select HAVE_MOD_ARCH_SPECIFIC
 	select HAVE_UID16
 	select MMU_GATHER_NO_RANGE if MMU
...
@@ -19,7 +19,11 @@
 #include <asm/sync.h>
 #include <asm/war.h>
 
-#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg)	\
+#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
+#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
+#include <asm-generic/futex.h>
+
+#define __futex_atomic_op(op, insn, ret, oldval, uaddr, oparg)	\
 {								\
 	if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {	\
 		__asm__ __volatile__(				\
@@ -80,8 +84,10 @@
 		: "0" (0), GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oparg),	\
 		  "i" (-EFAULT)					\
 		: "memory");					\
-	} else							\
-		ret = -ENOSYS;					\
+	} else {						\
+		/* fallback for non-SMP */			\
+		ret = futex_atomic_op_inuser_local(op, oparg, oval, uaddr);	\
+	}							\
 }
 
 static inline int
@@ -94,23 +100,23 @@ arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
 	switch (op) {
 	case FUTEX_OP_SET:
-		__futex_atomic_op("move $1, %z5", ret, oldval, uaddr, oparg);
+		__futex_atomic_op(op, "move $1, %z5", ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ADD:
-		__futex_atomic_op("addu $1, %1, %z5",
+		__futex_atomic_op(op, "addu $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_OR:
-		__futex_atomic_op("or $1, %1, %z5",
+		__futex_atomic_op(op, "or $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	case FUTEX_OP_ANDN:
-		__futex_atomic_op("and $1, %1, %z5",
+		__futex_atomic_op(op, "and $1, %1, %z5",
 				ret, oldval, uaddr, ~oparg);
 		break;
 	case FUTEX_OP_XOR:
-		__futex_atomic_op("xor $1, %1, %z5",
+		__futex_atomic_op(op, "xor $1, %1, %z5",
 				ret, oldval, uaddr, oparg);
 		break;
 	default:
@@ -193,8 +199,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 		: GCC_OFF_SMALL_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
 		  "i" (-EFAULT)
 		: "memory");
-	} else
-		return -ENOSYS;
+	} else {
+		return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
+	}
 
 	*uval = val;
 	return ret;
...
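The MIPS hunk above is the template for the new scheme: instead of selecting HAVE_FUTEX_CMPXCHG and reporting -ENOSYS at runtime, an architecture advertises the ops it implements and lets asm-generic/futex.h supply the preemption-based fallbacks for the rest. A minimal sketch of the opt-in pattern (the two #defines and the _local() helpers are from this series; the surrounding header is illustrative, not a real file):

	/* sketch of an arch futex.h -- hypothetical file */
	#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
	#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
	#include <asm-generic/futex.h>	/* maps undefined ops to *_local() on !SMP */

Any op not #defined before the include now resolves to futex_atomic_op_inuser_local() / futex_atomic_cmpxchg_inatomic_local() on uniprocessor configurations rather than failing with -ENOSYS; see the asm-generic/futex.h change further below.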
@@ -2,7 +2,7 @@
 #ifndef _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
 #define _ASM_POWERPC_SIMPLE_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -2,7 +2,7 @@
 #ifndef _ASM_POWERPC_SPINLOCK_TYPES_H
 #define _ASM_POWERPC_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -83,7 +83,6 @@ config RISCV
 	select HAVE_DMA_CONTIGUOUS if MMU
 	select HAVE_EBPF_JIT if MMU
 	select HAVE_FUNCTION_ERROR_INJECTION
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO if MMU && 64BIT
 	select HAVE_IRQ_TIME_ACCOUNTING
...
@@ -6,7 +6,7 @@
 #ifndef _ASM_RISCV_SPINLOCK_TYPES_H
 #define _ASM_RISCV_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -165,7 +165,6 @@ config S390
 	select HAVE_FUNCTION_ERROR_INJECTION
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_GCC_PLUGINS
 	select HAVE_GENERIC_VDSO
 	select HAVE_IOREMAP_PROT if PCI
...
@@ -2,7 +2,7 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -34,7 +34,6 @@ config SUPERH
 	select HAVE_FAST_GUP if MMU
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_FTRACE_MCOUNT_RECORD
 	select HAVE_HW_BREAKPOINT
 	select HAVE_IOREMAP_PROT if MMU && !X2TLB
...
@@ -2,7 +2,7 @@
 #ifndef __ASM_SH_SPINLOCK_TYPES_H
 #define __ASM_SH_SPINLOCK_TYPES_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -14,7 +14,6 @@ config UML
 	select HAVE_ARCH_SECCOMP_FILTER
 	select HAVE_ASM_MODVERSIONS
 	select HAVE_UID16
-	select HAVE_FUTEX_CMPXCHG if FUTEX
 	select HAVE_DEBUG_KMEMLEAK
 	select HAVE_DEBUG_BUGVERBOSE
 	select NO_DMA if !UML_DMA_EMULATION
...
@@ -323,7 +323,6 @@ EXPORT_SYMBOL(arch_futex_atomic_op_inuser);
  * 0 - On success
  * -EFAULT - User access resulted in a page fault
  * -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
...
@@ -31,7 +31,6 @@ config XTENSA
 	select HAVE_DMA_CONTIGUOUS
 	select HAVE_EXIT_THREAD
 	select HAVE_FUNCTION_TRACER
-	select HAVE_FUTEX_CMPXCHG if !MMU && FUTEX
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_PCI
...
@@ -16,6 +16,10 @@
 #include <linux/uaccess.h>
 #include <linux/errno.h>
 
+#define arch_futex_atomic_op_inuser arch_futex_atomic_op_inuser
+#define futex_atomic_cmpxchg_inatomic futex_atomic_cmpxchg_inatomic
+#include <asm-generic/futex.h>
+
 #if XCHAL_HAVE_EXCLUSIVE
 #define __futex_atomic_op(insn, ret, old, uaddr, arg)	\
 	__asm__ __volatile(				\
@@ -105,7 +109,7 @@ static inline int arch_futex_atomic_op_inuser(int op, int oparg, int *oval,
 	return ret;
 #else
-	return -ENOSYS;
+	return futex_atomic_op_inuser_local(op, oparg, oval, uaddr);
 #endif
 }
 
@@ -156,7 +160,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return ret;
 #else
-	return futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval);
 #endif
 }
...
@@ -2,7 +2,7 @@
 #ifndef __ASM_SPINLOCK_TYPES_H
 #define __ASM_SPINLOCK_TYPES_H
 
-#if !defined(__LINUX_SPINLOCK_TYPES_H) && !defined(__ASM_SPINLOCK_H)
+#if !defined(__LINUX_SPINLOCK_TYPES_RAW_H) && !defined(__ASM_SPINLOCK_H)
 # error "please don't include this file directly"
 #endif
...
@@ -6,15 +6,22 @@
 #include <linux/uaccess.h>
 #include <asm/errno.h>
 
+#ifndef futex_atomic_cmpxchg_inatomic
 #ifndef CONFIG_SMP
 /*
  * The following implementation only for uniprocessor machines.
  * It relies on preempt_disable() ensuring mutual exclusion.
  *
 */
+#define futex_atomic_cmpxchg_inatomic(uval, uaddr, oldval, newval)	\
+	futex_atomic_cmpxchg_inatomic_local(uval, uaddr, oldval, newval)
+#define arch_futex_atomic_op_inuser(op, oparg, oval, uaddr)		\
+	futex_atomic_op_inuser_local(op, oparg, oval, uaddr)
+#endif /* CONFIG_SMP */
+#endif
 
 /**
- * arch_futex_atomic_op_inuser() - Atomic arithmetic operation with constant
+ * futex_atomic_op_inuser_local() - Atomic arithmetic operation with constant
  *			  argument and comparison of the previous
  *			  futex value with another constant.
  *
@@ -28,7 +35,7 @@
  * -ENOSYS - Operation not supported
  */
 static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
+futex_atomic_op_inuser_local(int op, u32 oparg, int *oval, u32 __user *uaddr)
 {
 	int oldval, ret;
 	u32 tmp;
@@ -75,7 +82,7 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
 }
 
 /**
- * futex_atomic_cmpxchg_inatomic() - Compare and exchange the content of the
+ * futex_atomic_cmpxchg_inatomic_local() - Compare and exchange the content of the
  *				uaddr with newval if the current value is
  *				oldval.
  * @uval:	pointer to store content of @uaddr
@@ -87,10 +94,9 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
  * 0 - On success
  * -EFAULT - User access resulted in a page fault
  * -EAGAIN - Atomic operation was unable to complete due to contention
- * -ENOSYS - Function not implemented (only if !HAVE_FUTEX_CMPXCHG)
  */
 static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+futex_atomic_cmpxchg_inatomic_local(u32 *uval, u32 __user *uaddr,
 			      u32 oldval, u32 newval)
 {
 	u32 val;
@@ -112,19 +118,4 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 	return 0;
 }
 
-#else
-static inline int
-arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
-{
-	return -ENOSYS;
-}
-
-static inline int
-futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
-			      u32 oldval, u32 newval)
-{
-	return -ENOSYS;
-}
-
-#endif /* CONFIG_SMP */
 #endif
...
@@ -71,14 +71,6 @@ do { \
 do {						\
 	__this_cpu_dec(hardirq_context);	\
 } while (0)
-# define lockdep_softirq_enter()		\
-do {						\
-	current->softirq_context++;		\
-} while (0)
-# define lockdep_softirq_exit()			\
-do {						\
-	current->softirq_context--;		\
-} while (0)
 
 # define lockdep_hrtimer_enter(__hrtimer)	\
 ({						\
@@ -140,6 +132,21 @@ do { \
 # define lockdep_irq_work_exit(__work)		do { } while (0)
 #endif
 
+#if defined(CONFIG_TRACE_IRQFLAGS) && !defined(CONFIG_PREEMPT_RT)
+# define lockdep_softirq_enter()		\
+do {						\
+	current->softirq_context++;		\
+} while (0)
+# define lockdep_softirq_exit()			\
+do {						\
+	current->softirq_context--;		\
+} while (0)
+#else
+# define lockdep_softirq_enter()	do { } while (0)
+# define lockdep_softirq_exit()		do { } while (0)
+#endif
+
 #if defined(CONFIG_IRQSOFF_TRACER) || \
 	defined(CONFIG_PREEMPT_TRACER)
 extern void stop_critical_timings(void);
...
@@ -4,7 +4,7 @@
 
 #include <linux/bits.h>
 #include <linux/param.h>
-#include <linux/spinlock_types.h>
+#include <linux/spinlock_types_raw.h>
 
 #define DEFAULT_RATELIMIT_INTERVAL	(5 * HZ)
 #define DEFAULT_RATELIMIT_BURST		10
...
@@ -99,13 +99,22 @@ extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+extern void _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock);
 #define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#define rt_mutex_lock_nest_lock(lock, nest_lock)			\
+	do {								\
+		typecheck(struct lockdep_map *, &(nest_lock)->dep_map);	\
+		_rt_mutex_lock_nest_lock(lock, &(nest_lock)->dep_map);	\
+	} while (0)
+
 #else
 extern void rt_mutex_lock(struct rt_mutex *lock);
 #define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#define rt_mutex_lock_nest_lock(lock, nest_lock) rt_mutex_lock(lock)
 #endif
 
 extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
+extern int rt_mutex_lock_killable(struct rt_mutex *lock);
 extern int rt_mutex_trylock(struct rt_mutex *lock);
 extern void rt_mutex_unlock(struct rt_mutex *lock);
...
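rt_mutex_lock_nest_lock() gives rt_mutex the same "many locks of one class nested under a single outer lock" annotation that mutex_lock_nest_lock() provides, which the PREEMPT_RT ww-mutex selftests need. A hedged usage sketch (the outer/inner locks and lock_all_inner() are hypothetical, not from this series; any lock with a dep_map works as the nest lock):

	static DEFINE_MUTEX(outer);		/* hypothetical outer lock */
	static struct rt_mutex inner[4];	/* hypothetical inner locks, one class */

	static void lock_all_inner(void)
	{
		int i;

		mutex_lock(&outer);
		/* lockdep: the inner instances are all serialized by 'outer' */
		for (i = 0; i < 4; i++)
			rt_mutex_lock_nest_lock(&inner[i], &outer);
	}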
@@ -2178,6 +2178,15 @@ extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
 #endif
 
 #ifdef CONFIG_SMP
+static inline bool owner_on_cpu(struct task_struct *owner)
+{
+	/*
+	 * As lock holder preemption issue, we both skip spinning if
+	 * task is not on cpu or its cpu is preempted
+	 */
+	return READ_ONCE(owner->on_cpu) && !vcpu_is_preempted(task_cpu(owner));
+}
+
 /* Returns effective CPU energy utilization, as seen by the scheduler */
 unsigned long sched_cpu_util(int cpu, unsigned long max);
 #endif /* CONFIG_SMP */
...
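With owner_on_cpu() in <linux/sched.h>, mutex, rtmutex and rwsem share one helper, and the racy owner->on_cpu load carries its READ_ONCE() annotation exactly once. A simplified sketch of the optimistic-spin decision the callers further below make (condensed from mutex_spin_on_owner(); keep_spinning() is a hypothetical name, not the kernel's loop):

	/* Spin only while the lock owner is actually running; otherwise block. */
	static bool keep_spinning(struct task_struct *owner)
	{
		return owner_on_cpu(owner) && !need_resched();
	}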
@@ -1,7 +1,7 @@
 #ifndef __LINUX_SPINLOCK_TYPES_UP_H
 #define __LINUX_SPINLOCK_TYPES_UP_H
 
-#ifndef __LINUX_SPINLOCK_TYPES_H
+#ifndef __LINUX_SPINLOCK_TYPES_RAW_H
 # error "please don't include this file directly"
 #endif
...
@@ -1579,6 +1579,7 @@ config BASE_FULL
 config FUTEX
 	bool "Enable futex support" if EXPERT
+	depends on !(SPARC32 && SMP)
 	default y
 	imply RT_MUTEXES
 	help
@@ -1591,14 +1592,6 @@ config FUTEX_PI
 	depends on FUTEX && RT_MUTEXES
 	default y
 
-config HAVE_FUTEX_CMPXCHG
-	bool
-	depends on FUTEX
-	help
-	  Architectures should select this if futex_atomic_cmpxchg_inatomic()
-	  is implemented and always working. This removes a couple of runtime
-	  checks.
-
 config EPOLL
 	bool "Enable eventpoll support" if EXPERT
 	default y
...
@@ -41,11 +41,6 @@
 #include "futex.h"
 #include "../locking/rtmutex_common.h"
 
-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
-int __read_mostly futex_cmpxchg_enabled;
-#endif
-
 /*
  * The base of the bucket array and its size are always used together
  * (after initialization only in futex_hash()), so ensure that they
@@ -776,9 +771,6 @@ static void exit_robust_list(struct task_struct *curr)
 	unsigned long futex_offset;
 	int rc;
 
-	if (!futex_cmpxchg_enabled)
-		return;
-
 	/*
 	 * Fetch the list head (which was registered earlier, via
 	 * sys_set_robust_list()):
@@ -874,9 +866,6 @@ static void compat_exit_robust_list(struct task_struct *curr)
 	compat_long_t futex_offset;
 	int rc;
 
-	if (!futex_cmpxchg_enabled)
-		return;
-
 	/*
 	 * Fetch the list head (which was registered earlier, via
 	 * sys_set_robust_list()):
@@ -950,8 +939,6 @@ static void exit_pi_state_list(struct task_struct *curr)
 	struct futex_hash_bucket *hb;
 	union futex_key key = FUTEX_KEY_INIT;
 
-	if (!futex_cmpxchg_enabled)
-		return;
-
 	/*
 	 * We are a ZOMBIE and nobody can enqueue itself on
 	 * pi_state_list anymore, but we have to be careful
@@ -1125,26 +1112,6 @@ void futex_exit_release(struct task_struct *tsk)
 	futex_cleanup_end(tsk, FUTEX_STATE_DEAD);
 }
 
-static void __init futex_detect_cmpxchg(void)
-{
-#ifndef CONFIG_HAVE_FUTEX_CMPXCHG
-	u32 curval;
-
-	/*
-	 * This will fail and we want it. Some arch implementations do
-	 * runtime detection of the futex_atomic_cmpxchg_inatomic()
-	 * functionality. We want to know that before we call in any
-	 * of the complex code paths. Also we want to prevent
-	 * registration of robust lists in that case. NULL is
-	 * guaranteed to fault and we get -EFAULT on functional
-	 * implementation, the non-functional ones will return
-	 * -ENOSYS.
-	 */
-	if (futex_cmpxchg_value_locked(&curval, NULL, 0, 0) == -EFAULT)
-		futex_cmpxchg_enabled = 1;
-#endif
-}
-
 static int __init futex_init(void)
 {
 	unsigned int futex_shift;
@@ -1163,8 +1130,6 @@ static int __init futex_init(void)
 			       futex_hashsize, futex_hashsize);
 	futex_hashsize = 1UL << futex_shift;
 
-	futex_detect_cmpxchg();
-
 	for (i = 0; i < futex_hashsize; i++) {
 		atomic_set(&futex_queues[i].waiters, 0);
 		plist_head_init(&futex_queues[i].chain);
...
@@ -27,12 +27,6 @@
 #define FLAGS_CLOCKRT		0x02
 #define FLAGS_HAS_TIMEOUT	0x04
 
-#ifdef CONFIG_HAVE_FUTEX_CMPXCHG
-#define futex_cmpxchg_enabled 1
-#else
-extern int __read_mostly futex_cmpxchg_enabled;
-#endif
-
 #ifdef CONFIG_FAIL_FUTEX
 extern bool should_fail_futex(bool fshared);
 #else
...
@@ -29,8 +29,6 @@
 SYSCALL_DEFINE2(set_robust_list, struct robust_list_head __user *, head,
 		size_t, len)
 {
-	if (!futex_cmpxchg_enabled)
-		return -ENOSYS;
-
 	/*
 	 * The kernel knows only one size for now:
 	 */
@@ -56,9 +54,6 @@ SYSCALL_DEFINE3(get_robust_list, int, pid,
 	unsigned long ret;
 	struct task_struct *p;
 
-	if (!futex_cmpxchg_enabled)
-		return -ENOSYS;
-
 	rcu_read_lock();
 
 	ret = -ESRCH;
@@ -103,17 +98,6 @@ long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
 		return -ENOSYS;
 	}
 
-	switch (cmd) {
-	case FUTEX_LOCK_PI:
-	case FUTEX_LOCK_PI2:
-	case FUTEX_UNLOCK_PI:
-	case FUTEX_TRYLOCK_PI:
-	case FUTEX_WAIT_REQUEUE_PI:
-	case FUTEX_CMP_REQUEUE_PI:
-		if (!futex_cmpxchg_enabled)
-			return -ENOSYS;
-	}
-
 	switch (cmd) {
 	case FUTEX_WAIT:
 		val3 = FUTEX_BITSET_MATCH_ANY;
@@ -323,9 +307,6 @@ COMPAT_SYSCALL_DEFINE2(set_robust_list,
 		struct compat_robust_list_head __user *, head,
 		compat_size_t, len)
 {
-	if (!futex_cmpxchg_enabled)
-		return -ENOSYS;
-
 	if (unlikely(len != sizeof(*head)))
 		return -EINVAL;
 
@@ -342,9 +323,6 @@ COMPAT_SYSCALL_DEFINE3(get_robust_list, int, pid,
 	unsigned long ret;
 	struct task_struct *p;
 
-	if (!futex_cmpxchg_enabled)
-		return -ENOSYS;
-
 	rcu_read_lock();
 
 	ret = -ESRCH;
...
@@ -5485,6 +5485,7 @@ static noinstr void check_flags(unsigned long flags)
 		}
 	}
 
+#ifndef CONFIG_PREEMPT_RT
 	/*
 	 * We dont accurately track softirq state in e.g.
 	 * hardirq contexts (such as on 4KSTACKS), so only
@@ -5499,6 +5500,7 @@ static noinstr void check_flags(unsigned long flags)
 			DEBUG_LOCKS_WARN_ON(!current->softirqs_enabled);
 		}
 	}
+#endif
 
 	if (!debug_locks)
 		print_irqtrace_events(current);
...
@@ -367,8 +367,7 @@ bool mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner,
 		/*
 		 * Use vcpu_is_preempted to detect lock holder preemption issue.
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched()) {
 			ret = false;
 			break;
 		}
@@ -403,14 +402,8 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)
 	 * structure won't go away during the spinning period.
 	 */
 	owner = __mutex_owner(lock);
-
-	/*
-	 * As lock holder preemption issue, we both skip spinning if task is not
-	 * on cpu or its cpu is preempted
-	 */
 	if (owner)
-		retval = owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
+		retval = owner_on_cpu(owner);
 
 	/*
 	 * If lock->owner is not set, the mutex has been released. Return true
...
@@ -1103,8 +1103,11 @@ static int __sched task_blocks_on_rt_mutex(struct rt_mutex_base *lock,
 	 * the other will detect the deadlock and return -EDEADLOCK,
 	 * which is wrong, as the other waiter is not in a deadlock
 	 * situation.
+	 *
+	 * Except for ww_mutex, in that case the chain walk must already deal
+	 * with spurious cycles, see the comments at [3] and [6].
 	 */
-	if (owner == task)
+	if (owner == task && !(build_ww_mutex() && ww_ctx))
 		return -EDEADLK;
 
 	raw_spin_lock(&task->pi_lock);
@@ -1379,9 +1382,8 @@ static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
 		 *    for CONFIG_PREEMPT_RCU=y)
 		 *  - the VCPU on which owner runs is preempted
 		 */
-		if (!owner->on_cpu || need_resched() ||
-		    !rt_mutex_waiter_is_top_waiter(lock, waiter) ||
-		    vcpu_is_preempted(task_cpu(owner))) {
+		if (!owner_on_cpu(owner) || need_resched() ||
+		    !rt_mutex_waiter_is_top_waiter(lock, waiter)) {
 			res = false;
 			break;
 		}
...
@@ -21,12 +21,13 @@ int max_lock_depth = 1024;
  */
 static __always_inline int __rt_mutex_lock_common(struct rt_mutex *lock,
 						  unsigned int state,
+						  struct lockdep_map *nest_lock,
 						  unsigned int subclass)
 {
 	int ret;
 
 	might_sleep();
-	mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
+	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, _RET_IP_);
 	ret = __rt_mutex_lock(&lock->rtmutex, state);
 	if (ret)
 		mutex_release(&lock->dep_map, _RET_IP_);
@@ -48,10 +49,16 @@ EXPORT_SYMBOL(rt_mutex_base_init);
  */
 void __sched rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass)
 {
-	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
+	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, subclass);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
 
+void __sched _rt_mutex_lock_nest_lock(struct rt_mutex *lock, struct lockdep_map *nest_lock)
+{
+	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, nest_lock, 0);
+}
+EXPORT_SYMBOL_GPL(_rt_mutex_lock_nest_lock);
+
 #else /* !CONFIG_DEBUG_LOCK_ALLOC */
 
 /**
@@ -61,7 +68,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock_nested);
  */
 void __sched rt_mutex_lock(struct rt_mutex *lock)
 {
-	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
+	__rt_mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock);
 #endif
 
@@ -77,10 +84,25 @@ EXPORT_SYMBOL_GPL(rt_mutex_lock);
  */
 int __sched rt_mutex_lock_interruptible(struct rt_mutex *lock)
 {
-	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
+	return __rt_mutex_lock_common(lock, TASK_INTERRUPTIBLE, NULL, 0);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_lock_interruptible);
 
+/**
+ * rt_mutex_lock_killable - lock a rt_mutex killable
+ *
+ * @lock:	the rt_mutex to be locked
+ *
+ * Returns:
+ *  0		on success
+ * -EINTR	when interrupted by a signal
+ */
+int __sched rt_mutex_lock_killable(struct rt_mutex *lock)
+{
+	return __rt_mutex_lock_common(lock, TASK_KILLABLE, NULL, 0);
+}
+EXPORT_SYMBOL_GPL(rt_mutex_lock_killable);
+
 /**
  * rt_mutex_trylock - try to lock a rt_mutex
  *
...
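rt_mutex_lock_killable() rounds out the rt_mutex API so the PREEMPT_RT ww-mutex selftest wrappers further below can substitute it for mutex_lock_killable(). A short usage sketch (do_protected_work() and its lock are hypothetical):

	static int do_protected_work(struct rt_mutex *lock)
	{
		int ret = rt_mutex_lock_killable(lock);

		if (ret)	/* -EINTR: a fatal signal arrived while blocked */
			return ret;
		/* ... critical section ... */
		rt_mutex_unlock(lock);
		return 0;
	}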
@@ -658,15 +658,6 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 	return false;
 }
 
-static inline bool owner_on_cpu(struct task_struct *owner)
-{
-	/*
-	 * As lock holder preemption issue, we both skip spinning if
-	 * task is not on cpu or its cpu is preempted
-	 */
-	return owner->on_cpu && !vcpu_is_preempted(task_cpu(owner));
-}
-
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
 	struct task_struct *owner;
...
@@ -257,12 +257,6 @@ void __sched rt_write_unlock(rwlock_t *rwlock)
 }
 EXPORT_SYMBOL(rt_write_unlock);
 
-int __sched rt_rwlock_is_contended(rwlock_t *rwlock)
-{
-	return rw_base_is_contended(&rwlock->rwbase);
-}
-EXPORT_SYMBOL(rt_rwlock_is_contended);
-
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_rwlock_init(rwlock_t *rwlock, const char *name,
 		      struct lock_class_key *key)
...
@@ -26,7 +26,7 @@ int ww_mutex_trylock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx)
 
 	if (__rt_mutex_trylock(&rtm->rtmutex)) {
 		ww_mutex_set_context_fastpath(lock, ww_ctx);
-		mutex_acquire_nest(&rtm->dep_map, 0, 1, ww_ctx->dep_map, _RET_IP_);
+		mutex_acquire_nest(&rtm->dep_map, 0, 1, &ww_ctx->dep_map, _RET_IP_);
 		return 1;
 	}
...
@@ -2184,6 +2184,9 @@ void migrate_enable(void)
 		return;
 	}
 
+	if (WARN_ON_ONCE(!p->migration_disabled))
+		return;
+
 	/*
 	 * Ensure stop_task runs either before or after this, and that
 	 * __set_cpus_allowed_ptr(SCA_MIGRATE_ENABLE) doesn't schedule().
...
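The new check turns a ->migration_disabled counter underflow into a loud one-time warning instead of silent state corruption. Sketch of the balanced pairing the warning enforces (pinned_section() is a hypothetical caller):

	static void pinned_section(void)
	{
		migrate_disable();
		/* ... work that must stay on this CPU ... */
		migrate_enable();
		/*
		 * A second migrate_enable() here would underflow
		 * p->migration_disabled and now trips the WARN_ON_ONCE().
		 */
	}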
@@ -118,7 +118,6 @@ ATOMIC64_OPS(sub, -=)
 #undef ATOMIC64_OPS
 #define ATOMIC64_OPS(op, c_op)						\
 	ATOMIC64_OP(op, c_op)						\
-	ATOMIC64_OP_RETURN(op, c_op)					\
 	ATOMIC64_FETCH_OP(op, c_op)
 
 ATOMIC64_OPS(and, &=)
@@ -127,7 +126,6 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OPS
 #undef ATOMIC64_FETCH_OP
-#undef ATOMIC64_OP_RETURN
 #undef ATOMIC64_OP
 
 s64 generic_atomic64_dec_if_positive(atomic64_t *v)
...
@@ -26,6 +26,12 @@
 #include <linux/rtmutex.h>
 #include <linux/local_lock.h>
 
+#ifdef CONFIG_PREEMPT_RT
+# define NON_RT(...)
+#else
+# define NON_RT(...)	__VA_ARGS__
+#endif
+
 /*
  * Change this to 1 if you want to see the failure printouts:
 */
@@ -139,7 +145,7 @@ static DEFINE_RT_MUTEX(rtmutex_Z2);
 
 #endif
 
-static local_lock_t local_A = INIT_LOCAL_LOCK(local_A);
+static DEFINE_PER_CPU(local_lock_t, local_A);
 
 /*
  * non-inlined runtime initializers, to let separate locks share
@@ -712,12 +718,18 @@ GENERATE_TESTCASE(ABCDBCDA_rtmutex);
 
 #undef E
 
+#ifdef CONFIG_PREEMPT_RT
+# define RT_PREPARE_DBL_UNLOCK()	{ migrate_disable(); rcu_read_lock(); }
+#else
+# define RT_PREPARE_DBL_UNLOCK()
+#endif
 /*
  * Double unlock:
 */
 #define E()					\
 						\
 	LOCK(A);				\
+	RT_PREPARE_DBL_UNLOCK();		\
 	UNLOCK(A);				\
 	UNLOCK(A); /* fail */
 
@@ -802,6 +814,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_spin)
 
@@ -810,10 +823,12 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe1_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
+#ifndef CONFIG_PREEMPT_RT
 /*
  * Enabling hardirqs with a softirq-safe lock held:
 */
@@ -846,6 +861,8 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2A_rlock)
 #undef E1
 #undef E2
 
+#endif
+
 /*
 * Enabling irqs with an irq-safe lock held:
 */
@@ -875,6 +892,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_spin)
 
@@ -883,6 +901,7 @@ GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_2_EVENTS(irqsafe2B_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
@@ -921,6 +940,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_spin)
 
@@ -929,6 +949,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe3_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
@@ -969,6 +990,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_spin)
 
@@ -977,6 +999,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irqsafe4_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
@@ -1031,6 +1054,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_rlock)
 #include "locking-selftest-wlock-hardirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-spin-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_spin)
 
@@ -1039,6 +1063,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_rlock)
 #include "locking-selftest-wlock-softirq.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_inversion_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
@@ -1206,12 +1231,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
@@ -1252,12 +1279,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion2_soft_wlock)
+#endif
 
 #undef E1
 #undef E2
 
@@ -1306,12 +1335,14 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_rlock)
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_hard_wlock)
 
+#ifndef CONFIG_PREEMPT_RT
 #include "locking-selftest-softirq.h"
 #include "locking-selftest-rlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_rlock)
 
 #include "locking-selftest-wlock.h"
 GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
+#endif
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 # define I_SPINLOCK(x)	lockdep_reset_lock(&lock_##x.dep_map)
@@ -1320,7 +1351,7 @@ GENERATE_PERMUTATIONS_3_EVENTS(irq_read_recursion3_soft_wlock)
 # define I_MUTEX(x)	lockdep_reset_lock(&mutex_##x.dep_map)
 # define I_RWSEM(x)	lockdep_reset_lock(&rwsem_##x.dep_map)
 # define I_WW(x)	lockdep_reset_lock(&x.dep_map)
-# define I_LOCAL_LOCK(x)	lockdep_reset_lock(&local_##x.dep_map)
+# define I_LOCAL_LOCK(x)	lockdep_reset_lock(this_cpu_ptr(&local_##x.dep_map))
 #ifdef CONFIG_RT_MUTEXES
 # define I_RTMUTEX(x)	lockdep_reset_lock(&rtmutex_##x.dep_map)
 #endif
@@ -1380,7 +1411,7 @@ static void reset_locks(void)
 	init_shared_classes();
 	raw_spin_lock_init(&raw_lock_A);
 	raw_spin_lock_init(&raw_lock_B);
-	local_lock_init(&local_A);
+	local_lock_init(this_cpu_ptr(&local_A));
 
 	ww_mutex_init(&o, &ww_lockdep); ww_mutex_init(&o2, &ww_lockdep); ww_mutex_init(&o3, &ww_lockdep);
 	memset(&t, 0, sizeof(t)); memset(&t2, 0, sizeof(t2));
@@ -1398,7 +1429,13 @@ static int unexpected_testcase_failures;
 static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 {
-	unsigned long saved_preempt_count = preempt_count();
+	int saved_preempt_count = preempt_count();
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+	int saved_mgd_count = current->migration_disabled;
+#endif
+	int saved_rcu_count = current->rcu_read_lock_nesting;
+#endif
 
 	WARN_ON(irqs_disabled());
 
@@ -1432,6 +1469,18 @@ static void dotest(void (*testcase_fn)(void), int expected, int lockclass_mask)
 	 * count, so restore it:
 	 */
 	preempt_count_set(saved_preempt_count);
+
+#ifdef CONFIG_PREEMPT_RT
+#ifdef CONFIG_SMP
+	while (current->migration_disabled > saved_mgd_count)
+		migrate_enable();
+#endif
+
+	while (current->rcu_read_lock_nesting > saved_rcu_count)
+		rcu_read_unlock();
+	WARN_ON_ONCE(current->rcu_read_lock_nesting < saved_rcu_count);
+#endif
+
 #ifdef CONFIG_TRACE_IRQFLAGS
 	if (softirq_count())
 		current->softirqs_enabled = 0;
@@ -1499,7 +1548,7 @@ static inline void print_testname(const char *testname)
 #define DO_TESTCASE_2x2RW(desc, name, nr)			\
 	DO_TESTCASE_2RW("hard-"desc, name##_hard, nr)		\
-	DO_TESTCASE_2RW("soft-"desc, name##_soft, nr)		\
+	NON_RT(DO_TESTCASE_2RW("soft-"desc, name##_soft, nr))	\
 
 #define DO_TESTCASE_6x2x2RW(desc, name)				\
 	DO_TESTCASE_2x2RW(desc, name, 123);			\
@@ -1547,19 +1596,19 @@ static inline void print_testname(const char *testname)
 #define DO_TESTCASE_2I(desc, name, nr)				\
 	DO_TESTCASE_1("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_1("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_1("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_2IB(desc, name, nr)				\
 	DO_TESTCASE_1B("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_1B("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_1B("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_6I(desc, name, nr)				\
 	DO_TESTCASE_3("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_3("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_3("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_6IRW(desc, name, nr)			\
 	DO_TESTCASE_3RW("hard-"desc, name##_hard, nr);		\
-	DO_TESTCASE_3RW("soft-"desc, name##_soft, nr);
+	NON_RT(DO_TESTCASE_3RW("soft-"desc, name##_soft, nr));
 
 #define DO_TESTCASE_2x3(desc, name)				\
 	DO_TESTCASE_3(desc, name, 12);				\
@@ -1651,6 +1700,22 @@ static void ww_test_fail_acquire(void)
 #endif
 }
 
+#ifdef CONFIG_PREEMPT_RT
+#define ww_mutex_base_lock(b)			rt_mutex_lock(b)
+#define ww_mutex_base_trylock(b)		rt_mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2)	rt_mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b)	rt_mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b)		rt_mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b)			rt_mutex_unlock(b)
+#else
+#define ww_mutex_base_lock(b)			mutex_lock(b)
+#define ww_mutex_base_trylock(b)		mutex_trylock(b)
+#define ww_mutex_base_lock_nest_lock(b, b2)	mutex_lock_nest_lock(b, b2)
+#define ww_mutex_base_lock_interruptible(b)	mutex_lock_interruptible(b)
+#define ww_mutex_base_lock_killable(b)		mutex_lock_killable(b)
+#define ww_mutex_base_unlock(b)			mutex_unlock(b)
+#endif
+
 static void ww_test_normal(void)
 {
 	int ret;
@@ -1665,50 +1730,50 @@ static void ww_test_normal(void)
 	/* mutex_lock (and indirectly, mutex_lock_nested) */
 	o.ctx = (void *)~0UL;
-	mutex_lock(&o.base);
-	mutex_unlock(&o.base);
+	ww_mutex_base_lock(&o.base);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* mutex_lock_interruptible (and *_nested) */
 	o.ctx = (void *)~0UL;
-	ret = mutex_lock_interruptible(&o.base);
+	ret = ww_mutex_base_lock_interruptible(&o.base);
 	if (!ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* mutex_lock_killable (and *_nested) */
 	o.ctx = (void *)~0UL;
-	ret = mutex_lock_killable(&o.base);
+	ret = ww_mutex_base_lock_killable(&o.base);
 	if (!ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* trylock, succeeding */
 	o.ctx = (void *)~0UL;
-	ret = mutex_trylock(&o.base);
+	ret = ww_mutex_base_trylock(&o.base);
 	WARN_ON(!ret);
 	if (ret)
-		mutex_unlock(&o.base);
+		ww_mutex_base_unlock(&o.base);
 	else
 		WARN_ON(1);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* trylock, failing */
 	o.ctx = (void *)~0UL;
-	mutex_lock(&o.base);
-	ret = mutex_trylock(&o.base);
+	ww_mutex_base_lock(&o.base);
+	ret = ww_mutex_base_trylock(&o.base);
 	WARN_ON(ret);
-	mutex_unlock(&o.base);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 
 	/* nest_lock */
 	o.ctx = (void *)~0UL;
-	mutex_lock_nest_lock(&o.base, &t);
-	mutex_unlock(&o.base);
+	ww_mutex_base_lock_nest_lock(&o.base, &t);
+	ww_mutex_base_unlock(&o.base);
 	WARN_ON(o.ctx != (void *)~0UL);
 }
@@ -1721,7 +1786,7 @@ static void ww_test_two_contexts(void)
 static void ww_test_diff_class(void)
 {
 	WWAI(&t);
-#ifdef CONFIG_DEBUG_MUTEXES
+#ifdef DEBUG_WW_MUTEXES
 	t.ww_class = NULL;
 #endif
 	WWL(&o, &t);
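[ Note: DEBUG_WW_MUTEXES replaces the direct CONFIG_DEBUG_MUTEXES test
  because on PREEMPT_RT the relevant debug option is
  CONFIG_DEBUG_RT_MUTEXES instead. The helper is presumably defined
  earlier in this file along these lines (a sketch, not quoted from the
  patch): ]

	#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_DEBUG_RT_MUTEXES)
	# define DEBUG_WW_MUTEXES
	#endif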
@@ -1785,7 +1850,7 @@ static void ww_test_edeadlk_normal(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	o2.ctx = &t2;
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
@@ -1801,7 +1866,7 @@ static void ww_test_edeadlk_normal(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 	WWU(&o);
 
 	WWL(&o2, &t);
@@ -1811,7 +1876,7 @@ static void ww_test_edeadlk_normal_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -1827,7 +1892,7 @@ static void ww_test_edeadlk_normal_slow(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 	WWU(&o);
 
 	ww_mutex_lock_slow(&o2, &t);
@@ -1837,7 +1902,7 @@ static void ww_test_edeadlk_no_unlock(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	o2.ctx = &t2;
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
@@ -1853,7 +1918,7 @@ static void ww_test_edeadlk_no_unlock(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 
 	WWL(&o2, &t);
 }
@@ -1862,7 +1927,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -1878,7 +1943,7 @@ static void ww_test_edeadlk_no_unlock_slow(void)
 	o2.ctx = NULL;
 	mutex_acquire(&o2.base.dep_map, 0, 1, _THIS_IP_);
-	mutex_unlock(&o2.base);
+	ww_mutex_base_unlock(&o2.base);
 
 	ww_mutex_lock_slow(&o2, &t);
 }
@@ -1887,7 +1952,7 @@ static void ww_test_edeadlk_acquire_more(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -1908,7 +1973,7 @@ static void ww_test_edeadlk_acquire_more_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -1929,11 +1994,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
-	mutex_lock(&o3.base);
+	ww_mutex_base_lock(&o3.base);
 	mutex_release(&o3.base.dep_map, _THIS_IP_);
 	o3.ctx = &t2;
@@ -1955,11 +2020,11 @@ static void ww_test_edeadlk_acquire_more_edeadlk_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
 
-	mutex_lock(&o3.base);
+	ww_mutex_base_lock(&o3.base);
 	mutex_release(&o3.base.dep_map, _THIS_IP_);
 	o3.ctx = &t2;
@@ -1980,7 +2045,7 @@ static void ww_test_edeadlk_acquire_wrong(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -2005,7 +2070,7 @@ static void ww_test_edeadlk_acquire_wrong_slow(void)
 {
 	int ret;
 
-	mutex_lock(&o2.base);
+	ww_mutex_base_lock(&o2.base);
 	mutex_release(&o2.base.dep_map, _THIS_IP_);
 	o2.ctx = &t2;
@@ -2646,8 +2711,8 @@ static void wait_context_tests(void)
 static void local_lock_2(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
-	local_lock_release(&local_A);
+	local_lock(&local_A);		/* IRQ-ON */
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);		/* IN-IRQ */
@@ -2656,18 +2721,18 @@ static void local_lock_2(void)
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle, false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);		/* IN-IRQ <-> IRQ-ON cycle, false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 	HARDIRQ_ENABLE();
 }
 
 static void local_lock_3A(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
+	local_lock(&local_A);		/* IRQ-ON */
 	spin_lock(&lock_B);		/* IRQ-ON */
 	spin_unlock(&lock_B);
-	local_lock_release(&local_A);
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);		/* IN-IRQ */
@@ -2676,18 +2741,18 @@ static void local_lock_3A(void)
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);		/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 	HARDIRQ_ENABLE();
 }
 
 static void local_lock_3B(void)
 {
-	local_lock_acquire(&local_A);	/* IRQ-ON */
+	local_lock(&local_A);		/* IRQ-ON */
 	spin_lock(&lock_B);		/* IRQ-ON */
 	spin_unlock(&lock_B);
-	local_lock_release(&local_A);
+	local_unlock(&local_A);
 
 	HARDIRQ_ENTER();
 	spin_lock(&lock_A);		/* IN-IRQ */
@@ -2696,8 +2761,8 @@ static void local_lock_3B(void)
 	HARDIRQ_DISABLE();
 	spin_lock(&lock_A);
-	local_lock_acquire(&local_A);	/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
-	local_lock_release(&local_A);
+	local_lock(&local_A);		/* IN-IRQ <-> IRQ-ON cycle only if we count local_lock(), false */
+	local_unlock(&local_A);
 	spin_unlock(&lock_A);
 	HARDIRQ_ENABLE();
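[ Note: local_lock_acquire()/local_lock_release() are internal helpers
  that only operate on the lockdep map; the real local_lock()/
  local_unlock() API is needed here because on PREEMPT_RT a
  local_lock_t is a per-CPU spinlock_t rather than a preempt-disabled
  section. A self-contained sketch of the API these tests now use,
  with sketch_lock as a hypothetical lock name: ]

	#include <linux/local_lock.h>

	static DEFINE_PER_CPU(local_lock_t, sketch_lock) =
		INIT_LOCAL_LOCK(sketch_lock);

	static void local_lock_sketch(void)
	{
		local_lock(&sketch_lock);	/* per-CPU critical section */
		/* ... access this-CPU data here ... */
		local_unlock(&sketch_lock);
	}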
@@ -2812,7 +2877,7 @@ void locking_selftest(void)
 	printk("------------------------\n");
 	printk("| Locking API testsuite:\n");
 	printk("----------------------------------------------------------------------------\n");
-	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |\n");
+	printk("                                 | spin |wlock |rlock |mutex | wsem | rsem |rtmutex\n");
 	printk("  --------------------------------------------------------------------------\n");
 
 	init_shared_classes();
@@ -2885,12 +2950,11 @@ void locking_selftest(void)
 	DO_TESTCASE_6x1RR("rlock W1R2/R2R3/W3W1", W1R2_R2R3_W3W1);
 
 	printk("  --------------------------------------------------------------------------\n");
-
 	/*
 	 * irq-context testcases:
 	 */
 	DO_TESTCASE_2x6("irqs-on + irq-safe-A", irqsafe1);
-	DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A);
+	NON_RT(DO_TESTCASE_2x3("sirq-safe-A => hirqs-on", irqsafe2A));
 	DO_TESTCASE_2x6("safe-A + irqs-on", irqsafe2B);
 	DO_TESTCASE_6x6("safe-A + unsafe-B #1", irqsafe3);
 	DO_TESTCASE_6x6("safe-A + unsafe-B #2", irqsafe4);
...