Commit 200b812d authored by Catalin Marinas

Clear the exclusive monitor when returning from an exception

The patch adds a CLREX or dummy STREX to the exception return path. This
is needed because several atomic/locking operations use a pair of
LDREX/STREXEQ and the EQ condition may not always be satisfied. This
would leave the exclusive monitor status set and may cause problems with
atomic/locking operations in the interrupted code.
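To make the failure mode concrete, here is a sketch modelled on the kernel's ARMv6 atomic_cmpxchg() (the exact routine in the tree may differ slightly; atomic_t and the constraints are the ones used by the ARM atomic helpers). When the TEQ finds the values unequal, the conditional STREXEQ is skipped, so the LDREX leaves the local exclusive monitor set when the routine returns — exactly the state the CLREX/dummy-STREX on exception return is meant to clean up.

/* Sketch of an LDREX/STREXEQ user (based on the ARMv6 atomic_cmpxchg). */
static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
	unsigned long oldval, res;

	do {
		__asm__ __volatile__("@ atomic_cmpxchg\n"
		"ldrex	%1, [%2]\n"		/* marks the local monitor exclusive */
		"mov	%0, #0\n"
		"teq	%1, %3\n"
		"strexeq %0, %4, [%2]\n"	/* skipped when the values differ ... */
		    : "=&r" (res), "=&r" (oldval)
		    : "r" (&ptr->counter), "Ir" (old), "r" (new)
		    : "cc");
	} while (res);

	return oldval;			/* ... so the monitor may still be set here */
}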

With this patch, the atomic_set() operation can be a simple STR
instruction (on SMP systems, the global exclusive monitor is cleared by
STR anyway). Clearing the exclusive monitor during context switch is no
longer needed as this is handled by the exception return path anyway.
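For reference, the two atomic_set() variants touched by the hunks below, placed side by side. Both bodies are taken from the diff; only the comments are added here, and the rationale is the one given in this commit message.

/* Old ARMv6 version (removed below): the ldrex/strex loop made the store
 * take part in the exclusive-monitor protocol instead of bypassing it. */
static inline void atomic_set(atomic_t *v, int i)
{
	unsigned long tmp;

	__asm__ __volatile__("@ atomic_set\n"
"1:	ldrex	%0, [%1]\n"
"	strex	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&v->counter), "r" (i)
	: "cc");
}

/* New version (added below): a plain assignment, i.e. a single STR. This is
 * safe because every exception return now executes CLREX or a dummy STREX,
 * and on SMP an ordinary STR already clears the global exclusive monitor. */
#define atomic_set(v,i)	(((v)->counter) = (i))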
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
Reported-by: Jamie Lokier <jamie@shareable.org>
parent df58bee2
@@ -19,31 +19,21 @@
 
 #ifdef __KERNEL__
 
+/*
+ * On ARM, ordinary assignment (str instruction) doesn't clear the local
+ * strex/ldrex monitor on some implementations. The reason we can use it for
+ * atomic_set() is the clrex or dummy strex done on every exception return.
+ */
 #define atomic_read(v)	((v)->counter)
+#define atomic_set(v,i)	(((v)->counter) = (i))
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 /*
  * ARMv6 UP and SMP safe atomic ops.  We use load exclusive and
  * store exclusive to ensure that these are atomic.  We may loop
- * to ensure that the update happens.  Writing to 'v->counter'
- * without using the following operations WILL break the atomic
- * nature of these ops.
+ * to ensure that the update happens.
  */
-static inline void atomic_set(atomic_t *v, int i)
-{
-	unsigned long tmp;
-
-	__asm__ __volatile__("@ atomic_set\n"
-"1:	ldrex	%0, [%1]\n"
-"	strex	%0, %2, [%1]\n"
-"	teq	%0, #0\n"
-"	bne	1b"
-	: "=&r" (tmp)
-	: "r" (&v->counter), "r" (i)
-	: "cc");
-}
-
 static inline void atomic_add(int i, atomic_t *v)
 {
 	unsigned long tmp;
@@ -163,8 +153,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #error SMP not supported on pre-ARMv6 CPUs
 #endif
 
-#define atomic_set(v,i)	(((v)->counter) = (i))
-
 static inline int atomic_add_return(int i, atomic_t *v)
 {
 	unsigned long flags;
...
@@ -734,13 +734,6 @@ ENTRY(__switch_to)
 #ifdef CONFIG_MMU
 	ldr	r6, [r2, #TI_CPU_DOMAIN]
 #endif
-#if __LINUX_ARM_ARCH__ >= 6
-#ifdef CONFIG_CPU_32v6K
-	clrex
-#else
-	strex	r5, r4, [ip]			@ Clear exclusive monitor
-#endif
-#endif
 #if defined(CONFIG_HAS_TLS_REG)
 	mcr	p15, 0, r3, c13, c0, 3		@ set TLS register
 #elif !defined(CONFIG_TLS_REG_EMUL)
...
@@ -76,13 +76,25 @@
 #ifndef CONFIG_THUMB2_KERNEL
 	.macro	svc_exit, rpsr
 	msr	spsr_cxsf, \rpsr
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
 	ldmia	sp, {r0 - pc}^			@ load r0 - pc, cpsr
+#elif defined (CONFIG_CPU_V6)
+	ldr	r0, [sp]
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr
+#endif
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr
 	ldr	lr, [sp, #\offset + S_PC]!	@ get pc
 	msr	spsr_cxsf, r1			@ save in spsr_svc
+#if defined(CONFIG_CPU_32v6K)
+	clrex					@ clear the exclusive monitor
+#elif defined (CONFIG_CPU_V6)
+	strex	r1, r2, [sp]			@ clear the exclusive monitor
+#endif
 	.if	\fast
 	ldmdb	sp, {r1 - lr}^			@ get calling r1 - lr
 	.else
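The two branches added above mirror what the hardware offers: CLREX only exists from ARMv6K onwards, so a plain ARMv6 kernel (CONFIG_CPU_V6) falls back to a dummy STREX. Below is a re-annotated copy of the non-Thumb2 svc_exit fallback from the hunk above; the code is unchanged, and the comments (including the reading of why the scratch stack slot is safe to use) are an added interpretation, not part of the patch.

#elif defined (CONFIG_CPU_V6)
	ldr	r0, [sp]			@ consume the saved r0 first, so ...
	strex	r1, r2, [sp]			@ ... the dummy STREX can clear the
						@ local monitor; if it does store,
						@ it only clobbers the already-read
						@ slot at [sp]
	ldmib	sp, {r1 - pc}^			@ load r1 - pc, cpsr from sp + 4 up
#endif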
@@ -98,6 +110,7 @@
 	.endm
 #else	/* CONFIG_THUMB2_KERNEL */
 	.macro	svc_exit, rpsr
+	clrex					@ clear the exclusive monitor
 	ldr	r0, [sp, #S_SP]			@ top of the stack
 	ldr	r1, [sp, #S_PC]			@ return address
 	tst	r0, #4				@ orig stack 8-byte aligned?
@@ -110,6 +123,7 @@
 	.endm
 
 	.macro	restore_user_regs, fast = 0, offset = 0
+	clrex					@ clear the exclusive monitor
 	mov	r2, sp
 	load_user_sp_lr r2, r3, \offset + S_SP	@ calling sp, lr
 	ldr	r1, [sp, #\offset + S_PSR]	@ get calling cpsr