Commit c38425df authored by Guo Ren

csky: Fixup asm/cmpxchg.h with correct ordering barrier

Optimize the performance of cmpxchg by using more fine-grained
acquire/release barriers: drop the full smp_mb() pairs around the
ldex/stex loops to get _relaxed variants, and build the fully
ordered cmpxchg() from a release fence, the relaxed operation, and
an acquire fence.
Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Paul E. McKenney <paulmck@kernel.org>
parent d6c5cb9f
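
For background on the pattern the diff below adopts, here is a minimal
user-space analogy written with C11 atomics rather than the kernel's
primitives; cmpxchg_full() and its parameters are hypothetical names used
for illustration only. It shows how a fully ordered compare-and-swap can be
composed from a relaxed one bracketed by a release fence before and an
acquire fence after, which is the shape the new cmpxchg() macro takes.

#include <stdatomic.h>

/*
 * Illustrative sketch only. atomic_thread_fence(memory_order_release)
 * plays the role of the kernel's __smp_release_fence(), and
 * atomic_thread_fence(memory_order_acquire) that of
 * __smp_acquire_fence(), around a relaxed CAS.
 */
static int cmpxchg_full(atomic_int *ptr, int old, int new)
{
	atomic_thread_fence(memory_order_release);
	/* On failure, "old" is updated to the value actually observed. */
	atomic_compare_exchange_strong_explicit(ptr, &old, new,
			memory_order_relaxed, memory_order_relaxed);
	atomic_thread_fence(memory_order_acquire);
	return old;	/* like the kernel API: return the prior value */
}
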
arch/csky/include/asm/cmpxchg.h

@@ -3,12 +3,12 @@
 #ifndef __ASM_CSKY_CMPXCHG_H
 #define __ASM_CSKY_CMPXCHG_H
 
-#ifdef CONFIG_CPU_HAS_LDSTEX
+#ifdef CONFIG_SMP
 #include <asm/barrier.h>
 
 extern void __bad_xchg(void);
 
-#define __xchg(new, ptr, size)				\
+#define __xchg_relaxed(new, ptr, size)			\
 ({							\
 	__typeof__(ptr) __ptr = (ptr);			\
 	__typeof__(new) __new = (new);			\
@@ -16,7 +16,6 @@ extern void __bad_xchg(void);
 	unsigned long tmp;				\
 	switch (size) {					\
 	case 4:						\
-		smp_mb();				\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
 		"	mov		%1, %2   \n"	\
@@ -25,7 +24,6 @@ extern void __bad_xchg(void);
 		: "=&r" (__ret), "=&r" (tmp)		\
 		: "r" (__new), "r"(__ptr)		\
 		:);					\
-		smp_mb();				\
 		break;					\
 	default:					\
 		__bad_xchg();				\
@@ -33,9 +31,10 @@ extern void __bad_xchg(void);
 	__ret;						\
 })
 
-#define xchg(ptr, x)	(__xchg((x), (ptr), sizeof(*(ptr))))
+#define xchg_relaxed(ptr, x) \
+	(__xchg_relaxed((x), (ptr), sizeof(*(ptr))))
 
-#define __cmpxchg(ptr, old, new, size)			\
+#define __cmpxchg_relaxed(ptr, old, new, size)		\
 ({							\
 	__typeof__(ptr) __ptr = (ptr);			\
 	__typeof__(new) __new = (new);			\
@@ -44,7 +43,6 @@ extern void __bad_xchg(void);
 	__typeof__(*(ptr)) __ret;			\
 	switch (size) {					\
 	case 4:						\
-		smp_mb();				\
 		asm volatile (				\
 		"1:	ldex.w		%0, (%3) \n"	\
 		"	cmpne		%0, %4   \n"	\
@@ -56,7 +54,6 @@ extern void __bad_xchg(void);
 		: "=&r" (__ret), "=&r" (__tmp)		\
 		: "r" (__new), "r"(__ptr), "r"(__old)	\
 		:);					\
-		smp_mb();				\
 		break;					\
 	default:					\
 		__bad_xchg();				\
@@ -64,8 +61,18 @@ extern void __bad_xchg(void);
 	__ret;						\
 })
 
+#define cmpxchg_relaxed(ptr, o, n) \
+	(__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
 #define cmpxchg(ptr, o, n)				\
-	(__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
+({							\
+	__typeof__(*(ptr)) __ret;			\
+	__smp_release_fence();				\
+	__ret = cmpxchg_relaxed(ptr, o, n);		\
+	__smp_acquire_fence();				\
+	__ret;						\
+})
+
 #else
 #include <asm-generic/cmpxchg.h>
 #endif
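
To make the two flavors concrete, a hedged usage sketch follows; my_lock,
my_refcnt, and both functions are hypothetical, but the cmpxchg() and
cmpxchg_relaxed() calls match the macros defined above. A lock acquisition
relies on the full ordering of cmpxchg(), while a retry loop that only needs
atomicity per iteration can use cmpxchg_relaxed() and avoid paying for two
fences on every spin.

/* Hypothetical callers, not part of this commit. */
static unsigned long my_lock;
static unsigned long my_refcnt;

static void my_spin_lock(void)
{
	/*
	 * Full ordering: the acquire fence inside cmpxchg() keeps the
	 * critical section from being reordered above the lock acquisition.
	 */
	while (cmpxchg(&my_lock, 0UL, 1UL) != 0UL)
		;
}

static int my_refcnt_inc_not_zero(void)
{
	unsigned long old = READ_ONCE(my_refcnt);

	while (old != 0) {
		/*
		 * Atomicity is enough here; no ordering is needed on each
		 * retry, so the relaxed flavor skips the fences entirely.
		 */
		unsigned long prev = cmpxchg_relaxed(&my_refcnt, old, old + 1);
		if (prev == old)
			return 1;
		old = prev;
	}
	return 0;
}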