Commit f252ffd5 authored by David Daney, committed by Ralf Baechle

MIPS: New macro smp_mb__before_llsc.

Replace some instances of smp_llsc_mb() with a new macro
smp_mb__before_llsc().  It is used before ll/sc sequences that are
documented as needing write barrier semantics.

The default implementation of smp_mb__before_llsc() is just smp_llsc_mb(),
so there are no changes in semantics.

Also simplify the definitions of smp_mb(), smp_rmb(), and smp_wmb() to be
just barrier() in the non-SMP case.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
To: linux-mips@linux-mips.org
Patchwork: http://patchwork.linux-mips.org/patch/851/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent ec5380c7
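
For context, a minimal sketch (not part of this commit; the function and helper names below are hypothetical) of the calling pattern the new macro is meant for:

/* Hypothetical caller, assuming do_llsc_rmw() is an ll/sc read-modify-write
 * loop that by itself provides no ordering against other accesses. */
static inline unsigned long ordered_rmw(unsigned long *p, unsigned long v)
{
	unsigned long old;

	smp_mb__before_llsc();		/* order earlier writes before the ll */
	old = do_llsc_rmw(p, v);	/* hypothetical ll/sc update loop */
	smp_llsc_mb();			/* order the sc before later accesses */

	return old;
}

With the default definition added to barrier.h below, smp_mb__before_llsc() expands to smp_llsc_mb(), so no generated code changes; the new name only documents that write-barrier semantics are required before the ll/sc sequence.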
arch/mips/include/asm/atomic.h

@@ -137,7 +137,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -189,7 +189,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -249,7 +249,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
 	int result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		int temp;
@@ -516,7 +516,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -568,7 +568,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -628,7 +628,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
 	long result;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		long temp;
@@ -788,9 +788,9 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()	smp_llsc_mb()
+#define smp_mb__before_atomic_dec()	smp_mb__before_llsc()
 #define smp_mb__after_atomic_dec()	smp_llsc_mb()
-#define smp_mb__before_atomic_inc()	smp_llsc_mb()
+#define smp_mb__before_atomic_inc()	smp_mb__before_llsc()
 #define smp_mb__after_atomic_inc()	smp_llsc_mb()
 
 #include <asm-generic/atomic-long.h>
...
arch/mips/include/asm/barrier.h

@@ -131,23 +131,26 @@
 #endif /* !CONFIG_CPU_HAS_WB */
 
 #if defined(CONFIG_WEAK_ORDERING) && defined(CONFIG_SMP)
-#define __WEAK_ORDERING_MB	"	sync	\n"
+#define smp_mb()	__asm__ __volatile__("sync" : : :"memory")
+#define smp_rmb()	__asm__ __volatile__("sync" : : :"memory")
+#define smp_wmb()	__asm__ __volatile__("sync" : : :"memory")
 #else
-#define __WEAK_ORDERING_MB	"		\n"
+#define smp_mb()	barrier()
+#define smp_rmb()	barrier()
+#define smp_wmb()	barrier()
 #endif
+
 #if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
 #define __WEAK_LLSC_MB		"	sync	\n"
 #else
 #define __WEAK_LLSC_MB		"		\n"
 #endif
 
-#define smp_mb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_rmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-#define smp_wmb()	__asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
-
 #define set_mb(var, value) \
 	do { var = value; smp_mb(); } while (0)
 
 #define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
+#define smp_mb__before_llsc() smp_llsc_mb()
+
 #endif /* __ASM_BARRIER_H */
arch/mips/include/asm/bitops.h

@@ -42,7 +42,7 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()	smp_llsc_mb()
+#define smp_mb__before_clear_bit()	smp_mb__before_llsc()
 #define smp_mb__after_clear_bit()	smp_llsc_mb()
 
 /*
@@ -258,7 +258,7 @@ static inline int test_and_set_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -395,7 +395,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -485,7 +485,7 @@ static inline int test_and_change_bit(unsigned long nr,
 	unsigned short bit = nr & SZLONG_MASK;
 	unsigned long res;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
...
arch/mips/include/asm/cmpxchg.h

@@ -72,14 +72,14 @@
  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __cmpxchg(ptr, old, new, barrier)				\
+#define __cmpxchg(ptr, old, new, pre_barrier, post_barrier)		\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(*(ptr)) __old = (old);				\
 	__typeof__(*(ptr)) __new = (new);				\
 	__typeof__(*(ptr)) __res = 0;					\
 									\
-	barrier;							\
+	pre_barrier;							\
 									\
 	switch (sizeof(*(__ptr))) {					\
 	case 4:								\
@@ -96,13 +96,13 @@ extern void __cmpxchg_called_with_bad_pointer(void);
 		break;							\
 	}								\
 									\
-	barrier;							\
+	post_barrier;							\
 									\
 	__res;								\
 })
 
-#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_llsc_mb())
-#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, )
+#define cmpxchg(ptr, old, new)		__cmpxchg(ptr, old, new, smp_mb__before_llsc(), smp_llsc_mb())
+#define cmpxchg_local(ptr, old, new)	__cmpxchg(ptr, old, new, , )
 
 #define cmpxchg64(ptr, o, n)						\
 ({									\
...
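
As an aside, a hedged illustration (not part of the commit; the call site below is hypothetical) of what the pre/post barrier split in __cmpxchg() provides:

/* Hypothetical call site: cmpxchg() now emits smp_mb__before_llsc() before
 * the ll and smp_llsc_mb() after the sc, so the exchange is fully ordered.
 * cmpxchg_local() passes empty barrier arguments and emits neither. */
static inline int claim_token(unsigned long *token)
{
	return cmpxchg(token, 0UL, 1UL) == 0UL;
}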
arch/mips/include/asm/spinlock.h

@@ -138,7 +138,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	int tmp;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__ (
@@ -305,7 +305,7 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned int tmp;
 
-	smp_llsc_mb();
+	smp_mb__before_llsc();
 
 	if (R10000_LLSC_WAR) {
 		__asm__ __volatile__(
...
arch/mips/include/asm/system.h

@@ -95,6 +95,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 {
 	__u32 retval;
 
+	smp_mb__before_llsc();
+
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long dummy;
@@ -147,6 +149,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 {
 	__u64 retval;
 
+	smp_mb__before_llsc();
+
 	if (kernel_uses_llsc && R10000_LLSC_WAR) {
 		unsigned long dummy;
...