Commit 7837314d authored by Ralf Baechle

MIPS: Get rid of branches to .subsections.

It was a nice optimization, on paper at least.  In practice it results in
branches that may exceed the maximum legal range for a branch.  We can
fight that problem with -ffunction-sections, but -ffunction-sections in turn
is incompatible with -pg as used by the function tracer.

By rewriting the retry loop around all simple LL/SC blocks in C we reduce the
amount of inline assembler and at the same time allow GCC to fill the
branch delay slots with something sensible, or to apply whatever other clever
optimization it may have up its sleeve.
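
To make the pattern concrete, here is a minimal before/after sketch modelled
on the atomic_add() hunk below.  The _old/_new suffixes exist only for this
illustration, and atomic_t and unlikely() are assumed to be the usual kernel
definitions; the real diff follows.

    /* Before: the retry branch is hidden in .subsection 2, entirely in asm. */
    static __inline__ void atomic_add_old(int i, atomic_t *v)
    {
        int temp;

        __asm__ __volatile__(
        "   .set mips3                  \n"
        "1: ll   %0, %1  # atomic_add   \n"
        "   addu %0, %2                 \n"
        "   sc   %0, %1                 \n"
        "   beqz %0, 2f                 \n"
        "   .subsection 2               \n"
        "2: b    1b                     \n"
        "   .previous                   \n"
        "   .set mips0                  \n"
        : "=&r" (temp), "=m" (v->counter)
        : "Ir" (i), "m" (v->counter));
    }

    /* After: the retry loop is plain C, visible to GCC. */
    static __inline__ void atomic_add_new(int i, atomic_t *v)
    {
        int temp;

        do {
            __asm__ __volatile__(
            "   .set mips3                  \n"
            "   ll   %0, %1  # atomic_add   \n"
            "   addu %0, %2                 \n"
            "   sc   %0, %1                 \n"
            "   .set mips0                  \n"
            : "=&r" (temp), "=m" (v->counter)
            : "Ir" (i), "m" (v->counter));
        } while (unlikely(!temp));
    }

Since sc writes its success flag back into temp, a zero temp means the store
failed and the C loop retries; because that loop is ordinary C, GCC can
schedule it and fill the branch delay slot itself.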

With this optimization gone we also no longer need -ffunction-sections,
so drop it.

This optimization was originally introduced in 2.6.21, as commit
5999eca25c1fd4b9b9aca7833b04d10fe4bc877d on linux-mips.org and
f65e4fa8 on kernel.org.

The original fix for the issues that caused me to pull this optimization was
provided by Paul Gortmaker <paul.gortmaker@windriver.com>.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 18cb657c
@@ -48,9 +48,6 @@ ifneq ($(SUBARCH),$(ARCH))
 endif
 endif
-ifndef CONFIG_FUNCTION_TRACER
-cflags-y := -ffunction-sections
-endif
 ifdef CONFIG_FUNCTION_GRAPH_TRACER
 ifndef KBUILD_MCOUNT_RA_ADDRESS
 ifeq ($(call cc-option-yn,-mmcount-ra-address), y)
......
@@ -64,18 +64,16 @@ static __inline__ void atomic_add(int i, atomic_t * v)
 } else if (kernel_uses_llsc) {
 int temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: ll %0, %1 # atomic_add \n"
+" ll %0, %1 # atomic_add \n"
 " addu %0, %2 \n"
 " sc %0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter));
+} while (unlikely(!temp));
 } else {
 unsigned long flags;
@@ -109,18 +107,16 @@ static __inline__ void atomic_sub(int i, atomic_t * v)
 } else if (kernel_uses_llsc) {
 int temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: ll %0, %1 # atomic_sub \n"
+" ll %0, %1 # atomic_sub \n"
 " subu %0, %2 \n"
 " sc %0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter));
+} while (unlikely(!temp));
 } else {
 unsigned long flags;
@@ -156,20 +152,19 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 } else if (kernel_uses_llsc) {
 int temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: ll %1, %2 # atomic_add_return \n"
+" ll %1, %2 # atomic_add_return \n"
 " addu %0, %1, %3 \n"
 " sc %0, %2 \n"
-" beqz %0, 2f \n"
-" addu %0, %1, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter)
 : "memory");
+} while (unlikely(!result));
+result = temp + i;
 } else {
 unsigned long flags;
@@ -205,23 +200,24 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter)
 : "memory");
+result = temp - i;
 } else if (kernel_uses_llsc) {
 int temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: ll %1, %2 # atomic_sub_return \n"
+" ll %1, %2 # atomic_sub_return \n"
 " subu %0, %1, %3 \n"
 " sc %0, %2 \n"
-" beqz %0, 2f \n"
-" subu %0, %1, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter)
 : "memory");
+} while (unlikely(!result));
+result = temp - i;
 } else {
 unsigned long flags;
@@ -279,12 +275,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 " bltz %0, 1f \n"
 " sc %0, %2 \n"
 " .set noreorder \n"
-" beqz %0, 2f \n"
+" beqz %0, 1b \n"
 " subu %0, %1, %3 \n"
 " .set reorder \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 "1: \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
@@ -443,18 +436,16 @@ static __inline__ void atomic64_add(long i, atomic64_t * v)
 } else if (kernel_uses_llsc) {
 long temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: lld %0, %1 # atomic64_add \n"
+" lld %0, %1 # atomic64_add \n"
 " daddu %0, %2 \n"
 " scd %0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter));
+} while (unlikely(!temp));
 } else {
 unsigned long flags;
@@ -488,18 +479,16 @@ static __inline__ void atomic64_sub(long i, atomic64_t * v)
 } else if (kernel_uses_llsc) {
 long temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: lld %0, %1 # atomic64_sub \n"
+" lld %0, %1 # atomic64_sub \n"
 " dsubu %0, %2 \n"
 " scd %0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter));
+} while (unlikely(!temp));
 } else {
 unsigned long flags;
@@ -535,20 +524,19 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 } else if (kernel_uses_llsc) {
 long temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: lld %1, %2 # atomic64_add_return \n"
+" lld %1, %2 # atomic64_add_return \n"
 " daddu %0, %1, %3 \n"
 " scd %0, %2 \n"
-" beqz %0, 2f \n"
-" daddu %0, %1, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter)
 : "memory");
+} while (unlikely(!result));
+result = temp + i;
 } else {
 unsigned long flags;
@@ -587,20 +575,19 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 } else if (kernel_uses_llsc) {
 long temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: lld %1, %2 # atomic64_sub_return \n"
+" lld %1, %2 # atomic64_sub_return \n"
 " dsubu %0, %1, %3 \n"
 " scd %0, %2 \n"
-" beqz %0, 2f \n"
-" dsubu %0, %1, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
 : "Ir" (i), "m" (v->counter)
 : "memory");
+} while (unlikely(!result));
+result = temp - i;
 } else {
 unsigned long flags;
@@ -658,12 +645,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 " bltz %0, 1f \n"
 " scd %0, %2 \n"
 " .set noreorder \n"
-" beqz %0, 2f \n"
+" beqz %0, 1b \n"
 " dsubu %0, %1, %3 \n"
 " .set reorder \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 "1: \n"
 " .set mips0 \n"
 : "=&r" (result), "=&r" (temp), "=m" (v->counter)
......
@@ -73,30 +73,26 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 : "ir" (1UL << bit), "m" (*m));
 #ifdef CONFIG_CPU_MIPSR2
 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+do {
 __asm__ __volatile__(
-"1: " __LL "%0, %1 # set_bit \n"
-" " __INS "%0, %4, %2, 1 \n"
+" " __LL "%0, %1 # set_bit \n"
+" " __INS "%0, %3, %2, 1 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (bit), "m" (*m), "r" (~0));
+: "=&r" (temp), "+m" (*m)
+: "ir" (bit), "r" (~0));
+} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
 } else if (kernel_uses_llsc) {
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: " __LL "%0, %1 # set_bit \n"
+" " __LL "%0, %1 # set_bit \n"
 " or %0, %2 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (1UL << bit), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (1UL << bit));
+} while (unlikely(!temp));
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -134,34 +130,30 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 " " __SC "%0, %1 \n"
 " beqzl %0, 1b \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (~(1UL << bit)), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (~(1UL << bit)));
 #ifdef CONFIG_CPU_MIPSR2
 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
+do {
 __asm__ __volatile__(
-"1: " __LL "%0, %1 # clear_bit \n"
+" " __LL "%0, %1 # clear_bit \n"
 " " __INS "%0, $0, %2, 1 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (bit), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (bit));
+} while (unlikely(!temp));
 #endif /* CONFIG_CPU_MIPSR2 */
 } else if (kernel_uses_llsc) {
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: " __LL "%0, %1 # clear_bit \n"
+" " __LL "%0, %1 # clear_bit \n"
 " and %0, %2 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (~(1UL << bit)), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (~(1UL << bit)));
+} while (unlikely(!temp));
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -213,24 +205,22 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 " " __SC "%0, %1 \n"
 " beqzl %0, 1b \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (1UL << bit), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (1UL << bit));
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: " __LL "%0, %1 # change_bit \n"
+" " __LL "%0, %1 # change_bit \n"
 " xor %0, %2 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m)
-: "ir" (1UL << bit), "m" (*m));
+: "=&r" (temp), "+m" (*m)
+: "ir" (1UL << bit));
+} while (unlikely(!temp));
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -272,30 +262,26 @@ static inline int test_and_set_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
-" .set push \n"
-" .set noreorder \n"
 " .set mips3 \n"
-"1: " __LL "%0, %1 # test_and_set_bit \n"
+" " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
-" beqz %2, 2f \n"
-" and %2, %0, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" nop \n"
-" .previous \n"
-" .set pop \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+" .set mips0 \n"
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
+} while (unlikely(!res));
+res = temp & (1UL << bit);
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -340,30 +326,26 @@ static inline int test_and_set_bit_lock(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
-" .set push \n"
-" .set noreorder \n"
 " .set mips3 \n"
-"1: " __LL "%0, %1 # test_and_set_bit \n"
+" " __LL "%0, %1 # test_and_set_bit \n"
 " or %2, %0, %3 \n"
 " " __SC "%2, %1 \n"
-" beqz %2, 2f \n"
-" and %2, %0, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" nop \n"
-" .previous \n"
-" .set pop \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+" .set mips0 \n"
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
+} while (unlikely(!res));
+res = temp & (1UL << bit);
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -410,49 +392,43 @@ static inline int test_and_clear_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
 #ifdef CONFIG_CPU_MIPSR2
 } else if (kernel_uses_llsc && __builtin_constant_p(nr)) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
-"1: " __LL "%0, %1 # test_and_clear_bit \n"
+" " __LL "%0, %1 # test_and_clear_bit \n"
 " " __EXT "%2, %0, %3, 1 \n"
 " " __INS "%0, $0, %3, 1 \n"
 " " __SC "%0, %1 \n"
-" beqz %0, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "ir" (bit), "m" (*m)
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "ir" (bit)
 : "memory");
+} while (unlikely(!temp));
 #endif
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
-" .set push \n"
-" .set noreorder \n"
 " .set mips3 \n"
-"1: " __LL "%0, %1 # test_and_clear_bit \n"
+" " __LL "%0, %1 # test_and_clear_bit \n"
 " or %2, %0, %3 \n"
 " xor %2, %3 \n"
 " " __SC "%2, %1 \n"
-" beqz %2, 2f \n"
-" and %2, %0, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" nop \n"
-" .previous \n"
-" .set pop \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+" .set mips0 \n"
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
+} while (unlikely(!res));
+res = temp & (1UL << bit);
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
@@ -499,30 +475,26 @@ static inline int test_and_change_bit(unsigned long nr,
 " beqzl %2, 1b \n"
 " and %2, %0, %3 \n"
 " .set mips0 \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
 } else if (kernel_uses_llsc) {
 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
 unsigned long temp;
+do {
 __asm__ __volatile__(
-" .set push \n"
-" .set noreorder \n"
 " .set mips3 \n"
-"1: " __LL "%0, %1 # test_and_change_bit \n"
+" " __LL "%0, %1 # test_and_change_bit \n"
 " xor %2, %0, %3 \n"
 " " __SC "\t%2, %1 \n"
-" beqz %2, 2f \n"
-" and %2, %0, %3 \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" nop \n"
-" .previous \n"
-" .set pop \n"
-: "=&r" (temp), "=m" (*m), "=&r" (res)
-: "r" (1UL << bit), "m" (*m)
+" .set mips0 \n"
+: "=&r" (temp), "+m" (*m), "=&r" (res)
+: "r" (1UL << bit)
 : "memory");
+} while (unlikely(!res));
+res = temp & (1UL << bit);
 } else {
 volatile unsigned long *a = addr;
 unsigned long mask;
......
@@ -44,12 +44,9 @@
 " move $1, %z4 \n" \
 " .set mips3 \n" \
 " " st " $1, %1 \n" \
-" beqz $1, 3f \n" \
-"2: \n" \
-" .subsection 2 \n" \
-"3: b 1b \n" \
-" .previous \n" \
+" beqz $1, 1b \n" \
 " .set pop \n" \
+"2: \n" \
 : "=&r" (__ret), "=R" (*m) \
 : "R" (*m), "Jr" (old), "Jr" (new) \
 : "memory"); \
......
@@ -115,21 +115,19 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
 } else if (kernel_uses_llsc) {
 unsigned long dummy;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: ll %0, %3 # xchg_u32 \n"
+" ll %0, %3 # xchg_u32 \n"
 " .set mips0 \n"
 " move %2, %z4 \n"
 " .set mips3 \n"
 " sc %2, %1 \n"
-" beqz %2, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
 : "R" (*m), "Jr" (val)
 : "memory");
+} while (unlikely(!dummy));
 } else {
 unsigned long flags;
@@ -167,19 +165,17 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
 } else if (kernel_uses_llsc) {
 unsigned long dummy;
+do {
 __asm__ __volatile__(
 " .set mips3 \n"
-"1: lld %0, %3 # xchg_u64 \n"
+" lld %0, %3 # xchg_u64 \n"
 " move %2, %z4 \n"
 " scd %2, %1 \n"
-" beqz %2, 2f \n"
-" .subsection 2 \n"
-"2: b 1b \n"
-" .previous \n"
 " .set mips0 \n"
 : "=&r" (retval), "=m" (*m), "=&r" (dummy)
 : "R" (*m), "Jr" (val)
 : "memory");
+} while (unlikely(!dummy));
 } else {
 unsigned long flags;
......