Commit 86ca2ffe authored by David S. Miller

[SPARC64]: __atomic_{add,sub}() must sign-extend return value.

Even though we declare these functions as returning
a 32-bit signed integer, the sparc64 ABI states that
such functions must properly sign-extend the return
value to the full 64 bits.

Due to this bug, parts of mm/rmap.c were misbehaving
when compiled with gcc-3.4 on sparc64.  gcc-3.4 was
legally using a 64-bit comparison against zero with
the return value of __atomic_add().
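
To make the failure mode concrete, here is a small standalone C sketch (hypothetical, not kernel code; the helper raw_return_register() only models the register contents): if the callee leaves stale data in the upper 32 bits of the return register, a 64-bit compare against zero misfires even though the 32-bit result really is zero, and sign-extending the low 32 bits, as the patch's sra instruction does, restores the expected value.

/* Hypothetical sketch (not kernel code): why a caller's 64-bit compare
 * misfires when a callee declared to return "int" does not sign-extend.
 */
#include <stdint.h>
#include <stdio.h>

/* Model of the raw 64-bit return register handed back by a callee
 * that left stale data in the upper 32 bits. */
static int64_t raw_return_register(int32_t result)
{
	return ((int64_t)0x12345678 << 32) | (uint32_t)result;
}

int main(void)
{
	int64_t reg = raw_return_register(0);

	/* A 64-bit test of the whole register -- which gcc-3.4 may
	 * legally emit for "if (__atomic_add(...) == 0)" -- sees
	 * non-zero even though the 32-bit result is zero. */
	printf("64-bit test says zero: %s\n", reg == 0 ? "yes" : "no");

	/* Sign-extending the low 32 bits restores the expected value. */
	printf("after sign-extension:  %s\n",
	       (int64_t)(int32_t)reg == 0 ? "yes" : "no");
	return 0;
}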

I would like to thank Hugh Daniels and others for helping
to track down this peculiar bug.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1d36022f
@@ -7,8 +7,22 @@
 #include <asm/asi.h>
 
 	.text
-	.align	64
 
+	/* We use these stubs for the uncommon case
+	 * of contention on the atomic value.  This is
+	 * so that we can keep the main fast path 8
+	 * instructions long and thus fit into a single
+	 * L2 cache line.
+	 */
+__atomic_add_membar:
+	ba,pt	%xcc, __atomic_add
+	 membar	#StoreLoad | #StoreStore
+
+__atomic_sub_membar:
+	ba,pt	%xcc, __atomic_sub
+	 membar	#StoreLoad | #StoreStore
+
+	.align	64
 	.globl	__atomic_add
 	.type	__atomic_add,#function
 __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
@@ -16,10 +30,10 @@ __atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
 	add	%g5, %o0, %g7
 	cas	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_add
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%icc, __atomic_add_membar
+	 add	%g7, %o0, %g7
 	retl
-	 add	%g7, %o0, %o0
+	 sra	%g7, 0, %o0
 	.size	__atomic_add, .-__atomic_add
 
 	.globl	__atomic_sub
@@ -29,10 +43,10 @@ __atomic_sub: /* %o0 = increment, %o1 = atomic_ptr */
 	sub	%g5, %o0, %g7
 	cas	[%o1], %g5, %g7
 	cmp	%g5, %g7
-	bne,pn	%icc, __atomic_sub
-	 membar	#StoreLoad | #StoreStore
+	bne,pn	%icc, __atomic_sub_membar
+	 sub	%g7, %o0, %g7
 	retl
-	 sub	%g7, %o0, %o0
+	 sra	%g7, 0, %o0
 	.size	__atomic_sub, .-__atomic_sub
 
 	.globl	__atomic64_add
...
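
For readers who prefer C, the following is a rough C11 model of what the reworked fast path computes (a sketch only: the function name is made up, and it does not reproduce the exact membar placement of the contention stubs above; the real implementation is the assembly in the diff). It shows the compare-and-swap retry loop and the sign-extended return that the fix adds.

#include <stdatomic.h>

/* Rough model of __atomic_add after this patch: retry the CAS until it
 * wins, then return the new value sign-extended to 64 bits ("long" is
 * 64-bit on sparc64), mirroring "sra %g7, 0, %o0". */
long atomic_add_return_model(int increment, _Atomic int *ptr)
{
	int old = atomic_load_explicit(ptr, memory_order_relaxed);
	int sum;

	do {
		sum = old + increment;	/* add %g5, %o0, %g7 */
		/* cas [%o1], %g5, %g7: store sum only if *ptr still equals
		 * old; on failure, old is refreshed and the loop retries. */
	} while (!atomic_compare_exchange_weak(ptr, &old, sum));

	return (long)sum;		/* sign-extend the 32-bit result */
}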