Commit 3c3b5c3b authored by Mathieu Desnoyers, committed by Ingo Molnar

x86: correct register constraints for 64-bit atomic operations

The x86_64 add/sub atomic ops do not seem to accept integer values bigger
than 32 bits as immediates. Intel's add/sub documentation specifies that
such values have to be passed in registers.

The only operation in the x86-64 architecture which accepts an arbitrary
64-bit immediate is "movq" to any register; similarly, the only
operation which accepts an arbitrary 64-bit displacement is "movabs" to
or from al/ax/eax/rax.

http://gcc.gnu.org/onlinedocs/gcc-4.3.0/gcc/Machine-Constraints.html

states:

e
    32-bit signed integer constant, or a symbolic reference known to fit
    that range (for immediate operands in sign-extending x86-64
    instructions).
Z
    32-bit unsigned integer constant, or a symbolic reference known to
    fit that range (for immediate operands in zero-extending x86-64
    instructions).

Since add/sub does sign extension, using the "e" constraint seems appropriate.

It applies to 2.6.27-rc, 2.6.26, 2.6.25...
Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2fdc8690
...@@ -228,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v) ...@@ -228,7 +228,7 @@ static inline void atomic64_add(long i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "addq %1,%0" asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter) : "=m" (v->counter)
: "ir" (i), "m" (v->counter)); : "er" (i), "m" (v->counter));
} }
/** /**
...@@ -242,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v) ...@@ -242,7 +242,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
{ {
asm volatile(LOCK_PREFIX "subq %1,%0" asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter) : "=m" (v->counter)
: "ir" (i), "m" (v->counter)); : "er" (i), "m" (v->counter));
} }
/** /**
...@@ -260,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v) ...@@ -260,7 +260,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
: "=m" (v->counter), "=qm" (c) : "=m" (v->counter), "=qm" (c)
: "ir" (i), "m" (v->counter) : "memory"); : "er" (i), "m" (v->counter) : "memory");
return c; return c;
} }
...@@ -341,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v) ...@@ -341,7 +341,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
: "=m" (v->counter), "=qm" (c) : "=m" (v->counter), "=qm" (c)
: "ir" (i), "m" (v->counter) : "memory"); : "er" (i), "m" (v->counter) : "memory");
return c; return c;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment