Commit 9a70a428 authored by Heiko Carstens, committed by Martin Schwidefsky

s390/atomic: various small cleanups

- add a typecheck to the defines to make sure they operate on an atomic_t
- simplify inline assembly constraints
- keep variable names consistent between functions
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 5692e4d1
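
For context on the first two bullets: typecheck() is the kernel's compile-time type-assertion macro from include/linux/typecheck.h. The snippet below is a simplified, self-contained sketch (the helper atomic_op_sketch and the local macro copy are illustrative only, not part of this patch) of how the macro catches callers that pass anything other than an atomic_t *, which is what lets the old (atomic_t *)(ptr) casts in the defines be dropped.

/*
 * Simplified copy of the kernel's typecheck() macro: comparing the
 * addresses of two dummy variables makes the compiler warn
 * ("comparison of distinct pointer types lacks a cast") whenever x
 * does not have the expected type, while the expression itself
 * compiles to nothing.
 */
#define typecheck(type, x)                      \
({                                              \
        type __dummy;                           \
        typeof(x) __dummy2;                     \
        (void)(&__dummy == &__dummy2);          \
        1;                                      \
})

typedef struct { int counter; } atomic_t;       /* stand-in for the kernel type */

static inline void atomic_op_sketch(int val, atomic_t *v)
{
        /*
         * The old code cast ptr to (atomic_t *), so any pointer type was
         * silently accepted; with typecheck() a wrong pointer type is
         * flagged at compile time and the cast becomes unnecessary.
         */
        typecheck(atomic_t *, v);
        v->counter += val;      /* placeholder for the asm in __ATOMIC_LOOP */
}
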
@@ -28,9 +28,11 @@
 #define __ATOMIC_LOOP(ptr, op_val, op_string) \
 ({ \
         int old_val; \
+ \
+        typecheck(atomic_t *, ptr); \
         asm volatile( \
                 op_string " %0,%2,%1\n" \
-                : "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter) \
+                : "=d" (old_val), "+Q" ((ptr)->counter) \
                 : "d" (op_val) \
                 : "cc", "memory"); \
         old_val; \
@@ -45,15 +47,16 @@
 #define __ATOMIC_LOOP(ptr, op_val, op_string) \
 ({ \
         int old_val, new_val; \
+ \
+        typecheck(atomic_t *, ptr); \
         asm volatile( \
                 " l %0,%2\n" \
                 "0: lr %1,%0\n" \
                 op_string " %1,%3\n" \
                 " cs %0,%1,%2\n" \
                 " jl 0b" \
-                : "=&d" (old_val), "=&d" (new_val), \
-                  "=Q" (((atomic_t *)(ptr))->counter) \
-                : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+                : "d" (op_val) \
                 : "cc", "memory"); \
         old_val; \
 })
@@ -126,8 +129,8 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
         asm volatile(
                 " cs %0,%2,%1"
-                : "+d" (old), "=Q" (v->counter)
-                : "d" (new), "Q" (v->counter)
+                : "+d" (old), "+Q" (v->counter)
+                : "d" (new)
                 : "cc", "memory");
         return old;
 }
@@ -163,9 +166,11 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_LOOP(ptr, op_val, op_string) \
 ({ \
         long long old_val; \
+ \
+        typecheck(atomic64_t *, ptr); \
         asm volatile( \
                 op_string " %0,%2,%1\n" \
-                : "=d" (old_val), "+Q" (((atomic_t *)(ptr))->counter) \
+                : "=d" (old_val), "+Q" ((ptr)->counter) \
                 : "d" (op_val) \
                 : "cc", "memory"); \
         old_val; \
@@ -180,15 +185,16 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 #define __ATOMIC64_LOOP(ptr, op_val, op_string) \
 ({ \
         long long old_val, new_val; \
+ \
+        typecheck(atomic64_t *, ptr); \
         asm volatile( \
                 " lg %0,%2\n" \
                 "0: lgr %1,%0\n" \
                 op_string " %1,%3\n" \
                 " csg %0,%1,%2\n" \
                 " jl 0b" \
-                : "=&d" (old_val), "=&d" (new_val), \
-                  "=Q" (((atomic_t *)(ptr))->counter) \
-                : "d" (op_val), "Q" (((atomic_t *)(ptr))->counter) \
+                : "=&d" (old_val), "=&d" (new_val), "+Q" ((ptr)->counter)\
+                : "d" (op_val) \
                 : "cc", "memory"); \
         old_val; \
 })
@@ -234,8 +240,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 {
         asm volatile(
                 " csg %0,%2,%1"
-                : "+d" (old), "=Q" (v->counter)
-                : "d" (new), "Q" (v->counter)
+                : "+d" (old), "+Q" (v->counter)
+                : "d" (new)
                 : "cc", "memory");
         return old;
 }
@@ -276,8 +282,8 @@ static inline long long atomic64_xchg(atomic64_t *v, long long new)
                 " lm %0,%N0,%1\n"
                 "0: cds %0,%2,%1\n"
                 " jl 0b\n"
-                : "=&d" (rp_old), "=Q" (v->counter)
-                : "d" (rp_new), "Q" (v->counter)
+                : "=&d" (rp_old), "+Q" (v->counter)
+                : "d" (rp_new)
                 : "cc");
         return rp_old.pair;
 }
@@ -290,8 +296,8 @@ static inline long long atomic64_cmpxchg(atomic64_t *v,
 
         asm volatile(
                 " cds %0,%2,%1"
-                : "+&d" (rp_old), "=Q" (v->counter)
-                : "d" (rp_new), "Q" (v->counter)
+                : "+&d" (rp_old), "+Q" (v->counter)
+                : "d" (rp_new)
                 : "cc");
         return rp_old.pair;
 }
@@ -347,7 +353,7 @@ static inline void atomic64_add(long long i, atomic64_t *v)
 #endif
 }
 
-static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
+static inline int atomic64_add_unless(atomic64_t *v, long long i, long long u)
 {
         long long c, old;
 
@@ -355,7 +361,7 @@ static inline int atomic64_add_unless(atomic64_t *v, long long a, long long u)
         for (;;) {
                 if (unlikely(c == u))
                         break;
-                old = atomic64_cmpxchg(v, c, c + a);
+                old = atomic64_cmpxchg(v, c, c + i);
                 if (likely(old == c))
                         break;
                 c = old;
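
For reference, the constraint simplification in the compare-and-swap fallback amounts to passing the counter once as a "+Q" read-write operand instead of listing the same location twice, as a "=Q" output and again as a "Q" input. Below is a hypothetical expansion of the cleaned-up __ATOMIC_LOOP for an add (op_string fixed to "ar"; the helper name atomic_add_return_sketch is made up, and the asm is s390-only):

static inline int atomic_add_return_sketch(int i, atomic_t *v)
{
        int old_val, new_val;

        typecheck(atomic_t *, v);
        asm volatile(
                " l %0,%2\n"            /* old_val = v->counter */
                "0: lr %1,%0\n"         /* new_val = old_val */
                " ar %1,%3\n"           /* new_val += i */
                " cs %0,%1,%2\n"        /* swap in new_val if counter unchanged */
                " jl 0b"                /* retry if the compare failed */
                : "=&d" (old_val), "=&d" (new_val), "+Q" (v->counter)
                : "d" (i)
                : "cc", "memory");
        return old_val + i;
}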