Commit 96d330af authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: alpha: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates alpha to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
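
For context, the wrapping works roughly as follows. This is a simplified
sketch of the generated atomic-instrumented.h wrappers (not the literal
header, which covers every operation and ordering variant): each regular
function annotates the access for KASAN/KCSAN and then calls the
corresponding arch_*() implementation.

  /*
   * Simplified sketch, not the generated header: the common wrappers
   * provide the regular API on top of the arch_*() operations that an
   * ARCH_ATOMIC architecture supplies.
   */
  static __always_inline int
  atomic_read(const atomic_t *v)
  {
  	instrument_atomic_read(v, sizeof(*v));
  	return arch_atomic_read(v);
  }

  static __always_inline void
  atomic_add(int i, atomic_t *v)
  {
  	instrument_atomic_read_write(v, sizeof(*v));
  	arch_atomic_add(i, v);
  }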

Note: xchg_local() is NOT currently part of the generic arch_atomic API,
and is not instrumented.
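
For illustration only (a hypothetical caller, not part of this patch):
users of the regular API are unaffected by the rename, because the common
wrappers keep the old names and forward to the arch_*() implementations.

  /* Hypothetical example, not from this patch: the regular names still
   * work and now go through the common wrappers, which forward to
   * alpha's arch_atomic_add() and arch_cmpxchg(). */
  static atomic_t nr_users = ATOMIC_INIT(0);

  static void register_user(void)
  {
  	atomic_add(1, &nr_users);

  	/* The cmpxchg on the counter likewise ends up in arch_cmpxchg(). */
  	(void)atomic_cmpxchg(&nr_users, 1, 2);
  }
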
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-14-mark.rutland@arm.com
parent 82b993e8
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -2,6 +2,7 @@
 config ALPHA
 	bool
 	default y
+	select ARCH_ATOMIC
 	select ARCH_32BIT_USTAT_F_TINODE
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select ARCH_MIGHT_HAVE_PC_SERIO
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@

 #define ATOMIC64_INIT(i)	{ (i) }

-#define atomic_read(v)		READ_ONCE((v)->counter)
-#define atomic64_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic_read(v)	READ_ONCE((v)->counter)
+#define arch_atomic64_read(v)	READ_ONCE((v)->counter)

-#define atomic_set(v,i)		WRITE_ONCE((v)->counter, (i))
-#define atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_set(v,i)	WRITE_ONCE((v)->counter, (i))
+#define arch_atomic64_set(v,i)	WRITE_ONCE((v)->counter, (i))

 /*
  * To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
  */

 #define ATOMIC_OP(op, asm_op) \
-static __inline__ void atomic_##op(int i, atomic_t * v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
 { \
 	unsigned long temp; \
 	__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
 } \

 #define ATOMIC_OP_RETURN(op, asm_op) \
-static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
 }

 #define ATOMIC_FETCH_OP(op, asm_op) \
-static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 { \
 	long temp, result; \
 	__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
 }

 #define ATOMIC64_OP(op, asm_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
 { \
 	s64 temp; \
 	__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
 } \

 #define ATOMIC64_OP_RETURN(op, asm_op) \
-static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 { \
 	s64 temp, result; \
 	__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
 }

 #define ATOMIC64_FETCH_OP(op, asm_op) \
-static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
+static __inline__ s64 \
+arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 { \
 	s64 temp, result; \
 	__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
 ATOMIC_OPS(add)
 ATOMIC_OPS(sub)

-#define atomic_add_return_relaxed	atomic_add_return_relaxed
-#define atomic_sub_return_relaxed	atomic_sub_return_relaxed
-#define atomic_fetch_add_relaxed	atomic_fetch_add_relaxed
-#define atomic_fetch_sub_relaxed	atomic_fetch_sub_relaxed
+#define arch_atomic_add_return_relaxed		arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed		arch_atomic_sub_return_relaxed
+#define arch_atomic_fetch_add_relaxed		arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed		arch_atomic_fetch_sub_relaxed

-#define atomic64_add_return_relaxed	atomic64_add_return_relaxed
-#define atomic64_sub_return_relaxed	atomic64_sub_return_relaxed
-#define atomic64_fetch_add_relaxed	atomic64_fetch_add_relaxed
-#define atomic64_fetch_sub_relaxed	atomic64_fetch_sub_relaxed
+#define arch_atomic64_add_return_relaxed	arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed	arch_atomic64_sub_return_relaxed
+#define arch_atomic64_fetch_add_relaxed		arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed		arch_atomic64_fetch_sub_relaxed

-#define atomic_andnot			atomic_andnot
-#define atomic64_andnot			atomic64_andnot
+#define arch_atomic_andnot			arch_atomic_andnot
+#define arch_atomic64_andnot			arch_atomic64_andnot

 #undef ATOMIC_OPS
 #define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
 ATOMIC_OPS(or, bis)
 ATOMIC_OPS(xor, xor)

-#define atomic_fetch_and_relaxed	atomic_fetch_and_relaxed
-#define atomic_fetch_andnot_relaxed	atomic_fetch_andnot_relaxed
-#define atomic_fetch_or_relaxed		atomic_fetch_or_relaxed
-#define atomic_fetch_xor_relaxed	atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and_relaxed		arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_andnot_relaxed	arch_atomic_fetch_andnot_relaxed
+#define arch_atomic_fetch_or_relaxed		arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed		arch_atomic_fetch_xor_relaxed

-#define atomic64_fetch_and_relaxed	atomic64_fetch_and_relaxed
-#define atomic64_fetch_andnot_relaxed	atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_or_relaxed	atomic64_fetch_or_relaxed
-#define atomic64_fetch_xor_relaxed	atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and_relaxed		arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_andnot_relaxed	arch_atomic64_fetch_andnot_relaxed
+#define arch_atomic64_fetch_or_relaxed		arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed		arch_atomic64_fetch_xor_relaxed

 #undef ATOMIC_OPS
 #undef ATOMIC64_FETCH_OP
@@ -198,14 +200,18 @@ ATOMIC_OPS(xor, xor)
 #undef ATOMIC_OP_RETURN
 #undef ATOMIC_OP

-#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic64_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

-#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, old, new) \
+	(arch_cmpxchg(&((v)->counter), old, new))
+#define arch_atomic_xchg(v, new) \
+	(arch_xchg(&((v)->counter), new))

 /**
- * atomic_fetch_add_unless - add unless the number is a given value
+ * arch_atomic_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -213,7 +219,7 @@ ATOMIC_OPS(xor, xor)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
 {
 	int c, new, old;
 	smp_mb();
@@ -234,10 +240,10 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
 	smp_mb();
 	return old;
 }
-#define atomic_fetch_add_unless atomic_fetch_add_unless
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

 /**
- * atomic64_fetch_add_unless - add unless the number is a given value
+ * arch_atomic64_fetch_add_unless - add unless the number is a given value
  * @v: pointer of type atomic64_t
  * @a: the amount to add to v...
  * @u: ...unless v is equal to u.
@@ -245,7 +251,7 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns the old value of @v.
  */
-static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	s64 c, new, old;
 	smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 	smp_mb();
 	return old;
 }
-#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

 /*
- * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * arch_atomic64_dec_if_positive - decrement by 1 if old value positive
  * @v: pointer of type atomic_t
  *
  * The function returns the old value of *v minus 1, even if
  * the atomic variable, v, was not decremented.
  */
-static inline s64 atomic64_dec_if_positive(atomic64_t *v)
+static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
 {
 	s64 old, tmp;
 	smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
 	smp_mb();
 	return old - 1;
 }
-#define atomic64_dec_if_positive atomic64_dec_if_positive
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

 #endif /* _ALPHA_ATOMIC_H */
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
 	sizeof(*(ptr))); \
 })

-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) _o_ = (o); \
 	__typeof__(*(ptr)) _n_ = (n); \
@@ -26,7 +26,7 @@
 	sizeof(*(ptr))); \
 })

-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
 	cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
  * The leading and the trailing memory barriers guarantee that these
  * operations are fully ordered.
  */
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _x_ = (x); \
@@ -53,7 +53,7 @@
 	__ret; \
 })

-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _o_ = (o); \
@@ -65,10 +65,10 @@
 	__ret; \
 })

-#define cmpxchg64(ptr, o, n) \
+#define arch_cmpxchg64(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
-	cmpxchg((ptr), (o), (n)); \
+	arch_cmpxchg((ptr), (o), (n)); \
 })

 #undef ____cmpxchg