Commit 329c161b authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: parisc: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates parisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-27-mark.rutland@arm.com
parent 3f1e931d
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
config PARISC config PARISC
def_bool y def_bool y
select ARCH_32BIT_OFF_T if !64BIT select ARCH_32BIT_OFF_T if !64BIT
select ARCH_ATOMIC
select ARCH_MIGHT_HAVE_PC_PARPORT select ARCH_MIGHT_HAVE_PC_PARPORT
select HAVE_IDE select HAVE_IDE
select HAVE_FUNCTION_TRACER select HAVE_FUNCTION_TRACER
......
...@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned; ...@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
* are atomic, so a reader never sees inconsistent values. * are atomic, so a reader never sees inconsistent values.
*/ */
static __inline__ void atomic_set(atomic_t *v, int i) static __inline__ void arch_atomic_set(atomic_t *v, int i)
{ {
unsigned long flags; unsigned long flags;
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags);
...@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i) ...@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
_atomic_spin_unlock_irqrestore(v, flags); _atomic_spin_unlock_irqrestore(v, flags);
} }
#define atomic_set_release(v, i) atomic_set((v), (i)) #define arch_atomic_set_release(v, i) arch_atomic_set((v), (i))
static __inline__ int atomic_read(const atomic_t *v) static __inline__ int arch_atomic_read(const atomic_t *v)
{ {
return READ_ONCE((v)->counter); return READ_ONCE((v)->counter);
} }
/* exported interface */ /* exported interface */
#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n))) #define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
#define ATOMIC_OP(op, c_op) \ #define ATOMIC_OP(op, c_op) \
static __inline__ void atomic_##op(int i, atomic_t *v) \ static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
...@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \ ...@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
} }
#define ATOMIC_OP_RETURN(op, c_op) \ #define ATOMIC_OP_RETURN(op, c_op) \
static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
...@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \ ...@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
} }
#define ATOMIC_FETCH_OP(op, c_op) \ #define ATOMIC_FETCH_OP(op, c_op) \
static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \ static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
int ret; \ int ret; \
...@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=) ...@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
#define ATOMIC64_INIT(i) { (i) } #define ATOMIC64_INIT(i) { (i) }
#define ATOMIC64_OP(op, c_op) \ #define ATOMIC64_OP(op, c_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
\ \
...@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \ ...@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
} }
#define ATOMIC64_OP_RETURN(op, c_op) \ #define ATOMIC64_OP_RETURN(op, c_op) \
static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
s64 ret; \ s64 ret; \
...@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \ ...@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
} }
#define ATOMIC64_FETCH_OP(op, c_op) \ #define ATOMIC64_FETCH_OP(op, c_op) \
static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \ static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
{ \ { \
unsigned long flags; \ unsigned long flags; \
s64 ret; \ s64 ret; \
...@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=) ...@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
#undef ATOMIC64_OP #undef ATOMIC64_OP
static __inline__ void static __inline__ void
atomic64_set(atomic64_t *v, s64 i) arch_atomic64_set(atomic64_t *v, s64 i)
{ {
unsigned long flags; unsigned long flags;
_atomic_spin_lock_irqsave(v, flags); _atomic_spin_lock_irqsave(v, flags);
...@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i) ...@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
_atomic_spin_unlock_irqrestore(v, flags); _atomic_spin_unlock_irqrestore(v, flags);
} }
#define atomic64_set_release(v, i) atomic64_set((v), (i)) #define arch_atomic64_set_release(v, i) arch_atomic64_set((v), (i))
static __inline__ s64 static __inline__ s64
atomic64_read(const atomic64_t *v) arch_atomic64_read(const atomic64_t *v)
{ {
return READ_ONCE((v)->counter); return READ_ONCE((v)->counter);
} }
/* exported interface */ /* exported interface */
#define atomic64_cmpxchg(v, o, n) \ #define arch_atomic64_cmpxchg(v, o, n) \
((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n))) ((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new)) #define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
#endif /* !CONFIG_64BIT */ #endif /* !CONFIG_64BIT */
......
...@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size) ...@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
** if (((unsigned long)p & 0xf) == 0) ** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p); ** return __ldcw(p);
*/ */
#define xchg(ptr, x) \ #define arch_xchg(ptr, x) \
({ \ ({ \
__typeof__(*(ptr)) __ret; \ __typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \ __typeof__(*(ptr)) _x_ = (x); \
...@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size) ...@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
return old; return old;
} }
#define cmpxchg(ptr, o, n) \ #define arch_cmpxchg(ptr, o, n) \
({ \ ({ \
__typeof__(*(ptr)) _o_ = (o); \ __typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \ __typeof__(*(ptr)) _n_ = (n); \
...@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr, ...@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
* cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
* them available. * them available.
*/ */
#define cmpxchg_local(ptr, o, n) \ #define arch_cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)))) (unsigned long)(n), sizeof(*(ptr))))
#ifdef CONFIG_64BIT #ifdef CONFIG_64BIT
#define cmpxchg64_local(ptr, o, n) \ #define arch_cmpxchg64_local(ptr, o, n) \
({ \ ({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \ cmpxchg_local((ptr), (o), (n)); \
}) })
#else #else
#define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n)) #define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#endif #endif
#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n) #define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
#endif /* _ASM_PARISC_CMPXCHG_H_ */ #endif /* _ASM_PARISC_CMPXCHG_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment