Commit 8c641755 authored by Mark Rutland, committed by Peter Zijlstra

locking/atomic: sh: move to ARCH_ATOMIC

We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates sh to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
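
For illustration only (not part of this patch): with ARCH_ATOMIC selected, the common wrapper generated into include/linux/atomic-instrumented.h by scripts/atomic/gen-atomic-instrumented.sh looks roughly like the sketch below; this is where the optional debug instrumentation lives.

/* Simplified sketch of the generated wrapper, assuming ARCH_ATOMIC. */
static __always_inline void
atomic_add(int i, atomic_t *v)
{
	/* KASAN/KCSAN hooks; these compile away when instrumentation is off. */
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}
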
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rich Felker <dalias@libc.org>
Cc: Will Deacon <will@kernel.org>
Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-30-mark.rutland@arm.com
parent 9efbb355
@@ -2,6 +2,7 @@
config SUPERH
def_bool y
select ARCH_32BIT_OFF_T
+select ARCH_ATOMIC
select ARCH_ENABLE_MEMORY_HOTPLUG if SPARSEMEM && MMU
select ARCH_ENABLE_MEMORY_HOTREMOVE if SPARSEMEM && MMU
select ARCH_HAVE_CUSTOM_GPIO_H
......
@@ -3,7 +3,7 @@
#define __ASM_SH_ATOMIC_GRB_H
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -23,7 +23,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
} \
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int tmp; \
\
@@ -45,7 +45,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int res, tmp; \
\
......
@@ -11,7 +11,7 @@
*/
#define ATOMIC_OP(op, c_op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long flags; \
\
@@ -21,7 +21,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op, c_op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
@@ -35,7 +35,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op, c_op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long temp, flags; \
\
......
@@ -17,7 +17,7 @@
*/
#define ATOMIC_OP(op) \
-static inline void atomic_##op(int i, atomic_t *v) \
+static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
unsigned long tmp; \
\
@@ -32,7 +32,7 @@ static inline void atomic_##op(int i, atomic_t *v) \
}
#define ATOMIC_OP_RETURN(op) \
-static inline int atomic_##op##_return(int i, atomic_t *v) \
+static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
unsigned long temp; \
\
@@ -50,7 +50,7 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
}
#define ATOMIC_FETCH_OP(op) \
-static inline int atomic_fetch_##op(int i, atomic_t *v) \
+static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
unsigned long res, temp; \
\
......
@@ -19,8 +19,8 @@
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
-#define atomic_read(v) READ_ONCE((v)->counter)
-#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
+#define arch_atomic_read(v) READ_ONCE((v)->counter)
+#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#if defined(CONFIG_GUSA_RB)
#include <asm/atomic-grb.h>
@@ -30,8 +30,8 @@
#include <asm/atomic-irq.h>
#endif
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n) (arch_cmpxchg(&((v)->counter), (o), (n)))
#endif /* CONFIG_CPU_J2 */
......
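
As a caller-side illustration (hypothetical code, not from this patch), users of the un-prefixed API are unaffected by the rename: atomic_cmpxchg() is now supplied by the common instrumented layer, which forwards to the sh arch_atomic_cmpxchg() defined above.

#include <linux/atomic.h>
#include <linux/types.h>

/* Hypothetical caller; it reads the same before and after this patch. */
static bool try_take_lock(atomic_t *lock)
{
	/*
	 * atomic_cmpxchg() is the generic wrapper; on sh it ends up in
	 * arch_cmpxchg() via the arch_atomic_cmpxchg() definition above.
	 */
	return atomic_cmpxchg(lock, 0, 1) == 0;
}
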
@@ -45,7 +45,7 @@ extern void __xchg_called_with_bad_pointer(void);
__xchg__res; \
})
-#define xchg(ptr,x) \
+#define arch_xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
/* This function doesn't exist, so you'll get a linker error
@@ -63,7 +63,7 @@ static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
return old;
}
-#define cmpxchg(ptr,o,n) \
+#define arch_cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
......
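
The bare xchg()/cmpxchg() macros get the same treatment: once sh selects ARCH_ATOMIC, the generic layer defines the un-prefixed names on top of these arch_ versions, roughly as in the simplified sketch below (modelled on the shape of the generated include/linux/atomic-instrumented.h; the exact instrumentation calls are produced by the generator script).

/* Simplified sketch of the generated wrapper, assuming ARCH_ATOMIC. */
#define cmpxchg(ptr, ...)						\
({									\
	typeof(ptr) __ai_ptr = (ptr);					\
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr));		\
	arch_cmpxchg(__ai_ptr, __VA_ARGS__);				\
})
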