Commit a8bcccab authored by Peter Zijlstra, committed by Ingo Molnar

locking/atomic, arch/x86: Implement atomic{,64}_fetch_{add,sub,and,or,xor}()

Implement FETCH-OP atomic primitives; these are very similar to the
existing OP-RETURN primitives, except that they return the value of the
atomic variable _before_ modification.

This is especially useful for irreversible operations such as bitops,
where the state prior to modification cannot be reconstructed from the
result of an OP-RETURN primitive.
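
As an illustration only (not part of this patch; MY_FLAG and
test_and_set_flag() are hypothetical names), a fetch-op lets the caller
recover what an irreversible bit operation destroyed, here whether the
flag was already set before we set it:

	/* Illustration only -- not part of this patch. */
	#define MY_FLAG	0x01			/* hypothetical flag bit */

	static bool test_and_set_flag(atomic_t *flags)
	{
		int old = atomic_fetch_or(MY_FLAG, flags);

		return !(old & MY_FLAG);	/* true iff we were first to set it */
	}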
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-arch@vger.kernel.org
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 1af5de9a
--- a/arch/x86/include/asm/atomic.h
+++ b/arch/x86/include/asm/atomic.h
@@ -171,6 +171,16 @@ static __always_inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
+static __always_inline int atomic_fetch_add(int i, atomic_t *v)
+{
+	return xadd(&v->counter, i);
+}
+
+static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
+{
+	return xadd(&v->counter, -i);
+}
+
 static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
 	return cmpxchg(&v->counter, old, new);
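
Illustration only (not part of the patch): xadd() returns the previous
value of v->counter, so the add/sub fetch-ops need no cmpxchg retry
loop. A minimal sketch of how the new fetch form relates to the
existing op-return form, assuming a hypothetical counter 'refs':

	/* Illustration only -- not part of the patch. */
	static void fetch_vs_return_demo(void)
	{
		atomic_t refs = ATOMIC_INIT(3);			/* hypothetical counter */
		int before = atomic_fetch_add(2, &refs);	/* before == 3, refs now 5 */
		int after  = atomic_add_return(2, &refs);	/* after  == 7, refs now 7 */

		/* In general: atomic_add_return(i, v) == atomic_fetch_add(i, v) + i */
		(void)before;
		(void)after;
	}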
@@ -190,10 +200,31 @@ static inline void atomic_##op(int i, atomic_t *v)			\
 			: "memory");					\
 }
 
-ATOMIC_OP(and)
-ATOMIC_OP(or)
-ATOMIC_OP(xor)
+#define ATOMIC_FETCH_OP(op, c_op)					\
+static inline int atomic_fetch_##op(int i, atomic_t *v)		\
+{									\
+	int old, val = atomic_read(v);					\
+	for (;;) {							\
+		old = atomic_cmpxchg(v, val, val c_op i);		\
+		if (old == val)						\
+			break;						\
+		val = old;						\
+	}								\
+	return old;							\
+}
+
+#define ATOMIC_OPS(op, c_op)						\
+	ATOMIC_OP(op)							\
+	ATOMIC_FETCH_OP(op, c_op)
+
+#define atomic_fetch_or atomic_fetch_or
+
+ATOMIC_OPS(and, &)
+ATOMIC_OPS(or , |)
+ATOMIC_OPS(xor, ^)
 
+#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
 #undef ATOMIC_OP
 
 /**
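
Illustration only (not part of the patch): ATOMIC_OPS(and, &) above
generates, via ATOMIC_FETCH_OP(and, &), roughly the following cmpxchg
loop:

	static inline int atomic_fetch_and(int i, atomic_t *v)
	{
		int old, val = atomic_read(v);

		for (;;) {
			old = atomic_cmpxchg(v, val, val & i);
			if (old == val)
				break;
			val = old;
		}

		return old;	/* value of *v before the AND */
	}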
--- a/arch/x86/include/asm/atomic64_32.h
+++ b/arch/x86/include/asm/atomic64_32.h
@@ -320,10 +320,29 @@ static inline void atomic64_##op(long long i, atomic64_t *v)		\
 		c = old;						\
 }
 
-ATOMIC64_OP(and, &)
-ATOMIC64_OP(or, |)
-ATOMIC64_OP(xor, ^)
+#define ATOMIC64_FETCH_OP(op, c_op)					\
+static inline long long atomic64_fetch_##op(long long i, atomic64_t *v) \
+{									\
+	long long old, c = 0;						\
+	while ((old = atomic64_cmpxchg(v, c, c c_op i)) != c)		\
+		c = old;						\
+	return old;							\
+}
+
+ATOMIC64_FETCH_OP(add, +)
+
+#define atomic64_fetch_sub(i, v)	atomic64_fetch_add(-(i), (v))
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op, c_op)						\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_32_H */
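
Illustration only (not part of the patch): on 32-bit x86,
ATOMIC64_FETCH_OP(add, +) above expands to roughly the loop below. The
initial guess c = 0 is almost always wrong, but atomic64_cmpxchg()
returns the value it actually found, which seeds the next attempt:

	static inline long long atomic64_fetch_add(long long i, atomic64_t *v)
	{
		long long old, c = 0;

		while ((old = atomic64_cmpxchg(v, c, c + i)) != c)
			c = old;

		return old;	/* value of *v before the add */
	}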
--- a/arch/x86/include/asm/atomic64_64.h
+++ b/arch/x86/include/asm/atomic64_64.h
@@ -158,6 +158,16 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
 	return atomic64_add_return(-i, v);
 }
 
+static inline long atomic64_fetch_add(long i, atomic64_t *v)
+{
+	return xadd(&v->counter, i);
+}
+
+static inline long atomic64_fetch_sub(long i, atomic64_t *v)
+{
+	return xadd(&v->counter, -i);
+}
+
 #define atomic64_inc_return(v)  (atomic64_add_return(1, (v)))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1, (v)))
@@ -229,10 +239,29 @@ static inline void atomic64_##op(long i, atomic64_t *v)		\
 			: "memory");					\
 }
 
-ATOMIC64_OP(and)
-ATOMIC64_OP(or)
-ATOMIC64_OP(xor)
+#define ATOMIC64_FETCH_OP(op, c_op)					\
+static inline long atomic64_fetch_##op(long i, atomic64_t *v)		\
+{									\
+	long old, val = atomic64_read(v);				\
+	for (;;) {							\
+		old = atomic64_cmpxchg(v, val, val c_op i);		\
+		if (old == val)						\
+			break;						\
+		val = old;						\
+	}								\
+	return old;							\
+}
+
+#define ATOMIC64_OPS(op, c_op)						\
+	ATOMIC64_OP(op)							\
+	ATOMIC64_FETCH_OP(op, c_op)
+
+ATOMIC64_OPS(and, &)
+ATOMIC64_OPS(or, |)
+ATOMIC64_OPS(xor, ^)
 
+#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
 #undef ATOMIC64_OP
 
 #endif /* _ASM_X86_ATOMIC64_64_H */
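
Illustration only (not part of the patch; 'budget' and take_budget()
are hypothetical): a short usage sketch of the 64-bit fetch-ops, which,
like the 32-bit ones, report the value the already-applied operation
started from:

	static bool take_budget(atomic64_t *budget, long n)
	{
		long before = atomic64_fetch_sub(n, budget);

		return before >= n;	/* true if the budget covered the request */
	}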