Commit 79d365a3 authored by Mathieu Desnoyers, committed by Linus Torvalds

atomic.h: add atomic64 cmpxchg, xchg and add_unless to x86_64

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2549c858
@@ -375,8 +375,8 @@ static __inline__ long atomic64_add_return(long i, atomic64_t *v)
 	long __i = i;
 	__asm__ __volatile__(
 		LOCK_PREFIX "xaddq %0, %1;"
-		:"=r"(i)
-		:"m"(v->counter), "0"(i));
+		:"+r" (i), "+m" (v->counter)
+		: : "memory");
 	return i + __i;
 }
@@ -388,7 +388,10 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 #define atomic64_inc_return(v)  (atomic64_add_return(1,v))
 #define atomic64_dec_return(v)  (atomic64_sub_return(1,v))
 
-#define atomic_cmpxchg(v, old, new) ((int)cmpxchg(&((v)->counter), old, new))
+#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -402,7 +405,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  */
 #define atomic_add_unless(v, a, u)				\
 ({								\
-	int c, old;						\
+	__typeof__((v)->counter) c, old;			\
 	c = atomic_read(v);					\
 	for (;;) {						\
 		if (unlikely(c == (u)))				\
@@ -416,6 +419,31 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+#define atomic64_add_unless(v, a, u)				\
+({								\
+	__typeof__((v)->counter) c, old;			\
+	c = atomic64_read(v);					\
+	for (;;) {						\
+		if (unlikely(c == (u)))				\
+			break;					\
+		old = atomic64_cmpxchg((v), c, c + (a));	\
+		if (likely(old == c))				\
+			break;					\
+		c = old;					\
+	}							\
+	c != (u);						\
+})
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
 /* These are x86-specific, used by some header files */
 #define atomic_clear_mask(mask, addr) \
 __asm__ __volatile__(LOCK_PREFIX "andl %0,%1" \
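For readers unfamiliar with the compare-and-swap retry idiom that the new atomic64_add_unless macro is built on, here is a minimal user-space sketch of the same technique. It is not part of the patch: it substitutes GCC's __sync_val_compare_and_swap builtin for the kernel's LOCK-prefixed cmpxchg(), and the names add_unless64 and sample_counter are illustrative only.

/* Hypothetical user-space model of the atomic64_add_unless idiom added
 * above: retry a 64-bit compare-and-swap until it either succeeds or the
 * counter is observed to hold the excluded value u. */
#include <stdio.h>
#include <stdint.h>

static int64_t sample_counter = 5;

/* Returns non-zero if the add happened, zero if the counter held u. */
static int add_unless64(int64_t *v, int64_t a, int64_t u)
{
	int64_t c = *v, old;

	for (;;) {
		if (c == u)		/* excluded value: give up */
			break;
		/* One-shot 64-bit cmpxchg; returns the prior value. */
		old = __sync_val_compare_and_swap(v, c, c + a);
		if (old == c)		/* swap succeeded */
			break;
		c = old;		/* lost a race: retry with fresh value */
	}
	return c != u;
}

int main(void)
{
	printf("added: %d, counter: %lld\n",
	       add_unless64(&sample_counter, 1, 0),
	       (long long)sample_counter);		/* added: 1, counter: 6 */
	printf("added: %d\n",
	       add_unless64(&sample_counter, 1, 6));	/* counter == 6 -> 0 */
	return 0;
}

The kernel macro differs only in packaging: cmpxchg() expands to a LOCK-prefixed cmpxchgq on x86_64, and the loop lives in a statement expression whose value, c != (u), reports whether the add took place; the retry structure is identical.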