Commit 8bf705d1 authored by Dmitry Vyukov, committed by Ingo Molnar

locking/atomic/x86: Switch atomic.h to use atomic-instrumented.h

Add arch_ prefix to all atomic operations and include
<asm-generic/atomic-instrumented.h>. This will allow
adding KASAN instrumentation to all atomic ops.
Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Cc: kasan-dev@googlegroups.com
Cc: linux-mm@kvack.org
Link: http://lkml.kernel.org/r/54f0eb64260b84199e538652e079a89b5423ad41.1517246437.git.dvyukov@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent b06ed71a
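For reference, a minimal sketch of the kind of wrapper that
<asm-generic/atomic-instrumented.h> can now layer on top of the arch_
primitives introduced below. The wrapper bodies shown here are an
assumed illustration, not part of this diff; the point is only that the
generic header defines the un-prefixed atomic_*() API in terms of the
arch_ ops plus a KASAN check:

	/* Assumed shape of a generic instrumented wrapper, not this commit's code. */
	#include <linux/kasan-checks.h>	/* kasan_check_read()/kasan_check_write() */

	static __always_inline int atomic_read(const atomic_t *v)
	{
		/* Let KASAN validate the access before the real operation runs. */
		kasan_check_read(v, sizeof(*v));
		return arch_atomic_read(v);
	}

	static __always_inline void atomic_set(atomic_t *v, int i)
	{
		kasan_check_write(v, sizeof(*v));
		arch_atomic_set(v, i);
	}

Callers keep using the atomic_*() names; only the architecture
implementations are renamed to arch_atomic_*(), so existing users pick
up the instrumentation transparently once the generic header is
included at the end of <asm/atomic.h>.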
@@ -17,36 +17,36 @@
#define ATOMIC_INIT(i) { (i) }
/**
* atomic_read - read atomic variable
* arch_atomic_read - read atomic variable
* @v: pointer of type atomic_t
*
* Atomically reads the value of @v.
*/
static __always_inline int atomic_read(const atomic_t *v)
static __always_inline int arch_atomic_read(const atomic_t *v)
{
return READ_ONCE((v)->counter);
}
/**
* atomic_set - set atomic variable
* arch_atomic_set - set atomic variable
* @v: pointer of type atomic_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static __always_inline void atomic_set(atomic_t *v, int i)
static __always_inline void arch_atomic_set(atomic_t *v, int i)
{
WRITE_ONCE(v->counter, i);
}
/**
* atomic_add - add integer to atomic variable
* arch_atomic_add - add integer to atomic variable
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v.
*/
static __always_inline void atomic_add(int i, atomic_t *v)
static __always_inline void arch_atomic_add(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "addl %1,%0"
: "+m" (v->counter)
@@ -54,13 +54,13 @@ static __always_inline void atomic_add(int i, atomic_t *v)
}
/**
* atomic_sub - subtract integer from atomic variable
* arch_atomic_sub - subtract integer from atomic variable
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
* Atomically subtracts @i from @v.
*/
static __always_inline void atomic_sub(int i, atomic_t *v)
static __always_inline void arch_atomic_sub(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "subl %1,%0"
: "+m" (v->counter)
@@ -68,7 +68,7 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
}
/**
* atomic_sub_and_test - subtract value from variable and test result
* arch_atomic_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer of type atomic_t
*
@@ -76,63 +76,63 @@ static __always_inline void atomic_sub(int i, atomic_t *v)
* true if the result is zero, or false for all
* other cases.
*/
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
static __always_inline bool arch_atomic_sub_and_test(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", e);
}
/**
* atomic_inc - increment atomic variable
* arch_atomic_inc - increment atomic variable
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1.
*/
static __always_inline void atomic_inc(atomic_t *v)
static __always_inline void arch_atomic_inc(atomic_t *v)
{
asm volatile(LOCK_PREFIX "incl %0"
: "+m" (v->counter));
}
/**
* atomic_dec - decrement atomic variable
* arch_atomic_dec - decrement atomic variable
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1.
*/
static __always_inline void atomic_dec(atomic_t *v)
static __always_inline void arch_atomic_dec(atomic_t *v)
{
asm volatile(LOCK_PREFIX "decl %0"
: "+m" (v->counter));
}
/**
* atomic_dec_and_test - decrement and test
* arch_atomic_dec_and_test - decrement and test
* @v: pointer of type atomic_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static __always_inline bool atomic_dec_and_test(atomic_t *v)
static __always_inline bool arch_atomic_dec_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", e);
}
/**
* atomic_inc_and_test - increment and test
* arch_atomic_inc_and_test - increment and test
* @v: pointer of type atomic_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static __always_inline bool atomic_inc_and_test(atomic_t *v)
static __always_inline bool arch_atomic_inc_and_test(atomic_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", e);
}
/**
* atomic_add_negative - add and test if negative
* arch_atomic_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer of type atomic_t
*
@@ -140,65 +140,65 @@ static __always_inline bool atomic_inc_and_test(atomic_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
static __always_inline bool arch_atomic_add_negative(int i, atomic_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", s);
}
/**
* atomic_add_return - add integer and return
* arch_atomic_add_return - add integer and return
* @i: integer value to add
* @v: pointer of type atomic_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static __always_inline int atomic_add_return(int i, atomic_t *v)
static __always_inline int arch_atomic_add_return(int i, atomic_t *v)
{
return i + xadd(&v->counter, i);
}
/**
* atomic_sub_return - subtract integer and return
* arch_atomic_sub_return - subtract integer and return
* @v: pointer of type atomic_t
* @i: integer value to subtract
*
* Atomically subtracts @i from @v and returns @v - @i
*/
static __always_inline int atomic_sub_return(int i, atomic_t *v)
static __always_inline int arch_atomic_sub_return(int i, atomic_t *v)
{
return atomic_add_return(-i, v);
return arch_atomic_add_return(-i, v);
}
#define atomic_inc_return(v) (atomic_add_return(1, v))
#define atomic_dec_return(v) (atomic_sub_return(1, v))
#define arch_atomic_inc_return(v) (arch_atomic_add_return(1, v))
#define arch_atomic_dec_return(v) (arch_atomic_sub_return(1, v))
static __always_inline int atomic_fetch_add(int i, atomic_t *v)
static __always_inline int arch_atomic_fetch_add(int i, atomic_t *v)
{
return xadd(&v->counter, i);
}
static __always_inline int atomic_fetch_sub(int i, atomic_t *v)
static __always_inline int arch_atomic_fetch_sub(int i, atomic_t *v)
{
return xadd(&v->counter, -i);
}
static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
static __always_inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
return cmpxchg(&v->counter, old, new);
return arch_cmpxchg(&v->counter, old, new);
}
#define atomic_try_cmpxchg atomic_try_cmpxchg
static __always_inline bool atomic_try_cmpxchg(atomic_t *v, int *old, int new)
#define arch_atomic_try_cmpxchg arch_atomic_try_cmpxchg
static __always_inline bool arch_atomic_try_cmpxchg(atomic_t *v, int *old, int new)
{
return try_cmpxchg(&v->counter, old, new);
}
static inline int atomic_xchg(atomic_t *v, int new)
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
return xchg(&v->counter, new);
}
static inline void atomic_and(int i, atomic_t *v)
static inline void arch_atomic_and(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "andl %1,%0"
: "+m" (v->counter)
@@ -206,16 +206,16 @@ static inline void atomic_and(int i, atomic_t *v)
: "memory");
}
static inline int atomic_fetch_and(int i, atomic_t *v)
static inline int arch_atomic_fetch_and(int i, atomic_t *v)
{
int val = atomic_read(v);
int val = arch_atomic_read(v);
do { } while (!atomic_try_cmpxchg(v, &val, val & i));
do { } while (!arch_atomic_try_cmpxchg(v, &val, val & i));
return val;
}
static inline void atomic_or(int i, atomic_t *v)
static inline void arch_atomic_or(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "orl %1,%0"
: "+m" (v->counter)
@@ -223,16 +223,16 @@ static inline void atomic_or(int i, atomic_t *v)
: "memory");
}
static inline int atomic_fetch_or(int i, atomic_t *v)
static inline int arch_atomic_fetch_or(int i, atomic_t *v)
{
int val = atomic_read(v);
int val = arch_atomic_read(v);
do { } while (!atomic_try_cmpxchg(v, &val, val | i));
do { } while (!arch_atomic_try_cmpxchg(v, &val, val | i));
return val;
}
static inline void atomic_xor(int i, atomic_t *v)
static inline void arch_atomic_xor(int i, atomic_t *v)
{
asm volatile(LOCK_PREFIX "xorl %1,%0"
: "+m" (v->counter)
@@ -240,17 +240,17 @@ static inline void atomic_xor(int i, atomic_t *v)
: "memory");
}
static inline int atomic_fetch_xor(int i, atomic_t *v)
static inline int arch_atomic_fetch_xor(int i, atomic_t *v)
{
int val = atomic_read(v);
int val = arch_atomic_read(v);
do { } while (!atomic_try_cmpxchg(v, &val, val ^ i));
do { } while (!arch_atomic_try_cmpxchg(v, &val, val ^ i));
return val;
}
/**
* __atomic_add_unless - add unless the number is already a given value
* __arch_atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -258,14 +258,14 @@ static inline int atomic_fetch_xor(int i, atomic_t *v)
* Atomically adds @a to @v, so long as @v was not already @u.
* Returns the old value of @v.
*/
static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
static __always_inline int __arch_atomic_add_unless(atomic_t *v, int a, int u)
{
int c = atomic_read(v);
int c = arch_atomic_read(v);
do {
if (unlikely(c == u))
break;
} while (!atomic_try_cmpxchg(v, &c, c + a));
} while (!arch_atomic_try_cmpxchg(v, &c, c + a));
return c;
}
@@ -276,4 +276,6 @@ static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
# include <asm/atomic64_64.h>
#endif
#include <asm-generic/atomic-instrumented.h>
#endif /* _ASM_X86_ATOMIC_H */
@@ -11,37 +11,37 @@
#define ATOMIC64_INIT(i) { (i) }
/**
* atomic64_read - read atomic64 variable
* arch_atomic64_read - read atomic64 variable
* @v: pointer of type atomic64_t
*
* Atomically reads the value of @v.
* Doesn't imply a read memory barrier.
*/
static inline long atomic64_read(const atomic64_t *v)
static inline long arch_atomic64_read(const atomic64_t *v)
{
return READ_ONCE((v)->counter);
}
/**
* atomic64_set - set atomic64 variable
* arch_atomic64_set - set atomic64 variable
* @v: pointer to type atomic64_t
* @i: required value
*
* Atomically sets the value of @v to @i.
*/
static inline void atomic64_set(atomic64_t *v, long i)
static inline void arch_atomic64_set(atomic64_t *v, long i)
{
WRITE_ONCE(v->counter, i);
}
/**
* atomic64_add - add integer to atomic64 variable
* arch_atomic64_add - add integer to atomic64 variable
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v.
*/
static __always_inline void atomic64_add(long i, atomic64_t *v)
static __always_inline void arch_atomic64_add(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "addq %1,%0"
: "=m" (v->counter)
@@ -49,13 +49,13 @@ static __always_inline void atomic64_add(long i, atomic64_t *v)
}
/**
* atomic64_sub - subtract the atomic64 variable
* arch_atomic64_sub - subtract the atomic64 variable
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
* Atomically subtracts @i from @v.
*/
static inline void atomic64_sub(long i, atomic64_t *v)
static inline void arch_atomic64_sub(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "subq %1,%0"
: "=m" (v->counter)
@@ -63,7 +63,7 @@ static inline void atomic64_sub(long i, atomic64_t *v)
}
/**
* atomic64_sub_and_test - subtract value from variable and test result
* arch_atomic64_sub_and_test - subtract value from variable and test result
* @i: integer value to subtract
* @v: pointer to type atomic64_t
*
@@ -71,18 +71,18 @@ static inline void atomic64_sub(long i, atomic64_t *v)
* true if the result is zero, or false for all
* other cases.
*/
static inline bool atomic64_sub_and_test(long i, atomic64_t *v)
static inline bool arch_atomic64_sub_and_test(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "subq", v->counter, "er", i, "%0", e);
}
/**
* atomic64_inc - increment atomic64 variable
* arch_atomic64_inc - increment atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1.
*/
static __always_inline void atomic64_inc(atomic64_t *v)
static __always_inline void arch_atomic64_inc(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "incq %0"
: "=m" (v->counter)
@@ -90,12 +90,12 @@ static __always_inline void atomic64_inc(atomic64_t *v)
}
/**
* atomic64_dec - decrement atomic64 variable
* arch_atomic64_dec - decrement atomic64 variable
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1.
*/
static __always_inline void atomic64_dec(atomic64_t *v)
static __always_inline void arch_atomic64_dec(atomic64_t *v)
{
asm volatile(LOCK_PREFIX "decq %0"
: "=m" (v->counter)
@@ -103,33 +103,33 @@ static __always_inline void atomic64_dec(atomic64_t *v)
}
/**
* atomic64_dec_and_test - decrement and test
* arch_atomic64_dec_and_test - decrement and test
* @v: pointer to type atomic64_t
*
* Atomically decrements @v by 1 and
* returns true if the result is 0, or false for all other
* cases.
*/
static inline bool atomic64_dec_and_test(atomic64_t *v)
static inline bool arch_atomic64_dec_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "decq", v->counter, "%0", e);
}
/**
* atomic64_inc_and_test - increment and test
* arch_atomic64_inc_and_test - increment and test
* @v: pointer to type atomic64_t
*
* Atomically increments @v by 1
* and returns true if the result is zero, or false for all
* other cases.
*/
static inline bool atomic64_inc_and_test(atomic64_t *v)
static inline bool arch_atomic64_inc_and_test(atomic64_t *v)
{
GEN_UNARY_RMWcc(LOCK_PREFIX "incq", v->counter, "%0", e);
}
/**
* atomic64_add_negative - add and test if negative
* arch_atomic64_add_negative - add and test if negative
* @i: integer value to add
* @v: pointer to type atomic64_t
*
@@ -137,59 +137,59 @@ static inline bool atomic64_inc_and_test(atomic64_t *v)
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
static inline bool atomic64_add_negative(long i, atomic64_t *v)
static inline bool arch_atomic64_add_negative(long i, atomic64_t *v)
{
GEN_BINARY_RMWcc(LOCK_PREFIX "addq", v->counter, "er", i, "%0", s);
}
/**
* atomic64_add_return - add and return
* arch_atomic64_add_return - add and return
* @i: integer value to add
* @v: pointer to type atomic64_t
*
* Atomically adds @i to @v and returns @i + @v
*/
static __always_inline long atomic64_add_return(long i, atomic64_t *v)
static __always_inline long arch_atomic64_add_return(long i, atomic64_t *v)
{
return i + xadd(&v->counter, i);
}
static inline long atomic64_sub_return(long i, atomic64_t *v)
static inline long arch_atomic64_sub_return(long i, atomic64_t *v)
{
return atomic64_add_return(-i, v);
return arch_atomic64_add_return(-i, v);
}
static inline long atomic64_fetch_add(long i, atomic64_t *v)
static inline long arch_atomic64_fetch_add(long i, atomic64_t *v)
{
return xadd(&v->counter, i);
}
static inline long atomic64_fetch_sub(long i, atomic64_t *v)
static inline long arch_atomic64_fetch_sub(long i, atomic64_t *v)
{
return xadd(&v->counter, -i);
}
#define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
#define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
#define arch_atomic64_inc_return(v) (arch_atomic64_add_return(1, (v)))
#define arch_atomic64_dec_return(v) (arch_atomic64_sub_return(1, (v)))
static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
static inline long arch_atomic64_cmpxchg(atomic64_t *v, long old, long new)
{
return cmpxchg(&v->counter, old, new);
return arch_cmpxchg(&v->counter, old, new);
}
#define atomic64_try_cmpxchg atomic64_try_cmpxchg
static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
#define arch_atomic64_try_cmpxchg arch_atomic64_try_cmpxchg
static __always_inline bool arch_atomic64_try_cmpxchg(atomic64_t *v, s64 *old, long new)
{
return try_cmpxchg(&v->counter, old, new);
}
static inline long atomic64_xchg(atomic64_t *v, long new)
static inline long arch_atomic64_xchg(atomic64_t *v, long new)
{
return xchg(&v->counter, new);
}
/**
* atomic64_add_unless - add unless the number is a given value
* arch_atomic64_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
@@ -197,37 +197,37 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static inline bool atomic64_add_unless(atomic64_t *v, long a, long u)
static inline bool arch_atomic64_add_unless(atomic64_t *v, long a, long u)
{
s64 c = atomic64_read(v);
s64 c = arch_atomic64_read(v);
do {
if (unlikely(c == u))
return false;
} while (!atomic64_try_cmpxchg(v, &c, c + a));
} while (!arch_atomic64_try_cmpxchg(v, &c, c + a));
return true;
}
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
#define arch_atomic64_inc_not_zero(v) arch_atomic64_add_unless((v), 1, 0)
/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
static inline long atomic64_dec_if_positive(atomic64_t *v)
static inline long arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 dec, c = atomic64_read(v);
s64 dec, c = arch_atomic64_read(v);
do {
dec = c - 1;
if (unlikely(dec < 0))
break;
} while (!atomic64_try_cmpxchg(v, &c, dec));
} while (!arch_atomic64_try_cmpxchg(v, &c, dec));
return dec;
}
static inline void atomic64_and(long i, atomic64_t *v)
static inline void arch_atomic64_and(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "andq %1,%0"
: "+m" (v->counter)
@@ -235,16 +235,16 @@ static inline void atomic64_and(long i, atomic64_t *v)
: "memory");
}
static inline long atomic64_fetch_and(long i, atomic64_t *v)
static inline long arch_atomic64_fetch_and(long i, atomic64_t *v)
{
s64 val = atomic64_read(v);
s64 val = arch_atomic64_read(v);
do {
} while (!atomic64_try_cmpxchg(v, &val, val & i));
} while (!arch_atomic64_try_cmpxchg(v, &val, val & i));
return val;
}
static inline void atomic64_or(long i, atomic64_t *v)
static inline void arch_atomic64_or(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "orq %1,%0"
: "+m" (v->counter)
@@ -252,16 +252,16 @@ static inline void atomic64_or(long i, atomic64_t *v)
: "memory");
}
static inline long atomic64_fetch_or(long i, atomic64_t *v)
static inline long arch_atomic64_fetch_or(long i, atomic64_t *v)
{
s64 val = atomic64_read(v);
s64 val = arch_atomic64_read(v);
do {
} while (!atomic64_try_cmpxchg(v, &val, val | i));
} while (!arch_atomic64_try_cmpxchg(v, &val, val | i));
return val;
}
static inline void atomic64_xor(long i, atomic64_t *v)
static inline void arch_atomic64_xor(long i, atomic64_t *v)
{
asm volatile(LOCK_PREFIX "xorq %1,%0"
: "+m" (v->counter)
@@ -269,12 +269,12 @@ static inline void atomic64_xor(long i, atomic64_t *v)
: "memory");
}
static inline long atomic64_fetch_xor(long i, atomic64_t *v)
static inline long arch_atomic64_fetch_xor(long i, atomic64_t *v)
{
s64 val = atomic64_read(v);
s64 val = arch_atomic64_read(v);
do {
} while (!atomic64_try_cmpxchg(v, &val, val ^ i));
} while (!arch_atomic64_try_cmpxchg(v, &val, val ^ i));
return val;
}
@@ -145,13 +145,13 @@ extern void __add_wrong_size(void)
# include <asm/cmpxchg_64.h>
#endif
#define cmpxchg(ptr, old, new) \
#define arch_cmpxchg(ptr, old, new) \
__cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define sync_cmpxchg(ptr, old, new) \
#define arch_sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))
#define cmpxchg_local(ptr, old, new) \
#define arch_cmpxchg_local(ptr, old, new) \
__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
@@ -221,7 +221,7 @@ extern void __add_wrong_size(void)
#define __try_cmpxchg(ptr, pold, new, size) \
__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
#define try_cmpxchg(ptr, pold, new) \
#define try_cmpxchg(ptr, pold, new) \
__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
/*
@@ -250,10 +250,10 @@ extern void __add_wrong_size(void)
__ret; \
})
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
#define arch_cmpxchg_double(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)
#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
#define arch_cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
__cmpxchg_double(, p1, p2, o1, o2, n1, n2)
#endif /* ASM_X86_CMPXCHG_H */
@@ -36,10 +36,10 @@ static inline void set_64bit(volatile u64 *ptr, u64 value)
}
#ifdef CONFIG_X86_CMPXCHG64
#define cmpxchg64(ptr, o, n) \
#define arch_cmpxchg64(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#define cmpxchg64_local(ptr, o, n) \
#define arch_cmpxchg64_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
(unsigned long long)(n)))
#endif
@@ -76,7 +76,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
* to simulate the cmpxchg8b on the 80386 and 80486 CPU.
*/
#define cmpxchg64(ptr, o, n) \
#define arch_cmpxchg64(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
@@ -93,7 +93,7 @@ static inline u64 __cmpxchg64_local(volatile u64 *ptr, u64 old, u64 new)
__ret; })
#define cmpxchg64_local(ptr, o, n) \
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (o); \
@@ -7,13 +7,13 @@ static inline void set_64bit(volatile u64 *ptr, u64 val)
*ptr = val;
}
#define cmpxchg64(ptr, o, n) \
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
})
#define cmpxchg64_local(ptr, o, n) \
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \