Commit 8121019c authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/cmpxchg_32.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 3d3c6e10
--- a/include/asm-x86/cmpxchg_32.h
+++ b/include/asm-x86/cmpxchg_32.h
@@ -8,9 +8,12 @@
  * you need to test for the feature in boot_cpu_data.
  */
 
-#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
+#define xchg(ptr, v)						\
+	((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
 
-struct __xchg_dummy { unsigned long a[100]; };
+struct __xchg_dummy {
+	unsigned long a[100];
+};
 #define __xg(x) ((struct __xchg_dummy *)(x))
 
 /*
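For reference, xchg() atomically stores a new value into *ptr and returns the old contents in a single instruction. A minimal usage sketch (the lock_word variable and try_lock() helper are illustrative, not part of this patch):

	static int lock_word;

	static int try_lock(void)
	{
		/* Swap in 1 atomically; the old value says whether we got it. */
		return xchg(&lock_word, 1) == 0;
	}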
@@ -27,11 +30,10 @@ struct __xchg_dummy { unsigned long a[100]; };
  * of the instruction set reference 24319102.pdf. We need
  * the reader side to see the coherent 64bit value.
  */
-static inline void __set_64bit (unsigned long long * ptr,
-		unsigned int low, unsigned int high)
+static inline void __set_64bit(unsigned long long *ptr,
+			       unsigned int low, unsigned int high)
 {
-	__asm__ __volatile__ (
-		"\n1:\t"
+	asm volatile("\n1:\t"
 		"movl (%0), %%eax\n\t"
 		"movl 4(%0), %%edx\n\t"
 		LOCK_PREFIX "cmpxchg8b (%0)\n\t"
@@ -40,58 +42,61 @@ static inline void __set_64bit (unsigned long long * ptr,
 		: "D"(ptr),
 		  "b"(low),
 		  "c"(high)
-		: "ax","dx","memory");
+		: "ax", "dx", "memory");
 }
 
-static inline void __set_64bit_constant (unsigned long long *ptr,
-					 unsigned long long value)
+static inline void __set_64bit_constant(unsigned long long *ptr,
+					unsigned long long value)
 {
-	__set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
+	__set_64bit(ptr, (unsigned int)value, (unsigned int)(value >> 32));
 }
-#define ll_low(x)	*(((unsigned int*)&(x))+0)
-#define ll_high(x)	*(((unsigned int*)&(x))+1)
 
-static inline void __set_64bit_var (unsigned long long *ptr,
-			 unsigned long long value)
+#define ll_low(x)	*(((unsigned int *)&(x)) + 0)
+#define ll_high(x)	*(((unsigned int *)&(x)) + 1)
+
+static inline void __set_64bit_var(unsigned long long *ptr,
+				   unsigned long long value)
 {
-	__set_64bit(ptr,ll_low(value), ll_high(value));
+	__set_64bit(ptr, ll_low(value), ll_high(value));
 }
 
-#define set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit_constant(ptr, value) : \
- __set_64bit_var(ptr, value) )
+#define set_64bit(ptr, value)			\
+	(__builtin_constant_p((value))		\
+	 ? __set_64bit_constant((ptr), (value))	\
+	 : __set_64bit_var((ptr), (value)))
 
-#define _set_64bit(ptr,value) \
-(__builtin_constant_p(value) ? \
- __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
- __set_64bit(ptr, ll_low(value), ll_high(value)) )
+#define _set_64bit(ptr, value)						\
+	(__builtin_constant_p(value)					\
+	 ? __set_64bit(ptr, (unsigned int)(value),			\
+		       (unsigned int)((value) >> 32))			\
+	 : __set_64bit(ptr, ll_low((value)), ll_high((value))))
 
 /*
  * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
  * Note 2: xchg has side effect, so that attribute volatile is necessary,
  *	  but generally the primitive is invalid, *ptr is output argument. --ANK
  */
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
+				   int size)
 {
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("xchgb %b0,%1"
-				:"=q" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
+		asm volatile("xchgb %b0,%1"
+			     : "=q" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
 		break;
 	case 2:
-		__asm__ __volatile__("xchgw %w0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
+		asm volatile("xchgw %w0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
 		break;
 	case 4:
-		__asm__ __volatile__("xchgl %0,%1"
-				:"=r" (x)
-				:"m" (*__xg(ptr)), "0" (x)
-				:"memory");
+		asm volatile("xchgl %0,%1"
+			     : "=r" (x)
+			     : "m" (*__xg(ptr)), "0" (x)
+			     : "memory");
 		break;
 	}
 	return x;
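The asm loop in __set_64bit() above re-reads the current 64-bit value into EDX:EAX and retries cmpxchg8b until no other writer has intervened, which is what lets readers always observe a coherent 64-bit value. A rough C equivalent, expressed with this header's own __cmpxchg64() (an illustrative sketch, not part of the patch):

	static void set_64bit_sketch(unsigned long long *ptr,
				     unsigned long long val)
	{
		unsigned long long old;

		/* Retry until cmpxchg8b sees exactly the value we read. */
		do {
			old = *ptr;
		} while (__cmpxchg64(ptr, old, val) != old);
	}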
@@ -107,13 +112,16 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 #define __HAVE_ARCH_CMPXCHG 1
 #define cmpxchg(ptr, o, n)						\
 	((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))))
+				       (unsigned long)(n),		\
+				       sizeof(*(ptr))))
 #define sync_cmpxchg(ptr, o, n)						\
 	((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))))
+					    (unsigned long)(n),		\
+					    sizeof(*(ptr))))
 #define cmpxchg_local(ptr, o, n)					\
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o),	\
-					(unsigned long)(n), sizeof(*(ptr))))
+					     (unsigned long)(n),	\
+					     sizeof(*(ptr))))
 #endif
 
 #ifdef CONFIG_X86_CMPXCHG64
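The cmpxchg() macro above is typically wrapped in a read-modify-write retry loop. A minimal sketch (the counter variable and inc_sketch() helper are illustrative, not from this patch):

	static int counter;

	static void inc_sketch(void)
	{
		int old;

		/* Classic compare-and-swap loop: retry if another CPU raced us. */
		do {
			old = counter;
		} while (cmpxchg(&counter, old, old + 1) != old);
	}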
@@ -121,7 +129,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
 	((__typeof__(*(ptr)))__cmpxchg64((ptr), (unsigned long long)(o), \
 					 (unsigned long long)(n)))
 #define cmpxchg64_local(ptr, o, n)					\
-	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o),\
+	((__typeof__(*(ptr)))__cmpxchg64_local((ptr), (unsigned long long)(o), \
 					       (unsigned long long)(n)))
 #endif
 
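cmpxchg64() extends the same retry pattern to 64-bit values on a 32-bit kernel, provided the CPU implements cmpxchg8b (hence the CONFIG_X86_CMPXCHG64 guard). An illustrative sketch, assuming a hypothetical stamp variable:

	#ifdef CONFIG_X86_CMPXCHG64
	static unsigned long long stamp;

	/* Move stamp forward monotonically, atomically even on 32-bit. */
	static void update_stamp_sketch(unsigned long long now)
	{
		unsigned long long old;

		do {
			old = stamp;
			if (old >= now)
				return;
		} while (cmpxchg64(&stamp, old, now) != old);
	}
	#endif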
@@ -131,19 +139,19 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
@@ -164,19 +172,19 @@ static inline unsigned long __sync_cmpxchg(volatile void *ptr,
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("lock; cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("lock; cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("lock; cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("lock; cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
@@ -186,24 +194,25 @@ static inline unsigned long __sync_cmpxchg(volatile void *ptr,
 }
 
 static inline unsigned long __cmpxchg_local(volatile void *ptr,
-			unsigned long old, unsigned long new, int size)
+					    unsigned long old,
+					    unsigned long new, int size)
 {
 	unsigned long prev;
 	switch (size) {
 	case 1:
-		__asm__ __volatile__("cmpxchgb %b1,%2"
-				     : "=a"(prev)
-				     : "q"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgb %b1,%2"
+			     : "=a"(prev)
+			     : "q"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 2:
-		__asm__ __volatile__("cmpxchgw %w1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgw %w1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
 		return prev;
 	case 4:
-		__asm__ __volatile__("cmpxchgl %1,%2"
-				     : "=a"(prev)
-				     : "r"(new), "m"(*__xg(ptr)), "0"(old)
-				     : "memory");
+		asm volatile("cmpxchgl %1,%2"
+			     : "=a"(prev)
+			     : "r"(new), "m"(*__xg(ptr)), "0"(old)
+			     : "memory");
@@ -213,10 +222,11 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
 }
 
 static inline unsigned long long __cmpxchg64(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+					     unsigned long long old,
+					     unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
+	asm volatile(LOCK_PREFIX "cmpxchg8b %3"
 		     : "=A"(prev)
 		     : "b"((unsigned long)new),
 		       "c"((unsigned long)(new >> 32)),
@@ -227,10 +237,11 @@ static inline unsigned long long __cmpxchg64(volatile void *ptr,
 }
 
 static inline unsigned long long __cmpxchg64_local(volatile void *ptr,
-			unsigned long long old, unsigned long long new)
+						   unsigned long long old,
+						   unsigned long long new)
 {
 	unsigned long long prev;
-	__asm__ __volatile__("cmpxchg8b %3"
+	asm volatile("cmpxchg8b %3"
 		     : "=A"(prev)
 		     : "b"((unsigned long)new),
 		       "c"((unsigned long)(new >> 32)),
...