Commit d3bf60a6 authored by Joe Perches, committed by Ingo Molnar

include/asm-x86/spinlock.h: checkpatch cleanups - formatting only

Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent ceb7ce10
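
The patch below is formatting only: checkpatch.pl flags the old-style __asm__ __volatile__ spelling and the missing space after the ':' that introduces each inline-asm constraint list, and the cleanup also pulls the first instruction string up onto the asm volatile( line. As an illustration only (not part of this commit), the before/after pattern looks roughly like this; the example_* helpers and the flag variable are invented for this note:

	/* Illustrative sketch only -- 'example_*' and 'flag' are invented
	 * for this note and do not appear in the patch. */
	static inline void example_before(unsigned char *flag)
	{
		__asm__ __volatile__(
			"incb %0"		/* old keyword spelling,   */
			:"+m" (*flag)		/* no space after the ':'  */
			:
			:"memory", "cc");
	}

	static inline void example_after(unsigned char *flag)
	{
		asm volatile("incb %0"		/* asm volatile, instruction on   */
			: "+m" (*flag)		/* the same line, space after ':' */
			:
			: "memory", "cc");
	}
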
--- a/include/asm-x86/spinlock.h
+++ b/include/asm-x86/spinlock.h
@@ -82,7 +82,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
 	short inc = 0x0100;
 
-	__asm__ __volatile__ (
+	asm volatile (
 		LOCK_PREFIX "xaddw %w0, %1\n"
 		"1:\t"
 		"cmpb %h0, %b0\n\t"
@@ -92,9 +92,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock)
+		: "+Q" (inc), "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -104,8 +104,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	short new;
 
-	asm volatile(
-		"movw %2,%w0\n\t"
+	asm volatile("movw %2,%w0\n\t"
 		"cmpb %h0,%b0\n\t"
 		"jne 1f\n\t"
 		"movw %w0,%w1\n\t"
@@ -114,7 +113,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 		"1:"
 		"sete %b1\n\t"
 		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=Q" (new), "+m" (lock->slock)
+		: "=&a" (tmp), "=Q" (new), "+m" (lock->slock)
 		:
 		: "memory", "cc");
 
@@ -123,11 +122,10 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incb %0"
-		:"+m" (lock->slock)
+	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
+		: "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #else
 static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
@@ -149,8 +147,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 	int inc = 0x00010000;
 	int tmp;
 
-	__asm__ __volatile__ (
-		"lock ; xaddl %0, %1\n"
+	asm volatile("lock ; xaddl %0, %1\n"
 		"movzwl %w0, %2\n\t"
 		"shrl $16, %0\n\t"
 		"1:\t"
@@ -161,9 +158,9 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
 		/* don't need lfence here, because loads are in-order */
 		"jmp 1b\n"
 		"2:"
-		:"+Q" (inc), "+m" (lock->slock), "=r" (tmp)
+		: "+Q" (inc), "+m" (lock->slock), "=r" (tmp)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)
@@ -173,8 +170,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 	int tmp;
 	int new;
 
-	asm volatile(
-		"movl %2,%0\n\t"
+	asm volatile("movl %2,%0\n\t"
 		"movl %0,%1\n\t"
 		"roll $16, %0\n\t"
 		"cmpl %0,%1\n\t"
@@ -184,7 +180,7 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 		"1:"
 		"sete %b1\n\t"
 		"movzbl %b1,%0\n\t"
-		:"=&a" (tmp), "=r" (new), "+m" (lock->slock)
+		: "=&a" (tmp), "=r" (new), "+m" (lock->slock)
 		:
 		: "memory", "cc");
 
@@ -193,11 +189,10 @@ static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
-	__asm__ __volatile__(
-		UNLOCK_LOCK_PREFIX "incw %0"
-		:"+m" (lock->slock)
+	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
+		: "+m" (lock->slock)
 		:
-		:"memory", "cc");
+		: "memory", "cc");
 }
 
 #endif
 