Commit 59c33fa7 authored by Linus Torvalds, committed by H. Peter Anvin

x86-32: clean up rwsem inline asm statements

This makes gcc use the right register names and instruction operand sizes
automatically for the rwsem inline asm statements.

So instead of using "(%%eax)" to specify the memory address that is the
semaphore, we use "(%1)" or similar. And instead of forcing the operation
to always be 32-bit, we use "%z0", taking the size from the actual
semaphore data structure itself.
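
[ Illustration, not part of the patch: "%z0" expands to the operand-size
  suffix implied by the C type of operand 0, and "(%1)" lets gcc name the
  address register itself. A minimal stand-alone sketch of the same idiom,
  using a hypothetical counter_inc() helper:

	#include <stdio.h>

	/* gcc expands "inc%z0" to "incl" when *counter is 32 bits wide
	 * and to "incq" when it is 64 bits wide; "(%1)" becomes whatever
	 * register gcc picked for the "r" input, e.g. "(%rax)". */
	static inline void counter_inc(long *counter)
	{
		asm volatile("lock; inc%z0 (%1)"
			     : "+m" (*counter)
			     : "r" (counter));
	}

	int main(void)
	{
		long count = 41;
		counter_inc(&count);
		printf("%ld\n", count);	/* prints 42 */
		return 0;
	}
]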

This doesn't actually matter on x86-32, but if we want to use the same
inline asm for x86-64, we'll need to have the compiler generate the proper
64-bit names for the registers (%rax instead of %eax), and if we want to
use a 64-bit counter too (in order to avoid the 15-bit limit on the
write counter that limits concurrent users to 32767 threads), we'll need
to be able to generate instructions with "q" accesses rather than "l".
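
[ Sketch of the arithmetic behind that 32767 figure, using the bias
  constants from the x86-32 rwsem header:

	#define RWSEM_ACTIVE_BIAS	0x00000001    /* one active reader/writer */
	#define RWSEM_ACTIVE_MASK	0x0000ffff    /* low 16 bits: active count */
	#define RWSEM_WAITING_BIAS	(-0x00010000) /* waiters present */
	#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
	#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

	/* The active count is a signed 16-bit field, so it tops out at
	 * 2^15 - 1 = 32767 concurrent users before overflowing into the
	 * waiter bits; with a 64-bit count the limit disappears. */
]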

Since this header currently isn't enabled on x86-64, none of that matters,
but we do want to use the xadd version of the semaphores rather than have
to take spinlocks to do a rwsem. The mm->mmap_sem can be heavily contended
when you have lots of threads all taking page faults, and the fallback
rwsem code that uses a spinlock performs abysmally in that case.

[ hpa: modified the patch to skip size suffixes entirely when they are
  redundant due to register operands. ]
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <alpine.LFD.2.00.1001121613560.17145@localhost.localdomain>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 5abbbbf0
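
[ Illustration of hpa's note above, not part of the patch: when one operand
  is a register, the assembler derives the operation size from the register
  name, so no suffix (and no %z) is needed. A stand-alone fetch_and_add()
  sketch in the same style as the down_write/up_read fast paths:

	#include <stdio.h>

	/* "xadd %1,(%2)" needs no size suffix: gcc substitutes a 32-bit
	 * register for the int operand (say %edx), which already tells
	 * the assembler this is a 32-bit xadd. */
	static int fetch_and_add(int *count, int delta)
	{
		asm volatile("lock; xadd %1,(%2)"
			     : "+m" (*count), "+r" (delta)
			     : "r" (count)
			     : "memory");
		return delta;	/* xadd wrote the old value back here */
	}

	int main(void)
	{
		int count = 10;
		int old = fetch_and_add(&count, 5);
		printf("old=%d new=%d\n", old, count);	/* old=10 new=15 */
		return 0;
	}
]
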
arch/x86/include/asm/rwsem.h

@@ -105,7 +105,7 @@ do {								\
 static inline void __down_read(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning down_read\n\t"
-		     LOCK_PREFIX " incl (%%eax)\n\t"
+		     LOCK_PREFIX " inc%z0 (%1)\n\t"
 		     /* adds 0x00000001, returns the old value */
 		     " jns 1f\n"
 		     " call call_rwsem_down_read_failed\n"
@@ -123,12 +123,12 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
 	__s32 result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
-		     " movl %0,%1\n\t"
+		     " mov %0,%1\n\t"
 		     "1:\n\t"
-		     " movl %1,%2\n\t"
-		     " addl %3,%2\n\t"
+		     " mov %1,%2\n\t"
+		     " add %3,%2\n\t"
 		     " jle 2f\n\t"
-		     LOCK_PREFIX " cmpxchgl %2,%0\n\t"
+		     LOCK_PREFIX " cmpxchg %2,%0\n\t"
 		     " jnz 1b\n\t"
 		     "2:\n\t"
 		     "# ending __down_read_trylock\n\t"
@@ -147,9 +147,9 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
-		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtract 0x0000ffff, returns the old value */
-		     " testl %%edx,%%edx\n\t"
+		     " test %1,%1\n\t"
 		     /* was the count 0 before? */
 		     " jz 1f\n"
 		     " call call_rwsem_down_write_failed\n"
@@ -185,7 +185,7 @@ static inline void __up_read(struct rw_semaphore *sem)
 {
 	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
-		     LOCK_PREFIX " xadd %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
 		     " jns 1f\n\t"
 		     " call call_rwsem_wake\n"
@@ -201,18 +201,18 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
+	unsigned long tmp;
 	asm volatile("# beginning __up_write\n\t"
-		     " movl %2,%%edx\n\t"
-		     LOCK_PREFIX " xaddl %%edx,(%%eax)\n\t"
+		     LOCK_PREFIX " xadd %1,(%2)\n\t"
 		     /* tries to transition
			0xffff0001 -> 0x00000000 */
 		     " jz 1f\n"
 		     " call call_rwsem_wake\n"
 		     "1:\n\t"
 		     "# ending __up_write\n"
-		     : "+m" (sem->count)
-		     : "a" (sem), "i" (-RWSEM_ACTIVE_WRITE_BIAS)
-		     : "memory", "cc", "edx");
+		     : "+m" (sem->count), "=d" (tmp)
+		     : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS)
+		     : "memory", "cc");
 }
 
 /*
@@ -221,7 +221,7 @@ static inline void __up_write(struct rw_semaphore *sem)
 static inline void __downgrade_write(struct rw_semaphore *sem)
 {
 	asm volatile("# beginning __downgrade_write\n\t"
-		     LOCK_PREFIX " addl %2,(%%eax)\n\t"
+		     LOCK_PREFIX " add%z0 %2,(%1)\n\t"
		     /* transitions 0xZZZZ0001 -> 0xYYYY0001 */
 		     " jns 1f\n\t"
 		     " call call_rwsem_downgrade_wake\n"
@@ -237,7 +237,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
  */
 static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 {
-	asm volatile(LOCK_PREFIX "addl %1,%0"
+	asm volatile(LOCK_PREFIX "add%z0 %1,%0"
 		     : "+m" (sem->count)
 		     : "ir" (delta));
 }
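
[ One detail in the __up_write hunk worth spelling out, again not part of
  the patch: the hard "edx" clobber is gone because tmp is now a real
  output pinned to %edx via "=d", and the matching constraint "1" asks gcc
  to preload that same register with -RWSEM_ACTIVE_WRITE_BIAS, replacing
  the old movl-into-%edx. A stand-alone sketch of the matching-constraint
  pattern, with hypothetical names:

	#include <stdio.h>

	/* Operand 1 ("=d") is an output in %edx; the "1" input tells gcc
	 * to initialize that very register with bias before the asm runs,
	 * so no separate mov and no "edx" clobber entry are needed. */
	static int xadd_bias(int *count, int bias)
	{
		int tmp;
		asm volatile("lock; xadd %1,(%2)"
			     : "+m" (*count), "=d" (tmp)
			     : "a" (count), "1" (bias)
			     : "memory");
		return tmp;	/* old value of *count */
	}

	int main(void)
	{
		int count = -0xffff;		/* 0xffff0001: write-locked */
		int old = xadd_bias(&count, 0xffff); /* -RWSEM_ACTIVE_WRITE_BIAS */
		printf("old=%#x new=%#x\n", old, count); /* old=0xffff0001 new=0 */
		return 0;
	}
]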