Commit 5d0b7235 authored by Linus Torvalds, committed by H. Peter Anvin

x86: clean up rwsem type system

The fast version of the rwsems (the code that uses xadd) has
traditionally only worked on x86-32, and as a result it mixes different
kinds of types wildly - they just all happen to be 32-bit.  We have
"long", we have "__s32", and we have "int".

To make it work on x86-64, the types suddenly matter a lot more.  It can
be either a 32-bit or 64-bit signed type, and both work (with the caveat
that a 32-bit counter leaves only 15 effective bits for the active
count, so it's limited to 32767 users).  But whatever type you
choose, it needs to be used consistently.
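
To make the 32767 caveat concrete, here is a small stand-alone demo (user-space
C, not kernel code) showing how the 32-bit counter is partitioned.  The bias
constants mirror the ones used by this header; the main() scaffolding and the
printf output are invented purely for illustration.

#include <stdio.h>

/* Counter layout constants, mirroring arch/x86/include/asm/rwsem.h
 * (32-bit configuration). */
#define RWSEM_UNLOCKED_VALUE	0x00000000
#define RWSEM_ACTIVE_BIAS	0x00000001
#define RWSEM_ACTIVE_MASK	0x0000ffff
#define RWSEM_WAITING_BIAS	(-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)

int main(void)
{
	signed int count = RWSEM_UNLOCKED_VALUE;

	/* Each reader adds 1 to the low 16-bit "active" field. */
	count += RWSEM_ACTIVE_READ_BIAS;
	printf("one reader: %#010x (active = %d)\n",
	       (unsigned int)count, count & RWSEM_ACTIVE_MASK);

	/* A writer adds the waiting bias plus one active unit, driving
	 * the whole counter negative; the lock paths test that sign.
	 * The active field is 16 bits wide and the sign handling costs
	 * one of them, so at most 2^15 - 1 = 32767 holders fit. */
	count = RWSEM_UNLOCKED_VALUE + RWSEM_ACTIVE_WRITE_BIAS;
	printf("one writer: %#010x (negative: %s)\n",
	       (unsigned int)count, count < 0 ? "yes" : "no");
	return 0;
}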

This makes a new 'rwsem_count_t', which is a 32-bit signed type.  For a
64-bit type, you'd also need to update the BIAS values.
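
For reference, a hypothetical 64-bit variant might look like the sketch below.
It is not part of this patch; the typedef and the widened BIAS values are
illustrative, keeping the same layout with a 32-bit active field in the low
half of a 64-bit counter.

/* Hypothetical 64-bit layout: 32-bit active field in the low half. */
typedef signed long rwsem_count_t;	/* 64-bit on x86-64 */

#define RWSEM_ACTIVE_BIAS	0x00000001L
#define RWSEM_ACTIVE_MASK	0xffffffffL
#define RWSEM_WAITING_BIAS	(-0x100000000L)
#define RWSEM_ACTIVE_READ_BIAS	RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
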
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
LKML-Reference: <alpine.LFD.2.00.1001121755220.17145@localhost.localdomain>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent 3bef4447
--- a/arch/x86/include/asm/rwsem.h
+++ b/arch/x86/include/asm/rwsem.h
@@ -55,6 +55,9 @@ extern asmregparm struct rw_semaphore *
 
 /*
  * the semaphore definition
+ *
+ * The bias values and the counter type needs to be extended to 64 bits
+ * if we want to have more than 32767 potential readers/writers
  */
 #define RWSEM_UNLOCKED_VALUE		0x00000000
 #define RWSEM_ACTIVE_BIAS		0x00000001
@@ -64,8 +67,10 @@ extern asmregparm struct rw_semaphore *
 #define RWSEM_ACTIVE_READ_BIAS		RWSEM_ACTIVE_BIAS
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
+typedef signed int rwsem_count_t;
+
 struct rw_semaphore {
-	signed long		count;
+	rwsem_count_t		count;
 	spinlock_t		wait_lock;
 	struct list_head	wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -121,7 +126,7 @@ static inline void __down_read(struct rw_semaphore *sem)
  */
 static inline int __down_read_trylock(struct rw_semaphore *sem)
 {
-	__s32 result, tmp;
+	rwsem_count_t result, tmp;
 	asm volatile("# beginning __down_read_trylock\n\t"
 		     "  mov          %0,%1\n\t"
 		     "1:\n\t"
@@ -143,7 +148,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
  */
 static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
-	int tmp;
+	rwsem_count_t tmp;
 
 	tmp = RWSEM_ACTIVE_WRITE_BIAS;
 	asm volatile("# beginning down_write\n\t"
@@ -170,9 +175,9 @@ static inline void __down_write(struct rw_semaphore *sem)
  */
 static inline int __down_write_trylock(struct rw_semaphore *sem)
 {
-	signed long ret = cmpxchg(&sem->count,
-				  RWSEM_UNLOCKED_VALUE,
-				  RWSEM_ACTIVE_WRITE_BIAS);
+	rwsem_count_t ret = cmpxchg(&sem->count,
+				    RWSEM_UNLOCKED_VALUE,
+				    RWSEM_ACTIVE_WRITE_BIAS);
 	if (ret == RWSEM_UNLOCKED_VALUE)
 		return 1;
 	return 0;
@@ -183,7 +188,7 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
  */
 static inline void __up_read(struct rw_semaphore *sem)
 {
-	__s32 tmp = -RWSEM_ACTIVE_READ_BIAS;
+	rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
 	asm volatile("# beginning __up_read\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* subtracts 1, returns the old value */
@@ -201,7 +206,7 @@ static inline void __up_read(struct rw_semaphore *sem)
  */
 static inline void __up_write(struct rw_semaphore *sem)
 {
-	unsigned long tmp;
+	rwsem_count_t tmp;
 	asm volatile("# beginning __up_write\n\t"
 		     LOCK_PREFIX "  xadd      %1,(%2)\n\t"
 		     /* tries to transition
@@ -245,9 +250,9 @@ static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
 /*
  * implement exchange and add functionality
  */
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
+static inline rwsem_count_t rwsem_atomic_update(int delta, struct rw_semaphore *sem)
 {
-	int tmp = delta;
+	rwsem_count_t tmp = delta;
 
 	asm volatile(LOCK_PREFIX "xadd %0,%1"
 		     : "+r" (tmp), "+m" (sem->count)
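
As an aside, the xadd/cmpxchg fast paths above may be easier to follow as
plain C.  The following is a minimal user-space model built on GCC's __sync
builtins instead of inline assembly; the model_* names and the main()
scaffolding are invented for illustration and are not kernel code.

#include <stdio.h>

/* Illustrative stand-ins for the kernel types and constants. */
typedef signed int rwsem_count_t;
#define RWSEM_ACTIVE_READ_BIAS	1

struct model_rwsem {
	rwsem_count_t count;	/* 0 = unlocked */
};

/* Models rwsem_atomic_update(): LOCK XADD returns the *old* value,
 * so the new value is reconstructed by adding delta back. */
static inline rwsem_count_t
model_atomic_update(int delta, struct model_rwsem *sem)
{
	return __sync_fetch_and_add(&sem->count, delta) + delta;
}

/* Models __down_read_trylock(): speculatively add the read bias and
 * commit with cmpxchg, retrying while the count changes under us;
 * give up once the biased result would be non-positive (writer). */
static inline int model_down_read_trylock(struct model_rwsem *sem)
{
	rwsem_count_t old = sem->count;

	for (;;) {
		rwsem_count_t new = old + RWSEM_ACTIVE_READ_BIAS;
		rwsem_count_t seen;

		if (new <= 0)
			return 0;	/* writer active or waiting */
		seen = __sync_val_compare_and_swap(&sem->count, old, new);
		if (seen == old)
			return 1;	/* read lock acquired */
		old = seen;		/* raced; retry with fresh value */
	}
}

int main(void)
{
	struct model_rwsem sem = { 0 };

	printf("trylock: %d, count: %d\n",
	       model_down_read_trylock(&sem), sem.count);
	model_atomic_update(-RWSEM_ACTIVE_READ_BIAS, &sem);	/* up_read */
	printf("count after release: %d\n", sem.count);
	return 0;
}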