Commit 9b3ed4c1 authored by David Mosberger

ia64: Make ia64_fetch_and_add() simpler to optimize, so that lib/rwsem.c
can be optimized properly.
parent 364aa238
...@@ -39,19 +39,29 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -39,19 +39,29 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
({ \ ({ \
__u64 _tmp; \ __u64 _tmp; \
volatile __typeof__(*(v)) *_v = (v); \ volatile __typeof__(*(v)) *_v = (v); \
switch (i) { \ /* Can't use a switch () here: gcc isn't always smart enough for that... */ \
case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \ if ((i) == -16) \
case -8: IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); break; \ IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); \
case -4: IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); break; \ else if ((i) == -8) \
case -1: IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); break; \ IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); \
case 1: IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); break; \ else if ((i) == -4) \
case 4: IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); break; \ IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); \
case 8: IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); break; \ else if ((i) == -2) \
case 16: IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); break; \ IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v))); \
default: \ else if ((i) == -1) \
IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); \
else if ((i) == 1) \
IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); \
else if ((i) == 2) \
IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v))); \
else if ((i) == 4) \
IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); \
else if ((i) == 8) \
IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); \
else if ((i) == 16) \
IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); \
else \
_tmp = __bad_increment_for_ia64_fetch_and_add(); \ _tmp = __bad_increment_for_ia64_fetch_and_add(); \
break; \
} \
(__typeof__(*(v))) (_tmp + (i)); /* return new value */ \ (__typeof__(*(v))) (_tmp + (i)); /* return new value */ \
}) })
......
...@@ -17,10 +17,9 @@ ...@@ -17,10 +17,9 @@
* waiting (in which case it goes to sleep). * waiting (in which case it goes to sleep).
*/ */
#ifndef _IA64_RWSEM_H #ifndef _ASM_IA64_RWSEM_H
#define _IA64_RWSEM_H #define _ASM_IA64_RWSEM_H
#ifdef __KERNEL__
#include <linux/list.h> #include <linux/list.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
...@@ -65,7 +64,8 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); ...@@ -65,7 +64,8 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
static inline void init_rwsem(struct rw_semaphore *sem) static inline void
init_rwsem (struct rw_semaphore *sem)
{ {
sem->count = RWSEM_UNLOCKED_VALUE; sem->count = RWSEM_UNLOCKED_VALUE;
spin_lock_init(&sem->wait_lock); spin_lock_init(&sem->wait_lock);
...@@ -78,7 +78,8 @@ static inline void init_rwsem(struct rw_semaphore *sem) ...@@ -78,7 +78,8 @@ static inline void init_rwsem(struct rw_semaphore *sem)
/* /*
* lock for reading * lock for reading
*/ */
static inline void __down_read(struct rw_semaphore *sem) static inline void
__down_read (struct rw_semaphore *sem)
{ {
int result; int result;
__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" : __asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
...@@ -90,7 +91,8 @@ static inline void __down_read(struct rw_semaphore *sem) ...@@ -90,7 +91,8 @@ static inline void __down_read(struct rw_semaphore *sem)
/* /*
* lock for writing * lock for writing
*/ */
static inline void __down_write(struct rw_semaphore *sem) static inline void
__down_write (struct rw_semaphore *sem)
{ {
int old, new; int old, new;
...@@ -106,7 +108,8 @@ static inline void __down_write(struct rw_semaphore *sem) ...@@ -106,7 +108,8 @@ static inline void __down_write(struct rw_semaphore *sem)
/* /*
* unlock after reading * unlock after reading
*/ */
static inline void __up_read(struct rw_semaphore *sem) static inline void
__up_read (struct rw_semaphore *sem)
{ {
int result; int result;
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" : __asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
...@@ -118,7 +121,8 @@ static inline void __up_read(struct rw_semaphore *sem) ...@@ -118,7 +121,8 @@ static inline void __up_read(struct rw_semaphore *sem)
/* /*
* unlock after writing * unlock after writing
*/ */
static inline void __up_write(struct rw_semaphore *sem) static inline void
__up_write (struct rw_semaphore *sem)
{ {
int old, new; int old, new;
...@@ -134,7 +138,8 @@ static inline void __up_write(struct rw_semaphore *sem) ...@@ -134,7 +138,8 @@ static inline void __up_write(struct rw_semaphore *sem)
/* /*
* trylock for reading -- returns 1 if successful, 0 if contention * trylock for reading -- returns 1 if successful, 0 if contention
*/ */
static inline int __down_read_trylock(struct rw_semaphore *sem) static inline int
__down_read_trylock (struct rw_semaphore *sem)
{ {
int tmp; int tmp;
while ((tmp = sem->count) >= 0) { while ((tmp = sem->count) >= 0) {
...@@ -148,7 +153,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem) ...@@ -148,7 +153,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
/* /*
* trylock for writing -- returns 1 if successful, 0 if contention * trylock for writing -- returns 1 if successful, 0 if contention
*/ */
static inline int __down_write_trylock(struct rw_semaphore *sem) static inline int
__down_write_trylock (struct rw_semaphore *sem)
{ {
int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE, int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
RWSEM_ACTIVE_WRITE_BIAS); RWSEM_ACTIVE_WRITE_BIAS);
...@@ -158,7 +164,8 @@ static inline int __down_write_trylock(struct rw_semaphore *sem) ...@@ -158,7 +164,8 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
/* /*
* downgrade write lock to read lock * downgrade write lock to read lock
*/ */
static inline void __downgrade_write(struct rw_semaphore *sem) static inline void
__downgrade_write (struct rw_semaphore *sem)
{ {
int old, new; int old, new;
...@@ -172,17 +179,10 @@ static inline void __downgrade_write(struct rw_semaphore *sem) ...@@ -172,17 +179,10 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
} }
/* /*
* implement atomic add functionality * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
* doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
*/ */
static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem) #define rwsem_atomic_add(delta, sem) atomic_add(delta, (atomic_t *)(&(sem)->count))
{ #define rwsem_atomic_update(delta, sem) atomic_add_return(delta, (atomic_t *)(&(sem)->count))
atomic_add(delta, (atomic_t *)(&sem->count));
}
static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
{
return atomic_add_return(delta, (atomic_t *)(&sem->count));
}
#endif /* __KERNEL__ */ #endif /* _ASM_IA64_RWSEM_H */
#endif /* _IA64_RWSEM_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment