Commit 9b3ed4c1 authored by David Mosberger

ia64: Make ia64_fetch_and_add() simpler to optimize so lib/rwsem.c
can be optimized properly.
parent 364aa238
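The rationale, in brief: lib/rwsem.c hands small constant deltas down to ia64_fetch_and_add(), and GCC 3.1's lowering of a switch () apparently kept it from proving that only one arm is live, so the dispatch below is rewritten as a plain if/else chain of constant comparisons, which folds cleanly. Here is a minimal, hypothetical C sketch of that dispatch pattern, with the fetchadd emulated in software; fetchadd_emulated and FETCH_AND_ADD are illustrative names, not kernel identifiers:

#include <stdio.h>

static long fetchadd_emulated(volatile long *v, long n)
{
	long old = *v;		/* software stand-in for a hardware fetchadd */
	*v = old + n;
	return old;		/* fetchadd yields the pre-add value */
}

#define FETCH_AND_ADD(v, i)						\
({									\
	long _old;							\
	if ((i) == 1)							\
		_old = fetchadd_emulated((v), 1);			\
	else if ((i) == 4)						\
		_old = fetchadd_emulated((v), 4);			\
	else								\
		_old = fetchadd_emulated((v), (i));			\
	_old + (i);		/* hand back the new value */		\
})

int main(void)
{
	volatile long count = 0;

	/* With a literal 1, every comparison folds at compile time and
	 * only the first branch survives dead-code elimination. */
	printf("%ld\n", FETCH_AND_ADD(&count, 1));	/* prints 1 */
	return 0;
}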
@@ -39,19 +39,29 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 ({									\
 	__u64 _tmp;							\
 	volatile __typeof__(*(v)) *_v = (v);				\
-	switch (i) {							\
-	      case -16: IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v))); break; \
-	      case -8:  IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v))); break; \
-	      case -4:  IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v))); break; \
-	      case -1:  IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v))); break; \
-	      case 1:   IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v))); break; \
-	      case 4:   IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v))); break; \
-	      case 8:   IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v))); break; \
-	      case 16:  IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v))); break; \
-	      default:							\
+	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
+	if ((i) == -16)							\
+		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));		\
+	else if ((i) == -8)						\
+		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));		\
+	else if ((i) == -4)						\
+		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));		\
+	else if ((i) == -2)						\
+		IA64_FETCHADD(_tmp, _v, -2, sizeof(*(v)));		\
+	else if ((i) == -1)						\
+		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));		\
+	else if ((i) == 1)						\
+		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));		\
+	else if ((i) == 2)						\
+		IA64_FETCHADD(_tmp, _v, 2, sizeof(*(v)));		\
+	else if ((i) == 4)						\
+		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));		\
+	else if ((i) == 8)						\
+		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));		\
+	else if ((i) == 16)						\
+		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));		\
+	else								\
 		_tmp = __bad_increment_for_ia64_fetch_and_add();	\
-		break;							\
-	}								\
 	(__typeof__(*(v))) (_tmp + (i));	/* return new value */	\
 })
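Note the return convention: the macro hands back _tmp + (i), the post-add value, even though the underlying fetchadd yields the pre-add value. A hedged equivalence sketch using the generic GCC builtin rather than anything ia64-specific:

#include <assert.h>

int main(void)
{
	long v = 10;

	/* __sync_add_and_fetch() is atomic and returns the updated
	 * value, matching ia64_fetch_and_add()'s "_tmp + (i)" result. */
	long new_val = __sync_add_and_fetch(&v, 4);
	assert(new_val == 14 && v == 14);
	return 0;
}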
@@ -17,10 +17,9 @@
  * waiting (in which case it goes to sleep).
  */
-#ifndef _IA64_RWSEM_H
-#define _IA64_RWSEM_H
+#ifndef _ASM_IA64_RWSEM_H
+#define _ASM_IA64_RWSEM_H
 #ifdef __KERNEL__
 #include <linux/list.h>
 #include <linux/spinlock.h>
@@ -65,7 +64,8 @@ extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);
-static inline void init_rwsem(struct rw_semaphore *sem)
+static inline void
+init_rwsem (struct rw_semaphore *sem)
 {
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
@@ -78,7 +78,8 @@ static inline void init_rwsem(struct rw_semaphore *sem)
 /*
  * lock for reading
  */
-static inline void __down_read(struct rw_semaphore *sem)
+static inline void
+__down_read (struct rw_semaphore *sem)
 {
 	int result;
 	__asm__ __volatile__ ("fetchadd4.acq %0=[%1],1" :
@@ -90,7 +91,8 @@ static inline void __down_read(struct rw_semaphore *sem)
 /*
  * lock for writing
  */
-static inline void __down_write(struct rw_semaphore *sem)
+static inline void
+__down_write (struct rw_semaphore *sem)
 {
 	int old, new;
@@ -106,7 +108,8 @@ static inline void __down_write(struct rw_semaphore *sem)
 /*
  * unlock after reading
  */
-static inline void __up_read(struct rw_semaphore *sem)
+static inline void
+__up_read (struct rw_semaphore *sem)
 {
 	int result;
 	__asm__ __volatile__ ("fetchadd4.rel %0=[%1],-1" :
@@ -118,7 +121,8 @@ static inline void __up_read(struct rw_semaphore *sem)
 /*
  * unlock after writing
  */
-static inline void __up_write(struct rw_semaphore *sem)
+static inline void
+__up_write (struct rw_semaphore *sem)
 {
 	int old, new;
@@ -134,7 +138,8 @@ static inline void __up_write(struct rw_semaphore *sem)
 /*
  * trylock for reading -- returns 1 if successful, 0 if contention
  */
-static inline int __down_read_trylock(struct rw_semaphore *sem)
+static inline int
+__down_read_trylock (struct rw_semaphore *sem)
 {
 	int tmp;
 	while ((tmp = sem->count) >= 0) {
@@ -148,7 +153,8 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
-static inline int __down_write_trylock(struct rw_semaphore *sem)
+static inline int
+__down_write_trylock (struct rw_semaphore *sem)
 {
 	int tmp = cmpxchg_acq(&sem->count, RWSEM_UNLOCKED_VALUE,
 			      RWSEM_ACTIVE_WRITE_BIAS);
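For reference, __down_write_trylock() above is the classic compare-and-swap trylock: succeed only if the count is still in the unlocked state. A hedged portable sketch of the same shape, using the generic GCC builtin instead of ia64's cmpxchg_acq, with stand-in constants rather than the kernel's RWSEM_* values:

#include <stdio.h>

#define UNLOCKED	0
#define WRITE_BIAS	(-65535)	/* illustrative stand-in only */

static int down_write_trylock_sketch(volatile int *count)
{
	/* Atomically: if *count == UNLOCKED, store WRITE_BIAS; either
	 * way, return the value that was seen (cmpxchg semantics). */
	int old = __sync_val_compare_and_swap(count, UNLOCKED, WRITE_BIAS);

	return old == UNLOCKED;		/* 1 on success, 0 on contention */
}

int main(void)
{
	volatile int count = UNLOCKED;

	printf("%d\n", down_write_trylock_sketch(&count));	/* 1: acquired */
	printf("%d\n", down_write_trylock_sketch(&count));	/* 0: contended */
	return 0;
}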
@@ -158,7 +164,8 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
 /*
  * downgrade write lock to read lock
  */
-static inline void __downgrade_write(struct rw_semaphore *sem)
+static inline void
+__downgrade_write (struct rw_semaphore *sem)
 {
 	int old, new;
@@ -172,17 +179,10 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
 }
 /*
- * implement atomic add functionality
+ * Implement atomic add functionality. These used to be "inline" functions, but GCC v3.1
+ * doesn't quite optimize this stuff right and ends up with bad calls to fetchandadd.
  */
-static inline void rwsem_atomic_add(int delta, struct rw_semaphore *sem)
-{
-	atomic_add(delta, (atomic_t *)(&sem->count));
-}
-static inline int rwsem_atomic_update(int delta, struct rw_semaphore *sem)
-{
-	return atomic_add_return(delta, (atomic_t *)(&sem->count));
-}
+#define rwsem_atomic_add(delta, sem)	atomic_add(delta, (atomic_t *)(&(sem)->count))
+#define rwsem_atomic_update(delta, sem)	atomic_add_return(delta, (atomic_t *)(&(sem)->count))
 #endif /* __KERNEL__ */
-#endif /* _IA64_RWSEM_H */
+#endif /* _ASM_IA64_RWSEM_H */
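The last hunk is the heart of the fix: rwsem_atomic_add() and rwsem_atomic_update() become macros so that a constant delta reaches ia64_fetch_and_add() as a literal. Inside an inline function the delta is a parameter, and per the comment above GCC 3.1 lost track of its constness, so the if/else chain could not be folded. A hedged illustration of the difference (modern compilers propagate constants through inlines, so this only shows the shape of the problem; the names are hypothetical):

#include <stdio.h>

#define ADD_MACRO(p, n)	(*(p) += (n))	/* (n) substitutes as a literal */

static inline void add_inline(int *p, int n)
{
	*p += n;	/* n is a parameter; constness is visible only after inlining */
}

int main(void)
{
	int x = 0;

	ADD_MACRO(&x, 2);	/* expands to x += 2: constant always visible */
	add_inline(&x, 2);	/* constant folds only if the call is inlined */
	printf("%d\n", x);	/* prints 4 */
	return 0;
}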