Commit 8a340e4e authored by David Mosberger's avatar David Mosberger

ia64: Fix _raw_read_lock() to not switch text sections. Tidy it up with the

	help of ia64_fetchadd() macro.  Ditto for _raw_read_unlock().
parent e3e682b8
...@@ -17,16 +17,16 @@ ...@@ -17,16 +17,16 @@
extern unsigned long __bad_size_for_ia64_fetch_and_add (void); extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
#define IA64_FETCHADD(tmp,v,n,sz) \ #define IA64_FETCHADD(tmp,v,n,sz,sem) \
({ \ ({ \
switch (sz) { \ switch (sz) { \
case 4: \ case 4: \
__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2" \ __asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
case 8: \ case 8: \
__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2" \ __asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2" \
: "=r"(tmp) : "r"(v), "i"(n) : "memory"); \ : "=r"(tmp) : "r"(v), "i"(n) : "memory"); \
break; \ break; \
\ \
...@@ -35,32 +35,34 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void); ...@@ -35,32 +35,34 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
} \ } \
}) })
/*
 * ia64_fetchadd(i,v,sem): atomically add I to *V using the fetchadd
 * instruction with ordering semantics SEM ("acq" or "rel") and return
 * the OLD value of *V.  I must be one of the immediates the fetchadd
 * instruction accepts (+/-1,4,8,16); anything else triggers a link
 * error via __bad_increment_for_ia64_fetch_and_add().
 */
#define ia64_fetchadd(i,v,sem)						\
({									\
	__u64 _old;							\
	volatile __typeof__(*(v)) *_p = (v);				\
	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
	if ((i) == -16)							\
		IA64_FETCHADD(_old, _p, -16, sizeof(*(v)), sem);	\
	else if ((i) == -8)						\
		IA64_FETCHADD(_old, _p, -8, sizeof(*(v)), sem);		\
	else if ((i) == -4)						\
		IA64_FETCHADD(_old, _p, -4, sizeof(*(v)), sem);		\
	else if ((i) == -1)						\
		IA64_FETCHADD(_old, _p, -1, sizeof(*(v)), sem);		\
	else if ((i) == 1)						\
		IA64_FETCHADD(_old, _p, 1, sizeof(*(v)), sem);		\
	else if ((i) == 4)						\
		IA64_FETCHADD(_old, _p, 4, sizeof(*(v)), sem);		\
	else if ((i) == 8)						\
		IA64_FETCHADD(_old, _p, 8, sizeof(*(v)), sem);		\
	else if ((i) == 16)						\
		IA64_FETCHADD(_old, _p, 16, sizeof(*(v)), sem);		\
	else								\
		_old = __bad_increment_for_ia64_fetch_and_add();	\
	(__typeof__(*(v))) (_old);	/* return old value */		\
})

/* Like ia64_fetchadd() with release semantics, but returns the NEW value. */
#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, "rel") + (i))
/* /*
* This function doesn't exist, so you'll get a linker error if * This function doesn't exist, so you'll get a linker error if
* something tries to do an invalid xchg(). * something tries to do an invalid xchg().
......
...@@ -72,8 +72,8 @@ do { \ ...@@ -72,8 +72,8 @@ do { \
/*
 * Busy-wait until the spinlock X is released.  barrier() forces the
 * compiler to re-read x->lock on every iteration.
 */
#define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
typedef struct {
	/*
	 * The lock is a single 32-bit word: readers atomically
	 * increment/decrement the low 31 bits, and the sign bit
	 * (write_lock) is set while a writer holds the lock, so a
	 * negative value tells readers to back off (see
	 * _raw_read_lock()).
	 */
	volatile int read_counter : 31;	/* number of active readers */
	volatile int write_lock : 1;	/* writer present (sign bit) */
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
...@@ -82,33 +82,21 @@ typedef struct { ...@@ -82,33 +82,21 @@ typedef struct {
/*
 * Acquire RW for reading.  Optimistically bump the reader count with
 * acquire semantics; if the result is negative a writer holds the lock
 * (sign bit set), so undo the increment and spin on plain loads until
 * the word goes non-negative, then retry.  Spinning on a load rather
 * than a fetchadd keeps the cache line shared while waiting.
 */
#define _raw_read_lock(rw)							\
do {										\
	rwlock_t *__rwp = (rw);							\
										\
	while (unlikely(ia64_fetchadd(1, (int *) __rwp, "acq") < 0)) {		\
		/* A writer got in first: back out our reader count... */	\
		ia64_fetchadd(-1, (int *) __rwp, "rel");			\
		/* ...and wait for the writer to release the lock. */		\
		while (*(volatile int *) __rwp < 0)				\
			barrier();						\
	}									\
} while (0)
/*
 * Release a read-hold on RW: atomically drop the reader count with
 * release semantics so all reads inside the critical section are
 * ordered before the unlock.
 */
#define _raw_read_unlock(rw)					\
do {								\
	rwlock_t *__rwp = (rw);					\
								\
	ia64_fetchadd(-1, (int *) __rwp, "rel");		\
} while (0)
#define _raw_write_lock(rw) \ #define _raw_write_lock(rw) \
do { \ do { \
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment