Commit 8a340e4e authored by David Mosberger

ia64: Fix _raw_read_lock() to not switch text sections.  Tidy it up with the
help of the ia64_fetchadd() macro.  Ditto for _raw_read_unlock().

parent e3e682b8
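In C terms, the new reader fast path boils down to the loop below. This is a
stand-alone sketch, not the kernel code: fetchadd_acq()/fetchadd_rel() are
hypothetical stand-ins for ia64_fetchadd(..., "acq") and
ia64_fetchadd(..., "rel"), modeled here with GCC atomic builtins.

	/* Sketch only: the lock word is negative exactly while a writer holds it. */
	static int fetchadd_acq(volatile int *p, int n)
	{
		return __atomic_fetch_add(p, n, __ATOMIC_ACQUIRE);
	}

	static int fetchadd_rel(volatile int *p, int n)
	{
		return __atomic_fetch_add(p, n, __ATOMIC_RELEASE);
	}

	static void read_lock_sketch(volatile int *lock)
	{
		/* Optimistically bump the reader count (acquire semantics). */
		while (fetchadd_acq(lock, 1) < 0) {
			/* A writer got in first: undo the increment (release)... */
			fetchadd_rel(lock, -1);
			/* ...and spin with plain loads until the writer is gone. */
			while (*lock < 0)
				;
		}
	}

The slow path is now ordinary C, so the compiler lays it out itself instead of
the asm hand-switching sections.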
@@ -17,16 +17,16 @@
 extern unsigned long __bad_size_for_ia64_fetch_and_add (void);
 extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 
-#define IA64_FETCHADD(tmp,v,n,sz)						\
+#define IA64_FETCHADD(tmp,v,n,sz,sem)						\
 ({										\
 	switch (sz) {								\
 	      case 4:								\
-		__asm__ __volatile__ ("fetchadd4.rel %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd4."sem" %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
 
 	      case 8:								\
-		__asm__ __volatile__ ("fetchadd8.rel %0=[%1],%2"		\
+		__asm__ __volatile__ ("fetchadd8."sem" %0=[%1],%2"		\
				      : "=r"(tmp) : "r"(v), "i"(n) : "memory");	\
		break;								\
 
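The new sem argument works by ordinary C string-literal concatenation inside
the asm template: "fetchadd4."sem" ..." glues the completer onto the mnemonic,
so the one macro can emit either the acquire or the release form of the
instruction. Illustrative expansion (hand-written, not actual preprocessor
output):

	/* IA64_FETCHADD(tmp, v, 1, 4, "acq") yields, roughly: */
	__asm__ __volatile__ ("fetchadd4.acq %0=[%1],%2"
			      : "=r"(tmp) : "r"(v), "i"(1) : "memory");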
@@ -35,32 +35,34 @@ extern unsigned long __bad_increment_for_ia64_fetch_and_add (void);
 	}									\
 })
 
-#define ia64_fetch_and_add(i,v)							\
+#define ia64_fetchadd(i,v,sem)							\
 ({										\
 	__u64 _tmp;								\
 	volatile __typeof__(*(v)) *_v = (v);					\
 	/* Can't use a switch () here: gcc isn't always smart enough for that... */ \
 	if ((i) == -16)								\
-		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -16, sizeof(*(v)), sem);		\
 	else if ((i) == -8)							\
-		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -8, sizeof(*(v)), sem);			\
 	else if ((i) == -4)							\
-		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -4, sizeof(*(v)), sem);			\
 	else if ((i) == -1)							\
-		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, -1, sizeof(*(v)), sem);			\
 	else if ((i) == 1)							\
-		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 1, sizeof(*(v)), sem);			\
 	else if ((i) == 4)							\
-		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 4, sizeof(*(v)), sem);			\
 	else if ((i) == 8)							\
-		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 8, sizeof(*(v)), sem);			\
 	else if ((i) == 16)							\
-		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)));			\
+		IA64_FETCHADD(_tmp, _v, 16, sizeof(*(v)), sem);			\
 	else									\
 		_tmp = __bad_increment_for_ia64_fetch_and_add();		\
-	(__typeof__(*(v))) (_tmp + (i));	/* return new value */		\
+	(__typeof__(*(v))) (_tmp);		/* return old value */		\
 })
 
+#define ia64_fetch_and_add(i,v)	(ia64_fetchadd(i, v, "rel") + (i)) /* return new value */
+
 /*
  * This function doesn't exist, so you'll get a linker error if
  * something tries to do an invalid xchg().
...
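Note the changed contract: ia64_fetchadd() now returns the value the word held
before the add, matching the fetchadd instruction itself, while
ia64_fetch_and_add() keeps its historical return-the-new-value behavior by
adding i back on top. A hedged illustration, assuming a 4-byte counter
holding 5:

	int counter = 5;

	/* old == 5; counter is now 6 */
	int old = ia64_fetchadd(1, &counter, "rel");

	/* ia64_fetch_and_add(1, &counter) would return 7 (the new value). */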
@@ -72,8 +72,8 @@ do { \
 #define spin_unlock_wait(x)	do { barrier(); } while ((x)->lock)
 
 typedef struct {
-	volatile int read_counter:31;
-	volatile int write_lock:1;
+	volatile int read_counter	: 31;
+	volatile int write_lock		: 1;
 } rwlock_t;
 #define RW_LOCK_UNLOCKED (rwlock_t) { 0, 0 }
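Layout note: read_counter occupies bits 0..30 and write_lock bit 31, the sign
bit (bitfield order per the little-endian ia64 ABI), so the word viewed as an
int is negative exactly while a writer holds the lock. That is what the old
code's "tbit.nz p6,p0 = %0, 31" tested and what the new code's < 0 comparisons
test. A minimal sketch of that view:

	/* Nonzero iff write_lock (bit 31) is set, i.e. the int view is negative. */
	static inline int writer_active (rwlock_t *rw)
	{
		return *(volatile int *) rw < 0;
	}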
@@ -82,33 +82,21 @@ typedef struct {
 #define _raw_read_lock(rw)							\
 do {										\
-	int __read_lock_tmp = 0;						\
-	__asm__ __volatile__ ("1:\tfetchadd4.acq %0 = [%1], 1\n"		\
-			      ";;\n"						\
-			      "tbit.nz p6,p0 = %0, 31\n"			\
-			      "(p6) br.cond.sptk.few 2f\n"			\
-			      ".section .text.lock,\"ax\"\n"			\
-			      "2:\tfetchadd4.rel %0 = [%1], -1\n"		\
-			      ";;\n"						\
-			      "3:\tld4.acq %0 = [%1]\n"				\
-			      ";;\n"						\
-			      "tbit.nz p6,p0 = %0, 31\n"			\
-			      "(p6) br.cond.sptk.few 3b\n"			\
-			      "br.cond.sptk.few 1b\n"				\
-			      ";;\n"						\
-			      ".previous\n"					\
-			      : "=&r" (__read_lock_tmp)				\
-			      : "r" (rw) : "p6", "memory");			\
-} while(0)
+	rwlock_t *__read_lock_ptr = (rw);					\
+										\
+	while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, "acq") < 0)) { \
+		ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");		\
+		while (*(volatile int *)__read_lock_ptr < 0)			\
+			barrier();						\
+										\
+	}									\
+} while (0)
 
 #define _raw_read_unlock(rw)					\
 do {								\
-	int __read_unlock_tmp = 0;				\
-	__asm__ __volatile__ ("fetchadd4.rel %0 = [%1], -1\n"	\
-			      : "=r" (__read_unlock_tmp)	\
-			      : "r" (rw)			\
-			      : "memory");			\
-} while(0)
+	rwlock_t *__read_lock_ptr = (rw);			\
+	ia64_fetchadd(-1, (int *) __read_lock_ptr, "rel");	\
+} while (0)
 
 #define _raw_write_lock(rw)						\
 do {									\
...
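For reference, the reader-side macros pair up like this (usage sketch; the lock
name is made up):

	rwlock_t my_lock = RW_LOCK_UNLOCKED;

	_raw_read_lock(&my_lock);
	/* ... read-side critical section ... */
	_raw_read_unlock(&my_lock);

The old implementation parked its contention path in a hand-made section via
".section .text.lock,\"ax\" ... .previous" inside the inline asm; expressing
that path in C instead is what the title's "not switch text sections" refers
to.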