Commit c461783b authored by David Mosberger's avatar David Mosberger

ia64: Drop unused NEW_LOCK spinlock code and clean up an unneeded
	test in the kernel unwinder.
parent 96fd6d6a
...@@ -318,11 +318,6 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char ...@@ -318,11 +318,6 @@ unw_access_gr (struct unw_frame_info *info, int regnum, unsigned long *val, char
} }
} else { } else {
/* access a scratch register */ /* access a scratch register */
if (!info->pt) {
UNW_DPRINT(0, "unwind.%s: no pt-regs; cannot access r%d\n",
__FUNCTION__, regnum);
return -1;
}
pt = get_scratch_regs(info); pt = get_scratch_regs(info);
addr = (unsigned long *) (pt + pt_regs_off(regnum)); addr = (unsigned long *) (pt + pt_regs_off(regnum));
if (info->pri_unat_loc) if (info->pri_unat_loc)
......
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define _ASM_IA64_SPINLOCK_H #define _ASM_IA64_SPINLOCK_H
/* /*
* Copyright (C) 1998-2002 Hewlett-Packard Co * Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com> * David Mosberger-Tang <davidm@hpl.hp.com>
* Copyright (C) 1999 Walt Drummond <drummond@valinux.com> * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
* *
...@@ -15,58 +15,6 @@ ...@@ -15,58 +15,6 @@
#include <asm/bitops.h> #include <asm/bitops.h>
#include <asm/atomic.h> #include <asm/atomic.h>
#undef NEW_LOCK
#ifdef NEW_LOCK
typedef struct {
volatile unsigned int lock;
} spinlock_t;
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 0 }
#define spin_lock_init(x) ((x)->lock = 0)
/*
* Streamlined test_and_set_bit(0, (x)). We use test-and-test-and-set
* rather than a simple xchg to avoid writing the cache-line when
* there is contention.
*/
#define _raw_spin_lock(x) \
{ \
register char *addr __asm__ ("r31") = (char *) &(x)->lock; \
\
__asm__ __volatile__ ( \
"mov r30=1\n" \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq r30=[%0],r30,ar.ccv\n" \
";;\n" \
"cmp.ne p15,p0=r30,r0\n" \
"(p15) br.call.spnt.few b7=ia64_spinlock_contention\n" \
";;\n" \
"1:\n" /* force a new bundle */ \
:: "r"(addr) \
: "ar.ccv", "ar.pfs", "b7", "p15", "r28", "r29", "r30", "memory"); \
}
#define _raw_spin_trylock(x) \
({ \
register long result; \
\
__asm__ __volatile__ ( \
"mov ar.ccv=r0\n" \
";;\n" \
"cmpxchg4.acq %0=[%2],%1,ar.ccv\n" \
: "=r"(result) : "r"(1), "r"(&(x)->lock) : "ar.ccv", "memory"); \
(result == 0); \
})
#define spin_is_locked(x) ((x)->lock != 0)
#define _raw_spin_unlock(x) do { barrier(); ((spinlock_t *) x)->lock = 0;} while (0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#else /* !NEW_LOCK */
typedef struct { typedef struct {
volatile unsigned int lock; volatile unsigned int lock;
} spinlock_t; } spinlock_t;
...@@ -123,8 +71,6 @@ do { \ ...@@ -123,8 +71,6 @@ do { \
#define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0) #define _raw_spin_trylock(x) (cmpxchg_acq(&(x)->lock, 0, 1) == 0)
#define spin_unlock_wait(x) do { barrier(); } while ((x)->lock) #define spin_unlock_wait(x) do { barrier(); } while ((x)->lock)
#endif /* !NEW_LOCK */
typedef struct { typedef struct {
volatile int read_counter:31; volatile int read_counter:31;
volatile int write_lock:1; volatile int write_lock:1;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment