Commit f50386f9 authored by David S. Miller

Merge nuts.ninka.net:/disk1/davem/BK/sparcwork-2.5

into nuts.ninka.net:/disk1/davem/BK/sparc-2.5
parents 01d259fe fc49a434
@@ -570,6 +570,7 @@ CONFIG_BRIDGE_EBT_BROUTE=m
 CONFIG_BRIDGE_EBT_T_FILTER=m
 CONFIG_BRIDGE_EBT_T_NAT=m
 CONFIG_BRIDGE_EBT_802_3=m
+CONFIG_BRIDGE_EBT_AMONG=m
 CONFIG_BRIDGE_EBT_ARP=m
 CONFIG_BRIDGE_EBT_IP=m
 CONFIG_BRIDGE_EBT_LIMIT=m
...
@@ -48,10 +48,13 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define atomic_set(v, i) (((v)->counter) = ((i) << 8))
 #endif
 
-static __inline__ int __atomic_add(int i, atomic_t *v)
+static inline int __atomic_add(int i, atomic_t *v)
 {
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g7");
 
        ptr = &v->counter;
        increment = i;
@@ -60,17 +63,20 @@ static __inline__ int __atomic_add(int i, atomic_t *v)
        "mov %%o7, %%g4\n\t"
        "call ___atomic_add\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (increment)
+       : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
-       : "g3", "g4", "g7", "memory", "cc");
+       : "memory", "cc");
 
        return increment;
 }
 
-static __inline__ int __atomic_sub(int i, atomic_t *v)
+static inline int __atomic_sub(int i, atomic_t *v)
 {
        register volatile int *ptr asm("g1");
        register int increment asm("g2");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g7");
 
        ptr = &v->counter;
        increment = i;
@@ -79,9 +85,9 @@ static __inline__ int __atomic_sub(int i, atomic_t *v)
        "mov %%o7, %%g4\n\t"
        "call ___atomic_sub\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (increment)
+       : "=&r" (increment), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
        : "0" (increment), "r" (ptr)
-       : "g3", "g4", "g7", "memory", "cc");
+       : "memory", "cc");
 
        return increment;
 }
...
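The two hunks above set the pattern repeated through most of this commit: registers that the out-of-line helpers (___atomic_add/___atomic_sub) scratch are no longer named in the clobber list; instead, throwaway variables are pinned to those registers and declared as outputs, which tells GCC the same thing without mixing explicit-register operands and register clobbers. A minimal sketch of the idiom (illustrative and SPARC-only, not taken from the patch; "___helper" is a hypothetical stand-in for the real assembler routines):

static inline int call_asm_helper(int value, volatile int *counter)
{
        register volatile int *ptr asm("g1") = counter; /* argument register */
        register int res asm("g2") = value;            /* in/out register */
        register int tmp1 asm("g3");   /* scratched by ___helper */
        register int tmp2 asm("g4");   /* also holds the saved %o7 */
        register int tmp3 asm("g7");

        __asm__ __volatile__(
                "mov %%o7, %%g4\n\t"
                "call ___helper\n\t"
                " add %%o7, 8, %%o7\n"
                : "=&r" (res), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3)
                : "0" (res), "r" (ptr)
                : "memory", "cc");

        return res;
}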
@@ -20,10 +20,14 @@
  * within the first byte. Sparc is BIG-Endian. Unless noted otherwise
  * all bit-ops return 0 if bit was previously clear and != 0 otherwise.
  */
-static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -32,17 +36,21 @@ static __inline__ int test_and_set_bit(unsigned long nr, volatile unsigned long
        "mov %%o7, %%g4\n\t"
        "call ___set_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "memory", "cc");
+       : "memory", "cc");
 
        return mask != 0;
 }
 
-static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -51,15 +59,19 @@ static __inline__ void set_bit(unsigned long nr, volatile unsigned long *addr)
        "mov %%o7, %%g4\n\t"
        "call ___set_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "cc");
+       : "memory", "cc");
 }
 
-static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -68,17 +80,21 @@ static __inline__ int test_and_clear_bit(unsigned long nr, volatile unsigned lon
        "mov %%o7, %%g4\n\t"
        "call ___clear_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "memory", "cc");
+       : "memory", "cc");
 
        return mask != 0;
 }
 
-static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -87,15 +103,19 @@ static __inline__ void clear_bit(unsigned long nr, volatile unsigned long *addr)
        "mov %%o7, %%g4\n\t"
        "call ___clear_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "cc");
+       : "memory", "cc");
 }
 
-static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -104,17 +124,21 @@ static __inline__ int test_and_change_bit(unsigned long nr, volatile unsigned lo
        "mov %%o7, %%g4\n\t"
        "call ___change_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "memory", "cc");
+       : "memory", "cc");
 
        return mask != 0;
 }
 
-static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
 {
        register unsigned long mask asm("g2");
        register unsigned long *ADDR asm("g1");
+       register int tmp1 asm("g3");
+       register int tmp2 asm("g4");
+       register int tmp3 asm("g5");
+       register int tmp4 asm("g7");
 
        ADDR = ((unsigned long *) addr) + (nr >> 5);
        mask = 1 << (nr & 31);
@@ -123,15 +147,15 @@ static __inline__ void change_bit(unsigned long nr, volatile unsigned long *addr
        "mov %%o7, %%g4\n\t"
        "call ___change_bit\n\t"
        " add %%o7, 8, %%o7\n"
-       : "=&r" (mask)
+       : "=&r" (mask), "=r" (tmp1), "=r" (tmp2), "=r" (tmp3), "=r" (tmp4)
        : "0" (mask), "r" (ADDR)
-       : "g3", "g4", "g5", "g7", "cc");
+       : "memory", "cc");
 }
 
 /*
  * non-atomic versions
  */
-static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
+static inline void __set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -139,7 +163,7 @@ static __inline__ void __set_bit(int nr, volatile unsigned long *addr)
        *p |= mask;
 }
 
-static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
+static inline void __clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -147,7 +171,7 @@ static __inline__ void __clear_bit(int nr, volatile unsigned long *addr)
        *p &= ~mask;
 }
 
-static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
+static inline void __change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -155,7 +179,7 @@ static __inline__ void __change_bit(int nr, volatile unsigned long *addr)
        *p ^= mask;
 }
 
-static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_set_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -165,7 +189,7 @@ static __inline__ int __test_and_set_bit(int nr, volatile unsigned long *addr)
        return (old & mask) != 0;
 }
 
-static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_clear_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -175,7 +199,7 @@ static __inline__ int __test_and_clear_bit(int nr, volatile unsigned long *addr)
        return (old & mask) != 0;
 }
 
-static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int __test_and_change_bit(int nr, volatile unsigned long *addr)
 {
        unsigned long mask = 1UL << (nr & 0x1f);
        unsigned long *p = ((unsigned long *)addr) + (nr >> 5);
@@ -189,13 +213,13 @@ static __inline__ int __test_and_change_bit(int nr, volatile unsigned long *addr
 #define smp_mb__after_clear_bit() do { } while(0)
 
 /* The following routine need not be atomic. */
-static __inline__ int test_bit(int nr, __const__ volatile unsigned long *addr)
+static inline int test_bit(int nr, __const__ volatile unsigned long *addr)
 {
        return (1UL & (((unsigned long *)addr)[nr >> 5] >> (nr & 31))) != 0UL;
 }
 
 /* The easy/cheese version for now. */
-static __inline__ unsigned long ffz(unsigned long word)
+static inline unsigned long ffz(unsigned long word)
 {
        unsigned long result = 0;
@@ -212,7 +236,7 @@ static __inline__ unsigned long ffz(unsigned long word)
  *
  * Undefined if no bit exists, so code should check against 0 first.
  */
-static __inline__ int __ffs(unsigned long word)
+static inline int __ffs(unsigned long word)
 {
        int num = 0;
@@ -243,7 +267,7 @@ static __inline__ int __ffs(unsigned long word)
  * unlikely to be set. It's guaranteed that at least one of the 140
  * bits is cleared.
  */
-static __inline__ int sched_find_first_bit(unsigned long *b)
+static inline int sched_find_first_bit(unsigned long *b)
 {
        if (unlikely(b[0]))
@@ -262,7 +286,7 @@ static __inline__ int sched_find_first_bit(unsigned long *b)
  * the libc and compiler builtin ffs routines, therefore
  * differs in spirit from the above ffz (man ffs).
  */
-static __inline__ int ffs(int x)
+static inline int ffs(int x)
 {
        if (!x)
                return 0;
@@ -288,7 +312,7 @@ static __inline__ int ffs(int x)
  * 'size' bits, starting the search at bit 'offset'. This is largely based
  * on Linus's ALPHA routines, which are pretty portable BTW.
  */
-static __inline__ unsigned long find_next_zero_bit(unsigned long *addr,
+static inline unsigned long find_next_zero_bit(unsigned long *addr,
        unsigned long size, unsigned long offset)
 {
        unsigned long *p = addr + (offset >> 5);
@@ -342,7 +366,7 @@ static __inline__ unsigned long find_next_zero_bit(unsigned long *addr,
  *
  * Scheduler induced bitop, do not use.
  */
-static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
+static inline int find_next_bit(unsigned long *addr, int size, int offset)
 {
        unsigned long *p = addr + (offset >> 5);
        int num = offset & ~0x1f;
@@ -362,7 +386,7 @@ static __inline__ int find_next_bit(unsigned long *addr, int size, int offset)
 
 /*
  */
-static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
+static inline int test_le_bit(int nr, __const__ unsigned long * addr)
 {
        __const__ unsigned char *ADDR = (__const__ unsigned char *) addr;
        return (ADDR[nr >> 3] >> (nr & 7)) & 1;
@@ -371,7 +395,7 @@ static __inline__ int test_le_bit(int nr, __const__ unsigned long * addr)
 /*
  * non-atomic versions
  */
-static __inline__ void __set_le_bit(int nr, unsigned long *addr)
+static inline void __set_le_bit(int nr, unsigned long *addr)
 {
        unsigned char *ADDR = (unsigned char *)addr;
@@ -379,7 +403,7 @@ static __inline__ void __set_le_bit(int nr, unsigned long *addr)
        *ADDR |= 1 << (nr & 0x07);
 }
 
-static __inline__ void __clear_le_bit(int nr, unsigned long *addr)
+static inline void __clear_le_bit(int nr, unsigned long *addr)
 {
        unsigned char *ADDR = (unsigned char *)addr;
@@ -387,7 +411,7 @@ static __inline__ void __clear_le_bit(int nr, unsigned long *addr)
        *ADDR &= ~(1 << (nr & 0x07));
 }
 
-static __inline__ int __test_and_set_le_bit(int nr, unsigned long *addr)
+static inline int __test_and_set_le_bit(int nr, unsigned long *addr)
 {
        int mask, retval;
        unsigned char *ADDR = (unsigned char *)addr;
@@ -399,7 +423,7 @@ static __inline__ int __test_and_set_le_bit(int nr, unsigned long *addr)
        return retval;
 }
 
-static __inline__ int __test_and_clear_le_bit(int nr, unsigned long *addr)
+static inline int __test_and_clear_le_bit(int nr, unsigned long *addr)
 {
        int mask, retval;
        unsigned char *ADDR = (unsigned char *)addr;
@@ -411,7 +435,7 @@ static __inline__ int __test_and_clear_le_bit(int nr, unsigned long *addr)
        return retval;
 }
 
-static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
+static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
        unsigned long size, unsigned long offset)
 {
        unsigned long *p = addr + (offset >> 5);
@@ -455,14 +479,16 @@ static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
 #define find_first_zero_le_bit(addr, size) \
        find_next_zero_le_bit((addr), (size), 0)
 
-#define ext2_set_bit   __test_and_set_le_bit
-#define ext2_clear_bit __test_and_clear_le_bit
+#define ext2_set_bit(nr,addr)  \
+       __test_and_set_le_bit((nr),(unsigned long *)(addr))
+#define ext2_clear_bit(nr,addr)        \
+       __test_and_clear_le_bit((nr),(unsigned long *)(addr))
 
 #define ext2_set_bit_atomic(lock, nr, addr)            \
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
-               ret = ext2_set_bit((nr), (addr));       \
+               ret = ext2_set_bit((nr), (unsigned long *)(addr)); \
                spin_unlock(lock);                      \
                ret;                                    \
        })
@@ -471,21 +497,29 @@ static __inline__ unsigned long find_next_zero_le_bit(unsigned long *addr,
        ({                                              \
                int ret;                                \
                spin_lock(lock);                        \
-               ret = ext2_clear_bit((nr), (addr));     \
+               ret = ext2_clear_bit((nr), (unsigned long *)(addr)); \
                spin_unlock(lock);                      \
                ret;                                    \
        })
 
-#define ext2_test_bit                  test_le_bit
-#define ext2_find_first_zero_bit       find_first_zero_le_bit
-#define ext2_find_next_zero_bit        find_next_zero_le_bit
+#define ext2_test_bit(nr,addr) \
+       test_le_bit((nr),(unsigned long *)(addr))
+#define ext2_find_first_zero_bit(addr, size) \
+       find_first_zero_le_bit((unsigned long *)(addr), (size))
+#define ext2_find_next_zero_bit(addr, size, off) \
+       find_next_zero_le_bit((unsigned long *)(addr), (size), (off))
 
 /* Bitmap functions for the minix filesystem. */
-#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
-#define minix_set_bit(nr,addr) set_bit(nr,addr)
-#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
-#define minix_test_bit(nr,addr) test_bit(nr,addr)
-#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)
+#define minix_test_and_set_bit(nr,addr)        \
+       test_and_set_bit((nr),(unsigned long *)(addr))
+#define minix_set_bit(nr,addr) \
+       set_bit((nr),(unsigned long *)(addr))
+#define minix_test_and_clear_bit(nr,addr)      \
+       test_and_clear_bit((nr),(unsigned long *)(addr))
+#define minix_test_bit(nr,addr)        \
+       test_bit((nr),(unsigned long *)(addr))
+#define minix_find_first_zero_bit(addr,size)   \
+       find_first_zero_bit((unsigned long *)(addr),(size))
 
 #endif /* __KERNEL__ */
...
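The ext2/minix rewrites at the end of this file turn plain name aliases into function-like macros that cast addr to unsigned long *, because filesystem code hands these helpers bitmap buffers of varying pointer types. A self-contained userspace model of the pattern (assumption: the helper here is a simplified, non-atomic stand-in for the kernel's __test_and_set_le_bit):

#include <stdio.h>

/* Simplified little-endian test-and-set, modeled on the helper above. */
static inline int test_and_set_le(int nr, unsigned long *addr)
{
        unsigned char *p = (unsigned char *)addr + (nr >> 3);
        unsigned char mask = 1 << (nr & 7);
        int old = (*p & mask) != 0;

        *p |= mask;
        return old;
}

/* The commit's trick: accept any pointer type and cast once, centrally. */
#define ext2_set_bit_demo(nr, addr) \
        test_and_set_le((nr), (unsigned long *)(addr))

int main(void)
{
        unsigned char bitmap[8] = { 0 };        /* e.g. a block bitmap */

        printf("%d\n", ext2_set_bit_demo(3, bitmap));   /* 0: was clear */
        printf("%d\n", ext2_set_bit_demo(3, bitmap));   /* 1: already set */
        return 0;
}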
@@ -42,23 +42,26 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
 extern unsigned int __csum_partial_copy_sparc_generic (const char *, char *);
 
-extern __inline__ unsigned int
+static inline unsigned int
 csum_partial_copy_nocheck (const char *src, char *dst, int len,
                           unsigned int sum)
 {
        register unsigned int ret asm("o0") = (unsigned int)src;
        register char *d asm("o1") = dst;
        register int l asm("g1") = len;
 
        __asm__ __volatile__ (
                "call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
-               " mov %4, %%g7\n"
-       : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (sum) :
-       "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g4", "g5", "g7");
+               " mov %6, %%g7\n"
+       : "=&r" (ret), "=&r" (d), "=&r" (l)
+       : "0" (ret), "1" (d), "2" (l), "r" (sum)
+       : "o2", "o3", "o4", "o5", "o7",
+         "g2", "g3", "g4", "g5", "g7",
+         "memory", "cc");
        return ret;
 }
 
-extern __inline__ unsigned int
+static inline unsigned int
 csum_partial_copy_from_user(const char *src, char *dst, int len,
                            unsigned int sum, int *err)
 {
@@ -79,14 +82,16 @@ csum_partial_copy_from_user(const char *src, char *dst, int len,
                ".previous\n"
                "1:\n\t"
                "call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
-               " st %5, [%%sp + 64]\n"
-       : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
-       "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g4", "g5", "g7");
+               " st %8, [%%sp + 64]\n"
+       : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
+       : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
+       : "o2", "o3", "o4", "o5", "o7", "g2", "g3", "g4", "g5",
+         "cc", "memory");
        return ret;
        }
 }
 
-extern __inline__ unsigned int
+static inline unsigned int
 csum_partial_copy_to_user(const char *src, char *dst, int len,
                          unsigned int sum, int *err)
 {
@@ -106,9 +111,12 @@ csum_partial_copy_to_user(const char *src, char *dst, int len,
                ".previous\n"
                "1:\n\t"
                "call " C_LABEL_STR(__csum_partial_copy_sparc_generic) "\n\t"
-               " st %5, [%%sp + 64]\n"
-       : "=r" (ret) : "0" (ret), "r" (d), "r" (l), "r" (s), "r" (err) :
-       "o1", "o2", "o3", "o4", "o5", "o7", "g1", "g2", "g3", "g4", "g5", "g7");
+               " st %8, [%%sp + 64]\n"
+       : "=&r" (ret), "=&r" (d), "=&r" (l), "=&r" (s)
+       : "0" (ret), "1" (d), "2" (l), "3" (s), "r" (err)
+       : "o2", "o3", "o4", "o5", "o7",
+         "g2", "g3", "g4", "g5",
+         "cc", "memory");
        return ret;
        }
 }
@@ -119,8 +127,8 @@ csum_partial_copy_to_user(const char *src, char *dst, int len,
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
-                                             unsigned int ihl)
+static inline unsigned short ip_fast_csum(const unsigned char *iph,
+                                         unsigned int ihl)
 {
        unsigned short sum;
@@ -157,7 +165,7 @@ extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
 }
 
 /* Fold a partial checksum without adding pseudo headers. */
-extern __inline__ unsigned int csum_fold(unsigned int sum)
+static inline unsigned int csum_fold(unsigned int sum)
 {
        unsigned int tmp;
@@ -171,11 +179,11 @@ extern __inline__ unsigned int csum_fold(unsigned int sum)
        return sum;
 }
 
-extern __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
-                                                  unsigned long daddr,
-                                                  unsigned int len,
-                                                  unsigned short proto,
-                                                  unsigned int sum)
+static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
+                                              unsigned long daddr,
+                                              unsigned int len,
+                                              unsigned short proto,
+                                              unsigned int sum)
 {
        __asm__ __volatile__("addcc\t%1, %0, %0\n\t"
                "addxcc\t%2, %0, %0\n\t"
@@ -203,11 +211,11 @@ static inline unsigned short int csum_tcpudp_magic(unsigned long saddr,
 #define _HAVE_ARCH_IPV6_CSUM
 
-static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
-                                                    struct in6_addr *daddr,
-                                                    __u32 len,
-                                                    unsigned short proto,
-                                                    unsigned int sum)
+static inline unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
+                                                struct in6_addr *daddr,
+                                                __u32 len,
+                                                unsigned short proto,
+                                                unsigned int sum)
 {
        __asm__ __volatile__ (
                "addcc %3, %4, %%g4\n\t"
@@ -238,7 +246,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
 }
 
 /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
-extern __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
+static inline unsigned short ip_compute_csum(unsigned char * buff, int len)
 {
        return csum_fold(csum_partial(buff, len, 0));
 }
...
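The %4 to %6 and %5 to %8 renames in these stubs follow mechanically from GCC's asm operand numbering: outputs are numbered first, then inputs in order of appearance, so promoting d, l, and s to outputs shifts the number of every later input. A toy illustration (generic C, not from the patch; the asm body is intentionally empty):

static inline unsigned int numbering_demo(unsigned int a, unsigned int b,
                                          unsigned int c, unsigned int sum)
{
        unsigned int x = a, y = b, z = c;

        __asm__ __volatile__(""                         /* no instructions */
                : "=r" (x), "=r" (y), "=r" (z)          /* %0 %1 %2 */
                : "0" (x), "1" (y), "2" (z),            /* %3 %4 %5 */
                  "r" (sum));                           /* %6: first free input */
        return x + sum;
}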
@@ -13,12 +13,12 @@
 #define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
 
-static __inline__ u32 flip_dword (u32 d)
+static inline u32 flip_dword (u32 d)
 {
        return ((d&0xff)<<24) | (((d>>8)&0xff)<<16) | (((d>>16)&0xff)<<8)| ((d>>24)&0xff);
 }
 
-static __inline__ u16 flip_word (u16 d)
+static inline u16 flip_word (u16 d)
 {
        return ((d&0xff) << 8) | ((d>>8)&0xff);
 }
 
@@ -36,43 +36,73 @@ static __inline__ u16 flip_word (u16 d)
  * The offshot is, we must cast readb et. al. arguments with a #define.
  */
 
-static __inline__ u8 __raw_readb(unsigned long addr)
+static inline u8 __raw_readb(unsigned long addr)
 {
        return *(volatile u8 *)addr;
 }
 
-static __inline__ u16 __raw_readw(unsigned long addr)
+static inline u16 __raw_readw(unsigned long addr)
 {
        return *(volatile u16 *)addr;
 }
 
-static __inline__ u32 __raw_readl(unsigned long addr)
+static inline u32 __raw_readl(unsigned long addr)
 {
        return *(volatile u32 *)addr;
 }
 
-static __inline__ void __raw_writeb(u8 b, unsigned long addr)
+static inline void __raw_writeb(u8 b, unsigned long addr)
 {
        *(volatile u8 *)addr = b;
 }
 
-static __inline__ void __raw_writew(u16 b, unsigned long addr)
+static inline void __raw_writew(u16 b, unsigned long addr)
 {
        *(volatile u16 *)addr = b;
 }
 
-static __inline__ void __raw_writel(u32 b, unsigned long addr)
+static inline void __raw_writel(u32 b, unsigned long addr)
 {
        *(volatile u32 *)addr = b;
 }
 
-#define readb(addr) (*(volatile u8 *)(addr))
-#define readw(addr) flip_word(*(volatile u16 *)(addr))
-#define readl(addr) flip_dword(*(volatile u32 *)(addr))
+static inline u8 __readb(unsigned long addr)
+{
+       return *(volatile u8 *)addr;
+}
+
+static inline u16 __readw(unsigned long addr)
+{
+       return flip_word(*(volatile u16 *)addr);
+}
+
+static inline u32 __readl(unsigned long addr)
+{
+       return flip_dword(*(volatile u32 *)addr);
+}
+
+static inline void __writeb(u8 b, unsigned long addr)
+{
+       *(volatile u8 *)addr = b;
+}
+
+static inline void __writew(u16 b, unsigned long addr)
+{
+       *(volatile u16 *)addr = flip_word(b);
+}
+
+static inline void __writel(u32 b, unsigned long addr)
+{
+       *(volatile u32 *)addr = flip_dword(b);
+}
+
+#define readb(addr)    __readb((unsigned long)(addr))
+#define readw(addr)    __readw((unsigned long)(addr))
+#define readl(addr)    __readl((unsigned long)(addr))
 
-#define writeb(b, a) (*(volatile u8 *)(a) = b)
-#define writew(b, a) (*(volatile u16 *)(a) = flip_word(b))
-#define writel(b, a) (*(volatile u32 *)(a) = flip_dword(b))
+#define writeb(b, addr)        __writeb((b),(unsigned long)(addr))
+#define writew(b, addr)        __writew((b),(unsigned long)(addr))
+#define writel(b, addr)        __writel((b),(unsigned long)(addr))
 
 /*
  * I/O space operations
@@ -91,17 +121,22 @@ static __inline__ void __raw_writel(u32 b, unsigned long addr)
  * mapped somewhere into virtual kernel space and we
  * can use inb/outb again.
  */
-#define inb_local(addr) readb(addr)
-#define inb(addr) readb(addr)
-#define inw(addr) readw(addr)
-#define inl(addr) readl(addr)
-#define inb_p(addr) readb(addr)
+#define inb_local(addr) __readb(addr)
+#define inb(addr) __readb(addr)
+#define inw(addr) __readw(addr)
+#define inl(addr) __readl(addr)
 
-#define outb_local(b, addr) writeb(b, addr)
-#define outb(b, addr) writeb(b, addr)
-#define outw(b, addr) writew(b, addr)
-#define outl(b, addr) writel(b, addr)
-#define outb_p(b, addr) writeb(b, addr)
+#define outb_local(b, addr) __writeb(b, addr)
+#define outb(b, addr) __writeb(b, addr)
+#define outw(b, addr) __writew(b, addr)
+#define outl(b, addr) __writel(b, addr)
+
+#define inb_p inb
+#define outb_p outb
+#define inw_p inw
+#define outw_p outw
+#define inl_p inl
+#define outl_p outl
 
 extern void outsb(unsigned long addr, const void *src, unsigned long cnt);
 extern void outsw(unsigned long addr, const void *src, unsigned long cnt);
@@ -118,32 +153,32 @@ extern void insl(unsigned long addr, void *dst, unsigned long count);
  * SBus has only one, memory mapped, I/O space.
  * We do not need to flip bytes for SBus of course.
  */
-static __inline__ u8 _sbus_readb(unsigned long addr)
+static inline u8 _sbus_readb(unsigned long addr)
 {
        return *(volatile u8 *)addr;
 }
 
-static __inline__ u16 _sbus_readw(unsigned long addr)
+static inline u16 _sbus_readw(unsigned long addr)
 {
        return *(volatile u16 *)addr;
 }
 
-static __inline__ u32 _sbus_readl(unsigned long addr)
+static inline u32 _sbus_readl(unsigned long addr)
 {
        return *(volatile u32 *)addr;
 }
 
-static __inline__ void _sbus_writeb(u8 b, unsigned long addr)
+static inline void _sbus_writeb(u8 b, unsigned long addr)
 {
        *(volatile u8 *)addr = b;
 }
 
-static __inline__ void _sbus_writew(u16 b, unsigned long addr)
+static inline void _sbus_writew(u16 b, unsigned long addr)
 {
        *(volatile u16 *)addr = b;
 }
 
-static __inline__ void _sbus_writel(u32 b, unsigned long addr)
+static inline void _sbus_writel(u32 b, unsigned long addr)
 {
        *(volatile u32 *)addr = b;
 }
...
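On sparc the CPU is big-endian while PCI space is little-endian, which is why readw()/readl() pass loads through flip_word()/flip_dword(); turning the macros into __readw()-style inline functions makes the (unsigned long) cast happen exactly once and type-checks the value. A userspace model of the flip (assumption: simplified; the kernel accessors dereference a device address instead of a local):

#include <stdint.h>
#include <stdio.h>

/* Same bit shuffling as flip_word() above, in fixed-width types. */
static inline uint16_t flip_word_demo(uint16_t d)
{
        return (uint16_t)(((d & 0xff) << 8) | ((d >> 8) & 0xff));
}

int main(void)
{
        /* A little-endian device register holding 0x1234 is the byte
         * sequence 34 12; a big-endian CPU loading it raw sees 0x3412,
         * and the flip restores the value the device meant. */
        uint16_t raw_load = 0x3412;

        printf("0x%04x\n", flip_word_demo(raw_load));   /* prints 0x1234 */
        return 0;
}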
@@ -39,14 +39,45 @@ BTFIXUPDEF_CALL(void, clear_clock_irq, void)
 BTFIXUPDEF_CALL(void, clear_profile_irq, int)
 BTFIXUPDEF_CALL(void, load_profile_irq, int, unsigned int)
 
-#define disable_irq_nosync disable_irq
-#define disable_irq(irq) BTFIXUP_CALL(disable_irq)(irq)
-#define enable_irq(irq) BTFIXUP_CALL(enable_irq)(irq)
-#define disable_pil_irq(irq) BTFIXUP_CALL(disable_pil_irq)(irq)
-#define enable_pil_irq(irq) BTFIXUP_CALL(enable_pil_irq)(irq)
-#define clear_clock_irq() BTFIXUP_CALL(clear_clock_irq)()
-#define clear_profile_irq(cpu) BTFIXUP_CALL(clear_profile_irq)(cpu)
-#define load_profile_irq(cpu,limit) BTFIXUP_CALL(load_profile_irq)(cpu,limit)
+static inline void disable_irq_nosync(unsigned int irq)
+{
+       BTFIXUP_CALL(disable_irq)(irq);
+}
+
+static inline void disable_irq(unsigned int irq)
+{
+       BTFIXUP_CALL(disable_irq)(irq);
+}
+
+static inline void enable_irq(unsigned int irq)
+{
+       BTFIXUP_CALL(enable_irq)(irq);
+}
+
+static inline void disable_pil_irq(unsigned int irq)
+{
+       BTFIXUP_CALL(disable_pil_irq)(irq);
+}
+
+static inline void enable_pil_irq(unsigned int irq)
+{
+       BTFIXUP_CALL(enable_pil_irq)(irq);
+}
+
+static inline void clear_clock_irq(void)
+{
+       BTFIXUP_CALL(clear_clock_irq)();
+}
+
+static inline void clear_profile_irq(int irq)
+{
+       BTFIXUP_CALL(clear_profile_irq)(irq);
+}
+
+static inline void load_profile_irq(int cpu, int limit)
+{
+       BTFIXUP_CALL(load_profile_irq)(cpu, limit);
+}
 
 extern void (*sparc_init_timers)(irqreturn_t (*lvl10_irq)(int, void *, struct pt_regs *));
 extern void claim_ticker14(irqreturn_t (*irq_handler)(int, void *, struct pt_regs *),
...
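Replacing the BTFIXUP macros with inline functions removes a macro-ordering trap (the old "#define disable_irq_nosync disable_irq" only works if it expands before disable_irq(irq) is itself defined as a macro) and gives every caller a checked prototype. A userspace sketch of the same shape (assumption: hw_disable_irq() is a hypothetical stand-in for the BTFIXUP dispatch):

#include <stdio.h>

/* Stand-in for BTFIXUP_CALL(disable_irq). */
static void hw_disable_irq(unsigned int irq)
{
        printf("disable irq %u\n", irq);
}

/* As a real function, disable_irq() has a prototype the compiler
 * checks, and aliasing it is a plain call, not a macro trick. */
static inline void disable_irq(unsigned int irq)
{
        hw_disable_irq(irq);
}

static inline void disable_irq_nosync(unsigned int irq)
{
        disable_irq(irq);       /* no expansion-order dependency */
}

int main(void)
{
        disable_irq_nosync(5);
        return 0;
}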
@@ -76,7 +76,7 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ unsigned long sun4c_get_synchronous_error(void)
+static inline unsigned long sun4c_get_synchronous_error(void)
 {
        unsigned long sync_err;
@@ -86,7 +86,7 @@ extern __inline__ unsigned long sun4c_get_synchronous_error(void)
        return sync_err;
 }
 
-extern __inline__ unsigned long sun4c_get_synchronous_address(void)
+static inline unsigned long sun4c_get_synchronous_address(void)
 {
        unsigned long sync_addr;
@@ -97,7 +97,7 @@ extern __inline__ unsigned long sun4c_get_synchronous_address(void)
 }
 
 /* SUN4 pte, segmap, and context manipulation */
-extern __inline__ unsigned long sun4c_get_segmap(unsigned long addr)
+static inline unsigned long sun4c_get_segmap(unsigned long addr)
 {
        register unsigned long entry;
@@ -107,14 +107,15 @@ extern __inline__ unsigned long sun4c_get_segmap(unsigned long addr)
        return entry;
 }
 
-extern __inline__ void sun4c_put_segmap(unsigned long addr, unsigned long entry)
+static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
 {
        __asm__ __volatile__("\n\tstha %1, [%0] %2; nop; nop; nop;\n\t" : :
                             "r" (addr), "r" (entry),
-                            "i" (ASI_SEGMAP));
+                            "i" (ASI_SEGMAP)
+                            : "memory");
 }
 
-extern __inline__ unsigned long sun4c_get_pte(unsigned long addr)
+static inline unsigned long sun4c_get_pte(unsigned long addr)
 {
        register unsigned long entry;
@@ -124,14 +125,15 @@ extern __inline__ unsigned long sun4c_get_pte(unsigned long addr)
        return entry;
 }
 
-extern __inline__ void sun4c_put_pte(unsigned long addr, unsigned long entry)
+static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
 {
        __asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
                             "r" (addr),
-                            "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE));
+                            "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
+                            : "memory");
 }
 
-extern __inline__ int sun4c_get_context(void)
+static inline int sun4c_get_context(void)
 {
        register int ctx;
@@ -142,10 +144,11 @@ extern __inline__ int sun4c_get_context(void)
        return ctx;
 }
 
-extern __inline__ int sun4c_set_context(int ctx)
+static inline int sun4c_set_context(int ctx)
 {
        __asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
-                            "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL));
+                            "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
+                            : "memory");
 
        return ctx;
 }
...
@@ -76,7 +76,7 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ unsigned long sun4c_get_synchronous_error(void)
+static inline unsigned long sun4c_get_synchronous_error(void)
 {
        unsigned long sync_err;
@@ -86,7 +86,7 @@ extern __inline__ unsigned long sun4c_get_synchronous_error(void)
        return sync_err;
 }
 
-extern __inline__ unsigned long sun4c_get_synchronous_address(void)
+static inline unsigned long sun4c_get_synchronous_address(void)
 {
        unsigned long sync_addr;
@@ -97,7 +97,7 @@ extern __inline__ unsigned long sun4c_get_synchronous_address(void)
 }
 
 /* SUN4C pte, segmap, and context manipulation */
-extern __inline__ unsigned long sun4c_get_segmap(unsigned long addr)
+static inline unsigned long sun4c_get_segmap(unsigned long addr)
 {
        register unsigned long entry;
@@ -108,15 +108,16 @@ extern __inline__ unsigned long sun4c_get_segmap(unsigned long addr)
        return entry;
 }
 
-extern __inline__ void sun4c_put_segmap(unsigned long addr, unsigned long entry)
+static inline void sun4c_put_segmap(unsigned long addr, unsigned long entry)
 {
        __asm__ __volatile__("\n\tstba %1, [%0] %2; nop; nop; nop;\n\t" : :
                             "r" (addr), "r" (entry),
-                            "i" (ASI_SEGMAP));
+                            "i" (ASI_SEGMAP)
+                            : "memory");
 }
 
-extern __inline__ unsigned long sun4c_get_pte(unsigned long addr)
+static inline unsigned long sun4c_get_pte(unsigned long addr)
 {
        register unsigned long entry;
@@ -126,14 +127,15 @@ extern __inline__ unsigned long sun4c_get_pte(unsigned long addr)
        return entry;
 }
 
-extern __inline__ void sun4c_put_pte(unsigned long addr, unsigned long entry)
+static inline void sun4c_put_pte(unsigned long addr, unsigned long entry)
 {
        __asm__ __volatile__("\n\tsta %1, [%0] %2; nop; nop; nop;\n\t" : :
                             "r" (addr),
-                            "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE));
+                            "r" ((entry & ~(_SUN4C_PAGE_PRESENT))), "i" (ASI_PTE)
+                            : "memory");
 }
 
-extern __inline__ int sun4c_get_context(void)
+static inline int sun4c_get_context(void)
 {
        register int ctx;
@@ -144,10 +146,11 @@ extern __inline__ int sun4c_get_context(void)
        return ctx;
 }
 
-extern __inline__ int sun4c_set_context(int ctx)
+static inline int sun4c_set_context(int ctx)
 {
        __asm__ __volatile__("\n\tstba %0, [%1] %2; nop; nop; nop;\n\t" : :
-                            "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL));
+                            "r" (ctx), "r" (AC_CONTEXT), "i" (ASI_CONTROL)
+                            : "memory");
 
        return ctx;
 }
...
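In both the sun4 and sun4c variants the sta/stha/stba accessors now carry a "memory" clobber: rewriting a segmap entry or PTE changes what subsequent loads and stores mean, so the compiler must not cache or reorder memory accesses around them. A hypothetical caller, shown only to illustrate the intended pairing of the accessors above (not in the patch; assumes the kernel's PAGE_MASK):

static inline int sun4c_unmap_page_demo(unsigned long vaddr)
{
        vaddr &= PAGE_MASK;
        sun4c_put_pte(vaddr, 0);                /* drop the translation */
        return sun4c_get_pte(vaddr) == 0;       /* read back to verify */
}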
@@ -130,8 +130,12 @@ extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
                             "std\t%%g0, [%0 + %3 + 0x30]\n\t"
                             "st\t%1, [%0 + %3 + 0x38]\n\t"
                             "st\t%%g0, [%0 + %3 + 0x3c]"
-                            : : "r" (regs), "r" (sp - sizeof(struct reg_window)), "r" (zero),
-                            "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0])));
+                            : /* no outputs */
+                            : "r" (regs),
+                              "r" (sp - sizeof(struct reg_window)),
+                              "r" (zero),
+                              "i" ((const unsigned long)(&((struct pt_regs *)0)->u_regs[0]))
+                            : "memory");
 }
 
 /* Free all resources held by a thread. */
...
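The start_thread() asm stores a zeroed register window into *regs behind the compiler's back, which is exactly what the added "memory" clobber declares. The same clobber is also the classic pure compiler barrier; a self-contained illustration:

#include <stdio.h>

static int shared;

static inline void compiler_barrier(void)
{
        /* No instructions, but GCC must assume memory was read and
         * written: stores before this point cannot be deferred past
         * it, and loads after it cannot be hoisted above it. */
        __asm__ __volatile__("" : : : "memory");
}

int main(void)
{
        shared = 1;
        compiler_barrier();     /* 'shared' is really in memory here */
        printf("%d\n", shared);
        return 0;
}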
@@ -96,27 +96,29 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ unsigned int get_ross_icr(void)
+static inline unsigned int get_ross_icr(void)
 {
        unsigned int icreg;
 
        __asm__ __volatile__(".word 0x8347c000\n\t" /* rd %iccr, %g1 */
-                            "mov %%g1, %0\n\t" :
-                            "=r" (icreg) : :
-                            "g1", "memory");
+                            "mov %%g1, %0\n\t"
+                            : "=r" (icreg)
+                            : /* no inputs */
+                            : "g1", "memory");
 
        return icreg;
 }
 
-extern __inline__ void put_ross_icr(unsigned int icreg)
+static inline void put_ross_icr(unsigned int icreg)
 {
        __asm__ __volatile__("or %%g0, %0, %%g1\n\t"
                             ".word 0xbf806000\n\t" /* wr %g1, 0x0, %iccr */
                             "nop\n\t"
                             "nop\n\t"
-                            "nop\n\t" : :
-                            "r" (icreg) :
-                            "g1", "memory");
+                            "nop\n\t"
+                            : /* no outputs */
+                            : "r" (icreg)
+                            : "g1", "memory");
 
        return;
 }
@@ -124,52 +126,62 @@ extern __inline__ void put_ross_icr(unsigned int icreg)
 /* HyperSparc specific cache flushing. */
 
 /* This is for the on-chip instruction cache. */
-extern __inline__ void hyper_flush_whole_icache(void)
+static inline void hyper_flush_whole_icache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_FLUSH_IWHOLE));
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_FLUSH_IWHOLE)
+                            : "memory");
        return;
 }
 
 extern int vac_cache_size;
 extern int vac_line_size;
 
-extern __inline__ void hyper_clear_all_tags(void)
+static inline void hyper_clear_all_tags(void)
 {
        unsigned long addr;
 
        for(addr = 0; addr < vac_cache_size; addr += vac_line_size)
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                                    "r" (addr), "i" (ASI_M_DATAC_TAG));
+               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                                    : /* no outputs */
+                                    : "r" (addr), "i" (ASI_M_DATAC_TAG)
+                                    : "memory");
 }
 
-extern __inline__ void hyper_flush_unconditional_combined(void)
+static inline void hyper_flush_unconditional_combined(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < vac_cache_size; addr += vac_line_size)
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                                    "r" (addr), "i" (ASI_M_FLUSH_CTX));
+       for (addr = 0; addr < vac_cache_size; addr += vac_line_size)
+               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                                    : /* no outputs */
+                                    : "r" (addr), "i" (ASI_M_FLUSH_CTX)
+                                    : "memory");
 }
 
-extern __inline__ void hyper_flush_cache_user(void)
+static inline void hyper_flush_cache_user(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < vac_cache_size; addr += vac_line_size)
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                                    "r" (addr), "i" (ASI_M_FLUSH_USER));
+       for (addr = 0; addr < vac_cache_size; addr += vac_line_size)
+               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                                    : /* no outputs */
+                                    : "r" (addr), "i" (ASI_M_FLUSH_USER)
+                                    : "memory");
 }
 
-extern __inline__ void hyper_flush_cache_page(unsigned long page)
+static inline void hyper_flush_cache_page(unsigned long page)
 {
        unsigned long end;
 
        page &= PAGE_MASK;
        end = page + PAGE_SIZE;
-       while(page < end) {
-               __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                                    "r" (page), "i" (ASI_M_FLUSH_PAGE));
+       while (page < end) {
+               __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                                    : /* no outputs */
+                                    : "r" (page), "i" (ASI_M_FLUSH_PAGE)
+                                    : "memory");
                page += vac_line_size;
        }
 }
...
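Every HyperSparc flush above is a sta into a flush ASI: there is no C-visible result, only a cache side effect, so __volatile__ keeps the store alive and the new "memory" clobber pins it in program order. The common shape of these loops, as one sketch (illustrative and SPARC-only; ASI_M_FLUSH_CTX stands for any of the flush ASIs used above, and the loop bounds are placeholders):

static inline void flush_range_demo(unsigned long base, unsigned long len,
                                    unsigned long line)
{
        unsigned long addr;

        for (addr = base; addr < base + len; addr += line)
                __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
                                     : /* no outputs */
                                     : "r" (addr), "i" (ASI_M_FLUSH_CTX)
                                     : "memory");
}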
@@ -27,68 +27,80 @@
 #define SWIFT_EN 0x00000001 /* MMU enable */
 
 /* Bits [13:5] select one of 512 instruction cache tags */
-extern __inline__ void swift_inv_insn_tag(unsigned long addr)
+static inline void swift_inv_insn_tag(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_TXTC_TAG));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_TXTC_TAG)
+                            : "memory");
 }
 
 /* Bits [12:4] select one of 512 data cache tags */
-extern __inline__ void swift_inv_data_tag(unsigned long addr)
+static inline void swift_inv_data_tag(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_DATAC_TAG));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_DATAC_TAG)
+                            : "memory");
 }
 
-extern __inline__ void swift_flush_dcache(void)
+static inline void swift_flush_dcache(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x2000; addr += 0x10)
+       for (addr = 0; addr < 0x2000; addr += 0x10)
                swift_inv_data_tag(addr);
 }
 
-extern __inline__ void swift_flush_icache(void)
+static inline void swift_flush_icache(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x4000; addr += 0x20)
+       for (addr = 0; addr < 0x4000; addr += 0x20)
                swift_inv_insn_tag(addr);
 }
 
-extern __inline__ void swift_idflash_clear(void)
+static inline void swift_idflash_clear(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x2000; addr += 0x10) {
+       for (addr = 0; addr < 0x2000; addr += 0x10) {
                swift_inv_insn_tag(addr<<1);
                swift_inv_data_tag(addr);
        }
 }
 
 /* Swift is so broken, it isn't even safe to use the following. */
-extern __inline__ void swift_flush_page(unsigned long page)
+static inline void swift_flush_page(unsigned long page)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (page), "i" (ASI_M_FLUSH_PAGE));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (page), "i" (ASI_M_FLUSH_PAGE)
+                            : "memory");
 }
 
-extern __inline__ void swift_flush_segment(unsigned long addr)
+static inline void swift_flush_segment(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_FLUSH_SEG));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_FLUSH_SEG)
+                            : "memory");
 }
 
-extern __inline__ void swift_flush_region(unsigned long addr)
+static inline void swift_flush_region(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_FLUSH_REGION));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_FLUSH_REGION)
+                            : "memory");
 }
 
-extern __inline__ void swift_flush_context(void)
+static inline void swift_flush_context(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_FLUSH_CTX));
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_FLUSH_CTX)
+                            : "memory");
 }
 
 #endif /* !(_SPARC_SWIFT_H) */
...
@@ -220,7 +220,7 @@ extern __inline__ unsigned long swap_pil(unsigned long __new_psr)
                             "wr %0, %2, %%psr\n\t"
                             "nop; nop; nop;\n"
                             "1:\n"
-                            : "=r" (retval)
+                            : "=&r" (retval)
                             : "r" (__new_psr), "i" (PSR_PIL)
                             : "g1", "g2", "memory", "cc");
@@ -298,7 +298,8 @@ extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned
 #ifdef CONFIG_SMP
        __asm__ __volatile__("swap [%2], %0"
                             : "=&r" (val)
-                            : "0" (val), "r" (m));
+                            : "0" (val), "r" (m)
+                            : "memory");
        return val;
 #else
        register unsigned long *ptr asm("g1");
...
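The "memory" clobber added to the SMP swap in xchg_u32() is what makes an exchange usable as a lock primitive: the compiler must treat the swap as a barrier, so accesses to protected data cannot be moved across it. A minimal test-and-set lock sketch built on it (illustrative only; no backoff, IRQ masking, or CPU-level memory barriers):

static inline void demo_spin_lock(volatile unsigned long *lock)
{
        /* Atomically store 1 and fetch the old value; 0 means we won. */
        while (xchg_u32(lock, 1) != 0)
                /* spin */ ;
}

static inline void demo_spin_unlock(volatile unsigned long *lock)
{
        *lock = 0;
}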
@@ -45,16 +45,20 @@
 #define TSUNAMI_NF 0x00000002
 #define TSUNAMI_ME 0x00000001
 
-extern __inline__ void tsunami_flush_icache(void)
+static inline void tsunami_flush_icache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_IC_FLCLEAR) : "memory");
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_IC_FLCLEAR)
+                            : "memory");
 }
 
-extern __inline__ void tsunami_flush_dcache(void)
+static inline void tsunami_flush_dcache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_DC_FLCLEAR) : "memory");
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_DC_FLCLEAR)
+                            : "memory");
 }
 
 #endif /* !(_SPARC_TSUNAMI_H) */
...
@@ -59,60 +59,64 @@
 #ifndef __ASSEMBLY__
 
 /* Bits [13:5] select one of 512 instruction cache tags */
-extern __inline__ void turbosparc_inv_insn_tag(unsigned long addr)
+static inline void turbosparc_inv_insn_tag(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_TXTC_TAG));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_TXTC_TAG)
+                            : "memory");
 }
 
 /* Bits [13:5] select one of 512 data cache tags */
-extern __inline__ void turbosparc_inv_data_tag(unsigned long addr)
+static inline void turbosparc_inv_data_tag(unsigned long addr)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (addr), "i" (ASI_M_DATAC_TAG));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (addr), "i" (ASI_M_DATAC_TAG)
+                            : "memory");
 }
 
-extern __inline__ void turbosparc_flush_icache(void)
+static inline void turbosparc_flush_icache(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x4000; addr += 0x20)
+       for (addr = 0; addr < 0x4000; addr += 0x20)
                turbosparc_inv_insn_tag(addr);
 }
 
-extern __inline__ void turbosparc_flush_dcache(void)
+static inline void turbosparc_flush_dcache(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x4000; addr += 0x20)
+       for (addr = 0; addr < 0x4000; addr += 0x20)
                turbosparc_inv_data_tag(addr);
 }
 
-extern __inline__ void turbosparc_idflash_clear(void)
+static inline void turbosparc_idflash_clear(void)
 {
        unsigned long addr;
 
-       for(addr = 0; addr < 0x4000; addr += 0x20) {
+       for (addr = 0; addr < 0x4000; addr += 0x20) {
                turbosparc_inv_insn_tag(addr);
                turbosparc_inv_data_tag(addr);
        }
 }
 
-extern __inline__ void turbosparc_set_ccreg(unsigned long regval)
+static inline void turbosparc_set_ccreg(unsigned long regval)
 {
-       __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
-                            "r" (regval), "r" (0x600),
-                            "i" (ASI_M_MMUREGS));
+       __asm__ __volatile__("sta %0, [%1] %2\n\t"
+                            : /* no outputs */
+                            : "r" (regval), "r" (0x600), "i" (ASI_M_MMUREGS)
+                            : "memory");
 }
 
-extern __inline__ unsigned long turbosparc_get_ccreg(void)
+static inline unsigned long turbosparc_get_ccreg(void)
 {
        unsigned long regval;
 
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (regval) :
-                            "r" (0x600),
-                            "i" (ASI_M_MMUREGS));
+       __asm__ __volatile__("lda [%1] %2, %0\n\t"
                            : "=r" (regval)
                            : "r" (0x600), "i" (ASI_M_MMUREGS));
 
        return regval;
 }
...
@@ -108,27 +108,29 @@ struct sun4c_vac_props {
 extern struct sun4c_vac_props sun4c_vacinfo;
 
 /* sun4c_enable_vac() enables the sun4c virtual address cache. */
-extern __inline__ void sun4c_enable_vac(void)
+static inline void sun4c_enable_vac(void)
 {
        __asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
                             "or %%g1, %2, %%g1\n\t"
-                            "stba %%g1, [%0] %1\n\t" : :
-                            "r" ((unsigned int) AC_SENABLE),
-                            "i" (ASI_CONTROL), "i" (SENABLE_CACHE) :
-                            "g1");
-       sun4c_vacinfo.on = 1;
+                            "stba %%g1, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" ((unsigned int) AC_SENABLE),
+                              "i" (ASI_CONTROL), "i" (SENABLE_CACHE)
+                            : "g1", "memory");
+       sun4c_vacinfo.on = 1;
 }
 
 /* sun4c_disable_vac() disables the virtual address cache. */
-extern __inline__ void sun4c_disable_vac(void)
+static inline void sun4c_disable_vac(void)
 {
        __asm__ __volatile__("lduba [%0] %1, %%g1\n\t"
                             "andn %%g1, %2, %%g1\n\t"
-                            "stba %%g1, [%0] %1\n\t" : :
-                            "r" ((unsigned int) AC_SENABLE),
-                            "i" (ASI_CONTROL), "i" (SENABLE_CACHE) :
-                            "g1");
-       sun4c_vacinfo.on = 0;
+                            "stba %%g1, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" ((unsigned int) AC_SENABLE),
+                              "i" (ASI_CONTROL), "i" (SENABLE_CACHE)
+                            : "g1", "memory");
+       sun4c_vacinfo.on = 0;
 }
 
 #endif /* !(_SPARC_VAC_OPS_H) */
...
@@ -110,48 +110,57 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ void viking_flush_icache(void)
+static inline void viking_flush_icache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_IC_FLCLEAR));
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_IC_FLCLEAR)
+                            : "memory");
 }
 
-extern __inline__ void viking_flush_dcache(void)
+static inline void viking_flush_dcache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
-                            "i" (ASI_M_DC_FLCLEAR));
+       __asm__ __volatile__("sta %%g0, [%%g0] %0\n\t"
+                            : /* no outputs */
+                            : "i" (ASI_M_DC_FLCLEAR)
+                            : "memory");
 }
 
-extern __inline__ void viking_unlock_icache(void)
+static inline void viking_unlock_icache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (0x80000000), "i" (ASI_M_IC_FLCLEAR));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (0x80000000), "i" (ASI_M_IC_FLCLEAR)
+                            : "memory");
 }
 
-extern __inline__ void viking_unlock_dcache(void)
+static inline void viking_unlock_dcache(void)
 {
-       __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
-                            "r" (0x80000000), "i" (ASI_M_DC_FLCLEAR));
+       __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (0x80000000), "i" (ASI_M_DC_FLCLEAR)
+                            : "memory");
 }
 
-extern __inline__ void viking_set_bpreg(unsigned long regval)
+static inline void viking_set_bpreg(unsigned long regval)
 {
-       __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
-                            "r" (regval),
-                            "i" (ASI_M_ACTION));
+       __asm__ __volatile__("sta %0, [%%g0] %1\n\t"
+                            : /* no outputs */
+                            : "r" (regval), "i" (ASI_M_ACTION)
+                            : "memory");
 }
 
-extern __inline__ unsigned long viking_get_bpreg(void)
+static inline unsigned long viking_get_bpreg(void)
 {
        unsigned long regval;
 
-       __asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
-                            "=r" (regval) :
-                            "i" (ASI_M_ACTION));
+       __asm__ __volatile__("lda [%%g0] %1, %0\n\t"
+                            : "=r" (regval)
+                            : "i" (ASI_M_ACTION));
 
        return regval;
 }
 
-extern __inline__ void viking_get_dcache_ptag(int set, int block,
-                                              unsigned long *data)
+static inline void viking_get_dcache_ptag(int set, int block,
+                                          unsigned long *data)
 {
        unsigned long ptag = ((set & 0x7f) << 5) | ((block & 0x3) << 26) |
@@ -160,15 +169,15 @@ extern __inline__ void viking_get_dcache_ptag(int set, int block,
        __asm__ __volatile__ ("ldda [%2] %3, %%g2\n\t"
                              "or %%g0, %%g2, %0\n\t"
-                             "or %%g0, %%g3, %1\n\t" :
-                             "=r" (info), "=r" (page) :
-                             "r" (ptag), "i" (ASI_M_DATAC_TAG) :
-                             "g2", "g3");
+                             "or %%g0, %%g3, %1\n\t"
+                             : "=r" (info), "=r" (page)
+                             : "r" (ptag), "i" (ASI_M_DATAC_TAG)
+                             : "g2", "g3");
 
        data[0] = info;
        data[1] = page;
 }
 
-extern __inline__ void viking_mxcc_turn_off_parity(unsigned long *mregp,
-                                                   unsigned long *mxcc_cregp)
+static inline void viking_mxcc_turn_off_parity(unsigned long *mregp,
+                                              unsigned long *mxcc_cregp)
 {
        unsigned long mreg = *mregp;
@@ -190,30 +199,32 @@ extern __inline__ void viking_mxcc_turn_off_parity(unsigned long *mregp,
                             "2:\n\t"
                             "sta %0, [%%g0] %3\n\t"
                             "sta %1, [%2] %4\n"
-                            "1:\n\t" : :
-                            "r" (mreg), "r" (mxcc_creg),
-                            "r" (MXCC_CREG), "i" (ASI_M_MMUREGS),
-                            "i" (ASI_M_MXCC) : "g2", "cc");
+                            "1:\n\t"
+                            : /* no output */
+                            : "r" (mreg), "r" (mxcc_creg),
+                              "r" (MXCC_CREG), "i" (ASI_M_MMUREGS),
+                              "i" (ASI_M_MXCC)
+                            : "g2", "memory", "cc");
 
        *mregp = mreg;
        *mxcc_cregp = mxcc_creg;
 }
 
-extern __inline__ unsigned long viking_hwprobe(unsigned long vaddr)
+static inline unsigned long viking_hwprobe(unsigned long vaddr)
 {
        unsigned long val;
 
        vaddr &= PAGE_MASK;
        /* Probe all MMU entries. */
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (val) :
-                            "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
+       __asm__ __volatile__("lda [%1] %2, %0\n\t"
+                            : "=r" (val)
+                            : "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
        if (!val)
                return 0;
 
        /* Probe region. */
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (val) :
-                            "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
+       __asm__ __volatile__("lda [%1] %2, %0\n\t"
+                            : "=r" (val)
+                            : "r" (vaddr | 0x200), "i" (ASI_M_FLUSH_PROBE));
        if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                vaddr &= ~SRMMU_PGDIR_MASK;
                vaddr >>= PAGE_SHIFT;
@@ -221,9 +232,9 @@ extern __inline__ unsigned long viking_hwprobe(unsigned long vaddr)
        }
 
        /* Probe segment. */
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (val) :
-                            "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
+       __asm__ __volatile__("lda [%1] %2, %0\n\t"
+                            : "=r" (val)
+                            : "r" (vaddr | 0x100), "i" (ASI_M_FLUSH_PROBE));
        if ((val & SRMMU_ET_MASK) == SRMMU_ET_PTE) {
                vaddr &= ~SRMMU_PMD_MASK;
                vaddr >>= PAGE_SHIFT;
@@ -231,9 +242,9 @@ extern __inline__ unsigned long viking_hwprobe(unsigned long vaddr)
        }
 
        /* Probe page. */
-       __asm__ __volatile__("lda [%1] %2, %0\n\t" :
-                            "=r" (val) :
-                            "r" (vaddr), "i" (ASI_M_FLUSH_PROBE));
+       __asm__ __volatile__("lda [%1] %2, %0\n\t"
+                            : "=r" (val)
+                            : "r" (vaddr), "i" (ASI_M_FLUSH_PROBE));
        return val;
 }
...
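viking_hwprobe() walks the SRMMU probe hierarchy above, trying all entries (0x400), region (0x200), segment (0x100), then page, and returns a PTE only if one of the probes resolves. A hypothetical one-line caller (not in the patch):

/* Illustrative helper: nonzero when the MMU can already translate vaddr. */
static inline int viking_vaddr_mapped_demo(unsigned long vaddr)
{
        return viking_hwprobe(vaddr) != 0;
}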