Commit cc957886 authored by David S. Miller's avatar David S. Miller

Merge nuts.ninka.net:/home/davem/src/BK/sparcwork-2.5

into nuts.ninka.net:/home/davem/src/BK/sparc-2.5
parents 8a257dca 281a37e8
...@@ -1095,18 +1095,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs, ...@@ -1095,18 +1095,9 @@ asmlinkage int do_signal(sigset_t *oldset, struct pt_regs * regs,
sigset_t *mask = &current->blocked; sigset_t *mask = &current->blocked;
unsigned long signr = 0; unsigned long signr = 0;
local_irq_disable(); spin_lock_irq(&current->sig->siglock);
if (current->sig->shared_pending.head) { signr = dequeue_signal(mask, &info);
spin_lock(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->sig->shared_pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
if (!signr) {
spin_lock(&current->sig->siglock);
signr = dequeue_signal(&current->pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
local_irq_enable();
if (!signr) if (!signr)
break; break;
......
...@@ -23,6 +23,9 @@ ...@@ -23,6 +23,9 @@
#include <linux/pci.h> #include <linux/pci.h>
#endif #endif
#include <linux/pm.h> #include <linux/pm.h>
#ifdef CONFIG_HIGHMEM
#include <linux/highmem.h>
#endif
#include <asm/oplib.h> #include <asm/oplib.h>
#include <asm/delay.h> #include <asm/delay.h>
...@@ -199,6 +202,12 @@ EXPORT_SYMBOL(ioremap); ...@@ -199,6 +202,12 @@ EXPORT_SYMBOL(ioremap);
EXPORT_SYMBOL(iounmap); EXPORT_SYMBOL(iounmap);
#endif #endif
/* in arch/sparc/mm/highmem.c */
#ifdef CONFIG_HIGHMEM
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
#endif
/* Solaris/SunOS binary compatibility */ /* Solaris/SunOS binary compatibility */
EXPORT_SYMBOL(svr4_setcontext); EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(svr4_getcontext); EXPORT_SYMBOL(svr4_getcontext);
......
...@@ -547,7 +547,7 @@ void __init sun4d_init_IRQ(void) ...@@ -547,7 +547,7 @@ void __init sun4d_init_IRQ(void)
BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(clear_profile_irq, sun4d_clear_profile_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(load_profile_irq, sun4d_load_profile_irq, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(__irq_itoa, sun4d_irq_itoa, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(__irq_itoa, sun4d_irq_itoa, BTFIXUPCALL_NORM);
init_timers = sun4d_init_timers; sparc_init_timers = sun4d_init_timers;
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM); BTFIXUPSET_CALL(set_cpu_int, sun4d_set_cpu_int, BTFIXUPCALL_NORM);
BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP); BTFIXUPSET_CALL(clear_cpu_int, sun4d_clear_ipi, BTFIXUPCALL_NOP);
......
...@@ -12,6 +12,10 @@ else ...@@ -12,6 +12,10 @@ else
obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o obj-y += srmmu.o iommu.o io-unit.o hypersparc.o viking.o tsunami.o swift.o
endif endif
ifdef CONFIG_HIGHMEM
obj-y += highmem.o
endif
ifdef CONFIG_SMP ifdef CONFIG_SMP
obj-y += nosun4c.o obj-y += nosun4c.o
else else
......
...@@ -13,6 +13,8 @@ ...@@ -13,6 +13,8 @@
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/page.h> #include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
static inline void forget_pte(pte_t page) static inline void forget_pte(pte_t page)
{ {
......
/*
* highmem.c: virtual kernel memory mappings for high memory
*
* Provides kernel-static versions of atomic kmap functions originally
* found as inlines in include/asm-sparc/highmem.h. These became
* needed as kmap_atomic() and kunmap_atomic() started getting
* called from within modules.
* -- Tomas Szepe <szepe@pinerecords.com>, September 2002
*
* But kmap_atomic() and kunmap_atomic() cannot be inlined in
* modules because they are loaded with btfixup-ped functions.
*/
/*
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need it.
*
* XXX This is an old text. Actually, it's good to use atomic kmaps,
* provided you remember that they are atomic and not try to sleep
* with a kmap taken, much like a spinlock. Non-atomic kmaps are
* shared by CPUs, and so precious, and establishing them requires IPI.
* Atomic kmaps are lightweight and we may have NCPUS more of them.
*/
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/*
 * Establish a temporary, per-CPU atomic kernel mapping for @page and
 * return its kernel virtual address.  Must be paired with
 * kunmap_atomic(); the caller may not sleep while the mapping is held
 * (preemption is disabled for the duration).
 *
 * @page: the (possibly high) memory page to map
 * @type: the per-CPU kmap slot (enum km_type) to use; each CPU owns
 *        KM_TYPE_NR fixmap slots, indexed by type
 */
void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned long idx;
unsigned long vaddr;
/* Disable preemption so the per-CPU fixmap slot stays ours until kunmap_atomic(). */
inc_preempt_count();
/* Low-memory pages are permanently mapped; no fixmap slot needed. */
if (page < highmem_start_page)
return page_address(page);
/* Select this CPU's slot for the requested kmap type. */
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;
/* XXX Fix - Anton */
/*
 * NOTE(review): a targeted single-page cache flush is disabled below in
 * favor of flush_cache_all() — presumably because __flush_cache_one()
 * is not yet safe/correct on all srmmu variants; confirm before
 * re-enabling the #if 0 path.
 */
#if 0
__flush_cache_one(vaddr);
#else
flush_cache_all();
#endif
#if HIGHMEM_DEBUG
/* Debug build: the slot must be empty, otherwise a previous atomic
 * mapping of this slot was never torn down. */
if (!pte_none(*(kmap_pte+idx)))
BUG();
#endif
/* Install the mapping into the fixmap pte for this slot. */
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
/* Same situation as the cache flush above: full TLB flush used in
 * place of a single-entry flush until the one-page variant is fixed. */
#if 0
__flush_tlb_one(vaddr);
#else
flush_tlb_all();
#endif
return (void*) vaddr;
}
/*
 * Tear down an atomic kernel mapping previously created by
 * kmap_atomic() and re-enable preemption.
 *
 * @kvaddr: the address returned by kmap_atomic()
 * @type:   the same kmap slot that was passed to kmap_atomic()
 */
void kunmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr;
unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
/* Addresses below the fixmap area came from the permanent lowmem
 * mapping (the early-return path in kmap_atomic); nothing to unmap,
 * just drop the preempt count. */
if (vaddr < FIX_KMAP_BEGIN) { // FIXME
dec_preempt_count();
return;
}
/* Sanity check: the address must match the slot implied by (type, cpu). */
if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
BUG();
/* XXX Fix - Anton */
/* Full cache flush stands in for a one-page flush, mirroring kmap_atomic(). */
#if 0
__flush_cache_one(vaddr);
#else
flush_cache_all();
#endif
/*
 * NOTE(review): this uses #ifdef while kmap_atomic() uses
 * "#if HIGHMEM_DEBUG" — behaviorally the same while HIGHMEM_DEBUG is
 * defined as 1, but worth unifying.  Also note the #ifdef below
 * encloses BOTH the pte_clear() and its TLB flush, so in a
 * non-debug build the mapping (and its TLB entry) is deliberately
 * left in place to be overwritten by the next kmap_atomic() of the
 * same slot — confirm this is intended before changing the nesting.
 */
#ifdef HIGHMEM_DEBUG
/*
 * force other mappings to Oops if they'll try to access
 * this pte without first remap it
 */
pte_clear(kmap_pte+idx);
/* XXX Fix - Anton */
#if 0
__flush_tlb_one(vaddr);
#else
flush_tlb_all();
#endif
#endif
dec_preempt_count();
}
...@@ -38,6 +38,8 @@ ...@@ -38,6 +38,8 @@
#include <asm/a.out.h> #include <asm/a.out.h>
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/io-unit.h> #include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* Now the cpu specific definitions. */ /* Now the cpu specific definitions. */
#include <asm/viking.h> #include <asm/viking.h>
......
...@@ -29,7 +29,7 @@ CONFIG_BBC_I2C=m ...@@ -29,7 +29,7 @@ CONFIG_BBC_I2C=m
CONFIG_VT=y CONFIG_VT=y
CONFIG_VT_CONSOLE=y CONFIG_VT_CONSOLE=y
CONFIG_HW_CONSOLE=y CONFIG_HW_CONSOLE=y
CONFIG_HUGETLB_PAGE=y # CONFIG_HUGETLB_PAGE is not set
CONFIG_SMP=y CONFIG_SMP=y
# CONFIG_PREEMPT is not set # CONFIG_PREEMPT is not set
CONFIG_SPARC64=y CONFIG_SPARC64=y
...@@ -157,7 +157,7 @@ CONFIG_SUN_MOSTEK_RTC=y ...@@ -157,7 +157,7 @@ CONFIG_SUN_MOSTEK_RTC=y
CONFIG_OBP_FLASH=m CONFIG_OBP_FLASH=m
# CONFIG_SUN_BPP is not set # CONFIG_SUN_BPP is not set
# CONFIG_SUN_VIDEOPIX is not set # CONFIG_SUN_VIDEOPIX is not set
CONFIG_SUN_AURORA=m # CONFIG_SUN_AURORA is not set
# #
# Memory Technology Devices (MTD) # Memory Technology Devices (MTD)
...@@ -335,26 +335,7 @@ CONFIG_NET_FC=y ...@@ -335,26 +335,7 @@ CONFIG_NET_FC=y
# #
# IEEE 1394 (FireWire) support (EXPERIMENTAL) # IEEE 1394 (FireWire) support (EXPERIMENTAL)
# #
CONFIG_IEEE1394=m # CONFIG_IEEE1394 is not set
#
# Device Drivers
#
# CONFIG_IEEE1394_PCILYNX is not set
CONFIG_IEEE1394_OHCI1394=m
#
# Protocol Drivers
#
# CONFIG_IEEE1394_VIDEO1394 is not set
CONFIG_IEEE1394_SBP2=m
# CONFIG_IEEE1394_SBP2_PHYS_DMA is not set
CONFIG_IEEE1394_ETH1394=m
CONFIG_IEEE1394_DV1394=m
CONFIG_IEEE1394_RAWIO=m
CONFIG_IEEE1394_CMP=m
CONFIG_IEEE1394_AMDTP=m
# CONFIG_IEEE1394_VERBOSEDEBUG is not set
# #
# Networking options # Networking options
...@@ -465,6 +446,18 @@ CONFIG_VORTEX=m ...@@ -465,6 +446,18 @@ CONFIG_VORTEX=m
# CONFIG_LANCE is not set # CONFIG_LANCE is not set
# CONFIG_NET_VENDOR_SMC is not set # CONFIG_NET_VENDOR_SMC is not set
# CONFIG_NET_VENDOR_RACAL is not set # CONFIG_NET_VENDOR_RACAL is not set
#
# Tulip family network device support
#
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
# CONFIG_DM9102 is not set
# CONFIG_HP100 is not set # CONFIG_HP100 is not set
# CONFIG_NET_ISA is not set # CONFIG_NET_ISA is not set
CONFIG_NET_PCI=y CONFIG_NET_PCI=y
...@@ -547,18 +540,6 @@ CONFIG_SHAPER=m ...@@ -547,18 +540,6 @@ CONFIG_SHAPER=m
# #
# CONFIG_WAN is not set # CONFIG_WAN is not set
#
# Tulip family network device support
#
CONFIG_NET_TULIP=y
CONFIG_DE2104X=m
CONFIG_TULIP=m
# CONFIG_TULIP_MWI is not set
# CONFIG_TULIP_MMIO is not set
CONFIG_DE4X5=m
CONFIG_WINBOND_840=m
# CONFIG_DM9102 is not set
# #
# Amateur Radio support # Amateur Radio support
# #
...@@ -864,7 +845,7 @@ CONFIG_SND_VIRMIDI=m ...@@ -864,7 +845,7 @@ CONFIG_SND_VIRMIDI=m
# #
CONFIG_SND_ALI5451=m CONFIG_SND_ALI5451=m
CONFIG_SND_CS46XX=m CONFIG_SND_CS46XX=m
CONFIG_SND_CS46XX_ACCEPT_VALID=y # CONFIG_SND_CS46XX_NEW_DSP is not set
CONFIG_SND_CS4281=m CONFIG_SND_CS4281=m
CONFIG_SND_EMU10K1=m CONFIG_SND_EMU10K1=m
CONFIG_SND_KORG1212=m CONFIG_SND_KORG1212=m
...@@ -886,8 +867,12 @@ CONFIG_SND_FM801=m ...@@ -886,8 +867,12 @@ CONFIG_SND_FM801=m
CONFIG_SND_ICE1712=m CONFIG_SND_ICE1712=m
CONFIG_SND_INTEL8X0=m CONFIG_SND_INTEL8X0=m
CONFIG_SND_SONICVIBES=m CONFIG_SND_SONICVIBES=m
CONFIG_SND_VIA686=m # CONFIG_SND_VIA82XX is not set
CONFIG_SND_VIA8233=m
#
# ALSA USB devices
#
# CONFIG_SND_USB_AUDIO is not set
# #
# ALSA Sparc devices # ALSA Sparc devices
...@@ -1031,6 +1016,7 @@ CONFIG_USB_RIO500=m ...@@ -1031,6 +1016,7 @@ CONFIG_USB_RIO500=m
# CONFIG_USB_BRLVGER is not set # CONFIG_USB_BRLVGER is not set
CONFIG_USB_LCD=m CONFIG_USB_LCD=m
# CONFIG_USB_SPEEDTOUCH is not set # CONFIG_USB_SPEEDTOUCH is not set
CONFIG_USB_TEST=m
# #
# Bluetooth support # Bluetooth support
......
...@@ -645,18 +645,9 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs, ...@@ -645,18 +645,9 @@ static int do_signal(sigset_t *oldset, struct pt_regs * regs,
sigset_t *mask = &current->blocked; sigset_t *mask = &current->blocked;
unsigned long signr = 0; unsigned long signr = 0;
local_irq_disable(); spin_lock_irq(&current->sig->siglock);
if (current->sig->shared_pending.head) { signr = dequeue_signal(mask, &info);
spin_lock(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->sig->shared_pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
if (!signr) {
spin_lock(&current->sig->siglock);
signr = dequeue_signal(&current->pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
local_irq_enable();
if (!signr) if (!signr)
break; break;
......
...@@ -1284,18 +1284,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs, ...@@ -1284,18 +1284,9 @@ int do_signal32(sigset_t *oldset, struct pt_regs * regs,
sigset_t *mask = &current->blocked; sigset_t *mask = &current->blocked;
unsigned long signr = 0; unsigned long signr = 0;
local_irq_disable(); spin_lock_irq(&current->sig->siglock);
if (current->sig->shared_pending.head) { signr = dequeue_signal(mask, &info);
spin_lock(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
signr = dequeue_signal(&current->sig->shared_pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
if (!signr) {
spin_lock(&current->sig->siglock);
signr = dequeue_signal(&current->pending, mask, &info);
spin_unlock(&current->sig->siglock);
}
local_irq_enable();
if (!signr) if (!signr)
break; break;
......
...@@ -1972,10 +1972,7 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, ...@@ -1972,10 +1972,7 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
} }
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sig->siglock);
spin_lock(&current->sig->siglock); sig = dequeue_signal(&these, &info);
sig = dequeue_signal(&current->sig->shared_pending, &these, &info);
if (!sig)
sig = dequeue_signal(&current->pending, &these, &info);
if (!sig) { if (!sig) {
timeout = MAX_SCHEDULE_TIMEOUT; timeout = MAX_SCHEDULE_TIMEOUT;
if (uts) if (uts)
...@@ -1989,23 +1986,18 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo, ...@@ -1989,23 +1986,18 @@ sys32_rt_sigtimedwait(sigset_t32 *uthese, siginfo_t32 *uinfo,
current->real_blocked = current->blocked; current->real_blocked = current->blocked;
sigandsets(&current->blocked, &current->blocked, &these); sigandsets(&current->blocked, &current->blocked, &these);
recalc_sigpending(); recalc_sigpending();
spin_unlock(&current->sig->siglock);
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
current->state = TASK_INTERRUPTIBLE; current->state = TASK_INTERRUPTIBLE;
timeout = schedule_timeout(timeout); timeout = schedule_timeout(timeout);
spin_lock_irq(&current->sig->siglock); spin_lock_irq(&current->sig->siglock);
spin_lock(&current->sig->siglock); sig = dequeue_signal(&these, &info);
sig = dequeue_signal(&current->sig->shared_pending, &these, &info);
if (!sig)
sig = dequeue_signal(&current->pending, &these, &info);
current->blocked = current->real_blocked; current->blocked = current->real_blocked;
siginitset(&current->real_blocked, 0); siginitset(&current->real_blocked, 0);
recalc_sigpending(); recalc_sigpending();
} }
} }
spin_unlock(&current->sig->siglock);
spin_unlock_irq(&current->sig->siglock); spin_unlock_irq(&current->sig->siglock);
if (sig) { if (sig) {
......
...@@ -20,13 +20,8 @@ ...@@ -20,13 +20,8 @@
#ifdef __KERNEL__ #ifdef __KERNEL__
#include <linux/init.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <asm/vaddrs.h>
#include <asm/kmap_types.h> #include <asm/kmap_types.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
/* undef for production */ /* undef for production */
#define HIGHMEM_DEBUG 1 #define HIGHMEM_DEBUG 1
...@@ -72,81 +67,8 @@ static inline void kunmap(struct page *page) ...@@ -72,81 +67,8 @@ static inline void kunmap(struct page *page)
kunmap_high(page); kunmap_high(page);
} }
/* extern void *kmap_atomic(struct page *page, enum km_type type);
* The use of kmap_atomic/kunmap_atomic is discouraged - kmap/kunmap extern void kunmap_atomic(void *kvaddr, enum km_type type);
* gives a more generic (and caching) interface. But kmap_atomic can
* be used in IRQ contexts, so in some (very limited) cases we need
* it.
*/
static inline void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned long idx;
unsigned long vaddr;
inc_preempt_count();
if (page < highmem_start_page)
return page_address(page);
idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = FIX_KMAP_BEGIN + idx * PAGE_SIZE;
/* XXX Fix - Anton */
#if 0
__flush_cache_one(vaddr);
#else
flush_cache_all();
#endif
#if HIGHMEM_DEBUG
if (!pte_none(*(kmap_pte+idx)))
BUG();
#endif
set_pte(kmap_pte+idx, mk_pte(page, kmap_prot));
/* XXX Fix - Anton */
#if 0
__flush_tlb_one(vaddr);
#else
flush_tlb_all();
#endif
return (void*) vaddr;
}
static inline void kunmap_atomic(void *kvaddr, enum km_type type)
{
unsigned long vaddr = (unsigned long) kvaddr;
unsigned long idx = type + KM_TYPE_NR*smp_processor_id();
if (vaddr < FIX_KMAP_BEGIN) { // FIXME
dec_preempt_count();
return;
}
if (vaddr != FIX_KMAP_BEGIN + idx * PAGE_SIZE)
BUG();
/* XXX Fix - Anton */
#if 0
__flush_cache_one(vaddr);
#else
flush_cache_all();
#endif
#ifdef HIGHMEM_DEBUG
/*
* force other mappings to Oops if they'll try to access
* this pte without first remap it
*/
pte_clear(kmap_pte+idx);
/* XXX Fix - Anton */
#if 0
__flush_tlb_one(vaddr);
#else
flush_tlb_all();
#endif
#endif
dec_preempt_count();
}
static inline struct page *kmap_atomic_to_page(void *ptr) static inline struct page *kmap_atomic_to_page(void *ptr)
{ {
......
...@@ -40,7 +40,7 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i ...@@ -40,7 +40,7 @@ extern unsigned int csum_partial(const unsigned char * buff, int len, unsigned i
*/ */
extern unsigned int csum_partial_copy_sparc64(const char *src, char *dst, int len, unsigned int sum); extern unsigned int csum_partial_copy_sparc64(const char *src, char *dst, int len, unsigned int sum);
extern __inline__ unsigned int static __inline__ unsigned int
csum_partial_copy_nocheck (const char *src, char *dst, int len, csum_partial_copy_nocheck (const char *src, char *dst, int len,
unsigned int sum) unsigned int sum)
{ {
...@@ -52,7 +52,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len, ...@@ -52,7 +52,7 @@ csum_partial_copy_nocheck (const char *src, char *dst, int len,
return ret; return ret;
} }
extern __inline__ unsigned int static __inline__ unsigned int
csum_partial_copy_from_user(const char *src, char *dst, int len, csum_partial_copy_from_user(const char *src, char *dst, int len,
unsigned int sum, int *err) unsigned int sum, int *err)
{ {
...@@ -66,7 +66,7 @@ csum_partial_copy_from_user(const char *src, char *dst, int len, ...@@ -66,7 +66,7 @@ csum_partial_copy_from_user(const char *src, char *dst, int len,
*/ */
#define HAVE_CSUM_COPY_USER #define HAVE_CSUM_COPY_USER
extern unsigned int csum_partial_copy_user_sparc64(const char *src, char *dst, int len, unsigned int sum); extern unsigned int csum_partial_copy_user_sparc64(const char *src, char *dst, int len, unsigned int sum);
extern __inline__ unsigned int static __inline__ unsigned int
csum_and_copy_to_user(const char *src, char *dst, int len, csum_and_copy_to_user(const char *src, char *dst, int len,
unsigned int sum, int *err) unsigned int sum, int *err)
{ {
...@@ -78,7 +78,7 @@ csum_and_copy_to_user(const char *src, char *dst, int len, ...@@ -78,7 +78,7 @@ csum_and_copy_to_user(const char *src, char *dst, int len,
/* ihl is always 5 or greater, almost always is 5, and iph is word aligned /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
* the majority of the time. * the majority of the time.
*/ */
extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph, static __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
unsigned int ihl) unsigned int ihl)
{ {
unsigned short sum; unsigned short sum;
...@@ -119,7 +119,7 @@ extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph, ...@@ -119,7 +119,7 @@ extern __inline__ unsigned short ip_fast_csum(__const__ unsigned char *iph,
} }
/* Fold a partial checksum without adding pseudo headers. */ /* Fold a partial checksum without adding pseudo headers. */
extern __inline__ unsigned short csum_fold(unsigned int sum) static __inline__ unsigned short csum_fold(unsigned int sum)
{ {
unsigned int tmp; unsigned int tmp;
...@@ -134,7 +134,7 @@ extern __inline__ unsigned short csum_fold(unsigned int sum) ...@@ -134,7 +134,7 @@ extern __inline__ unsigned short csum_fold(unsigned int sum)
return (sum & 0xffff); return (sum & 0xffff);
} }
extern __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr, static __inline__ unsigned long csum_tcpudp_nofold(unsigned long saddr,
unsigned long daddr, unsigned long daddr,
unsigned int len, unsigned int len,
unsigned short proto, unsigned short proto,
...@@ -201,7 +201,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr, ...@@ -201,7 +201,7 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
} }
/* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */ /* this routine is used for miscellaneous IP-like checksums, mainly in icmp.c */
extern __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len) static __inline__ unsigned short ip_compute_csum(unsigned char * buff, int len)
{ {
return csum_fold(csum_partial(buff, len, 0)); return csum_fold(csum_partial(buff, len, 0));
} }
......
...@@ -18,7 +18,7 @@ ...@@ -18,7 +18,7 @@
extern unsigned long loops_per_jiffy; extern unsigned long loops_per_jiffy;
#endif #endif
extern __inline__ void __delay(unsigned long loops) static __inline__ void __delay(unsigned long loops)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" b,pt %%xcc, 1f\n" " b,pt %%xcc, 1f\n"
...@@ -32,7 +32,7 @@ extern __inline__ void __delay(unsigned long loops) ...@@ -32,7 +32,7 @@ extern __inline__ void __delay(unsigned long loops)
: "cc"); : "cc");
} }
extern __inline__ void __udelay(unsigned long usecs, unsigned long lps) static __inline__ void __udelay(unsigned long usecs, unsigned long lps)
{ {
usecs *= 0x00000000000010c6UL; /* 2**32 / 1000000 */ usecs *= 0x00000000000010c6UL; /* 2**32 / 1000000 */
......
...@@ -16,7 +16,7 @@ struct fpustate { ...@@ -16,7 +16,7 @@ struct fpustate {
#define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs) #define FPUSTATE (struct fpustate *)(current_thread_info()->fpregs)
extern __inline__ unsigned long fprs_read(void) static __inline__ unsigned long fprs_read(void)
{ {
unsigned long retval; unsigned long retval;
...@@ -25,7 +25,7 @@ extern __inline__ unsigned long fprs_read(void) ...@@ -25,7 +25,7 @@ extern __inline__ unsigned long fprs_read(void)
return retval; return retval;
} }
extern __inline__ void fprs_write(unsigned long val) static __inline__ void fprs_write(unsigned long val)
{ {
__asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val)); __asm__ __volatile__("wr %0, 0x0, %%fprs" : : "r" (val));
} }
......
...@@ -133,21 +133,21 @@ extern int request_fast_irq(unsigned int irq, ...@@ -133,21 +133,21 @@ extern int request_fast_irq(unsigned int irq,
unsigned long flags, __const__ char *devname, unsigned long flags, __const__ char *devname,
void *dev_id); void *dev_id);
extern __inline__ void set_softint(unsigned long bits) static __inline__ void set_softint(unsigned long bits)
{ {
__asm__ __volatile__("wr %0, 0x0, %%set_softint" __asm__ __volatile__("wr %0, 0x0, %%set_softint"
: /* No outputs */ : /* No outputs */
: "r" (bits)); : "r" (bits));
} }
extern __inline__ void clear_softint(unsigned long bits) static __inline__ void clear_softint(unsigned long bits)
{ {
__asm__ __volatile__("wr %0, 0x0, %%clear_softint" __asm__ __volatile__("wr %0, 0x0, %%clear_softint"
: /* No outputs */ : /* No outputs */
: "r" (bits)); : "r" (bits));
} }
extern __inline__ unsigned long get_softint(void) static __inline__ unsigned long get_softint(void)
{ {
unsigned long retval; unsigned long retval;
......
...@@ -161,7 +161,7 @@ struct sparc_phys_banks { ...@@ -161,7 +161,7 @@ struct sparc_phys_banks {
extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS]; extern struct sparc_phys_banks sp_banks[SPARC_PHYS_BANKS];
/* Pure 2^n version of get_order */ /* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size) static __inline__ int get_order(unsigned long size)
{ {
int order; int order;
......
...@@ -17,12 +17,12 @@ ...@@ -17,12 +17,12 @@
#define PCI_IRQ_NONE 0xffffffff #define PCI_IRQ_NONE 0xffffffff
extern inline void pcibios_set_master(struct pci_dev *dev) static inline void pcibios_set_master(struct pci_dev *dev)
{ {
/* No special bus mastering setup handling */ /* No special bus mastering setup handling */
} }
extern inline void pcibios_penalize_isa_irq(int irq) static inline void pcibios_penalize_isa_irq(int irq)
{ {
/* We don't do dynamic PCI IRQ allocation */ /* We don't do dynamic PCI IRQ allocation */
} }
......
...@@ -212,7 +212,7 @@ extern struct page *mem_map_zero; ...@@ -212,7 +212,7 @@ extern struct page *mem_map_zero;
#define page_pte_prot(page, prot) mk_pte(page, prot) #define page_pte_prot(page, prot) mk_pte(page, prot)
#define page_pte(page) page_pte_prot(page, __pgprot(0)) #define page_pte(page) page_pte_prot(page, __pgprot(0))
extern inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot) static inline pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
{ {
pte_t __pte; pte_t __pte;
...@@ -291,7 +291,7 @@ struct vm_area_struct; ...@@ -291,7 +291,7 @@ struct vm_area_struct;
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t); extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
/* Make a non-present pseudo-TTE. */ /* Make a non-present pseudo-TTE. */
extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) static inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
{ {
pte_t pte; pte_t pte;
pte_val(pte) = ((page) | pgprot_val(prot) | _PAGE_E) & ~(unsigned long)_PAGE_CACHE; pte_val(pte) = ((page) | pgprot_val(prot) | _PAGE_E) & ~(unsigned long)_PAGE_CACHE;
...@@ -313,7 +313,7 @@ extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space) ...@@ -313,7 +313,7 @@ extern inline pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space)
extern unsigned long prom_virt_to_phys(unsigned long, int *); extern unsigned long prom_virt_to_phys(unsigned long, int *);
extern __inline__ unsigned long static __inline__ unsigned long
sun4u_get_pte (unsigned long addr) sun4u_get_pte (unsigned long addr)
{ {
pgd_t *pgdp; pgd_t *pgdp;
...@@ -330,13 +330,13 @@ sun4u_get_pte (unsigned long addr) ...@@ -330,13 +330,13 @@ sun4u_get_pte (unsigned long addr)
return pte_val(*ptep) & _PAGE_PADDR; return pte_val(*ptep) & _PAGE_PADDR;
} }
extern __inline__ unsigned long static __inline__ unsigned long
__get_phys (unsigned long addr) __get_phys (unsigned long addr)
{ {
return sun4u_get_pte (addr); return sun4u_get_pte (addr);
} }
extern __inline__ int static __inline__ int
__get_iospace (unsigned long addr) __get_iospace (unsigned long addr)
{ {
return ((sun4u_get_pte (addr) & 0xf0000000) >> 28); return ((sun4u_get_pte (addr) & 0xf0000000) >> 28);
......
...@@ -24,7 +24,7 @@ ...@@ -24,7 +24,7 @@
#define PSR_V8PLUS 0xff000000 /* fake impl/ver, meaning a 64bit CPU is present */ #define PSR_V8PLUS 0xff000000 /* fake impl/ver, meaning a 64bit CPU is present */
#define PSR_XCC 0x000f0000 /* if PSR_V8PLUS, this is %xcc */ #define PSR_XCC 0x000f0000 /* if PSR_V8PLUS, this is %xcc */
extern inline unsigned int tstate_to_psr(unsigned long tstate) static inline unsigned int tstate_to_psr(unsigned long tstate)
{ {
return ((tstate & TSTATE_CWP) | return ((tstate & TSTATE_CWP) |
PSR_S | PSR_S |
...@@ -33,7 +33,7 @@ extern inline unsigned int tstate_to_psr(unsigned long tstate) ...@@ -33,7 +33,7 @@ extern inline unsigned int tstate_to_psr(unsigned long tstate)
PSR_V8PLUS); PSR_V8PLUS);
} }
extern inline unsigned long psr_to_tstate_icc(unsigned int psr) static inline unsigned long psr_to_tstate_icc(unsigned int psr)
{ {
unsigned long tstate = ((unsigned long)(psr & PSR_ICC)) << 12; unsigned long tstate = ((unsigned long)(psr & PSR_ICC)) << 12;
if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS) if ((psr & (PSR_VERS|PSR_IMPL)) == PSR_V8PLUS)
......
...@@ -85,32 +85,4 @@ ...@@ -85,32 +85,4 @@
#define VERS_MAXTL 0x000000000000ff00 /* Maximum Trap Level. */ #define VERS_MAXTL 0x000000000000ff00 /* Maximum Trap Level. */
#define VERS_MAXWIN 0x000000000000001f /* Maximum Reg Window Index. */ #define VERS_MAXWIN 0x000000000000001f /* Maximum Reg Window Index. */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__)
#define set_pstate(bits) \
__asm__ __volatile__( \
"rdpr %%pstate, %%g1\n\t" \
"or %%g1, %0, %%g1\n\t" \
"wrpr %%g1, 0x0, %%pstate\n\t" \
: /* no outputs */ \
: "i" (bits) \
: "g1")
#define clear_pstate(bits) \
__asm__ __volatile__( \
"rdpr %%pstate, %%g1\n\t" \
"andn %%g1, %0, %%g1\n\t" \
"wrpr %%g1, 0x0, %%pstate\n\t" \
: /* no outputs */ \
: "i" (bits) \
: "g1")
#define change_pstate(bits) \
__asm__ __volatile__( \
"rdpr %%pstate, %%g1\n\t" \
"wrpr %%g1, %0, %%pstate\n\t" \
: /* no outputs */ \
: "i" (bits) \
: "g1")
#endif
#endif /* !(_SPARC64_PSTATE_H) */ #endif /* !(_SPARC64_PSTATE_H) */
...@@ -27,12 +27,12 @@ ...@@ -27,12 +27,12 @@
* numbers + offsets, and vice versa. * numbers + offsets, and vice versa.
*/ */
extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset) static __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset)
{ {
return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset)); return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<28)+(offset));
} }
extern __inline__ int sbus_dev_slot(unsigned long dev_addr) static __inline__ int sbus_dev_slot(unsigned long dev_addr)
{ {
return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28); return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>28);
} }
......
...@@ -154,7 +154,7 @@ typedef struct sigevent32 { ...@@ -154,7 +154,7 @@ typedef struct sigevent32 {
#include <linux/string.h> #include <linux/string.h>
extern inline void copy_siginfo(siginfo_t *to, siginfo_t *from) static inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
{ {
if (from->si_code < 0) if (from->si_code < 0)
*to = *from; *to = *from;
......
...@@ -89,7 +89,7 @@ static inline int any_online_cpu(unsigned long mask) ...@@ -89,7 +89,7 @@ static inline int any_online_cpu(unsigned long mask)
* General functions that each host system must provide. * General functions that each host system must provide.
*/ */
extern __inline__ int hard_smp_processor_id(void) static __inline__ int hard_smp_processor_id(void)
{ {
if (tlb_type == cheetah || tlb_type == cheetah_plus) { if (tlb_type == cheetah || tlb_type == cheetah_plus) {
unsigned long safari_config; unsigned long safari_config;
...@@ -130,7 +130,7 @@ static __inline__ void smp_send_reschedule(int cpu) ...@@ -130,7 +130,7 @@ static __inline__ void smp_send_reschedule(int cpu)
/* This is a nop as well because we capture all other cpus /* This is a nop as well because we capture all other cpus
* anyways when making the PROM active. * anyways when making the PROM active.
*/ */
extern __inline__ void smp_send_stop(void) { } static __inline__ void smp_send_stop(void) { }
#endif /* !(__ASSEMBLY__) */ #endif /* !(__ASSEMBLY__) */
......
...@@ -40,7 +40,7 @@ typedef unsigned char spinlock_t; ...@@ -40,7 +40,7 @@ typedef unsigned char spinlock_t;
do { membar("#LoadLoad"); \ do { membar("#LoadLoad"); \
} while(*((volatile unsigned char *)lock)) } while(*((volatile unsigned char *)lock))
extern __inline__ void _raw_spin_lock(spinlock_t *lock) static __inline__ void _raw_spin_lock(spinlock_t *lock)
{ {
__asm__ __volatile__( __asm__ __volatile__(
"1: ldstub [%0], %%g7\n" "1: ldstub [%0], %%g7\n"
...@@ -57,7 +57,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock) ...@@ -57,7 +57,7 @@ extern __inline__ void _raw_spin_lock(spinlock_t *lock)
: "g7", "memory"); : "g7", "memory");
} }
extern __inline__ int _raw_spin_trylock(spinlock_t *lock) static __inline__ int _raw_spin_trylock(spinlock_t *lock)
{ {
unsigned int result; unsigned int result;
__asm__ __volatile__("ldstub [%1], %0\n\t" __asm__ __volatile__("ldstub [%1], %0\n\t"
...@@ -68,7 +68,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock) ...@@ -68,7 +68,7 @@ extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
return (result == 0); return (result == 0);
} }
extern __inline__ void _raw_spin_unlock(spinlock_t *lock) static __inline__ void _raw_spin_unlock(spinlock_t *lock)
{ {
__asm__ __volatile__("membar #StoreStore | #LoadStore\n\t" __asm__ __volatile__("membar #StoreStore | #LoadStore\n\t"
"stb %%g0, [%0]" "stb %%g0, [%0]"
......
...@@ -56,7 +56,7 @@ extern enum ultra_tlb_layout tlb_type; ...@@ -56,7 +56,7 @@ extern enum ultra_tlb_layout tlb_type;
SPITFIRE_HIGHEST_LOCKED_TLBENT : \ SPITFIRE_HIGHEST_LOCKED_TLBENT : \
CHEETAH_HIGHEST_LOCKED_TLBENT) CHEETAH_HIGHEST_LOCKED_TLBENT)
extern __inline__ unsigned long spitfire_get_isfsr(void) static __inline__ unsigned long spitfire_get_isfsr(void)
{ {
unsigned long ret; unsigned long ret;
...@@ -66,7 +66,7 @@ extern __inline__ unsigned long spitfire_get_isfsr(void) ...@@ -66,7 +66,7 @@ extern __inline__ unsigned long spitfire_get_isfsr(void)
return ret; return ret;
} }
extern __inline__ unsigned long spitfire_get_dsfsr(void) static __inline__ unsigned long spitfire_get_dsfsr(void)
{ {
unsigned long ret; unsigned long ret;
...@@ -76,7 +76,7 @@ extern __inline__ unsigned long spitfire_get_dsfsr(void) ...@@ -76,7 +76,7 @@ extern __inline__ unsigned long spitfire_get_dsfsr(void)
return ret; return ret;
} }
extern __inline__ unsigned long spitfire_get_sfar(void) static __inline__ unsigned long spitfire_get_sfar(void)
{ {
unsigned long ret; unsigned long ret;
...@@ -86,7 +86,7 @@ extern __inline__ unsigned long spitfire_get_sfar(void) ...@@ -86,7 +86,7 @@ extern __inline__ unsigned long spitfire_get_sfar(void)
return ret; return ret;
} }
extern __inline__ void spitfire_put_isfsr(unsigned long sfsr) static __inline__ void spitfire_put_isfsr(unsigned long sfsr)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -94,7 +94,7 @@ extern __inline__ void spitfire_put_isfsr(unsigned long sfsr) ...@@ -94,7 +94,7 @@ extern __inline__ void spitfire_put_isfsr(unsigned long sfsr)
: "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU)); : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_IMMU));
} }
extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr) static __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -102,7 +102,7 @@ extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr) ...@@ -102,7 +102,7 @@ extern __inline__ void spitfire_put_dsfsr(unsigned long sfsr)
: "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU)); : "r" (sfsr), "r" (TLB_SFSR), "i" (ASI_DMMU));
} }
extern __inline__ unsigned long spitfire_get_primary_context(void) static __inline__ unsigned long spitfire_get_primary_context(void)
{ {
unsigned long ctx; unsigned long ctx;
...@@ -112,7 +112,7 @@ extern __inline__ unsigned long spitfire_get_primary_context(void) ...@@ -112,7 +112,7 @@ extern __inline__ unsigned long spitfire_get_primary_context(void)
return ctx; return ctx;
} }
extern __inline__ void spitfire_set_primary_context(unsigned long ctx) static __inline__ void spitfire_set_primary_context(unsigned long ctx)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -122,7 +122,7 @@ extern __inline__ void spitfire_set_primary_context(unsigned long ctx) ...@@ -122,7 +122,7 @@ extern __inline__ void spitfire_set_primary_context(unsigned long ctx)
__asm__ __volatile__ ("membar #Sync" : : : "memory"); __asm__ __volatile__ ("membar #Sync" : : : "memory");
} }
extern __inline__ unsigned long spitfire_get_secondary_context(void) static __inline__ unsigned long spitfire_get_secondary_context(void)
{ {
unsigned long ctx; unsigned long ctx;
...@@ -132,7 +132,7 @@ extern __inline__ unsigned long spitfire_get_secondary_context(void) ...@@ -132,7 +132,7 @@ extern __inline__ unsigned long spitfire_get_secondary_context(void)
return ctx; return ctx;
} }
extern __inline__ void spitfire_set_secondary_context(unsigned long ctx) static __inline__ void spitfire_set_secondary_context(unsigned long ctx)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -145,7 +145,7 @@ extern __inline__ void spitfire_set_secondary_context(unsigned long ctx) ...@@ -145,7 +145,7 @@ extern __inline__ void spitfire_set_secondary_context(unsigned long ctx)
/* The data cache is write through, so this just invalidates the /* The data cache is write through, so this just invalidates the
* specified line. * specified line.
*/ */
extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag) static __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long tag)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -160,7 +160,7 @@ extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long ...@@ -160,7 +160,7 @@ extern __inline__ void spitfire_put_dcache_tag(unsigned long addr, unsigned long
* a flush instruction (to any address) is sufficient to handle * a flush instruction (to any address) is sufficient to handle
* this issue after the line is invalidated. * this issue after the line is invalidated.
*/ */
extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag) static __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long tag)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -168,7 +168,7 @@ extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long ...@@ -168,7 +168,7 @@ extern __inline__ void spitfire_put_icache_tag(unsigned long addr, unsigned long
: "r" (tag), "r" (addr), "i" (ASI_IC_TAG)); : "r" (tag), "r" (addr), "i" (ASI_IC_TAG));
} }
extern __inline__ unsigned long spitfire_get_dtlb_data(int entry) static __inline__ unsigned long spitfire_get_dtlb_data(int entry)
{ {
unsigned long data; unsigned long data;
...@@ -182,7 +182,7 @@ extern __inline__ unsigned long spitfire_get_dtlb_data(int entry) ...@@ -182,7 +182,7 @@ extern __inline__ unsigned long spitfire_get_dtlb_data(int entry)
return data; return data;
} }
extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry) static __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
{ {
unsigned long tag; unsigned long tag;
...@@ -192,7 +192,7 @@ extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry) ...@@ -192,7 +192,7 @@ extern __inline__ unsigned long spitfire_get_dtlb_tag(int entry)
return tag; return tag;
} }
extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data) static __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -201,7 +201,7 @@ extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data) ...@@ -201,7 +201,7 @@ extern __inline__ void spitfire_put_dtlb_data(int entry, unsigned long data)
"i" (ASI_DTLB_DATA_ACCESS)); "i" (ASI_DTLB_DATA_ACCESS));
} }
extern __inline__ unsigned long spitfire_get_itlb_data(int entry) static __inline__ unsigned long spitfire_get_itlb_data(int entry)
{ {
unsigned long data; unsigned long data;
...@@ -215,7 +215,7 @@ extern __inline__ unsigned long spitfire_get_itlb_data(int entry) ...@@ -215,7 +215,7 @@ extern __inline__ unsigned long spitfire_get_itlb_data(int entry)
return data; return data;
} }
extern __inline__ unsigned long spitfire_get_itlb_tag(int entry) static __inline__ unsigned long spitfire_get_itlb_tag(int entry)
{ {
unsigned long tag; unsigned long tag;
...@@ -225,7 +225,7 @@ extern __inline__ unsigned long spitfire_get_itlb_tag(int entry) ...@@ -225,7 +225,7 @@ extern __inline__ unsigned long spitfire_get_itlb_tag(int entry)
return tag; return tag;
} }
extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) static __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -237,7 +237,7 @@ extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data) ...@@ -237,7 +237,7 @@ extern __inline__ void spitfire_put_itlb_data(int entry, unsigned long data)
/* Spitfire hardware assisted TLB flushes. */ /* Spitfire hardware assisted TLB flushes. */
/* Context level flushes. */ /* Context level flushes. */
extern __inline__ void spitfire_flush_dtlb_primary_context(void) static __inline__ void spitfire_flush_dtlb_primary_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -245,7 +245,7 @@ extern __inline__ void spitfire_flush_dtlb_primary_context(void) ...@@ -245,7 +245,7 @@ extern __inline__ void spitfire_flush_dtlb_primary_context(void)
: "r" (0x40), "i" (ASI_DMMU_DEMAP)); : "r" (0x40), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_primary_context(void) static __inline__ void spitfire_flush_itlb_primary_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -253,7 +253,7 @@ extern __inline__ void spitfire_flush_itlb_primary_context(void) ...@@ -253,7 +253,7 @@ extern __inline__ void spitfire_flush_itlb_primary_context(void)
: "r" (0x40), "i" (ASI_IMMU_DEMAP)); : "r" (0x40), "i" (ASI_IMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_dtlb_secondary_context(void) static __inline__ void spitfire_flush_dtlb_secondary_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -261,7 +261,7 @@ extern __inline__ void spitfire_flush_dtlb_secondary_context(void) ...@@ -261,7 +261,7 @@ extern __inline__ void spitfire_flush_dtlb_secondary_context(void)
: "r" (0x50), "i" (ASI_DMMU_DEMAP)); : "r" (0x50), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_secondary_context(void) static __inline__ void spitfire_flush_itlb_secondary_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -269,7 +269,7 @@ extern __inline__ void spitfire_flush_itlb_secondary_context(void) ...@@ -269,7 +269,7 @@ extern __inline__ void spitfire_flush_itlb_secondary_context(void)
: "r" (0x50), "i" (ASI_IMMU_DEMAP)); : "r" (0x50), "i" (ASI_IMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_dtlb_nucleus_context(void) static __inline__ void spitfire_flush_dtlb_nucleus_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -277,7 +277,7 @@ extern __inline__ void spitfire_flush_dtlb_nucleus_context(void) ...@@ -277,7 +277,7 @@ extern __inline__ void spitfire_flush_dtlb_nucleus_context(void)
: "r" (0x60), "i" (ASI_DMMU_DEMAP)); : "r" (0x60), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_nucleus_context(void) static __inline__ void spitfire_flush_itlb_nucleus_context(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -286,7 +286,7 @@ extern __inline__ void spitfire_flush_itlb_nucleus_context(void) ...@@ -286,7 +286,7 @@ extern __inline__ void spitfire_flush_itlb_nucleus_context(void)
} }
/* Page level flushes. */ /* Page level flushes. */
extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page) static __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -294,7 +294,7 @@ extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page) ...@@ -294,7 +294,7 @@ extern __inline__ void spitfire_flush_dtlb_primary_page(unsigned long page)
: "r" (page), "i" (ASI_DMMU_DEMAP)); : "r" (page), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page) static __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -302,7 +302,7 @@ extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page) ...@@ -302,7 +302,7 @@ extern __inline__ void spitfire_flush_itlb_primary_page(unsigned long page)
: "r" (page), "i" (ASI_IMMU_DEMAP)); : "r" (page), "i" (ASI_IMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page) static __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -310,7 +310,7 @@ extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page) ...@@ -310,7 +310,7 @@ extern __inline__ void spitfire_flush_dtlb_secondary_page(unsigned long page)
: "r" (page | 0x10), "i" (ASI_DMMU_DEMAP)); : "r" (page | 0x10), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page) static __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -318,7 +318,7 @@ extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page) ...@@ -318,7 +318,7 @@ extern __inline__ void spitfire_flush_itlb_secondary_page(unsigned long page)
: "r" (page | 0x10), "i" (ASI_IMMU_DEMAP)); : "r" (page | 0x10), "i" (ASI_IMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) static __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -326,7 +326,7 @@ extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page) ...@@ -326,7 +326,7 @@ extern __inline__ void spitfire_flush_dtlb_nucleus_page(unsigned long page)
: "r" (page | 0x20), "i" (ASI_DMMU_DEMAP)); : "r" (page | 0x20), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page) static __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -335,7 +335,7 @@ extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page) ...@@ -335,7 +335,7 @@ extern __inline__ void spitfire_flush_itlb_nucleus_page(unsigned long page)
} }
/* Cheetah has "all non-locked" tlb flushes. */ /* Cheetah has "all non-locked" tlb flushes. */
extern __inline__ void cheetah_flush_dtlb_all(void) static __inline__ void cheetah_flush_dtlb_all(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -343,7 +343,7 @@ extern __inline__ void cheetah_flush_dtlb_all(void) ...@@ -343,7 +343,7 @@ extern __inline__ void cheetah_flush_dtlb_all(void)
: "r" (0x80), "i" (ASI_DMMU_DEMAP)); : "r" (0x80), "i" (ASI_DMMU_DEMAP));
} }
extern __inline__ void cheetah_flush_itlb_all(void) static __inline__ void cheetah_flush_itlb_all(void)
{ {
__asm__ __volatile__("stxa %%g0, [%0] %1\n\t" __asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
"membar #Sync" "membar #Sync"
...@@ -365,7 +365,7 @@ extern __inline__ void cheetah_flush_itlb_all(void) ...@@ -365,7 +365,7 @@ extern __inline__ void cheetah_flush_itlb_all(void)
* ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes * ASI_{D,I}TLB_DATA_ACCESS loads, doing the load twice fixes
* the problem for me. -DaveM * the problem for me. -DaveM
*/ */
extern __inline__ unsigned long cheetah_get_ldtlb_data(int entry) static __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
{ {
unsigned long data; unsigned long data;
...@@ -378,7 +378,7 @@ extern __inline__ unsigned long cheetah_get_ldtlb_data(int entry) ...@@ -378,7 +378,7 @@ extern __inline__ unsigned long cheetah_get_ldtlb_data(int entry)
return data; return data;
} }
extern __inline__ unsigned long cheetah_get_litlb_data(int entry) static __inline__ unsigned long cheetah_get_litlb_data(int entry)
{ {
unsigned long data; unsigned long data;
...@@ -391,7 +391,7 @@ extern __inline__ unsigned long cheetah_get_litlb_data(int entry) ...@@ -391,7 +391,7 @@ extern __inline__ unsigned long cheetah_get_litlb_data(int entry)
return data; return data;
} }
extern __inline__ unsigned long cheetah_get_ldtlb_tag(int entry) static __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
{ {
unsigned long tag; unsigned long tag;
...@@ -403,7 +403,7 @@ extern __inline__ unsigned long cheetah_get_ldtlb_tag(int entry) ...@@ -403,7 +403,7 @@ extern __inline__ unsigned long cheetah_get_ldtlb_tag(int entry)
return tag; return tag;
} }
extern __inline__ unsigned long cheetah_get_litlb_tag(int entry) static __inline__ unsigned long cheetah_get_litlb_tag(int entry)
{ {
unsigned long tag; unsigned long tag;
...@@ -415,7 +415,7 @@ extern __inline__ unsigned long cheetah_get_litlb_tag(int entry) ...@@ -415,7 +415,7 @@ extern __inline__ unsigned long cheetah_get_litlb_tag(int entry)
return tag; return tag;
} }
extern __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data) static __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -425,7 +425,7 @@ extern __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data) ...@@ -425,7 +425,7 @@ extern __inline__ void cheetah_put_ldtlb_data(int entry, unsigned long data)
"i" (ASI_DTLB_DATA_ACCESS)); "i" (ASI_DTLB_DATA_ACCESS));
} }
extern __inline__ void cheetah_put_litlb_data(int entry, unsigned long data) static __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -435,7 +435,7 @@ extern __inline__ void cheetah_put_litlb_data(int entry, unsigned long data) ...@@ -435,7 +435,7 @@ extern __inline__ void cheetah_put_litlb_data(int entry, unsigned long data)
"i" (ASI_ITLB_DATA_ACCESS)); "i" (ASI_ITLB_DATA_ACCESS));
} }
extern __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb) static __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
{ {
unsigned long data; unsigned long data;
...@@ -447,7 +447,7 @@ extern __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb) ...@@ -447,7 +447,7 @@ extern __inline__ unsigned long cheetah_get_dtlb_data(int entry, int tlb)
return data; return data;
} }
extern __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb) static __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
{ {
unsigned long tag; unsigned long tag;
...@@ -457,7 +457,7 @@ extern __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb) ...@@ -457,7 +457,7 @@ extern __inline__ unsigned long cheetah_get_dtlb_tag(int entry, int tlb)
return tag; return tag;
} }
extern __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb) static __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int tlb)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
...@@ -467,7 +467,7 @@ extern __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int ...@@ -467,7 +467,7 @@ extern __inline__ void cheetah_put_dtlb_data(int entry, unsigned long data, int
"i" (ASI_DTLB_DATA_ACCESS)); "i" (ASI_DTLB_DATA_ACCESS));
} }
extern __inline__ unsigned long cheetah_get_itlb_data(int entry) static __inline__ unsigned long cheetah_get_itlb_data(int entry)
{ {
unsigned long data; unsigned long data;
...@@ -480,7 +480,7 @@ extern __inline__ unsigned long cheetah_get_itlb_data(int entry) ...@@ -480,7 +480,7 @@ extern __inline__ unsigned long cheetah_get_itlb_data(int entry)
return data; return data;
} }
extern __inline__ unsigned long cheetah_get_itlb_tag(int entry) static __inline__ unsigned long cheetah_get_itlb_tag(int entry)
{ {
unsigned long tag; unsigned long tag;
...@@ -490,7 +490,7 @@ extern __inline__ unsigned long cheetah_get_itlb_tag(int entry) ...@@ -490,7 +490,7 @@ extern __inline__ unsigned long cheetah_get_itlb_tag(int entry)
return tag; return tag;
} }
extern __inline__ void cheetah_put_itlb_data(int entry, unsigned long data) static __inline__ void cheetah_put_itlb_data(int entry, unsigned long data)
{ {
__asm__ __volatile__("stxa %0, [%1] %2\n\t" __asm__ __volatile__("stxa %0, [%1] %2\n\t"
"membar #Sync" "membar #Sync"
......
...@@ -217,7 +217,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \ ...@@ -217,7 +217,7 @@ do { if (test_thread_flag(TIF_PERFCTR)) { \
} \ } \
} while(0) } while(0)
extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val) static __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned int val)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" mov %0, %%g5\n" " mov %0, %%g5\n"
...@@ -233,7 +233,7 @@ extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned in ...@@ -233,7 +233,7 @@ extern __inline__ unsigned long xchg32(__volatile__ unsigned int *m, unsigned in
return val; return val;
} }
extern __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val) static __inline__ unsigned long xchg64(__volatile__ unsigned long *m, unsigned long val)
{ {
__asm__ __volatile__( __asm__ __volatile__(
" mov %0, %%g5\n" " mov %0, %%g5\n"
...@@ -277,7 +277,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret ...@@ -277,7 +277,7 @@ extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noret
#define __HAVE_ARCH_CMPXCHG 1 #define __HAVE_ARCH_CMPXCHG 1
extern __inline__ unsigned long static __inline__ unsigned long
__cmpxchg_u32(volatile int *m, int old, int new) __cmpxchg_u32(volatile int *m, int old, int new)
{ {
__asm__ __volatile__("cas [%2], %3, %0\n\t" __asm__ __volatile__("cas [%2], %3, %0\n\t"
...@@ -289,7 +289,7 @@ __cmpxchg_u32(volatile int *m, int old, int new) ...@@ -289,7 +289,7 @@ __cmpxchg_u32(volatile int *m, int old, int new)
return new; return new;
} }
extern __inline__ unsigned long static __inline__ unsigned long
__cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new) __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
{ {
__asm__ __volatile__("casx [%2], %3, %0\n\t" __asm__ __volatile__("casx [%2], %3, %0\n\t"
......
...@@ -52,7 +52,7 @@ do { \ ...@@ -52,7 +52,7 @@ do { \
#define __access_ok(addr,size) 1 #define __access_ok(addr,size) 1
#define access_ok(type,addr,size) 1 #define access_ok(type,addr,size) 1
extern inline int verify_area(int type, const void * addr, unsigned long size) static inline int verify_area(int type, const void * addr, unsigned long size)
{ {
return 0; return 0;
} }
...@@ -270,7 +270,7 @@ extern __kernel_size_t __copy_in_user(void *to, const void *from, ...@@ -270,7 +270,7 @@ extern __kernel_size_t __copy_in_user(void *to, const void *from,
__copy_in_user((void *)(to), \ __copy_in_user((void *)(to), \
(void *) (from), (__kernel_size_t)(n)) (void *) (from), (__kernel_size_t)(n))
extern __inline__ __kernel_size_t __clear_user(void *addr, __kernel_size_t size) static __inline__ __kernel_size_t __clear_user(void *addr, __kernel_size_t size)
{ {
extern __kernel_size_t __bzero_noasi(void *addr, __kernel_size_t size); extern __kernel_size_t __bzero_noasi(void *addr, __kernel_size_t size);
......
...@@ -25,7 +25,7 @@ ...@@ -25,7 +25,7 @@
/* UPA I/O space accessors */ /* UPA I/O space accessors */
#if defined(__KERNEL__) && !defined(__ASSEMBLY__) #if defined(__KERNEL__) && !defined(__ASSEMBLY__)
extern __inline__ unsigned char _upa_readb(unsigned long addr) static __inline__ unsigned char _upa_readb(unsigned long addr)
{ {
unsigned char ret; unsigned char ret;
...@@ -36,7 +36,7 @@ extern __inline__ unsigned char _upa_readb(unsigned long addr) ...@@ -36,7 +36,7 @@ extern __inline__ unsigned char _upa_readb(unsigned long addr)
return ret; return ret;
} }
extern __inline__ unsigned short _upa_readw(unsigned long addr) static __inline__ unsigned short _upa_readw(unsigned long addr)
{ {
unsigned short ret; unsigned short ret;
...@@ -47,7 +47,7 @@ extern __inline__ unsigned short _upa_readw(unsigned long addr) ...@@ -47,7 +47,7 @@ extern __inline__ unsigned short _upa_readw(unsigned long addr)
return ret; return ret;
} }
extern __inline__ unsigned int _upa_readl(unsigned long addr) static __inline__ unsigned int _upa_readl(unsigned long addr)
{ {
unsigned int ret; unsigned int ret;
...@@ -58,7 +58,7 @@ extern __inline__ unsigned int _upa_readl(unsigned long addr) ...@@ -58,7 +58,7 @@ extern __inline__ unsigned int _upa_readl(unsigned long addr)
return ret; return ret;
} }
extern __inline__ unsigned long _upa_readq(unsigned long addr) static __inline__ unsigned long _upa_readq(unsigned long addr)
{ {
unsigned long ret; unsigned long ret;
...@@ -69,28 +69,28 @@ extern __inline__ unsigned long _upa_readq(unsigned long addr) ...@@ -69,28 +69,28 @@ extern __inline__ unsigned long _upa_readq(unsigned long addr)
return ret; return ret;
} }
extern __inline__ void _upa_writeb(unsigned char b, unsigned long addr) static __inline__ void _upa_writeb(unsigned char b, unsigned long addr)
{ {
__asm__ __volatile__("stba\t%0, [%1] %2\t/* upa_writeb */" __asm__ __volatile__("stba\t%0, [%1] %2\t/* upa_writeb */"
: /* no outputs */ : /* no outputs */
: "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); : "r" (b), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
} }
extern __inline__ void _upa_writew(unsigned short w, unsigned long addr) static __inline__ void _upa_writew(unsigned short w, unsigned long addr)
{ {
__asm__ __volatile__("stha\t%0, [%1] %2\t/* upa_writew */" __asm__ __volatile__("stha\t%0, [%1] %2\t/* upa_writew */"
: /* no outputs */ : /* no outputs */
: "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); : "r" (w), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
} }
extern __inline__ void _upa_writel(unsigned int l, unsigned long addr) static __inline__ void _upa_writel(unsigned int l, unsigned long addr)
{ {
__asm__ __volatile__("stwa\t%0, [%1] %2\t/* upa_writel */" __asm__ __volatile__("stwa\t%0, [%1] %2\t/* upa_writel */"
: /* no outputs */ : /* no outputs */
: "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E)); : "r" (l), "r" (addr), "i" (ASI_PHYS_BYPASS_EC_E));
} }
extern __inline__ void _upa_writeq(unsigned long q, unsigned long addr) static __inline__ void _upa_writeq(unsigned long q, unsigned long addr)
{ {
__asm__ __volatile__("stxa\t%0, [%1] %2\t/* upa_writeq */" __asm__ __volatile__("stxa\t%0, [%1] %2\t/* upa_writeq */"
: /* no outputs */ : /* no outputs */
......
...@@ -42,7 +42,7 @@ ...@@ -42,7 +42,7 @@
wr %o5, 0, %fprs; wr %o5, 0, %fprs;
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
extern __inline__ void save_and_clear_fpu(void) { static __inline__ void save_and_clear_fpu(void) {
__asm__ __volatile__ ( __asm__ __volatile__ (
" rd %%fprs, %%o5\n" " rd %%fprs, %%o5\n"
" andcc %%o5, %0, %%g0\n" " andcc %%o5, %0, %%g0\n"
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/hash.h> #include <linux/hash.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/tlbflush.h>
static mempool_t *page_pool, *isa_page_pool; static mempool_t *page_pool, *isa_page_pool;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment