Commit 3115624e authored by Adrian Bunk, committed by David S. Miller

[SPARC]: "extern inline" doesn't make much sense.

Signed-off-by: Adrian Bunk <bunk@stusta.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ed39f731
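For context, a minimal sketch (not part of this patch; the function names are made up) of why the substitution matters. Under the GNU89 rules the kernel was compiled with at the time, "extern inline" tells GCC that the body is only a hint for inlining and that no out-of-line copy should be emitted in this translation unit, so any call that is not inlined (for example at -O0, or when the function's address is taken) becomes an undefined reference unless a separate external definition exists elsewhere. "static inline" keeps a per-file fallback copy, so a header-defined helper works no matter what the optimizer decides:

/* gnu89 semantics sketch; try: gcc -std=gnu89 -O0 demo.c */

/* extern inline: no out-of-line definition is emitted here, so a
 * non-inlined call needs a definition from some other object file.
 */
extern inline int twice_extern(int x)
{
	return x << 1;
}

/* static inline: if a call is not inlined, the compiler emits a
 * local copy in this object file, so the link always succeeds.
 */
static inline int twice_static(int x)
{
	return x << 1;
}

int main(void)
{
	/* At -O0 neither call is inlined: twice_static() resolves to the
	 * local copy, while twice_extern() is left as an undefined
	 * reference and the link fails.
	 */
	return twice_extern(1) + twice_static(1);
}

The patch itself is purely mechanical: every "extern inline" / "extern __inline__" definition in the SPARC headers below becomes "static inline", with no change to the function bodies.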
@@ -457,7 +457,7 @@ void __init time_init(void)
 	sbus_time_init();
 }
 
-extern __inline__ unsigned long do_gettimeoffset(void)
+static inline unsigned long do_gettimeoffset(void)
 {
 	return (*master_l10_counter >> 10) & 0x1fffff;
 }

@@ -260,7 +260,7 @@ static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
 { return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
 
 /* to find an entry in a top-level page table... */
-extern inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
+static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
 { return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
 
 /* Find an entry in the second-level page table.. */

@@ -51,7 +51,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_SIMM13(__name) \
 	extern unsigned int ___sf_##__name(void) __attribute_const__; \
 	extern unsigned ___ss_##__name[2]; \
-	extern __inline__ unsigned int ___sf_##__name(void) { \
+	static inline unsigned int ___sf_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("or %%g0, ___s_" #__name ", %0" : "=r"(ret)); \
 	return ret; \
@@ -59,7 +59,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_SIMM13_INIT(__name,__val) \
 	extern unsigned int ___sf_##__name(void) __attribute_const__; \
 	extern unsigned ___ss_##__name[2]; \
-	extern __inline__ unsigned int ___sf_##__name(void) { \
+	static inline unsigned int ___sf_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("or %%g0, ___s_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 	return ret; \
@@ -73,7 +73,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_HALF(__name) \
 	extern unsigned int ___af_##__name(void) __attribute_const__; \
 	extern unsigned ___as_##__name[2]; \
-	extern __inline__ unsigned int ___af_##__name(void) { \
+	static inline unsigned int ___af_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("or %%g0, ___a_" #__name ", %0" : "=r"(ret)); \
 	return ret; \
@@ -81,7 +81,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_HALF_INIT(__name,__val) \
 	extern unsigned int ___af_##__name(void) __attribute_const__; \
 	extern unsigned ___as_##__name[2]; \
-	extern __inline__ unsigned int ___af_##__name(void) { \
+	static inline unsigned int ___af_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("or %%g0, ___a_" #__name "__btset_" #__val ", %0" : "=r"(ret));\
 	return ret; \
@@ -92,7 +92,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_SETHI(__name) \
 	extern unsigned int ___hf_##__name(void) __attribute_const__; \
 	extern unsigned ___hs_##__name[2]; \
-	extern __inline__ unsigned int ___hf_##__name(void) { \
+	static inline unsigned int ___hf_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("sethi %%hi(___h_" #__name "), %0" : "=r"(ret)); \
 	return ret; \
@@ -100,7 +100,7 @@ extern unsigned int ___illegal_use_of_BTFIXUP_INT_in_module(void);
 #define BTFIXUPDEF_SETHI_INIT(__name,__val) \
 	extern unsigned int ___hf_##__name(void) __attribute_const__; \
 	extern unsigned ___hs_##__name[2]; \
-	extern __inline__ unsigned int ___hf_##__name(void) { \
+	static inline unsigned int ___hf_##__name(void) { \
 	unsigned int ret; \
 	__asm__ ("sethi %%hi(___h_" #__name "__btset_" #__val "), %0" : \
 		"=r"(ret)); \

@@ -27,7 +27,7 @@
  */
 
 /* First, cache-tag access. */
-extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum)
+static inline unsigned int get_icache_tag(int setnum, int tagnum)
 {
 	unsigned int vaddr, retval;
@@ -38,7 +38,7 @@ extern __inline__ unsigned int get_icache_tag(int setnum, int tagnum)
 	return retval;
 }
 
-extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry)
+static inline void put_icache_tag(int setnum, int tagnum, unsigned int entry)
 {
 	unsigned int vaddr;
@@ -51,7 +51,7 @@ extern __inline__ void put_icache_tag(int setnum, int tagnum, unsigned int entry
 /* Second cache-data access. The data is returned two-32bit quantities
  * at a time.
  */
-extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock,
+static inline void get_icache_data(int setnum, int tagnum, int subblock,
 				unsigned int *data)
 {
 	unsigned int value1, value2, vaddr;
@@ -67,7 +67,7 @@ extern __inline__ void get_icache_data(int setnum, int tagnum, int subblock,
 	data[0] = value1; data[1] = value2;
 }
 
-extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock,
+static inline void put_icache_data(int setnum, int tagnum, int subblock,
 				unsigned int *data)
 {
 	unsigned int value1, value2, vaddr;
@@ -92,35 +92,35 @@ extern __inline__ void put_icache_data(int setnum, int tagnum, int subblock,
  */
 
 /* Flushes which clear out both the on-chip and external caches */
-extern __inline__ void flush_ei_page(unsigned int addr)
+static inline void flush_ei_page(unsigned int addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_PAGE) :
 		"memory");
 }
 
-extern __inline__ void flush_ei_seg(unsigned int addr)
+static inline void flush_ei_seg(unsigned int addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_SEG) :
 		"memory");
 }
 
-extern __inline__ void flush_ei_region(unsigned int addr)
+static inline void flush_ei_region(unsigned int addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_REGION) :
 		"memory");
 }
 
-extern __inline__ void flush_ei_ctx(unsigned int addr)
+static inline void flush_ei_ctx(unsigned int addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_CTX) :
 		"memory");
 }
 
-extern __inline__ void flush_ei_user(unsigned int addr)
+static inline void flush_ei_user(unsigned int addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_USER) :

@@ -48,25 +48,25 @@
 #define CYPRESS_NFAULT	0x00000002
 #define CYPRESS_MENABLE	0x00000001
 
-extern __inline__ void cypress_flush_page(unsigned long page)
+static inline void cypress_flush_page(unsigned long page)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (page), "i" (ASI_M_FLUSH_PAGE));
 }
 
-extern __inline__ void cypress_flush_segment(unsigned long addr)
+static inline void cypress_flush_segment(unsigned long addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_SEG));
 }
 
-extern __inline__ void cypress_flush_region(unsigned long addr)
+static inline void cypress_flush_region(unsigned long addr)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
 		"r" (addr), "i" (ASI_M_FLUSH_REGION));
 }
 
-extern __inline__ void cypress_flush_context(void)
+static inline void cypress_flush_context(void)
 {
 	__asm__ __volatile__("sta %%g0, [%%g0] %0\n\t" : :
 		"i" (ASI_M_FLUSH_CTX));

@@ -10,7 +10,7 @@
 #include <linux/config.h>
 #include <asm/cpudata.h>
 
-extern __inline__ void __delay(unsigned long loops)
+static inline void __delay(unsigned long loops)
 {
 	__asm__ __volatile__("cmp %0, 0\n\t"
 		"1: bne 1b\n\t"

@@ -198,7 +198,7 @@ extern void dvma_init(struct sbus_bus *);
 /* Pause until counter runs out or BIT isn't set in the DMA condition
  * register.
  */
-extern __inline__ void sparc_dma_pause(struct sparc_dma_registers *regs,
+static inline void sparc_dma_pause(struct sparc_dma_registers *regs,
 		unsigned long bit)
 {
 	int ctr = 50000; /* Let's find some bugs ;) */

@@ -108,12 +108,12 @@ struct iommu_struct {
 	struct bit_map usemap;
 };
 
-extern __inline__ void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs *regs)
 {
 	regs->tlbflush = 0;
 }
 
-extern __inline__ void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
 {
 	regs->pageflush = (ba & PAGE_MASK);
 }

@@ -46,7 +46,7 @@ struct kernel_debug {
 extern struct kernel_debug *linux_dbvec;
 
 /* Use this macro in C-code to enter the debugger. */
-extern __inline__ void sp_enter_debugger(void)
+static inline void sp_enter_debugger(void)
 {
 	__asm__ __volatile__("jmpl %0, %%o7\n\t"
 		"nop\n\t" : :

@@ -83,7 +83,7 @@ extern unsigned int hwbug_bitmask;
  */
 #define TBR_ID_SHIFT            20
 
-extern __inline__ int get_cpuid(void)
+static inline int get_cpuid(void)
 {
 	register int retval;
 	__asm__ __volatile__("rd %%tbr, %0\n\t"
@@ -93,7 +93,7 @@ extern __inline__ int get_cpuid(void)
 	return (retval & 3);
 }
 
-extern __inline__ int get_modid(void)
+static inline int get_modid(void)
 {
 	return (get_cpuid() | 0x8);
 }

@@ -19,7 +19,7 @@
 #define MSI_ASYNC_MODE  0x80000000	/* Operate the MSI asynchronously */
 
-extern __inline__ void msi_set_sync(void)
+static inline void msi_set_sync(void)
 {
 	__asm__ __volatile__ ("lda [%0] %1, %%g3\n\t"
 		"andn %%g3, %2, %%g3\n\t"

@@ -85,7 +85,7 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ void mxcc_set_stream_src(unsigned long *paddr)
+static inline void mxcc_set_stream_src(unsigned long *paddr)
 {
 	unsigned long data0 = paddr[0];
 	unsigned long data1 = paddr[1];
@@ -98,7 +98,7 @@ extern __inline__ void mxcc_set_stream_src(unsigned long *paddr)
 		"i" (ASI_M_MXCC) : "g2", "g3");
 }
 
-extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr)
+static inline void mxcc_set_stream_dst(unsigned long *paddr)
 {
 	unsigned long data0 = paddr[0];
 	unsigned long data1 = paddr[1];
@@ -111,7 +111,7 @@ extern __inline__ void mxcc_set_stream_dst(unsigned long *paddr)
 		"i" (ASI_M_MXCC) : "g2", "g3");
 }
 
-extern __inline__ unsigned long mxcc_get_creg(void)
+static inline unsigned long mxcc_get_creg(void)
 {
 	unsigned long mxcc_control;
@@ -125,7 +125,7 @@ extern __inline__ unsigned long mxcc_get_creg(void)
 	return mxcc_control;
 }
 
-extern __inline__ void mxcc_set_creg(unsigned long mxcc_control)
+static inline void mxcc_set_creg(unsigned long mxcc_control)
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 		"r" (mxcc_control), "r" (MXCC_CREG),

@@ -98,7 +98,7 @@
 #ifndef __ASSEMBLY__
 
-extern __inline__ int bw_get_intr_mask(int sbus_level)
+static inline int bw_get_intr_mask(int sbus_level)
 {
 	int mask;
@@ -109,7 +109,7 @@ extern __inline__ int bw_get_intr_mask(int sbus_level)
 	return mask;
 }
 
-extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask)
+static inline void bw_clear_intr_mask(int sbus_level, int mask)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 		"r" (mask),
@@ -117,7 +117,7 @@ extern __inline__ void bw_clear_intr_mask(int sbus_level, int mask)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ unsigned bw_get_prof_limit(int cpu)
+static inline unsigned bw_get_prof_limit(int cpu)
 {
 	unsigned limit;
@@ -128,7 +128,7 @@ extern __inline__ unsigned bw_get_prof_limit(int cpu)
 	return limit;
 }
 
-extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit)
+static inline void bw_set_prof_limit(int cpu, unsigned limit)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (limit),
@@ -136,7 +136,7 @@ extern __inline__ void bw_set_prof_limit(int cpu, unsigned limit)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ unsigned bw_get_ctrl(int cpu)
+static inline unsigned bw_get_ctrl(int cpu)
 {
 	unsigned ctrl;
@@ -147,7 +147,7 @@ extern __inline__ unsigned bw_get_ctrl(int cpu)
 	return ctrl;
 }
 
-extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl)
+static inline void bw_set_ctrl(int cpu, unsigned ctrl)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (ctrl),
@@ -157,7 +157,7 @@ extern __inline__ void bw_set_ctrl(int cpu, unsigned ctrl)
 extern unsigned char cpu_leds[32];
 
-extern __inline__ void show_leds(int cpuid)
+static inline void show_leds(int cpuid)
 {
 	cpuid &= 0x1e;
 	__asm__ __volatile__ ("stba %0, [%1] %2" : :
@@ -166,7 +166,7 @@ extern __inline__ void show_leds(int cpuid)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ unsigned cc_get_ipen(void)
+static inline unsigned cc_get_ipen(void)
 {
 	unsigned pending;
@@ -177,7 +177,7 @@ extern __inline__ unsigned cc_get_ipen(void)
 	return pending;
 }
 
-extern __inline__ void cc_set_iclr(unsigned clear)
+static inline void cc_set_iclr(unsigned clear)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 		"r" (clear),
@@ -185,7 +185,7 @@ extern __inline__ void cc_set_iclr(unsigned clear)
 		"i" (ASI_M_MXCC));
 }
 
-extern __inline__ unsigned cc_get_imsk(void)
+static inline unsigned cc_get_imsk(void)
 {
 	unsigned mask;
@@ -196,7 +196,7 @@ extern __inline__ unsigned cc_get_imsk(void)
 	return mask;
 }
 
-extern __inline__ void cc_set_imsk(unsigned mask)
+static inline void cc_set_imsk(unsigned mask)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 		"r" (mask),
@@ -204,7 +204,7 @@ extern __inline__ void cc_set_imsk(unsigned mask)
 		"i" (ASI_M_MXCC));
 }
 
-extern __inline__ unsigned cc_get_imsk_other(int cpuid)
+static inline unsigned cc_get_imsk_other(int cpuid)
 {
 	unsigned mask;
@@ -215,7 +215,7 @@ extern __inline__ unsigned cc_get_imsk_other(int cpuid)
 	return mask;
 }
 
-extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask)
+static inline void cc_set_imsk_other(int cpuid, unsigned mask)
 {
 	__asm__ __volatile__ ("stha %0, [%1] %2" : :
 		"r" (mask),
@@ -223,7 +223,7 @@ extern __inline__ void cc_set_imsk_other(int cpuid, unsigned mask)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ void cc_set_igen(unsigned gen)
+static inline void cc_set_igen(unsigned gen)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (gen),
@@ -239,7 +239,7 @@ extern __inline__ void cc_set_igen(unsigned gen)
 #define IGEN_MESSAGE(bcast, devid, sid, levels) \
 	(((bcast) << 31) | ((devid) << 23) | ((sid) << 15) | (levels))
 
-extern __inline__ void sun4d_send_ipi(int cpu, int level)
+static inline void sun4d_send_ipi(int cpu, int level)
 {
 	cc_set_igen(IGEN_MESSAGE(0, cpu << 3, 6 + ((level >> 1) & 7), 1 << (level - 1)));
 }

@@ -15,12 +15,12 @@
 #define PCI_IRQ_NONE		0xffffffff
 
-extern inline void pcibios_set_master(struct pci_dev *dev)
+static inline void pcibios_set_master(struct pci_dev *dev)
 {
 	/* No special bus mastering setup handling */
 }
 
-extern inline void pcibios_penalize_isa_irq(int irq, int active)
+static inline void pcibios_penalize_isa_irq(int irq, int active)
 {
 	/* We don't do dynamic PCI IRQ allocation */
 }
@@ -137,7 +137,7 @@ extern void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist
 * only drive the low 24-bits during PCI bus mastering, then
 * you would pass 0x00ffffff as the mask to this function.
 */
-extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
+static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
 {
	return 1;
 }

@@ -154,7 +154,7 @@ BTFIXUPDEF_CALL_CONST(int, pte_present, pte_t)
 BTFIXUPDEF_CALL(void, pte_clear, pte_t *)
 BTFIXUPDEF_CALL(int, pte_read, pte_t)
 
-extern __inline__ int pte_none(pte_t pte)
+static inline int pte_none(pte_t pte)
 {
 	return !(pte_val(pte) & ~BTFIXUP_SETHI(none_mask));
 }
@@ -167,7 +167,7 @@ BTFIXUPDEF_CALL_CONST(int, pmd_bad, pmd_t)
 BTFIXUPDEF_CALL_CONST(int, pmd_present, pmd_t)
 BTFIXUPDEF_CALL(void, pmd_clear, pmd_t *)
 
-extern __inline__ int pmd_none(pmd_t pmd)
+static inline int pmd_none(pmd_t pmd)
 {
 	return !(pmd_val(pmd) & ~BTFIXUP_SETHI(none_mask));
 }
@@ -195,19 +195,19 @@ BTFIXUPDEF_HALF(pte_dirtyi)
 BTFIXUPDEF_HALF(pte_youngi)
 
 extern int pte_write(pte_t pte) __attribute_const__;
-extern __inline__ int pte_write(pte_t pte)
+static inline int pte_write(pte_t pte)
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_writei);
 }
 
 extern int pte_dirty(pte_t pte) __attribute_const__;
-extern __inline__ int pte_dirty(pte_t pte)
+static inline int pte_dirty(pte_t pte)
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_dirtyi);
 }
 
 extern int pte_young(pte_t pte) __attribute_const__;
-extern __inline__ int pte_young(pte_t pte)
+static inline int pte_young(pte_t pte)
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_youngi);
 }
@@ -218,7 +218,7 @@ extern __inline__ int pte_young(pte_t pte)
 BTFIXUPDEF_HALF(pte_filei)
 
 extern int pte_file(pte_t pte) __attribute_const__;
-extern __inline__ int pte_file(pte_t pte)
+static inline int pte_file(pte_t pte)
 {
 	return pte_val(pte) & BTFIXUP_HALF(pte_filei);
 }
@@ -230,19 +230,19 @@ BTFIXUPDEF_HALF(pte_mkcleani)
 BTFIXUPDEF_HALF(pte_mkoldi)
 
 extern pte_t pte_wrprotect(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_wrprotect(pte_t pte)
+static inline pte_t pte_wrprotect(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_wrprotecti));
 }
 
 extern pte_t pte_mkclean(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_mkclean(pte_t pte)
+static inline pte_t pte_mkclean(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkcleani));
 }
 
 extern pte_t pte_mkold(pte_t pte) __attribute_const__;
-extern __inline__ pte_t pte_mkold(pte_t pte)
+static inline pte_t pte_mkold(pte_t pte)
 {
 	return __pte(pte_val(pte) & ~BTFIXUP_HALF(pte_mkoldi));
 }
@@ -279,7 +279,7 @@ BTFIXUPDEF_CALL_CONST(pte_t, mk_pte_io, unsigned long, pgprot_t, int)
 BTFIXUPDEF_INT(pte_modify_mask)
 
 extern pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
-extern __inline__ pte_t pte_modify(pte_t pte, pgprot_t newprot)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	return __pte((pte_val(pte) & BTFIXUP_INT(pte_modify_mask)) |
 		pgprot_val(newprot));
@@ -386,13 +386,13 @@ extern struct ctx_list ctx_used; /* Head of used contexts list */
 #define NO_CONTEXT     -1
 
-extern __inline__ void remove_from_ctx_list(struct ctx_list *entry)
+static inline void remove_from_ctx_list(struct ctx_list *entry)
 {
 	entry->next->prev = entry->prev;
 	entry->prev->next = entry->next;
 }
 
-extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
+static inline void add_to_ctx_list(struct ctx_list *head, struct ctx_list *entry)
 {
 	entry->next = head;
 	(entry->prev = head->prev)->next = entry;
@@ -401,7 +401,7 @@ extern __inline__ void add_to_ctx_list(struct ctx_list *head, struct ctx_list *e
 #define add_to_free_ctxlist(entry)	add_to_ctx_list(&ctx_free, entry)
 #define add_to_used_ctxlist(entry)	add_to_ctx_list(&ctx_used, entry)
 
-extern __inline__ unsigned long
+static inline unsigned long
 __get_phys (unsigned long addr)
 {
 	switch (sparc_cpu_model){
@@ -416,7 +416,7 @@ __get_phys (unsigned long addr)
 	}
 }
 
-extern __inline__ int
+static inline int
 __get_iospace (unsigned long addr)
 {
 	switch (sparc_cpu_model){

@@ -148,7 +148,7 @@ extern void *srmmu_nocache_pool;
 #define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
 
 /* Accessing the MMU control register. */
-extern __inline__ unsigned int srmmu_get_mmureg(void)
+static inline unsigned int srmmu_get_mmureg(void)
 {
 	unsigned int retval;
 	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
@@ -157,14 +157,14 @@ extern __inline__ unsigned int srmmu_get_mmureg(void)
 	return retval;
 }
 
-extern __inline__ void srmmu_set_mmureg(unsigned long regval)
+static inline void srmmu_set_mmureg(unsigned long regval)
 {
 	__asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
 		"r" (regval), "i" (ASI_M_MMUREGS) : "memory");
 }
 
-extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
+static inline void srmmu_set_ctable_ptr(unsigned long paddr)
 {
 	paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
@@ -173,7 +173,7 @@ extern __inline__ void srmmu_set_ctable_ptr(unsigned long paddr)
 		"memory");
 }
 
-extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
+static inline unsigned long srmmu_get_ctable_ptr(void)
 {
 	unsigned int retval;
@@ -184,14 +184,14 @@ extern __inline__ unsigned long srmmu_get_ctable_ptr(void)
 	return (retval & SRMMU_CTX_PMASK) << 4;
 }
 
-extern __inline__ void srmmu_set_context(int context)
+static inline void srmmu_set_context(int context)
 {
 	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
 		"r" (context), "r" (SRMMU_CTX_REG),
 		"i" (ASI_M_MMUREGS) : "memory");
 }
 
-extern __inline__ int srmmu_get_context(void)
+static inline int srmmu_get_context(void)
 {
 	register int retval;
 	__asm__ __volatile__("lda [%1] %2, %0\n\t" :
@@ -201,7 +201,7 @@ extern __inline__ int srmmu_get_context(void)
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_fstatus(void)
+static inline unsigned int srmmu_get_fstatus(void)
 {
 	unsigned int retval;
@@ -211,7 +211,7 @@ extern __inline__ unsigned int srmmu_get_fstatus(void)
 	return retval;
 }
 
-extern __inline__ unsigned int srmmu_get_faddr(void)
+static inline unsigned int srmmu_get_faddr(void)
 {
 	unsigned int retval;
@@ -222,7 +222,7 @@ extern __inline__ unsigned int srmmu_get_faddr(void)
 }
 
 /* This is guaranteed on all SRMMU's. */
-extern __inline__ void srmmu_flush_whole_tlb(void)
+static inline void srmmu_flush_whole_tlb(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 		"r" (0x400), /* Flush entire TLB!! */
@@ -231,7 +231,7 @@ extern __inline__ void srmmu_flush_whole_tlb(void)
 }
 
 /* These flush types are not available on all chips... */
-extern __inline__ void srmmu_flush_tlb_ctx(void)
+static inline void srmmu_flush_tlb_ctx(void)
 {
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
 		"r" (0x300), /* Flush TLB ctx.. */
@@ -239,7 +239,7 @@ extern __inline__ void srmmu_flush_tlb_ctx(void)
 }
 
-extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
+static inline void srmmu_flush_tlb_region(unsigned long addr)
 {
 	addr &= SRMMU_PGDIR_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -249,7 +249,7 @@ extern __inline__ void srmmu_flush_tlb_region(unsigned long addr)
 }
 
-extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
+static inline void srmmu_flush_tlb_segment(unsigned long addr)
 {
 	addr &= SRMMU_REAL_PMD_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -258,7 +258,7 @@ extern __inline__ void srmmu_flush_tlb_segment(unsigned long addr)
 }
 
-extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
+static inline void srmmu_flush_tlb_page(unsigned long page)
 {
 	page &= PAGE_MASK;
 	__asm__ __volatile__("sta %%g0, [%0] %1\n\t": :
@@ -267,7 +267,7 @@ extern __inline__ void srmmu_flush_tlb_page(unsigned long page)
 }
 
-extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
+static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
 {
 	unsigned long retval;
@@ -279,7 +279,7 @@ extern __inline__ unsigned long srmmu_hwprobe(unsigned long vaddr)
 	return retval;
 }
 
-extern __inline__ int
+static inline int
 srmmu_get_pte (unsigned long addr)
 {
 	register unsigned long entry;

@@ -79,7 +79,7 @@ struct thread_struct {
 extern unsigned long thread_saved_pc(struct task_struct *t);
 
 /* Do necessary setup to start up a newly executed thread. */
-extern __inline__ void start_thread(struct pt_regs * regs, unsigned long pc,
+static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 		unsigned long sp)
 {
 	register unsigned long zero asm("g1");

@@ -38,7 +38,7 @@
 #ifndef __ASSEMBLY__
 
 /* Get the %psr register. */
-extern __inline__ unsigned int get_psr(void)
+static inline unsigned int get_psr(void)
 {
 	unsigned int psr;
 	__asm__ __volatile__(
@@ -53,7 +53,7 @@ extern __inline__ unsigned int get_psr(void)
 	return psr;
 }
 
-extern __inline__ void put_psr(unsigned int new_psr)
+static inline void put_psr(unsigned int new_psr)
 {
 	__asm__ __volatile__(
 		"wr %0, 0x0, %%psr\n\t"
@@ -72,7 +72,7 @@ extern __inline__ void put_psr(unsigned int new_psr)
 extern unsigned int fsr_storage;
 
-extern __inline__ unsigned int get_fsr(void)
+static inline unsigned int get_fsr(void)
 {
 	unsigned int fsr = 0;

@@ -65,7 +65,7 @@ struct sbi_regs {
 #ifndef __ASSEMBLY__
 
-extern __inline__ int acquire_sbi(int devid, int mask)
+static inline int acquire_sbi(int devid, int mask)
 {
 	__asm__ __volatile__ ("swapa [%2] %3, %0" :
 		"=r" (mask) :
@@ -75,7 +75,7 @@ extern __inline__ int acquire_sbi(int devid, int mask)
 	return mask;
 }
 
-extern __inline__ void release_sbi(int devid, int mask)
+static inline void release_sbi(int devid, int mask)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (mask),
@@ -83,7 +83,7 @@ extern __inline__ void release_sbi(int devid, int mask)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ void set_sbi_tid(int devid, int targetid)
+static inline void set_sbi_tid(int devid, int targetid)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (targetid),
@@ -91,7 +91,7 @@ extern __inline__ void set_sbi_tid(int devid, int targetid)
 		"i" (ASI_M_CTL));
 }
 
-extern __inline__ int get_sbi_ctl(int devid, int cfgno)
+static inline int get_sbi_ctl(int devid, int cfgno)
 {
 	int cfg;
@@ -102,7 +102,7 @@ extern __inline__ int get_sbi_ctl(int devid, int cfgno)
 	return cfg;
 }
 
-extern __inline__ void set_sbi_ctl(int devid, int cfgno, int cfg)
+static inline void set_sbi_ctl(int devid, int cfgno, int cfg)
 {
 	__asm__ __volatile__ ("sta %0, [%1] %2" : :
 		"r" (cfg),

@@ -28,12 +28,12 @@
 * numbers + offsets, and vice versa.
 */
 
-extern __inline__ unsigned long sbus_devaddr(int slotnum, unsigned long offset)
+static inline unsigned long sbus_devaddr(int slotnum, unsigned long offset)
 {
	return (unsigned long) (SUN_SBUS_BVADDR+((slotnum)<<25)+(offset));
 }
 
-extern __inline__ int sbus_dev_slot(unsigned long dev_addr)
+static inline int sbus_dev_slot(unsigned long dev_addr)
 {
	return (int) (((dev_addr)-SUN_SBUS_BVADDR)>>25);
 }
@@ -80,7 +80,7 @@ struct sbus_bus {
 extern struct sbus_bus *sbus_root;
 
-extern __inline__ int
+static inline int
 sbus_is_slave(struct sbus_dev *dev)
 {
	/* XXX Have to write this for sun4c's */

@@ -60,22 +60,22 @@ BTFIXUPDEF_BLACKBOX(load_current)
 #define smp_cross_call(func,arg1,arg2,arg3,arg4,arg5) BTFIXUP_CALL(smp_cross_call)(func,arg1,arg2,arg3,arg4,arg5)
 #define smp_message_pass(target,msg,data,wait) BTFIXUP_CALL(smp_message_pass)(target,msg,data,wait)
 
-extern __inline__ void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
+static inline void xc0(smpfunc_t func) { smp_cross_call(func, 0, 0, 0, 0, 0); }
-extern __inline__ void xc1(smpfunc_t func, unsigned long arg1)
+static inline void xc1(smpfunc_t func, unsigned long arg1)
 { smp_cross_call(func, arg1, 0, 0, 0, 0); }
-extern __inline__ void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
+static inline void xc2(smpfunc_t func, unsigned long arg1, unsigned long arg2)
 { smp_cross_call(func, arg1, arg2, 0, 0, 0); }
-extern __inline__ void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc3(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		unsigned long arg3)
 { smp_cross_call(func, arg1, arg2, arg3, 0, 0); }
-extern __inline__ void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		unsigned long arg3, unsigned long arg4)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, 0); }
-extern __inline__ void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
+static inline void xc5(smpfunc_t func, unsigned long arg1, unsigned long arg2,
 		unsigned long arg3, unsigned long arg4, unsigned long arg5)
 { smp_cross_call(func, arg1, arg2, arg3, arg4, arg5); }
 
-extern __inline__ int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
+static inline int smp_call_function(void (*func)(void *info), void *info, int nonatomic, int wait)
 {
	xc1((smpfunc_t)func, (unsigned long)info);
	return 0;
@@ -84,16 +84,16 @@ extern __inline__ int smp_call_function(void (*func)(void *info), void *info, in
 extern __volatile__ int __cpu_number_map[NR_CPUS];
 extern __volatile__ int __cpu_logical_map[NR_CPUS];
 
-extern __inline__ int cpu_logical_map(int cpu)
+static inline int cpu_logical_map(int cpu)
 {
	return __cpu_logical_map[cpu];
 }
 
-extern __inline__ int cpu_number_map(int cpu)
+static inline int cpu_number_map(int cpu)
 {
	return __cpu_number_map[cpu];
 }
 
-extern __inline__ int hard_smp4m_processor_id(void)
+static inline int hard_smp4m_processor_id(void)
 {
	int cpuid;
@@ -104,7 +104,7 @@ extern __inline__ int hard_smp4m_processor_id(void)
	return cpuid;
 }
 
-extern __inline__ int hard_smp4d_processor_id(void)
+static inline int hard_smp4d_processor_id(void)
 {
	int cpuid;
@@ -114,7 +114,7 @@ extern __inline__ int hard_smp4d_processor_id(void)
 }
 
 #ifndef MODULE
-extern __inline__ int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
	int cpuid;
@@ -136,7 +136,7 @@ extern __inline__ int hard_smp_processor_id(void)
	return cpuid;
 }
 #else
-extern __inline__ int hard_smp_processor_id(void)
+static inline int hard_smp_processor_id(void)
 {
	int cpuid;

@@ -15,7 +15,7 @@
 * atomic.
 */
 
-extern __inline__ __volatile__ char test_and_set(void *addr)
+static inline __volatile__ char test_and_set(void *addr)
 {
	char state = 0;
@@ -27,7 +27,7 @@ extern __inline__ __volatile__ char test_and_set(void *addr)
 }
 
 /* Initialize a spin-lock. */
-extern __inline__ __volatile__ smp_initlock(void *spinlock)
+static inline __volatile__ smp_initlock(void *spinlock)
 {
	/* Unset the lock. */
	*((unsigned char *) spinlock) = 0;
@@ -36,7 +36,7 @@ extern __inline__ __volatile__ smp_initlock(void *spinlock)
 }
 
 /* This routine spins until it acquires the lock at ADDR. */
-extern __inline__ __volatile__ smp_lock(void *addr)
+static inline __volatile__ smp_lock(void *addr)
 {
	while(test_and_set(addr) == 0xff)
		;
@@ -46,7 +46,7 @@ extern __inline__ __volatile__ smp_lock(void *addr)
 }
 
 /* This routine releases the lock at ADDR. */
-extern __inline__ __volatile__ smp_unlock(void *addr)
+static inline __volatile__ smp_unlock(void *addr)
 {
	*((unsigned char *) addr) = 0;
 }

@@ -17,7 +17,7 @@
 #define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)
 
-extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
+static inline void __raw_spin_lock(raw_spinlock_t *lock)
 {
	__asm__ __volatile__(
	"\n1:\n\t"
@@ -37,7 +37,7 @@ extern __inline__ void __raw_spin_lock(raw_spinlock_t *lock)
	: "g2", "memory", "cc");
 }
 
-extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
+static inline int __raw_spin_trylock(raw_spinlock_t *lock)
 {
	unsigned int result;
	__asm__ __volatile__("ldstub [%1], %0"
@@ -47,7 +47,7 @@ extern __inline__ int __raw_spin_trylock(raw_spinlock_t *lock)
	return (result == 0);
 }
 
-extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
+static inline void __raw_spin_unlock(raw_spinlock_t *lock)
 {
	__asm__ __volatile__("stb %%g0, [%0]" : : "r" (lock) : "memory");
 }
@@ -78,7 +78,7 @@ extern __inline__ void __raw_spin_unlock(raw_spinlock_t *lock)
 *
 * Unfortunately this scheme limits us to ~16,000,000 cpus.
 */
-extern __inline__ void __read_lock(raw_rwlock_t *rw)
+static inline void __read_lock(raw_rwlock_t *rw)
 {
	register raw_rwlock_t *lp asm("g1");
	lp = rw;
@@ -98,7 +98,7 @@ do { unsigned long flags; \
	local_irq_restore(flags); \
 } while(0)
 
-extern __inline__ void __read_unlock(raw_rwlock_t *rw)
+static inline void __read_unlock(raw_rwlock_t *rw)
 {
	register raw_rwlock_t *lp asm("g1");
	lp = rw;

@@ -204,7 +204,7 @@ static inline unsigned long getipl(void)
 BTFIXUPDEF_CALL(void, ___xchg32, void)
 #endif
 
-extern __inline__ unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
+static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val)
 {
 #ifdef CONFIG_SMP
	__asm__ __volatile__("swap [%2], %0"

@@ -22,7 +22,7 @@ struct tt_entry {
 /* We set this to _start in system setup. */
 extern struct tt_entry *sparc_ttable;
 
-extern __inline__ unsigned long get_tbr(void)
+static inline unsigned long get_tbr(void)
 {
	unsigned long tbr;