Commit 9d76064a authored by Anton Blanchard

ppc64: extern inline -> static inline

parent 3e89759a
......@@ -134,7 +134,7 @@ extern void iounmap(void *addr);
* Change virtual addresses to physical addresses and vv, for
* addresses in the area where the kernel has the RAM mapped.
*/
extern inline unsigned long virt_to_phys(volatile void * address)
static inline unsigned long virt_to_phys(volatile void * address)
{
#ifdef __IO_DEBUG
printk("virt_to_phys: 0x%08lx -> 0x%08lx\n",
......@@ -144,7 +144,7 @@ extern inline unsigned long virt_to_phys(volatile void * address)
return __pa((unsigned long)address);
}
extern inline void * phys_to_virt(unsigned long address)
static inline void * phys_to_virt(unsigned long address)
{
#ifdef __IO_DEBUG
printk("phys_to_virt: 0x%08lx -> 0x%08lx\n", address, __va(address));
......@@ -163,7 +163,7 @@ extern inline void * phys_to_virt(unsigned long address)
#endif /* __KERNEL__ */
/*
 * iosync() - issue the PowerPC "sync" instruction (storage barrier).
 * The "memory" clobber also stops the compiler from reordering memory
 * accesses across this point.
 * (Diff context: the old `extern inline` declaration line is shown
 * immediately above the new `static inline` one.)
 */
extern inline void iosync(void)
static inline void iosync(void)
{
__asm__ __volatile__ ("sync" : : : "memory");
}
......@@ -178,7 +178,7 @@ extern inline void iosync(void)
/*
* 8, 16 and 32 bit, big and little endian I/O operations, with barrier.
*/
extern inline int in_8(volatile unsigned char *addr)
static inline int in_8(volatile unsigned char *addr)
{
int ret;
......@@ -186,12 +186,12 @@ extern inline int in_8(volatile unsigned char *addr)
return ret;
}
/*
 * out_8() - store the low byte of @val to I/O location @addr using a
 * plain "stb"; the "=m" (*addr) output operand ties the access to the
 * target so the compiler treats it as a real memory write.
 * NOTE(review): the section comment says "with barrier", but no
 * sync/eieio appears in this store — the barriers may live in the
 * hunks cut from this view; confirm against the full header.
 */
extern inline void out_8(volatile unsigned char *addr, int val)
static inline void out_8(volatile unsigned char *addr, int val)
{
__asm__ __volatile__("stb%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
extern inline int in_le16(volatile unsigned short *addr)
static inline int in_le16(volatile unsigned short *addr)
{
int ret;
......@@ -200,7 +200,7 @@ extern inline int in_le16(volatile unsigned short *addr)
return ret;
}
extern inline int in_be16(volatile unsigned short *addr)
static inline int in_be16(volatile unsigned short *addr)
{
int ret;
......@@ -208,18 +208,18 @@ extern inline int in_be16(volatile unsigned short *addr)
return ret;
}
/*
 * out_le16() - store a 16-bit value to @addr in little-endian byte
 * order via "sthbrx" (store halfword byte-reversed indexed).
 */
extern inline void out_le16(volatile unsigned short *addr, int val)
static inline void out_le16(volatile unsigned short *addr, int val)
{
__asm__ __volatile__("sthbrx %1,0,%2" : "=m" (*addr) :
"r" (val), "r" (addr));
}
/*
 * out_be16() - store a 16-bit value to @addr with a plain "sth",
 * i.e. the CPU's native byte order (big-endian on PowerPC).
 */
extern inline void out_be16(volatile unsigned short *addr, int val)
static inline void out_be16(volatile unsigned short *addr, int val)
{
__asm__ __volatile__("sth%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
extern inline unsigned in_le32(volatile unsigned *addr)
static inline unsigned in_le32(volatile unsigned *addr)
{
unsigned ret;
......@@ -228,7 +228,7 @@ extern inline unsigned in_le32(volatile unsigned *addr)
return ret;
}
extern inline unsigned in_be32(volatile unsigned *addr)
static inline unsigned in_be32(volatile unsigned *addr)
{
unsigned ret;
......@@ -236,13 +236,13 @@ extern inline unsigned in_be32(volatile unsigned *addr)
return ret;
}
/*
 * out_le32() - store a 32-bit value to @addr in little-endian byte
 * order via "stwbrx" (store word byte-reversed indexed).
 */
extern inline void out_le32(volatile unsigned *addr, int val)
static inline void out_le32(volatile unsigned *addr, int val)
{
__asm__ __volatile__("stwbrx %1,0,%2" : "=m" (*addr) :
"r" (val), "r" (addr));
}
/*
 * out_be32() - store a 32-bit value to @addr with a plain "stw"
 * (native big-endian byte order on PowerPC).
 */
extern inline void out_be32(volatile unsigned *addr, int val)
static inline void out_be32(volatile unsigned *addr, int val)
{
__asm__ __volatile__("stw%U0%X0 %1,%0" : "=m" (*addr) : "r" (val));
}
......
......@@ -131,7 +131,7 @@ extern void xmon(struct pt_regs *excp);
#define PAGE_BUG(page) do { BUG(); } while (0)
/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
static inline int get_order(unsigned long size)
{
int order;
......
......@@ -25,12 +25,12 @@ extern int pcibios_assign_all_busses(void);
#define PCIBIOS_MIN_IO 0x1000
#define PCIBIOS_MIN_MEM 0x10000000
/*
 * pcibios_set_master() - arch hook called when @dev is made a PCI bus
 * master.  Deliberately empty on this platform.
 */
extern inline void pcibios_set_master(struct pci_dev *dev)
static inline void pcibios_set_master(struct pci_dev *dev)
{
/* No special bus mastering setup handling */
}
/*
 * pcibios_penalize_isa_irq() - hint that @irq is in use by ISA.
 * No-op here: as the body comment notes, this platform does not do
 * dynamic PCI IRQ allocation, so there is nothing to penalize.
 */
extern inline void pcibios_penalize_isa_irq(int irq)
static inline void pcibios_penalize_isa_irq(int irq)
{
/* We don't do dynamic PCI IRQ allocation */
}
......@@ -78,7 +78,7 @@ extern void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
extern void pSeries_pcibios_init_early(void);
extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
static inline void pci_dma_sync_single(struct pci_dev *hwdev,
dma_addr_t dma_handle,
size_t size, int direction)
{
......@@ -87,7 +87,7 @@ extern inline void pci_dma_sync_single(struct pci_dev *hwdev,
/* nothing to do */
}
extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
struct scatterlist *sg,
int nelems, int direction)
{
......@@ -101,7 +101,7 @@ extern inline void pci_dma_sync_sg(struct pci_dev *hwdev,
* only drive the low 24-bits during PCI bus mastering, then
* you would pass 0x00ffffff as the mask to this function.
*/
/*
 * pci_dma_supported() - report whether @hwdev can perform DMA given
 * the address restriction @mask (see the comment above: e.g.
 * 0x00ffffff for a 24-bit-only device).
 * Unconditionally returns 1 (supported); @mask is never inspected on
 * this platform.
 */
extern inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
{
return 1;
}
......
......@@ -246,35 +246,35 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
* The following only work if pte_present() is true.
* Undefined behaviour if not..
*/
/*
 * PTE flag tests.  Each returns a nonzero value when the named flag
 * bit is set in @pte, zero otherwise.  Per the comment preceding this
 * group, behaviour is only defined when pte_present() is true.
 */
static inline int pte_read(pte_t pte)	{ return pte_val(pte) & _PAGE_USER; }
static inline int pte_write(pte_t pte)	{ return pte_val(pte) & _PAGE_RW; }
static inline int pte_exec(pte_t pte)	{ return pte_val(pte) & _PAGE_EXEC; }
static inline int pte_dirty(pte_t pte)	{ return pte_val(pte) & _PAGE_DIRTY; }
static inline int pte_young(pte_t pte)	{ return pte_val(pte) & _PAGE_ACCESSED; }
/*
 * pte_uncache()/pte_cache() - set/clear _PAGE_NO_CACHE in a pte.
 * NOTE(review): both take @pte by value and return void, so they
 * modify only the local copy — as written they have no effect on the
 * caller's pte.  Confirm callers do not expect these to mutate the
 * page table entry (a fix would need an interface change, e.g.
 * returning pte_t or taking a pointer).
 */
extern inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
extern inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
static inline void pte_uncache(pte_t pte) { pte_val(pte) |= _PAGE_NO_CACHE; }
static inline void pte_cache(pte_t pte) { pte_val(pte) &= ~_PAGE_NO_CACHE; }
/* Return a copy of @pte with the user-access bit (_PAGE_USER) cleared. */
static inline pte_t pte_rdprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_USER;
	return pte;
}
/* Return a copy of @pte with the execute bit (_PAGE_EXEC) cleared. */
static inline pte_t pte_exprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_EXEC;
	return pte;
}
/* Return a copy of @pte with the write bit (_PAGE_RW) cleared. */
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_RW);
	return pte;
}
/* Return a copy of @pte with the dirty bit (_PAGE_DIRTY) cleared. */
static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~(_PAGE_DIRTY);
	return pte;
}
/* Return a copy of @pte with the accessed bit (_PAGE_ACCESSED) cleared. */
static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_ACCESSED;
	return pte;
}
/* Return a copy of @pte with the user-access bit (_PAGE_USER) set. */
static inline pte_t pte_mkread(pte_t pte)
{
	pte_val(pte) |= _PAGE_USER;
	return pte;
}
/* Return a copy of @pte with both _PAGE_USER and _PAGE_EXEC set. */
static inline pte_t pte_mkexec(pte_t pte)
{
	pte_val(pte) |= _PAGE_USER | _PAGE_EXEC;
	return pte;
}
/* Return a copy of @pte with the write bit (_PAGE_RW) set. */
static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_RW;
	return pte;
}
/* Return a copy of @pte with the dirty bit (_PAGE_DIRTY) set. */
static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	return pte;
}
/* Return a copy of @pte with the accessed bit (_PAGE_ACCESSED) set. */
static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_ACCESSED;
	return pte;
}
/* Atomic PTE updates */
......
......@@ -706,12 +706,12 @@ unsigned long get_wchan(struct task_struct *p);
#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH
/*
 * prefetch() - hint the CPU to pull the cache line containing @x into
 * the data cache via "dcbt" (data cache block touch).
 */
extern inline void prefetch(const void *x)
static inline void prefetch(const void *x)
{
__asm__ __volatile__ ("dcbt 0,%0" : : "r" (x));
}
/*
 * prefetchw() - like prefetch(), but uses "dcbtst" (data cache block
 * touch for store) to prepare the line at @x for an upcoming write.
 */
extern inline void prefetchw(const void *x)
static inline void prefetchw(const void *x)
{
__asm__ __volatile__ ("dcbtst 0,%0" : : "r" (x));
}
......
......@@ -72,7 +72,7 @@ extern void __down(struct semaphore * sem);
extern int __down_interruptible(struct semaphore * sem);
extern void __up(struct semaphore * sem);
extern inline void down(struct semaphore * sem)
static inline void down(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
......@@ -86,7 +86,7 @@ extern inline void down(struct semaphore * sem)
smp_wmb();
}
extern inline int down_interruptible(struct semaphore * sem)
static inline int down_interruptible(struct semaphore * sem)
{
int ret = 0;
......@@ -100,7 +100,7 @@ extern inline int down_interruptible(struct semaphore * sem)
return ret;
}
extern inline int down_trylock(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret;
......@@ -113,7 +113,7 @@ extern inline int down_trylock(struct semaphore * sem)
return ret;
}
extern inline void up(struct semaphore * sem)
static inline void up(struct semaphore * sem)
{
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
......
......@@ -223,7 +223,7 @@ typedef struct sigevent {
#ifdef __KERNEL__
#include <linux/string.h>
extern inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
static inline void copy_siginfo(siginfo_t *to, siginfo_t *from)
{
if (from->si_code < 0)
memcpy(to, from, sizeof(siginfo_t));
......
......@@ -87,7 +87,8 @@ static __inline__ void set_dec(int val)
mtspr(SPRN_DEC, val);
}
/*
 * tb_ticks_since() - timebase ticks elapsed since @tstamp.
 * Unsigned subtraction keeps the result correct modulo the word size
 * even if the timebase value has wrapped past @tstamp.
 */
static inline unsigned long tb_ticks_since(unsigned long tstamp)
{
	unsigned long now = get_tb();

	return now - tstamp;
}
......
......@@ -25,7 +25,7 @@ extern void __flush_tlb_range(struct mm_struct *mm,
#define flush_tlb_kernel_range(start, end) \
__flush_tlb_range(&init_mm, (start), (end))
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* PPC has hw page tables. */
......
......@@ -38,7 +38,7 @@
#define __access_ok(addr,size) (__kernel_ok || __user_ok((addr),(size)))
#define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
extern inline int verify_area(int type, const void * addr, unsigned long size)
static inline int verify_area(int type, const void * addr, unsigned long size)
{
return access_ok(type,addr,size) ? 0 : -EFAULT;
}
......@@ -200,7 +200,7 @@ do { \
extern unsigned long __copy_tofrom_user(void *to, const void *from, unsigned long size);
extern inline unsigned long
static inline unsigned long
copy_from_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
......@@ -214,7 +214,7 @@ copy_from_user(void *to, const void *from, unsigned long n)
return n;
}
extern inline unsigned long
static inline unsigned long
copy_to_user(void *to, const void *from, unsigned long n)
{
unsigned long over;
......@@ -235,7 +235,7 @@ copy_to_user(void *to, const void *from, unsigned long n)
extern unsigned long __clear_user(void *addr, unsigned long size);
extern inline unsigned long
static inline unsigned long
clear_user(void *addr, unsigned long size)
{
if (access_ok(VERIFY_WRITE, addr, size))
......@@ -245,7 +245,7 @@ clear_user(void *addr, unsigned long size)
extern int __strncpy_from_user(char *dst, const char *src, long count);
extern inline long
static inline long
strncpy_from_user(char *dst, const char *src, long count)
{
if (access_ok(VERIFY_READ, src, 1))
......@@ -269,7 +269,7 @@ extern int __strnlen_user(const char *str, long len, unsigned long top);
* The `top' parameter to __strnlen_user is to make sure that
* we can never overflow from the user area into kernel space.
*/
extern __inline__ int strnlen_user(const char *str, long len)
static inline int strnlen_user(const char *str, long len)
{
unsigned long top = __kernel_ok? ~0UL: TASK_SIZE - 1;
......
......@@ -26,12 +26,12 @@
* <linux/vt_buffer.h> has already done the right job for us.
*/
/*
 * scr_writew() - store a 16-bit screen-buffer word to @addr.
 * Delegates to st_le16() — by its name a little-endian 16-bit store;
 * TODO confirm against the st_le16() definition.
 */
extern inline void scr_writew(u16 val, volatile u16 *addr)
static inline void scr_writew(u16 val, volatile u16 *addr)
{
st_le16(addr, val);
}
/*
 * scr_readw() - load a 16-bit screen-buffer word from @addr.
 * Delegates to ld_le16() — presumably a little-endian 16-bit load;
 * verify against the helper's definition.
 */
extern inline u16 scr_readw(volatile const u16 *addr)
static inline u16 scr_readw(volatile const u16 *addr)
{
return ld_le16(addr);
}
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment