Commit 920c7a5d authored by Harvey Harrison, committed by Linus Torvalds

mm: remove fastcall from mm/

fastcall is always defined to be empty, remove it

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 1e548deb
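
For context, a minimal sketch of what the annotation expanded to at this point. The exact header and guard are assumptions (recalled as the generic fallback in include/linux/linkage.h); the point the commit message states is that no architecture defines fastcall to anything any more, so the keyword preprocesses away and removing it is purely mechanical:

	/* Sketch only: approximate shape of the generic fallback
	 * (header location and guard assumed, not taken from this commit). */
	#ifndef fastcall
	#define fastcall	/* expands to nothing on every architecture */
	#endif

	/* A declaration such as
	 *	void fastcall unlock_page(struct page *page);
	 * therefore already compiled exactly as
	 *	void unlock_page(struct page *page);
	 * so dropping the annotation changes no generated code. */
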
@@ -527,7 +527,7 @@ static inline void wake_up_page(struct page *page, int bit)
 	__wake_up_bit(page_waitqueue(page), &page->flags, bit);
 }
 
-void fastcall wait_on_page_bit(struct page *page, int bit_nr)
+void wait_on_page_bit(struct page *page, int bit_nr)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, bit_nr);
@@ -551,7 +551,7 @@ EXPORT_SYMBOL(wait_on_page_bit);
  * the clear_bit and the read of the waitqueue (to avoid SMP races with a
  * parallel wait_on_page_locked()).
  */
-void fastcall unlock_page(struct page *page)
+void unlock_page(struct page *page)
 {
 	smp_mb__before_clear_bit();
 	if (!TestClearPageLocked(page))
@@ -585,7 +585,7 @@ EXPORT_SYMBOL(end_page_writeback);
  * chances are that on the second loop, the block layer's plug list is empty,
  * so sync_page() will then return in state TASK_UNINTERRUPTIBLE.
  */
-void fastcall __lock_page(struct page *page)
+void __lock_page(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
@@ -606,7 +606,7 @@ int fastcall __lock_page_killable(struct page *page)
  * Variant of lock_page that does not require the caller to hold a reference
  * on the page's mapping.
  */
-void fastcall __lock_page_nosync(struct page *page)
+void __lock_page_nosync(struct page *page)
 {
 	DEFINE_WAIT_BIT(wait, &page->flags, PG_locked);
 	__wait_on_bit_lock(page_waitqueue(page), &wait, __sleep_on_page_lock,
@@ -1276,7 +1276,7 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
  * This adds the requested page to the page cache if it isn't already there,
  * and schedules an I/O to read in its contents from disk.
  */
-static int fastcall page_cache_read(struct file * file, pgoff_t offset)
+static int page_cache_read(struct file *file, pgoff_t offset)
 {
 	struct address_space *mapping = file->f_mapping;
 	struct page *page;
@@ -163,7 +163,7 @@ static inline unsigned long map_new_virtual(struct page *page)
 	return vaddr;
 }
 
-void fastcall *kmap_high(struct page *page)
+void *kmap_high(struct page *page)
 {
 	unsigned long vaddr;
@@ -185,7 +185,7 @@ void fastcall *kmap_high(struct page *page)
 
 EXPORT_SYMBOL(kmap_high);
 
-void fastcall kunmap_high(struct page *page)
+void kunmap_high(struct page *page)
 {
 	unsigned long vaddr;
 	unsigned long nr;
@@ -34,7 +34,7 @@ static inline void __put_page(struct page *page)
 	atomic_dec(&page->_count);
 }
 
-extern void fastcall __init __free_pages_bootmem(struct page *page,
+extern void __init __free_pages_bootmem(struct page *page,
 						unsigned int order);
 
 /*
@@ -1109,7 +1109,8 @@ int get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 }
 EXPORT_SYMBOL(get_user_pages);
 
-pte_t * fastcall get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl)
+pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
+			spinlock_t **ptl)
 {
 	pgd_t * pgd = pgd_offset(mm, addr);
 	pud_t * pud = pud_alloc(mm, pgd, addr);
@@ -1073,7 +1073,7 @@ static int __set_page_dirty(struct page *page)
 	return 0;
 }
 
-int fastcall set_page_dirty(struct page *page)
+int set_page_dirty(struct page *page)
 {
 	int ret = __set_page_dirty(page);
 	if (ret)
@@ -537,7 +537,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void fastcall __init __free_pages_bootmem(struct page *page, unsigned int order)
+void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
@@ -974,7 +974,7 @@ void mark_free_pages(struct zone *zone)
 /*
  * Free a 0-order page
  */
-static void fastcall free_hot_cold_page(struct page *page, int cold)
+static void free_hot_cold_page(struct page *page, int cold)
 {
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
@@ -1007,12 +1007,12 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	put_cpu();
 }
 
-void fastcall free_hot_page(struct page *page)
+void free_hot_page(struct page *page)
 {
 	free_hot_cold_page(page, 0);
 }
 
-void fastcall free_cold_page(struct page *page)
+void free_cold_page(struct page *page)
 {
 	free_hot_cold_page(page, 1);
 }
@@ -1641,7 +1641,7 @@ EXPORT_SYMBOL(__alloc_pages);
 /*
  * Common helper functions.
  */
-fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
+unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 {
 	struct page * page;
 	page = alloc_pages(gfp_mask, order);
@@ -1652,7 +1652,7 @@ fastcall unsigned long __get_free_pages(gfp_t gfp_mask, unsigned int order)
 
 EXPORT_SYMBOL(__get_free_pages);
 
-fastcall unsigned long get_zeroed_page(gfp_t gfp_mask)
+unsigned long get_zeroed_page(gfp_t gfp_mask)
 {
 	struct page * page;
@@ -1678,7 +1678,7 @@ void __pagevec_free(struct pagevec *pvec)
 		free_hot_cold_page(pvec->pages[i], pvec->cold);
 }
 
-fastcall void __free_pages(struct page *page, unsigned int order)
+void __free_pages(struct page *page, unsigned int order)
 {
 	if (put_page_testzero(page)) {
 		if (order == 0)
@@ -1690,7 +1690,7 @@ fastcall void __free_pages(struct page *page, unsigned int order)
 
 EXPORT_SYMBOL(__free_pages);
 
-fastcall void free_pages(unsigned long addr, unsigned int order)
+void free_pages(unsigned long addr, unsigned int order)
 {
 	if (addr != 0) {
 		VM_BUG_ON(!virt_addr_valid((void *)addr));
@@ -41,7 +41,7 @@ static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs) = { 0, };
  * This path almost never happens for VM activity - pages are normally
  * freed via pagevecs. But it gets used by networking.
  */
-static void fastcall __page_cache_release(struct page *page)
+static void __page_cache_release(struct page *page)
 {
 	if (PageLRU(page)) {
 		unsigned long flags;
@@ -165,7 +165,7 @@ int rotate_reclaimable_page(struct page *page)
 /*
  * FIXME: speed this up?
  */
-void fastcall activate_page(struct page *page)
+void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
@@ -186,7 +186,7 @@ void fastcall activate_page(struct page *page)
  * inactive,referenced -> active,unreferenced
  * active,unreferenced -> active,referenced
  */
-void fastcall mark_page_accessed(struct page *page)
+void mark_page_accessed(struct page *page)
 {
 	if (!PageActive(page) && PageReferenced(page) && PageLRU(page)) {
 		activate_page(page);
@@ -202,7 +202,7 @@ EXPORT_SYMBOL(mark_page_accessed);
  * lru_cache_add: add a page to the page lists
  * @page: the page to add
  */
-void fastcall lru_cache_add(struct page *page)
+void lru_cache_add(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs);
@@ -212,7 +212,7 @@ void fastcall lru_cache_add(struct page *page)
 	put_cpu_var(lru_add_pvecs);
 }
 
-void fastcall lru_cache_add_active(struct page *page)
+void lru_cache_add_active(struct page *page)
 {
 	struct pagevec *pvec = &get_cpu_var(lru_add_active_pvecs);