Commit 48db57f8 authored by Nick Piggin, committed by Linus Torvalds

[PATCH] mm: free_pages opt

Try to streamline free_pages_bulk by ensuring callers don't pass in a
'count' that exceeds the list size.

Some cleanups:
Rename __free_pages_bulk to __free_one_page.
Put the page list manipulation from __free_pages_ok into free_one_page.
Make __free_pages_ok static.
Signed-off-by: Nick Piggin <npiggin@suse.de>
Cc: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 23316bc8
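In outline, the new arrangement looks roughly like this (a simplified sketch of the two functions at the heart of the change, condensed from the diff below; surrounding code is omitted):

static void free_pages_bulk(struct zone *zone, int count,
                            struct list_head *list, int order)
{
        spin_lock(&zone->lock);
        zone->all_unreclaimable = 0;
        zone->pages_scanned = 0;
        while (count--) {
                struct page *page;

                /* caller guarantees count never exceeds the list size */
                BUG_ON(list_empty(list));
                page = list_entry(list->prev, struct page, lru);
                /* delete first: __free_one_page() manipulates page->lru */
                list_del(&page->lru);
                __free_one_page(page, zone, order);
        }
        spin_unlock(&zone->lock);
}

/* single-page convenience wrapper used by __free_pages_ok() */
static void free_one_page(struct zone *zone, struct page *page, int order)
{
        LIST_HEAD(list);
        list_add(&page->lru, &list);
        free_pages_bulk(zone, 1, &list, order);
}

Because free_pages_bulk() no longer reports how many pages it freed, callers keep their own counts in step: free_hot_cold_page() subtracts pcp->batch after a bulk free, while drain_remote_pages() and __drain_pages() simply reset pcp->count to zero.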
mm/page_alloc.c
@@ -308,7 +308,7 @@ static inline int page_is_buddy(struct page *page, int order)
  * -- wli
  */
 
-static inline void __free_pages_bulk (struct page *page,
+static inline void __free_one_page(struct page *page,
 		struct zone *zone, unsigned int order)
 {
 	unsigned long page_idx;
@@ -383,40 +383,42 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static int
-free_pages_bulk(struct zone *zone, int count,
-		struct list_head *list, unsigned int order)
+static void free_pages_bulk(struct zone *zone, int count,
+					struct list_head *list, int order)
 {
-	struct page *page = NULL;
-	int ret = 0;
-
 	spin_lock(&zone->lock);
 	zone->all_unreclaimable = 0;
 	zone->pages_scanned = 0;
-	while (!list_empty(list) && count--) {
+	while (count--) {
+		struct page *page;
+
+		BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_pages_bulk list manipulates */
+		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		__free_pages_bulk(page, zone, order);
-		ret++;
+		__free_one_page(page, zone, order);
 	}
 	spin_unlock(&zone->lock);
-	return ret;
 }
 
-void __free_pages_ok(struct page *page, unsigned int order)
+static void free_one_page(struct zone *zone, struct page *page, int order)
 {
-	unsigned long flags;
 	LIST_HEAD(list);
+	list_add(&page->lru, &list);
+	free_pages_bulk(zone, 1, &list, order);
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
 	int i;
 	int reserved = 0;
 
 	arch_free_page(page, order);
 
 #ifndef CONFIG_MMU
-	if (order > 0)
-		for (i = 1 ; i < (1 << order) ; ++i)
-			__put_page(page + i);
+	for (i = 1 ; i < (1 << order) ; ++i)
+		__put_page(page + i);
 #endif
 
 	for (i = 0 ; i < (1 << order) ; ++i)
@@ -424,11 +426,10 @@ void __free_pages_ok(struct page *page, unsigned int order)
 	if (reserved)
 		return;
 
-	list_add(&page->lru, &list);
-	kernel_map_pages(page, 1<<order, 0);
+	kernel_map_pages(page, 1 << order, 0);
 	local_irq_save(flags);
 	__mod_page_state(pgfree, 1 << order);
-	free_pages_bulk(page_zone(page), 1, &list, order);
+	free_one_page(page_zone(page), page, order);
 	local_irq_restore(flags);
 }
 
@@ -602,9 +603,8 @@ void drain_remote_pages(void)
 			struct per_cpu_pages *pcp;
 
 			pcp = &pset->pcp[i];
-			if (pcp->count)
-				pcp->count -= free_pages_bulk(zone, pcp->count,
-						&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 		}
 	}
 	local_irq_restore(flags);
@@ -627,8 +627,8 @@ static void __drain_pages(unsigned int cpu)
 
 			pcp = &pset->pcp[i];
 			local_irq_save(flags);
-			pcp->count -= free_pages_bulk(zone, pcp->count,
-						&pcp->list, 0);
+			free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+			pcp->count = 0;
 			local_irq_restore(flags);
 		}
 	}
@@ -719,8 +719,10 @@ static void fastcall free_hot_cold_page(struct page *page, int cold)
 	__inc_page_state(pgfree);
 	list_add(&page->lru, &pcp->list);
 	pcp->count++;
-	if (pcp->count >= pcp->high)
-		pcp->count -= free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+	if (pcp->count >= pcp->high) {
+		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		pcp->count -= pcp->batch;
+	}
 	local_irq_restore(flags);
 	put_cpu();
 }
@@ -759,7 +761,7 @@ static struct page *buffered_rmqueue(struct zonelist *zonelist,
 
 again:
 	cpu = get_cpu();
-	if (order == 0) {
+	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
 
 		pcp = &zone_pcp(zone, cpu)->pcp[cold];