Commit a6f9edd6 authored by Mel Gorman, committed by Linus Torvalds

page-allocator: maintain rolling count of pages to free from the PCP

When freeing pages from the PCP lists in a round-robin fashion, empty lists
may be encountered.  When one of the lists has more pages than another,
there may be numerous checks for list_empty(), which is undesirable.  This
patch maintains a rolling count of pages to free which is incremented when
empty lists are encountered.  The intention is that more pages will then be
freed from fuller lists than from the empty ones, reducing the number of
empty-list checks in the free path.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5f8dcc21
@@ -525,32 +525,38 @@ static void free_pcppages_bulk(struct zone *zone, int count,
 					struct per_cpu_pages *pcp)
 {
 	int migratetype = 0;
+	int batch_free = 0;
 
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
 	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
-	while (count--) {
+	while (count) {
 		struct page *page;
 		struct list_head *list;
 
 		/*
-		 * Remove pages from lists in a round-robin fashion. This spinning
-		 * around potentially empty lists is bloody awful, alternatives that
-		 * don't suck are welcome
+		 * Remove pages from lists in a round-robin fashion. A
+		 * batch_free count is maintained that is incremented when an
+		 * empty list is encountered. This is so more pages are freed
+		 * off fuller lists instead of spinning excessively around empty
+		 * lists
 		 */
 		do {
+			batch_free++;
 			if (++migratetype == MIGRATE_PCPTYPES)
 				migratetype = 0;
 			list = &pcp->lists[migratetype];
 		} while (list_empty(list));
 
-		page = list_entry(list->prev, struct page, lru);
-		/* have to delete it as __free_one_page list manipulates */
-		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, 0, migratetype);
-		__free_one_page(page, zone, 0, migratetype);
+		do {
+			page = list_entry(list->prev, struct page, lru);
+			/* must delete as __free_one_page list manipulates */
+			list_del(&page->lru);
+			__free_one_page(page, zone, 0, migratetype);
+			trace_mm_page_pcpu_drain(page, 0, migratetype);
+		} while (--count && --batch_free && !list_empty(list));
 	}
 	spin_unlock(&zone->lock);
 }
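For illustration, the batching scheme above can be sketched in userspace. This is a minimal, self-contained example, not the kernel code: struct page, the list helpers and drain_bulk() below are simplified stand-ins for the kernel's struct list_head, pcp->lists[] and free_pcppages_bulk(). Like the kernel function, it assumes the caller never asks for more pages than the lists hold.

/*
 * Minimal userspace sketch of the rolling batch_free scheme.  The
 * list helpers and drain_bulk() are simplified stand-ins for the
 * kernel's struct list_head and free_pcppages_bulk(), for
 * illustration only.
 */
#include <stdio.h>

#define NR_LISTS 3	/* stands in for MIGRATE_PCPTYPES */

struct page {
	int id;
	struct page *prev, *next;	/* circular list with sentinel head */
};

static void list_init(struct page *head)
{
	head->prev = head->next = head;
}

static int list_empty(const struct page *head)
{
	return head->next == head;
}

static void list_add(struct page *head, struct page *p)
{
	p->next = head->next;
	p->prev = head;
	head->next->prev = p;
	head->next = p;
}

static void list_del(struct page *p)
{
	p->prev->next = p->next;
	p->next->prev = p->prev;
}

/*
 * Free "count" pages round-robin across the lists.  batch_free grows
 * by one for every list visited, empty or not, so after skipping N
 * empty lists the next non-empty list donates up to N+1 pages before
 * the round-robin moves on.  Like the kernel code, this assumes the
 * lists together hold at least "count" pages.
 */
static void drain_bulk(struct page lists[NR_LISTS], int count)
{
	int migratetype = 0;
	int batch_free = 0;

	while (count) {
		struct page *list;

		/* Advance to the next non-empty list, counting visits. */
		do {
			batch_free++;
			if (++migratetype == NR_LISTS)
				migratetype = 0;
			list = &lists[migratetype];
		} while (list_empty(list));

		/* Free up to batch_free pages from the tail of this list. */
		do {
			struct page *page = list->prev;

			list_del(page);
			printf("freed page %d from list %d\n",
			       page->id, migratetype);
		} while (--count && --batch_free && !list_empty(list));
	}
}

int main(void)
{
	struct page lists[NR_LISTS];
	struct page pages[8];
	int i;

	for (i = 0; i < NR_LISTS; i++)
		list_init(&lists[i]);

	/* Uneven fill: list 0 gets six pages, list 2 two, list 1 none. */
	for (i = 0; i < 8; i++) {
		pages[i].id = i;
		list_add(&lists[i < 6 ? 0 : 2], &pages[i]);
	}

	drain_bulk(lists, 8);
	return 0;
}

Running this with list 1 permanently empty shows batch_free climbing past 1 on each pass, so the fuller lists donate several pages per round-robin visit instead of one, which is exactly the empty-list-check reduction the patch aims for.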