Commit 5f8dcc21 authored by Mel Gorman, committed by Linus Torvalds

page-allocator: split per-cpu list into one-list-per-migrate-type

The following two patches remove searching in the page allocator fast-path
by maintaining multiple free-lists in the per-cpu structure.  At the time
the search was introduced, increasing the size of the per-cpu structures
would have wasted a lot of memory as per-cpu structures were statically
allocated at compile-time.  This is no longer the case.

The patches are as follows. They are based on mmotm-2009-08-27.

Patch 1 adds multiple lists to struct per_cpu_pages, one per
	migratetype that can be stored on the PCP lists (a simplified
	sketch of the change follows this list).

Patch 2 notes that the pcpu drain path checks empty lists multiple times.
	The patch reduces the number of checks by maintaining a count of
	free lists encountered.  Lists containing pages will then free
	multiple pages in batch.
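
A simplified before/after sketch of the change from Patch 1 (the full
definitions are in the include/linux/mmzone.h hunk in the diff below):

	/* Before: a single mixed list per cpu */
	struct per_cpu_pages {
		int count;			/* number of pages in the list */
		int high;			/* high watermark, emptying needed */
		int batch;			/* chunk size for buddy add/remove */
		struct list_head list;		/* the list of pages */
	};

	/* After: one list per migrate type kept on the pcp lists */
	struct per_cpu_pages {
		int count;
		int high;
		int batch;
		struct list_head lists[MIGRATE_PCPTYPES];
	};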

The patches were tested with kernbench, netperf udp/tcp, hackbench and
sysbench.  The netperf tests were not bound to any CPU in particular and
were run enough times that, with 99% confidence, the reported results are
within 1% of the estimated mean.  sysbench was run with a postgres
background and read-only tests.  Similar to netperf, it was run multiple
times so that, with 99% confidence, its results are within 1%.  The
patches were tested on x86, x86-64 and ppc64 as follows:

x86:	Intel Pentium D 3GHz with 8G RAM (no-brand machine)
	kernbench	- No significant difference, variance well within noise
	netperf-udp	- 1.34% to 2.28% gain
	netperf-tcp	- 0.45% to 1.22% gain
	hackbench	- Small variances, very close to noise
	sysbench	- Very small gains

x86-64:	AMD Phenom 9950 1.3GHz with 8G RAM (no-brand machine)
	kernbench	- No significant difference, variance well within noise
	netperf-udp	- 1.83% to 10.42% gains
	netperf-tcp	- Not conclusive until buffer >= PAGE_SIZE
				4096	+15.83%
				8192	+ 0.34% (not significant)
				16384	+ 1%
	hackbench	- Small gains, very close to noise
	sysbench	- 0.79% to 1.6% gain

ppc64:	PPC970MP 2.5GHz with 10GB RAM (it's a terrasoft powerstation)
	kernbench	- No significant difference, variance well within noise
	netperf-udp	- 2-3% gain for almost all buffer sizes tested
	netperf-tcp	- losses on small buffers, gains on larger buffers,
			  possibly indicating some bad caching effect.
	hackbench	- No significant difference
	sysbench	- 2-4% gain

This patch:

Currently the per-cpu page allocator searches the PCP list for pages of
the correct migrate-type to reduce the possibility of pages being
inappropriately placed from a fragmentation perspective.  This search is
potentially expensive in a fast-path and undesirable.  Splitting the
per-cpu list into multiple lists increases the size of a per-cpu structure
and this was potentially a major problem at the time the search was
introduced.  This problem has been mitigated as only the necessary number
of structures is now allocated for the running system.

This patch replaces a list search in the per-cpu allocator with one list
per migrate type.  The potential snag with this approach arises when bulk
freeing pages: pages are freed in round-robin order over the migrate
types, which has little bearing on the cache hotness of the pages, and
empty lists may be checked repeatedly in the event the majority of PCP
pages are of one type.
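
The round-robin freeing described above can be sketched roughly as follows
(a simplified excerpt of the loop that ends up in free_pcppages_bulk() in
the diff below; 'pcp', 'zone' and 'count' are assumed to be set up by the
caller as in the kernel function, and 'count' is assumed not to exceed the
number of pages on the lists, otherwise the inner loop would spin forever):

	int migratetype = 0;

	while (count--) {
		struct page *page;
		struct list_head *list;

		/* advance to the next non-empty pcp list, wrapping around */
		do {
			if (++migratetype == MIGRATE_PCPTYPES)
				migratetype = 0;
			list = &pcp->lists[migratetype];
		} while (list_empty(list));

		/* take the page from the tail (cold end) of the selected list */
		page = list_entry(list->prev, struct page, lru);
		list_del(&page->lru);
		__free_one_page(page, zone, 0, migratetype);
	}
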
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5d863b89
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -38,6 +38,7 @@
 #define MIGRATE_UNMOVABLE     0
 #define MIGRATE_RECLAIMABLE   1
 #define MIGRATE_MOVABLE       2
+#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
 #define MIGRATE_RESERVE       3
 #define MIGRATE_ISOLATE       4 /* can't allocate from here */
 #define MIGRATE_TYPES         5
@@ -169,7 +170,9 @@ struct per_cpu_pages {
 	int count;		/* number of pages in the list */
 	int high;		/* high watermark, emptying needed */
 	int batch;		/* chunk size for buddy add/remove */
-	struct list_head list;	/* the list of pages */
+
+	/* Lists of pages, one per migrate type stored on the pcp-lists */
+	struct list_head lists[MIGRATE_PCPTYPES];
 };
 
 struct per_cpu_pageset {
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -511,7 +511,7 @@ static inline int free_pages_check(struct page *page)
 }
 
 /*
- * Frees a list of pages.
+ * Frees a number of pages from the PCP lists
  * Assumes all pages on list are in same zone, and of same order.
  * count is the number of pages to free.
  *
@@ -521,23 +521,36 @@ static inline int free_pages_check(struct page *page)
  * And clear the zone's pages_scanned counter, to hold off the "all pages are
  * pinned" detection logic.
  */
-static void free_pages_bulk(struct zone *zone, int count,
-					struct list_head *list, int order)
+static void free_pcppages_bulk(struct zone *zone, int count,
+					struct per_cpu_pages *pcp)
 {
+	int migratetype = 0;
+
 	spin_lock(&zone->lock);
 	zone_clear_flag(zone, ZONE_ALL_UNRECLAIMABLE);
 	zone->pages_scanned = 0;
 
-	__mod_zone_page_state(zone, NR_FREE_PAGES, count << order);
+	__mod_zone_page_state(zone, NR_FREE_PAGES, count);
 	while (count--) {
 		struct page *page;
+		struct list_head *list;
+
+		/*
+		 * Remove pages from lists in a round-robin fashion. This spinning
+		 * around potentially empty lists is bloody awful, alternatives that
+		 * don't suck are welcome
+		 */
+		do {
+			if (++migratetype == MIGRATE_PCPTYPES)
+				migratetype = 0;
+			list = &pcp->lists[migratetype];
+		} while (list_empty(list));
 
-		VM_BUG_ON(list_empty(list));
 		page = list_entry(list->prev, struct page, lru);
 		/* have to delete it as __free_one_page list manipulates */
 		list_del(&page->lru);
-		trace_mm_page_pcpu_drain(page, order, page_private(page));
-		__free_one_page(page, zone, order, page_private(page));
+		trace_mm_page_pcpu_drain(page, 0, migratetype);
+		__free_one_page(page, zone, 0, migratetype);
 	}
 	spin_unlock(&zone->lock);
 }
@@ -953,7 +966,7 @@ void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp)
 		to_drain = pcp->batch;
 	else
 		to_drain = pcp->count;
-	free_pages_bulk(zone, to_drain, &pcp->list, 0);
+	free_pcppages_bulk(zone, to_drain, pcp);
 	pcp->count -= to_drain;
 	local_irq_restore(flags);
 }
@@ -979,7 +992,7 @@ static void drain_pages(unsigned int cpu)
 		pcp = &pset->pcp;
 		local_irq_save(flags);
-		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->count, pcp);
 		pcp->count = 0;
 		local_irq_restore(flags);
 	}
@@ -1045,6 +1058,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	struct zone *zone = page_zone(page);
 	struct per_cpu_pages *pcp;
 	unsigned long flags;
+	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
 	kmemcheck_free_shadow(page, 0);
@@ -1062,21 +1076,39 @@ static void free_hot_cold_page(struct page *page, int cold)
 	kernel_map_pages(page, 1, 0);
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
-	set_page_private(page, get_pageblock_migratetype(page));
+	migratetype = get_pageblock_migratetype(page);
+	set_page_private(page, migratetype);
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
 	__count_vm_event(PGFREE);
 
+	/*
+	 * We only track unmovable, reclaimable and movable on pcp lists.
+	 * Free ISOLATE pages back to the allocator because they are being
+	 * offlined but treat RESERVE as movable pages so we can get those
+	 * areas back if necessary. Otherwise, we may have to free
+	 * excessively into the page allocator
+	 */
+	if (migratetype >= MIGRATE_PCPTYPES) {
+		if (unlikely(migratetype == MIGRATE_ISOLATE)) {
+			free_one_page(zone, page, 0, migratetype);
+			goto out;
+		}
+		migratetype = MIGRATE_MOVABLE;
+	}
+
 	if (cold)
-		list_add_tail(&page->lru, &pcp->list);
+		list_add_tail(&page->lru, &pcp->lists[migratetype]);
 	else
-		list_add(&page->lru, &pcp->list);
+		list_add(&page->lru, &pcp->lists[migratetype]);
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
-		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->batch, pcp);
 		pcp->count -= pcp->batch;
 	}
+
+out:
 	local_irq_restore(flags);
 	put_cpu();
 }
@@ -1134,46 +1166,24 @@ struct page *buffered_rmqueue(struct zone *preferred_zone,
 	cpu  = get_cpu();
 	if (likely(order == 0)) {
 		struct per_cpu_pages *pcp;
+		struct list_head *list;
 
 		pcp = &zone_pcp(zone, cpu)->pcp;
+		list = &pcp->lists[migratetype];
 		local_irq_save(flags);
-		if (!pcp->count) {
-			pcp->count = rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list,
-					migratetype, cold);
-			if (unlikely(!pcp->count))
-				goto failed;
-		}
-
-		/* Find a page of the appropriate migrate type */
-		if (cold) {
-			list_for_each_entry_reverse(page, &pcp->list, lru)
-				if (page_private(page) == migratetype)
-					break;
-		} else {
-			list_for_each_entry(page, &pcp->list, lru)
-				if (page_private(page) == migratetype)
-					break;
-		}
-
-		/* Allocate more to the pcp list if necessary */
-		if (unlikely(&page->lru == &pcp->list)) {
-			int get_one_page = 0;
-
+		if (list_empty(list)) {
 			pcp->count += rmqueue_bulk(zone, 0,
-					pcp->batch, &pcp->list,
+					pcp->batch, list,
 					migratetype, cold);
-			list_for_each_entry(page, &pcp->list, lru) {
-				if (get_pageblock_migratetype(page) !=
-					    MIGRATE_ISOLATE) {
-					get_one_page = 1;
-					break;
-				}
-			}
-			if (!get_one_page)
+			if (unlikely(list_empty(list)))
 				goto failed;
 		}
 
+		if (cold)
+			page = list_entry(list->prev, struct page, lru);
+		else
+			page = list_entry(list->next, struct page, lru);
+
 		list_del(&page->lru);
 		pcp->count--;
 	} else {
@@ -3024,6 +3034,7 @@ static int zone_batchsize(struct zone *zone)
 static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
+	int migratetype;
 
 	memset(p, 0, sizeof(*p));
 
@@ -3031,7 +3042,8 @@ static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 	pcp->count = 0;
 	pcp->high = 6 * batch;
 	pcp->batch = max(1UL, 1 * batch);
-	INIT_LIST_HEAD(&pcp->list);
+	for (migratetype = 0; migratetype < MIGRATE_PCPTYPES; migratetype++)
+		INIT_LIST_HEAD(&pcp->lists[migratetype]);
 }
 
 /*
@@ -3223,7 +3235,7 @@ static int __zone_pcp_update(void *data)
 		pcp = &pset->pcp;
 		local_irq_save(flags);
-		free_pages_bulk(zone, pcp->count, &pcp->list, 0);
+		free_pcppages_bulk(zone, pcp->count, pcp);
 		setup_pageset(pset, batch);
 		local_irq_restore(flags);
 	}