Commit e2c55dc8 authored by Mel Gorman, committed by Linus Torvalds

Drain per-cpu lists when high-order allocations fail

Per-cpu pages can accidentally cause fragmentation because they are free but
pinned pages in an otherwise contiguous block.  When this patch is applied,
the per-cpu caches are drained after direct reclaim is entered if the
requested order is greater than 0.  It simply reuses the code used by suspend
and hotplug.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent b92a6edd
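
To make the changelog concrete, the sketch below is a minimal user-space model
(plain C, not kernel code; NR_CPUS_MODEL, PCP_BATCH, pcp_cache and the *_model
functions are invented names for illustration). It shows the effect the patch
relies on: pages sitting in per-CPU caches are free but unavailable to the
buddy allocator, and draining them back into the shared pool is what gives a
high-order request a chance to succeed. The model reduces contiguity to a
simple page count; the real allocator additionally needs the returned pages to
coalesce into a physically contiguous block.

#include <stdio.h>

#define NR_CPUS_MODEL	4
#define PCP_BATCH	8

/* Pages each simulated CPU currently holds in its per-cpu cache. */
static int pcp_cache[NR_CPUS_MODEL];

/* Free pages available to the shared "buddy" pool. */
static int buddy_free_pages;

/* Rough analogue of drain_local_pages(): return one CPU's cached pages. */
static void drain_local_pages_model(int cpu)
{
	buddy_free_pages += pcp_cache[cpu];
	pcp_cache[cpu] = 0;
}

/*
 * Rough analogue of drain_all_local_pages(): the kernel drains the local
 * CPU directly and reaches the other CPUs via smp_call_function(); this
 * model simply loops over every simulated CPU.
 */
static void drain_all_local_pages_model(void)
{
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		drain_local_pages_model(cpu);
}

int main(void)
{
	int want = 16;	/* an order-4 request needs 16 contiguous pages */

	/* Every CPU holds a full batch; the shared pool itself is short. */
	for (int cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
		pcp_cache[cpu] = PCP_BATCH;
	buddy_free_pages = 3;

	printf("before drain: %d free pages in the pool, need %d -> %s\n",
	       buddy_free_pages, want,
	       buddy_free_pages >= want ? "worth retrying" : "fail");

	/* The patch triggers this after direct reclaim when order != 0. */
	drain_all_local_pages_model();

	printf("after drain:  %d free pages in the pool, need %d -> %s\n",
	       buddy_free_pages, want,
	       buddy_free_pages >= want ? "worth retrying" : "fail");

	return 0;
}

In the actual diff below, the cross-CPU drain uses the four-argument
smp_call_function() of that era with wait set to 1, so the allocation retry
does not proceed until the remote drains have completed.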
mm/page_alloc.c
@@ -876,7 +876,9 @@ void mark_free_pages(struct zone *zone)
 	}
 	spin_unlock_irqrestore(&zone->lock, flags);
 }
+#endif /* CONFIG_PM */
 
+#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -888,7 +890,28 @@ void drain_local_pages(void)
 	__drain_pages(smp_processor_id());
 	local_irq_restore(flags);
 }
-#endif /* CONFIG_HIBERNATION */
+
+void smp_drain_local_pages(void *arg)
+{
+	drain_local_pages();
+}
+
+/*
+ * Spill all the per-cpu pages from all CPUs back into the buddy allocator
+ */
+void drain_all_local_pages(void)
+{
+	unsigned long flags;
+
+	local_irq_save(flags);
+	__drain_pages(smp_processor_id());
+	local_irq_restore(flags);
+
+	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
+}
+#else
+void drain_all_local_pages(void) {}
+#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1480,6 +1503,9 @@ __alloc_pages(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
+	if (order != 0)
+		drain_all_local_pages();
+
 	if (likely(did_some_progress)) {
 		page = get_page_from_freelist(gfp_mask, order,
 						zonelist, alloc_flags);