Commit e9ade569 authored by Vlastimil Babka, committed by Linus Torvalds

mm/compaction: avoid rescanning pageblocks in isolate_freepages

The compaction free scanner in isolate_freepages() currently remembers the PFN
of the highest pageblock where it successfully isolated pages, to be used as
the starting pageblock for the next invocation.  The rationale behind this is
that page migration might return free pages to the allocator when migration
fails, and we don't want to skip them if compaction continues.

Since migration now returns free pages back to the compaction code where they
can be reused, this is no longer a concern.  This patch changes
isolate_freepages() so that the restart PFN is updated with each pageblock
where isolation is attempted.  Using stress-highalloc from mmtests, this
resulted in a 10% reduction in the pages scanned by the free scanner.

Note that the somewhat similar functionality that records the highest
successful pageblock in zone->compact_cached_free_pfn remains unchanged.
That cache is used when the whole compaction is restarted, not for
multiple invocations of the free scanner during a single compaction.
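
For illustration, the standalone C sketch below condenses the reworked free
scanner loop.  It is not the actual mm/compaction.c code: struct
compact_control, ALIGN(), isolate_freepages_block() and PAGEBLOCK_NR_PAGES are
simplified stand-ins for the real kernel definitions.  The point it shows is
that cc->free_pfn is now updated before every isolation attempt, so a later
invocation never rescans pageblocks that were already visited.

/*
 * Minimal sketch of the reworked free scanner loop (illustration only;
 * the types and helpers are simplified stand-ins, not kernel code).
 */
#include <stdbool.h>

#define PAGEBLOCK_NR_PAGES	512UL	/* stand-in for pageblock_nr_pages */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))

struct compact_control {
	unsigned long free_pfn;		/* restart pfn for the free scanner */
	unsigned long migrate_pfn;	/* position of the migration scanner */
	unsigned long nr_migratepages;	/* pages waiting to be migrated */
	bool finished_update_free;
};

/* Stand-in: pretend some pageblocks yield a handful of free pages. */
static unsigned long isolate_freepages_block(struct compact_control *cc,
					     unsigned long block_start_pfn)
{
	(void)cc;
	return (block_start_pfn / PAGEBLOCK_NR_PAGES) % 2 ? 4 : 0;
}

static void isolate_freepages_sketch(struct compact_control *cc)
{
	unsigned long block_start_pfn = cc->free_pfn & ~(PAGEBLOCK_NR_PAGES - 1);
	unsigned long low_pfn = ALIGN(cc->migrate_pfn + 1, PAGEBLOCK_NR_PAGES);
	unsigned long nr_freepages = 0;

	for (; block_start_pfn >= low_pfn && nr_freepages < cc->nr_migratepages;
	     block_start_pfn -= PAGEBLOCK_NR_PAGES) {
		unsigned long isolated;

		/*
		 * Record the restart point before attempting isolation, so
		 * the next invocation resumes below this pageblock whether
		 * or not any pages were isolated here.
		 */
		cc->free_pfn = block_start_pfn;

		isolated = isolate_freepages_block(cc, block_start_pfn);
		nr_freepages += isolated;
		if (isolated)
			cc->finished_update_free = true;
	}

	/* The scanners met: let compact_finished() detect it. */
	if (block_start_pfn < low_pfn)
		cc->free_pfn = cc->migrate_pfn;
}

Contrast this with the previous behaviour, where the restart PFN was only
advanced past pageblocks that actually yielded pages, so blocks where
isolation found nothing were rescanned on the next call.
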
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f8c9301f
...@@ -688,7 +688,6 @@ static void isolate_freepages(struct zone *zone, ...@@ -688,7 +688,6 @@ static void isolate_freepages(struct zone *zone,
unsigned long block_start_pfn; /* start of current pageblock */ unsigned long block_start_pfn; /* start of current pageblock */
unsigned long block_end_pfn; /* end of current pageblock */ unsigned long block_end_pfn; /* end of current pageblock */
unsigned long low_pfn; /* lowest pfn scanner is able to scan */ unsigned long low_pfn; /* lowest pfn scanner is able to scan */
unsigned long next_free_pfn; /* start pfn for scaning at next round */
int nr_freepages = cc->nr_freepages; int nr_freepages = cc->nr_freepages;
struct list_head *freelist = &cc->freepages; struct list_head *freelist = &cc->freepages;
...@@ -708,12 +707,6 @@ static void isolate_freepages(struct zone *zone, ...@@ -708,12 +707,6 @@ static void isolate_freepages(struct zone *zone,
zone_end_pfn(zone)); zone_end_pfn(zone));
low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages); low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
/*
* If no pages are isolated, the block_start_pfn < low_pfn check
* will kick in.
*/
next_free_pfn = 0;
/* /*
* Isolate free pages until enough are available to migrate the * Isolate free pages until enough are available to migrate the
* pages on cc->migratepages. We stop searching if the migrate * pages on cc->migratepages. We stop searching if the migrate
...@@ -754,19 +747,19 @@ static void isolate_freepages(struct zone *zone, ...@@ -754,19 +747,19 @@ static void isolate_freepages(struct zone *zone,
continue; continue;
/* Found a block suitable for isolating free pages from */ /* Found a block suitable for isolating free pages from */
cc->free_pfn = block_start_pfn;
isolated = isolate_freepages_block(cc, block_start_pfn, isolated = isolate_freepages_block(cc, block_start_pfn,
block_end_pfn, freelist, false); block_end_pfn, freelist, false);
nr_freepages += isolated; nr_freepages += isolated;
/* /*
* Record the highest PFN we isolated pages from. When next * Set a flag that we successfully isolated in this pageblock.
* looking for free pages, the search will restart here as * In the next loop iteration, zone->compact_cached_free_pfn
* page migration may have returned some pages to the allocator * will not be updated and thus it will effectively contain the
* highest pageblock we isolated pages from.
*/ */
if (isolated && next_free_pfn == 0) { if (isolated)
cc->finished_update_free = true; cc->finished_update_free = true;
next_free_pfn = block_start_pfn;
}
} }
/* split_free_page does not map the pages */ /* split_free_page does not map the pages */
...@@ -777,9 +770,8 @@ static void isolate_freepages(struct zone *zone, ...@@ -777,9 +770,8 @@ static void isolate_freepages(struct zone *zone,
* so that compact_finished() may detect this * so that compact_finished() may detect this
*/ */
if (block_start_pfn < low_pfn) if (block_start_pfn < low_pfn)
next_free_pfn = cc->migrate_pfn; cc->free_pfn = cc->migrate_pfn;
cc->free_pfn = next_free_pfn;
cc->nr_freepages = nr_freepages; cc->nr_freepages = nr_freepages;
} }
......