Commit d097a6f6 authored by Mel Gorman, committed by Linus Torvalds

mm, compaction: reduce premature advancement of the migration target scanner

The fast isolation of free pages allows the cached PFN of the free
scanner to advance faster than necessary depending on the contents of
the free list.  The key is that fast_isolate_freepages() can update
zone->compact_cached_free_pfn via isolate_freepages_block().  When the
fast search fails, the linear scan can start from a point that has
skipped valid migration targets, particularly pageblocks with just
low-order free pages.  This can cause the migration source/target
scanners to meet prematurely causing a reset.

This patch starts by avoiding an update of the pageblock skip
information and cached PFN from isolate_freepages_block() and puts the
responsibility of updating that information in the callers.  The fast
scanner will update the cached PFN if and only if it finds a block that
is higher than the existing cached PFN and sets the skip if the
pageblock is full or nearly full.  The linear scanner will update
skipped information and the cached PFN only when a block is completely
scanned.  The total impact is that the free scanner advances more slowly
as it is primarily driven by the linear scanner instead of the fast
search.

                                     5.0.0-rc1              5.0.0-rc1
                               noresched-v3r17         slowfree-v3r17
Amean     fault-both-3      2965.68 (   0.00%)     3036.75 (  -2.40%)
Amean     fault-both-5      3995.90 (   0.00%)     4522.24 * -13.17%*
Amean     fault-both-7      5842.12 (   0.00%)     6365.35 (  -8.96%)
Amean     fault-both-12     9550.87 (   0.00%)    10340.93 (  -8.27%)
Amean     fault-both-18    13304.72 (   0.00%)    14732.46 ( -10.73%)
Amean     fault-both-24    14618.59 (   0.00%)    16288.96 ( -11.43%)
Amean     fault-both-30    16650.96 (   0.00%)    16346.21 (   1.83%)
Amean     fault-both-32    17145.15 (   0.00%)    19317.49 ( -12.67%)

The impact to latency is higher than the last version but it appears to
be due to a slight increase in the free scan rates which is a potential
side-effect of the patch.  However, this is necessary for later patches
that are more careful about how pageblocks are treated as earlier
iterations of those patches hit corner cases where the restarts were
punishing and very visible.

Link: http://lkml.kernel.org/r/20190118175136.31341-19-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cf66f070
...@@ -330,10 +330,9 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn) ...@@ -330,10 +330,9 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
* future. The information is later cleared by __reset_isolation_suitable(). * future. The information is later cleared by __reset_isolation_suitable().
*/ */
static void update_pageblock_skip(struct compact_control *cc, static void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated) struct page *page, unsigned long pfn)
{ {
struct zone *zone = cc->zone; struct zone *zone = cc->zone;
unsigned long pfn;
if (cc->no_set_skip_hint) if (cc->no_set_skip_hint)
return; return;
...@@ -341,13 +340,8 @@ static void update_pageblock_skip(struct compact_control *cc, ...@@ -341,13 +340,8 @@ static void update_pageblock_skip(struct compact_control *cc,
if (!page) if (!page)
return; return;
if (nr_isolated)
return;
set_pageblock_skip(page); set_pageblock_skip(page);
pfn = page_to_pfn(page);
/* Update where async and sync compaction should restart */ /* Update where async and sync compaction should restart */
if (pfn < zone->compact_cached_free_pfn) if (pfn < zone->compact_cached_free_pfn)
zone->compact_cached_free_pfn = pfn; zone->compact_cached_free_pfn = pfn;
...@@ -365,7 +359,7 @@ static inline bool pageblock_skip_persistent(struct page *page) ...@@ -365,7 +359,7 @@ static inline bool pageblock_skip_persistent(struct page *page)
} }
static inline void update_pageblock_skip(struct compact_control *cc, static inline void update_pageblock_skip(struct compact_control *cc,
struct page *page, unsigned long nr_isolated) struct page *page, unsigned long pfn)
{ {
} }
...@@ -449,7 +443,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -449,7 +443,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
bool strict) bool strict)
{ {
int nr_scanned = 0, total_isolated = 0; int nr_scanned = 0, total_isolated = 0;
struct page *cursor, *valid_page = NULL; struct page *cursor;
unsigned long flags = 0; unsigned long flags = 0;
bool locked = false; bool locked = false;
unsigned long blockpfn = *start_pfn; unsigned long blockpfn = *start_pfn;
...@@ -476,9 +470,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -476,9 +470,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (!pfn_valid_within(blockpfn)) if (!pfn_valid_within(blockpfn))
goto isolate_fail; goto isolate_fail;
if (!valid_page)
valid_page = page;
/* /*
* For compound pages such as THP and hugetlbfs, we can save * For compound pages such as THP and hugetlbfs, we can save
* potentially a lot of iterations if we skip them at once. * potentially a lot of iterations if we skip them at once.
...@@ -566,10 +557,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc, ...@@ -566,10 +557,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
if (strict && blockpfn < end_pfn) if (strict && blockpfn < end_pfn)
total_isolated = 0; total_isolated = 0;
/* Update the pageblock-skip if the whole pageblock was scanned */
if (blockpfn == end_pfn)
update_pageblock_skip(cc, valid_page, total_isolated);
cc->total_free_scanned += nr_scanned; cc->total_free_scanned += nr_scanned;
if (total_isolated) if (total_isolated)
count_compact_events(COMPACTISOLATED, total_isolated); count_compact_events(COMPACTISOLATED, total_isolated);
...@@ -1293,8 +1280,10 @@ fast_isolate_freepages(struct compact_control *cc) ...@@ -1293,8 +1280,10 @@ fast_isolate_freepages(struct compact_control *cc)
} }
} }
if (highest && highest > cc->zone->compact_cached_free_pfn) if (highest && highest >= cc->zone->compact_cached_free_pfn) {
highest -= pageblock_nr_pages;
cc->zone->compact_cached_free_pfn = highest; cc->zone->compact_cached_free_pfn = highest;
}
cc->total_free_scanned += nr_scanned; cc->total_free_scanned += nr_scanned;
if (!page) if (!page)
...@@ -1374,6 +1363,10 @@ static void isolate_freepages(struct compact_control *cc) ...@@ -1374,6 +1363,10 @@ static void isolate_freepages(struct compact_control *cc)
isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn, isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
freelist, false); freelist, false);
/* Update the skip hint if the full pageblock was scanned */
if (isolate_start_pfn == block_end_pfn)
update_pageblock_skip(cc, page, block_start_pfn);
/* Are enough freepages isolated? */ /* Are enough freepages isolated? */
if (cc->nr_freepages >= cc->nr_migratepages) { if (cc->nr_freepages >= cc->nr_migratepages) {
if (isolate_start_pfn >= block_end_pfn) { if (isolate_start_pfn >= block_end_pfn) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment