Commit 9861a62c authored by Vlastimil Babka, committed by Linus Torvalds

mm, compaction: create compact_gap wrapper

Compaction uses a watermark gap of (2UL << order) pages at various
places and it's not immediately obvious why.  Abstract it through a
compact_gap() wrapper to create a single place with a thorough
explanation.
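
For concreteness, a minimal user-space sketch (illustration only, not part
of the kernel change) of what the wrapper computes; the page counts assume
4K base pages:

	#include <stdio.h>

	/* Same formula the patch introduces in include/linux/compaction.h. */
	static inline unsigned long compact_gap(unsigned int order)
	{
		return 2UL << order;
	}

	int main(void)
	{
		/* e.g. order 9 (a 2MB THP with 4K pages) needs a gap of 1024 pages */
		for (unsigned int order = 0; order <= 9; order++)
			printf("order %2u: gap = %4lu pages\n",
			       order, compact_gap(order));
		return 0;
	}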

[vbabka@suse.cz: clarify the comment of compact_gap()]
 Link: http://lkml.kernel.org/r/7b6aed1f-fdf8-2063-9ff4-bbe4de712d37@suse.cz
Link: http://lkml.kernel.org/r/20160810091226.6709-9-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Tested-by: Lorenzo Stoakes <lstoakes@gmail.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent f2b8228c
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -58,6 +58,29 @@ enum compact_result {
 
 struct alloc_context; /* in mm/internal.h */
 
+/*
+ * Number of free order-0 pages that should be available above given watermark
+ * to make sure compaction has reasonable chance of not running out of free
+ * pages that it needs to isolate as migration target during its work.
+ */
+static inline unsigned long compact_gap(unsigned int order)
+{
+	/*
+	 * Although all the isolations for migration are temporary, compaction
+	 * free scanner may have up to 1 << order pages on its list and then
+	 * try to split an (order - 1) free page. At that point, a gap of
+	 * 1 << order might not be enough, so it's safer to require twice that
+	 * amount. Note that the number of pages on the list is also
+	 * effectively limited by COMPACT_CLUSTER_MAX, as that's the maximum
+	 * that the migrate scanner can have isolated on migrate list, and free
+	 * scanner is only invoked when the number of isolated free pages is
+	 * lower than that. But it's not worth to complicate the formula here
+	 * as a bigger gap for higher orders than strictly necessary can also
+	 * improve chances of compaction success.
+	 */
+	return 2UL << order;
+}
+
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1391,11 +1391,10 @@ static enum compact_result __compaction_suitable(struct zone *zone, int order,
 		return COMPACT_SUCCESS;
 
 	/*
-	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
-	 * This is because during migration, copies of pages need to be
-	 * allocated and for a short time, the footprint is higher
+	 * Watermarks for order-0 must be met for compaction to be able to
+	 * isolate free pages for migration targets.
 	 */
-	watermark = low_wmark_pages(zone) + (2UL << order);
+	watermark = low_wmark_pages(zone) + compact_gap(order);
 	if (!__zone_watermark_ok(zone, 0, watermark, classzone_idx,
 						alloc_flags, wmark_target))
 		return COMPACT_SKIPPED;
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2480,7 +2480,7 @@ static inline bool should_continue_reclaim(struct pglist_data *pgdat,
 	 * If we have not reclaimed enough pages for compaction and the
 	 * inactive lists are large enough, continue reclaiming
 	 */
-	pages_for_compaction = (2UL << sc->order);
+	pages_for_compaction = compact_gap(sc->order);
 	inactive_lru_pages = node_page_state(pgdat, NR_INACTIVE_FILE);
 	if (get_nr_swap_pages() > 0)
 		inactive_lru_pages += node_page_state(pgdat, NR_INACTIVE_ANON);
@@ -2612,7 +2612,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * there is a buffer of free pages available to give compaction
 	 * a reasonable chance of completing and allocating the page
 	 */
-	watermark = high_wmark_pages(zone) + (2UL << sc->order);
+	watermark = high_wmark_pages(zone) + compact_gap(sc->order);
 	watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx);
 
 	/*
@@ -3169,7 +3169,7 @@ static bool kswapd_shrink_node(pg_data_t *pgdat,
 	 * excessive reclaim. Assume that a process requested a high-order
 	 * can direct reclaim/compact.
 	 */
-	if (sc->order && sc->nr_reclaimed >= 2UL << sc->order)
+	if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order))
 		sc->order = 0;
 
 	return sc->nr_scanned >= sc->nr_to_reclaim;
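
The bound described in the compact_gap() comment can be sanity-checked in
user space. A minimal sketch (not part of the patch; the MAX_ORDER value is
assumed) comparing the worst case the comment describes -- 1 << order pages
already on the free list plus one extra (order - 1) block isolated for
splitting -- against the 2 << order gap:

	#include <assert.h>
	#include <stdio.h>

	#define MAX_ORDER 11	/* assumed; the common kernel default */

	static inline unsigned long compact_gap(unsigned int order)
	{
		return 2UL << order;
	}

	int main(void)
	{
		for (unsigned int order = 1; order < MAX_ORDER; order++) {
			/* full free list (1 << order) plus one (order - 1) block */
			unsigned long worst = (1UL << order) + (1UL << (order - 1));

			assert(worst <= compact_gap(order));
			printf("order %2u: worst case %4lu <= gap %4lu\n",
			       order, worst, compact_gap(order));
		}
		return 0;
	}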