Commit de6c60a6 authored by Vlastimil Babka, committed by Linus Torvalds

mm: compaction: encapsulate defer reset logic

Currently there are several functions to manipulate the deferred
compaction state variables.  The remaining case where the variables are
touched directly is when a successful allocation occurs in direct
compaction, or is expected to be successful in the future by kswapd.
Here, the lowest order that is expected to fail is updated, and in the
case of successful allocation, the deferred status and counter is reset
completely.

Create a new function compaction_defer_reset() to encapsulate this
functionality and make it easier to understand the code.  No functional
change.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@suse.de>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0eb927c0
...@@ -62,6 +62,22 @@ static inline bool compaction_deferred(struct zone *zone, int order) ...@@ -62,6 +62,22 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return zone->compact_considered < defer_limit; return zone->compact_considered < defer_limit;
} }
/*
 * Note that compaction at this order worked: either an allocation just
 * succeeded (alloc_success == true), or kswapd expects one to succeed.
 * Raise the recorded failure threshold accordingly, and on a real
 * success wipe the defer state entirely.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	/* The next order expected to fail must now be above this one. */
	if (zone->compact_order_failed <= order)
		zone->compact_order_failed = order + 1;

	/* A genuine allocation success clears deferral completely. */
	if (alloc_success) {
		zone->compact_defer_shift = 0;
		zone->compact_considered = 0;
	}
}
/* Returns true if restarting compaction after many failures */ /* Returns true if restarting compaction after many failures */
static inline bool compaction_restarting(struct zone *zone, int order) static inline bool compaction_restarting(struct zone *zone, int order)
{ {
......
...@@ -1124,12 +1124,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc) ...@@ -1124,12 +1124,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
compact_zone(zone, cc); compact_zone(zone, cc);
if (cc->order > 0) { if (cc->order > 0) {
int ok = zone_watermark_ok(zone, cc->order, if (zone_watermark_ok(zone, cc->order,
low_wmark_pages(zone), 0, 0); low_wmark_pages(zone), 0, 0))
if (ok && cc->order >= zone->compact_order_failed) compaction_defer_reset(zone, cc->order, false);
zone->compact_order_failed = cc->order + 1;
/* Currently async compaction is never deferred. */ /* Currently async compaction is never deferred. */
else if (!ok && cc->sync) else if (cc->sync)
defer_compaction(zone, cc->order); defer_compaction(zone, cc->order);
} }
......
...@@ -2235,10 +2235,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, ...@@ -2235,10 +2235,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
preferred_zone, migratetype); preferred_zone, migratetype);
if (page) { if (page) {
preferred_zone->compact_blockskip_flush = false; preferred_zone->compact_blockskip_flush = false;
preferred_zone->compact_considered = 0; compaction_defer_reset(preferred_zone, order, true);
preferred_zone->compact_defer_shift = 0;
if (order >= preferred_zone->compact_order_failed)
preferred_zone->compact_order_failed = order + 1;
count_vm_event(COMPACTSUCCESS); count_vm_event(COMPACTSUCCESS);
return page; return page;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment