Commit c5d01d0d authored by Michal Hocko, committed by Linus Torvalds

mm, compaction: simplify __alloc_pages_direct_compact feedback interface

__alloc_pages_direct_compact communicates a potential back-off via two
variables:
	- deferred_compaction signals that compaction returned
	  COMPACT_DEFERRED
	- contended_compaction is set when there is contention on
	  zone->lock or zone->lru_lock

__alloc_pages_slowpath then backs off for THP allocation requests to
prevent long stalls. This is rather messy and it would be much
cleaner to return a single compact result value and hide all the nasty
details in __alloc_pages_direct_compact.

This patch shouldn't introduce any functional changes.
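
The shape of the new contract can be shown with a minimal, self-contained
C sketch (userspace, not kernel code: the toy try_compact() helper and the
enum values and ordering below are illustrative assumptions; the
authoritative definition lives in include/linux/compaction.h):

	#include <stdio.h>

	/*
	 * Toy model of the feedback values the slowpath cares about.
	 * The names mirror the kernel's, but the values here are only
	 * an illustration.
	 */
	enum compact_result {
		COMPACT_SKIPPED,	/* nothing was attempted */
		COMPACT_DEFERRED,	/* backed off due to past failures */
		COMPACT_INACTIVE = COMPACT_DEFERRED, /* top of the "no work done" range */
		COMPACT_CONTENDED,	/* aborted on lock or need_resched() contention */
		COMPACT_PARTIAL,	/* progress was made; retry the allocation */
	};

	/* Hypothetical stand-in for try_to_compact_pages(). */
	static enum compact_result try_compact(void)
	{
		return COMPACT_DEFERRED;
	}

	int main(void)
	{
		enum compact_result result = try_compact();

		/*
		 * Before the patch the caller had to test two side
		 * channels (deferred_compaction and
		 * contended_compaction); now one value carries the
		 * whole back-off decision.
		 */
		if (result == COMPACT_DEFERRED || result == COMPACT_CONTENDED)
			printf("back off: fail the THP allocation\n");
		else if (result <= COMPACT_INACTIVE)
			printf("no work done: fall through to direct reclaim\n");
		else
			printf("compaction ran: retry the allocation\n");

		return 0;
	}

Collapsing the two flags into one enum also lets
__alloc_pages_direct_compact translate lock and scheduler contention into
COMPACT_CONTENDED internally, so the slowpath no longer needs to know
about COMPACT_CONTENDED_LOCK vs COMPACT_CONTENDED_SCHED.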
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Vladimir Davydov <vdavydov@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 4f9a358c
@@ -3185,29 +3185,21 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
 static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, int *contended_compaction,
-		bool *deferred_compaction)
+		enum migrate_mode mode, enum compact_result *compact_result)
 {
-	enum compact_result compact_result;
 	struct page *page;
+	int contended_compaction;
 
 	if (!order)
 		return NULL;
 
 	current->flags |= PF_MEMALLOC;
-	compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-						mode, contended_compaction);
+	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
+						mode, &contended_compaction);
 	current->flags &= ~PF_MEMALLOC;
 
-	switch (compact_result) {
-	case COMPACT_DEFERRED:
-		*deferred_compaction = true;
-		/* fall-through */
-	case COMPACT_SKIPPED:
+	if (*compact_result <= COMPACT_INACTIVE)
 		return NULL;
-	default:
-		break;
-	}
 
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
@@ -3233,6 +3225,24 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTFAIL);
 
+	/*
+	 * In all zones where compaction was attempted (and not
+	 * deferred or skipped), lock contention has been detected.
+	 * For THP allocation we do not want to disrupt the others
+	 * so we fallback to base pages instead.
+	 */
+	if (contended_compaction == COMPACT_CONTENDED_LOCK)
+		*compact_result = COMPACT_CONTENDED;
+
+	/*
+	 * If compaction was aborted due to need_resched(), we do not
+	 * want to further increase allocation latency, unless it is
+	 * khugepaged trying to collapse.
+	 */
+	if (contended_compaction == COMPACT_CONTENDED_SCHED
+			&& !(current->flags & PF_KTHREAD))
+		*compact_result = COMPACT_CONTENDED;
+
 	cond_resched();
 
 	return NULL;
@@ -3241,8 +3251,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		unsigned int alloc_flags, const struct alloc_context *ac,
-		enum migrate_mode mode, int *contended_compaction,
-		bool *deferred_compaction)
+		enum migrate_mode mode, enum compact_result *compact_result)
 {
 	return NULL;
 }
@@ -3387,8 +3396,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
-	bool deferred_compaction = false;
-	int contended_compaction = COMPACT_CONTENDED_NONE;
+	enum compact_result compact_result;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -3475,8 +3483,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
 					migration_mode,
-					&contended_compaction,
-					&deferred_compaction);
+					&compact_result);
 	if (page)
 		goto got_pg;
@@ -3489,25 +3496,14 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		 * to heavily disrupt the system, so we fail the allocation
 		 * instead of entering direct reclaim.
 		 */
-		if (deferred_compaction)
-			goto nopage;
-
-		/*
-		 * In all zones where compaction was attempted (and not
-		 * deferred or skipped), lock contention has been detected.
-		 * For THP allocation we do not want to disrupt the others
-		 * so we fallback to base pages instead.
-		 */
-		if (contended_compaction == COMPACT_CONTENDED_LOCK)
+		if (compact_result == COMPACT_DEFERRED)
 			goto nopage;
 
 		/*
-		 * If compaction was aborted due to need_resched(), we do not
-		 * want to further increase allocation latency, unless it is
-		 * khugepaged trying to collapse.
+		 * Compaction is contended so rather back off than cause
+		 * excessive stalls.
 		 */
-		if (contended_compaction == COMPACT_CONTENDED_SCHED
-				&& !(current->flags & PF_KTHREAD))
+		if (compact_result == COMPACT_CONTENDED)
 			goto nopage;
 	}
@@ -3555,8 +3551,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags,
 					ac, migration_mode,
-					&contended_compaction,
-					&deferred_compaction);
+					&compact_result);
 	if (page)
 		goto got_pg;
 
 nopage:
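
One subtlety in the new early bail-out: the test
if (*compact_result <= COMPACT_INACTIVE) replaces the old
COMPACT_DEFERRED/COMPACT_SKIPPED switch with a range check, which is only
correct if every "no work was done" state sorts at or below
COMPACT_INACTIVE. An illustrative compile-time assertion of that ordering
assumption (not part of this patch; the actual ordering is defined in
include/linux/compaction.h):

	/* Ordering the range check above relies on. */
	BUILD_BUG_ON(COMPACT_SKIPPED > COMPACT_INACTIVE);
	BUILD_BUG_ON(COMPACT_DEFERRED > COMPACT_INACTIVE);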