Commit 77f1fe6b authored by Mel Gorman, committed by Linus Torvalds

mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path

Migration synchronously waits for writeback if the initial pass fails.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency sensitive or expects that synchronous migration is not
going to have a significantly better success rate.

This patch adds a sync parameter to migrate_pages(), allowing the caller to
indicate whether wait_on_page_writeback() is allowed within migration or
not.  For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs, and then try_to_compact_pages() is
called synchronously, as there is a greater expectation that it will
succeed.
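
As a sketch of the resulting call pattern (condensed from the
__alloc_pages_slowpath() hunk below; direct reclaim and error handling
are elided, and only the compaction calls are shown):

	bool sync_migration = false;

	/* First attempt is asynchronous: migration will not wait on
	 * page writeback, so allocation latency stays low. */
	page = __alloc_pages_direct_compact(gfp_mask, order,
				zonelist, high_zoneidx, nodemask,
				alloc_flags, preferred_zone,
				migratetype, &did_some_progress,
				sync_migration);
	if (page)
		goto got_pg;

	/* Retries after direct reclaim may wait on writeback, since
	 * synchronous migration is now more likely to be worth the stall. */
	sync_migration = true;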

[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 3e7d3449
include/linux/compaction.h
@@ -21,10 +21,11 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *mask);
+			int order, gfp_t gfp_mask, nodemask_t *mask,
+			bool sync);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 extern unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask);
+			gfp_t gfp_mask, bool sync);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -57,7 +58,8 @@ static inline bool compaction_deferred(struct zone *zone)
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	return COMPACT_CONTINUE;
 }
@@ -68,7 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
 }
 
 static inline unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask)
+			gfp_t gfp_mask, bool sync)
 {
 	return 0;
 }
include/linux/migrate.h
@@ -13,9 +13,11 @@ extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -33,9 +35,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
mm/compaction.c
@@ -33,6 +33,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -455,7 +456,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		nr_migrate = cc->nr_migratepages;
 		migrate_pages(&cc->migratepages, compaction_alloc,
-				(unsigned long)cc, 0);
+				(unsigned long)cc, 0,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
@@ -482,7 +484,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 }
 
 unsigned long compact_zone_order(struct zone *zone,
-				int order, gfp_t gfp_mask)
+				int order, gfp_t gfp_mask,
+				bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -490,6 +493,7 @@ unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -505,11 +509,13 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -533,7 +539,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 							nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
mm/memory-failure.c
@@ -1290,9 +1290,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	/* Keep page count to indicate a given hugepage is isolated. */
 	list_add(&hpage->lru, &pagelist);
-	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+				true);
 	if (ret) {
 		putback_lru_pages(&pagelist);
 		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
 		if (ret > 0)
@@ -1413,7 +1414,8 @@ int soft_offline_page(struct page *page, int flags)
 		LIST_HEAD(pagelist);
 
 		list_add(&page->lru, &pagelist);
-		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+		ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+							0, true);
 		if (ret) {
 			pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 				pfn, ret, page->flags);
mm/memory_hotplug.c
@@ -733,7 +733,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			goto out;
 		}
 		/* this function returns # of failed pages */
-		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+								1, true);
 		if (ret)
 			putback_lru_pages(&source);
 	}
mm/mempolicy.c
@@ -935,7 +935,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 		return PTR_ERR(vma);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest, 0);
+		err = migrate_pages(&pagelist, new_node_page, dest, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
@@ -1155,7 +1155,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 		if (!list_empty(&pagelist)) {
 			nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma, 0);
+						(unsigned long)vma, 0, true);
 			if (nr_failed)
 				putback_lru_pages(&pagelist);
 		}
mm/migrate.c
@@ -614,7 +614,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, int offlining)
+			struct page *page, int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -682,7 +682,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
@@ -827,7 +827,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, int offlining)
+				int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -841,7 +841,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force)
+		if (!force || !sync)
 			goto out;
 		lock_page(hpage);
 	}
@@ -909,7 +909,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 * Return: Number of pages not migrated or error code.
 */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -929,7 +930,8 @@ int migrate_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-						page, pass > 2, offlining);
+						page, pass > 2, offlining,
+						sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -958,7 +960,8 @@ int migrate_pages(struct list_head *from,
 }
 
 int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -974,7 +977,8 @@ int migrate_huge_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining);
+					private, page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1107,7 +1111,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0);
+				(unsigned long)pm, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
mm/page_alloc.c
@@ -1812,7 +1812,8 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 	struct task_struct *tsk = current;
@@ -1822,7 +1823,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	tsk->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-								nodemask);
+						nodemask, sync_migration);
 	tsk->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
@@ -1859,7 +1860,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -2001,6 +2003,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2063,14 +2066,19 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2134,7 +2142,8 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 		if (page)
 			goto got_pg;
 	}
mm/vmscan.c
@@ -2377,7 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 			 * would ordinarily call try_to_compact_pages()
 			 */
 			if (sc.order > PAGE_ALLOC_COSTLY_ORDER)
-				compact_zone_order(zone, sc.order, sc.gfp_mask);
+				compact_zone_order(zone, sc.order,
+							sc.gfp_mask, false);
 			if (!zone_watermark_ok_safe(zone, order,
 				    high_wmark_pages(zone), end_zone, 0)) {