Commit ebe75e47 authored by Huang Ying, committed by Andrew Morton

migrate_pages: share more code between _unmap and _move

This is a code cleanup patch to reduce the duplicated code between the
_unmap and _move stages of migrate_pages().  No functionality change is
expected.
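The pattern behind the cleanup is worth spelling out: the undo helpers take explicit parameters describing how far the caller got (a locked flag, a nullable return list), so the failure paths of both the _unmap and _move stages can share one cleanup routine instead of open-coding it. Below is a minimal, standalone C sketch of that pattern; struct item and undo_item() are hypothetical stand-ins for illustration, not the kernel's folio API:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for a folio and its cleanup state. */
struct item {
	bool locked;
};

/*
 * Shared undo helper: it only unlocks if the caller actually took the
 * lock, and only moves the item to a "return" list if the caller passed
 * one, so callers at different stages of migration can all use it.
 */
static void undo_item(struct item *it, bool locked, const char *ret_list)
{
	if (locked) {
		it->locked = false;
		printf("unlocked item\n");
	}
	if (ret_list)
		printf("moved item to %s\n", ret_list);
}

int main(void)
{
	struct item it = { .locked = true };

	/*
	 * _unmap-stage failure that will be retried: the lock was taken,
	 * but the item must not be moved to the return list, so pass NULL
	 * (this mirrors ret = NULL for -EAGAIN/-EDEADLOCK in the patch).
	 */
	undo_item(&it, true, NULL);

	/*
	 * _move-stage failure: the lock is held and the item goes back to
	 * the return list.
	 */
	it.locked = true;
	undo_item(&it, true, "ret_folios");
	return 0;
}

Passing a NULL return list is how the retry cases (-EAGAIN/-EDEADLOCK) share the unlock and refcount cleanup while skipping the list move, which is exactly what the new "if (ret)" test in migrate_folio_undo_src() enables.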

Link: https://lkml.kernel.org/r/20230213123444.155149-8-ying.huang@intel.com
Signed-off-by: "Huang, Ying" <ying.huang@intel.com>
Cc: Zi Yan <ziy@nvidia.com>
Cc: Yang Shi <shy828301@gmail.com>
Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Bharata B Rao <bharata@amd.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Xin Hao <xhao@linux.alibaba.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 80562ba0
mm/migrate.c
@@ -1055,6 +1055,7 @@ static void __migrate_folio_extract(struct folio *dst,
 static void migrate_folio_undo_src(struct folio *src,
 				   int page_was_mapped,
 				   struct anon_vma *anon_vma,
+				   bool locked,
 				   struct list_head *ret)
 {
 	if (page_was_mapped)
@@ -1062,16 +1063,20 @@ static void migrate_folio_undo_src(struct folio *src,
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
-	folio_unlock(src);
-	list_move_tail(&src->lru, ret);
+	if (locked)
+		folio_unlock(src);
+	if (ret)
+		list_move_tail(&src->lru, ret);
 }
 
 /* Restore the destination folio to the original state upon failure */
 static void migrate_folio_undo_dst(struct folio *dst,
+				   bool locked,
 				   free_page_t put_new_page,
 				   unsigned long private)
 {
-	folio_unlock(dst);
+	if (locked)
+		folio_unlock(dst);
 	if (put_new_page)
 		put_new_page(&dst->page, private);
 	else
@@ -1096,13 +1101,42 @@ static void migrate_folio_done(struct folio *src,
 	folio_put(src);
 }
 
-static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force,
-				 bool avoid_force_lock, enum migrate_mode mode)
+/* Obtain the lock on page, remove all ptes. */
+static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
+			       unsigned long private, struct folio *src,
+			       struct folio **dstp, int force, bool avoid_force_lock,
+			       enum migrate_mode mode, enum migrate_reason reason,
+			       struct list_head *ret)
 {
+	struct folio *dst;
 	int rc = -EAGAIN;
+	struct page *newpage = NULL;
 	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
+	bool locked = false;
+	bool dst_locked = false;
+
+	if (!thp_migration_supported() && folio_test_transhuge(src))
+		return -ENOSYS;
+
+	if (folio_ref_count(src) == 1) {
+		/* Folio was freed from under us. So we are done. */
+		folio_clear_active(src);
+		folio_clear_unevictable(src);
+		/* free_pages_prepare() will clear PG_isolated. */
+		list_del(&src->lru);
+		migrate_folio_done(src, reason);
+		return MIGRATEPAGE_SUCCESS;
+	}
+
+	newpage = get_new_page(&src->page, private);
+	if (!newpage)
+		return -ENOMEM;
+	dst = page_folio(newpage);
+	*dstp = dst;
+
+	dst->private = NULL;
 
 	if (!folio_trylock(src)) {
 		if (!force || mode == MIGRATE_ASYNC)
@@ -1137,6 +1171,7 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 
 		folio_lock(src);
 	}
+	locked = true;
 
 	if (folio_test_writeback(src)) {
 		/*
@@ -1151,10 +1186,10 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 			break;
 		default:
 			rc = -EBUSY;
-			goto out_unlock;
+			goto out;
 		}
 		if (!force)
-			goto out_unlock;
+			goto out;
 		folio_wait_writeback(src);
 	}
 
@@ -1184,7 +1219,8 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 	 * This is much like races on refcount of oldpage: just don't BUG().
 	 */
 	if (unlikely(!folio_trylock(dst)))
-		goto out_unlock;
+		goto out;
+	dst_locked = true;
 
 	if (unlikely(!is_lru)) {
 		__migrate_folio_record(dst, page_was_mapped, anon_vma);
@@ -1206,7 +1242,7 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 	if (!src->mapping) {
 		if (folio_test_private(src)) {
 			try_to_free_buffers(src);
-			goto out_unlock_both;
+			goto out;
 		}
 	} else if (folio_mapped(src)) {
 		/* Establish migration ptes */
@@ -1221,73 +1257,25 @@ static int __migrate_folio_unmap(struct folio *src, struct folio *dst, int force
 		return MIGRATEPAGE_UNMAP;
 	}
 
-	if (page_was_mapped)
-		remove_migration_ptes(src, src, false);
-
-out_unlock_both:
-	folio_unlock(dst);
-out_unlock:
-	/* Drop an anon_vma reference if we took one */
-	if (anon_vma)
-		put_anon_vma(anon_vma);
-	folio_unlock(src);
 out:
-
-	return rc;
-}
-
-/* Obtain the lock on page, remove all ptes. */
-static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
-			       unsigned long private, struct folio *src,
-			       struct folio **dstp, int force, bool avoid_force_lock,
-			       enum migrate_mode mode, enum migrate_reason reason,
-			       struct list_head *ret)
-{
-	struct folio *dst;
-	int rc = MIGRATEPAGE_UNMAP;
-	struct page *newpage = NULL;
-
-	if (!thp_migration_supported() && folio_test_transhuge(src))
-		return -ENOSYS;
-
-	if (folio_ref_count(src) == 1) {
-		/* Folio was freed from under us. So we are done. */
-		folio_clear_active(src);
-		folio_clear_unevictable(src);
-		/* free_pages_prepare() will clear PG_isolated. */
-		list_del(&src->lru);
-		migrate_folio_done(src, reason);
-		return MIGRATEPAGE_SUCCESS;
-	}
-
-	newpage = get_new_page(&src->page, private);
-	if (!newpage)
-		return -ENOMEM;
-	dst = page_folio(newpage);
-	*dstp = dst;
-
-	dst->private = NULL;
-	rc = __migrate_folio_unmap(src, dst, force, avoid_force_lock, mode);
-	if (rc == MIGRATEPAGE_UNMAP)
-		return rc;
-
 	/*
 	 * A folio that has not been unmapped will be restored to
 	 * right list unless we want to retry.
 	 */
-	if (rc != -EAGAIN && rc != -EDEADLOCK)
-		list_move_tail(&src->lru, ret);
+	if (rc == -EAGAIN || rc == -EDEADLOCK)
+		ret = NULL;
 
-	if (put_new_page)
-		put_new_page(&dst->page, private);
-	else
-		folio_put(dst);
+	migrate_folio_undo_src(src, page_was_mapped, anon_vma, locked, ret);
+	migrate_folio_undo_dst(dst, dst_locked, put_new_page, private);
 
 	return rc;
 }
 
-static int __migrate_folio_move(struct folio *src, struct folio *dst,
-				enum migrate_mode mode)
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
+			      struct folio *src, struct folio *dst,
+			      enum migrate_mode mode, enum migrate_reason reason,
+			      struct list_head *ret)
 {
 	int rc;
 	int page_was_mapped = 0;
@@ -1300,12 +1288,8 @@ static int __migrate_folio_move(struct folio *src, struct folio *dst,
 	list_del(&dst->lru);
 
 	rc = move_to_new_folio(dst, src, mode);
-
-	if (rc == -EAGAIN) {
-		list_add(&dst->lru, prev);
-		__migrate_folio_record(dst, page_was_mapped, anon_vma);
-		return rc;
-	}
+	if (rc)
+		goto out;
 
 	if (unlikely(!is_lru))
 		goto out_unlock_both;
@@ -1319,70 +1303,49 @@ static int __migrate_folio_move(struct folio *src, struct folio *dst,
 	 * unsuccessful, and other cases when a page has been temporarily
 	 * isolated from the unevictable LRU: but this case is the easiest.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS) {
-		folio_add_lru(dst);
-		if (page_was_mapped)
-			lru_add_drain();
-	}
+	folio_add_lru(dst);
+	if (page_was_mapped)
+		lru_add_drain();
 
 	if (page_was_mapped)
-		remove_migration_ptes(src,
-			rc == MIGRATEPAGE_SUCCESS ? dst : src, false);
+		remove_migration_ptes(src, dst, false);
 
 out_unlock_both:
 	folio_unlock(dst);
-	/* Drop an anon_vma reference if we took one */
-	if (anon_vma)
-		put_anon_vma(anon_vma);
-	folio_unlock(src);
+	set_page_owner_migrate_reason(&dst->page, reason);
 
 	/*
 	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
 	 * refcounter.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS)
-		folio_put(dst);
+	folio_put(dst);
 
-	return rc;
-}
-
-/* Migrate the folio to the newly allocated folio in dst. */
-static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
-			      struct folio *src, struct folio *dst,
-			      enum migrate_mode mode, enum migrate_reason reason,
-			      struct list_head *ret)
-{
-	int rc;
-
-	rc = __migrate_folio_move(src, dst, mode);
-	if (rc == MIGRATEPAGE_SUCCESS)
-		set_page_owner_migrate_reason(&dst->page, reason);
-
-	if (rc != -EAGAIN) {
-		/*
-		 * A folio that has been migrated has all references
-		 * removed and will be freed. A folio that has not been
-		 * migrated will have kept its references and be restored.
-		 */
-		list_del(&src->lru);
-	}
 	/*
-	 * If migration is successful, releases reference grabbed during
-	 * isolation. Otherwise, restore the folio to right list unless
-	 * we want to retry.
+	 * A folio that has been migrated has all references removed
+	 * and will be freed.
 	 */
-	if (rc == MIGRATEPAGE_SUCCESS) {
-		migrate_folio_done(src, reason);
-	} else if (rc != -EAGAIN) {
-		list_add_tail(&src->lru, ret);
+	list_del(&src->lru);
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma)
+		put_anon_vma(anon_vma);
+	folio_unlock(src);
+	migrate_folio_done(src, reason);
+
+	return rc;
+out:
+	/*
+	 * A folio that has not been migrated will be restored to
+	 * right list unless we want to retry.
+	 */
+	if (rc == -EAGAIN) {
+		list_add(&dst->lru, prev);
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return rc;
+	}
 
-		if (put_new_page)
-			put_new_page(&dst->page, private);
-		else
-			folio_put(dst);
-	}
+	migrate_folio_undo_src(src, page_was_mapped, anon_vma, true, ret);
+	migrate_folio_undo_dst(dst, true, put_new_page, private);
 
 	return rc;
 }
@@ -1918,9 +1881,9 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 
 			__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
 			migrate_folio_undo_src(folio, page_was_mapped, anon_vma,
-					       ret_folios);
+					       true, ret_folios);
 			list_del(&dst->lru);
-			migrate_folio_undo_dst(dst, put_new_page, private);
+			migrate_folio_undo_dst(dst, true, put_new_page, private);
 			dst = dst2;
 			dst2 = list_next_entry(dst, lru);
 		}