Commit 4e096ae1 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert migrate_pages() to work on folios

Almost all of the callers & implementors of migrate_pages() were already
converted to use folios.  compaction_alloc() & compaction_free() are
trivial to convert as part of this patch and not worth splitting out.

Link: https://lkml.kernel.org/r/20230513001101.276972-1-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: "Huang, Ying" <ying.huang@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b2cac248
@@ -73,14 +73,13 @@ In kernel use of migrate_pages()
    It also prevents the swapper or other scans from encountering
    the page.
 
-2. We need to have a function of type new_page_t that can be
+2. We need to have a function of type new_folio_t that can be
    passed to migrate_pages(). This function should figure out
-   how to allocate the correct new page given the old page.
+   how to allocate the correct new folio given the old folio.
 
 3. The migrate_pages() function is called which attempts
    to do the migration. It will call the function to allocate
-   the new page for each page that is considered for
-   moving.
+   the new folio for each folio that is considered for moving.
 
 How migrate_pages() works
 =========================
......
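To illustrate the steps in the documentation hunk above under the converted API, here is a minimal, hypothetical caller sketch. my_alloc_folio() and migrate_isolated_folios() are invented names, the GFP/mode/reason choices are just one plausible combination, and the folios on the list are assumed to have already been isolated as described in step 1.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/*
 * Step 2: a new_folio_t callback.  This hypothetical version just allocates
 * a destination folio of the same order as the source; real callers
 * (compaction, demotion, mempolicy) apply their own placement policy.
 */
static struct folio *my_alloc_folio(struct folio *src, unsigned long private)
{
	return folio_alloc(GFP_HIGHUSER_MOVABLE, folio_order(src));
}

/* Step 3: hand a list of already-isolated folios to migrate_pages(). */
static int migrate_isolated_folios(struct list_head *folio_list)
{
	unsigned int nr_succeeded = 0;
	int nr_failed;

	nr_failed = migrate_pages(folio_list, my_alloc_folio, NULL,
				  0, MIGRATE_SYNC, MR_SYSCALL, &nr_succeeded);
	if (nr_failed)
		/* Folios that could not be migrated remain on the list. */
		putback_movable_pages(folio_list);

	return nr_failed;
}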
@@ -55,7 +55,7 @@ mbind() sets a new memory policy. A process's pages can also be migrated via sys_
 disappear. It also prevents the swapper or other scanners from encountering the page.
-2. We need to have a function of type new_page_t that can be passed to migrate_pages(). This function should figure
+2. We need to have a function of type new_folio_t that can be passed to migrate_pages(). This function should figure
 out how to allocate the correct new page given the old page.
 3. The migrate_pages() function is called, and it attempts to do the migration. It will call that function to allocate, for each page that is considered for migration,
......
@@ -7,8 +7,8 @@
 #include <linux/migrate_mode.h>
 #include <linux/hugetlb.h>
 
-typedef struct page *new_page_t(struct page *page, unsigned long private);
-typedef void free_page_t(struct page *page, unsigned long private);
+typedef struct folio *new_folio_t(struct folio *folio, unsigned long private);
+typedef void free_folio_t(struct folio *folio, unsigned long private);
 
 struct migration_target_control;
@@ -67,10 +67,10 @@ int migrate_folio_extra(struct address_space *mapping, struct folio *dst,
 		struct folio *src, enum migrate_mode mode, int extra_count);
 int migrate_folio(struct address_space *mapping, struct folio *dst,
 		struct folio *src, enum migrate_mode mode);
-int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
+int migrate_pages(struct list_head *l, new_folio_t new, free_folio_t free,
 		unsigned long private, enum migrate_mode mode, int reason,
 		unsigned int *ret_succeeded);
-struct page *alloc_migration_target(struct page *page, unsigned long private);
+struct folio *alloc_migration_target(struct folio *src, unsigned long private);
 bool isolate_movable_page(struct page *page, isolate_mode_t mode);
 int migrate_huge_page_move_mapping(struct address_space *mapping,
@@ -85,11 +85,11 @@ int folio_migrate_mapping(struct address_space *mapping,
 #else
 
 static inline void putback_movable_pages(struct list_head *l) {}
-static inline int migrate_pages(struct list_head *l, new_page_t new,
-		free_page_t free, unsigned long private, enum migrate_mode mode,
-		int reason, unsigned int *ret_succeeded)
+static inline int migrate_pages(struct list_head *l, new_folio_t new,
+		free_folio_t free, unsigned long private,
+		enum migrate_mode mode, int reason, unsigned int *ret_succeeded)
 	{ return -ENOSYS; }
-static inline struct page *alloc_migration_target(struct page *page,
+static inline struct folio *alloc_migration_target(struct folio *src,
 		unsigned long private)
 	{ return NULL; }
 static inline bool isolate_movable_page(struct page *page, isolate_mode_t mode)
......
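alloc_migration_target() is the generic new_folio_t implementation declared above; callers pass a struct migration_target_control (defined in mm/internal.h) through the private argument to say where the destination folio should come from, and the !CONFIG_MIGRATION stubs keep the same prototypes so no #ifdef is needed at the call site. The sketch below shows that pattern; migrate_list_to_node() and its GFP/reason choices are illustrative, not taken from this patch.

/* Sketch assumed to live under mm/, where migration_target_control is defined. */
#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>
#include "internal.h"

/*
 * Hypothetical helper: migrate the already-isolated folios on @folio_list
 * towards @target_nid using the generic allocator.  With CONFIG_MIGRATION
 * disabled, the inline migrate_pages() stub above simply returns -ENOSYS.
 */
static int migrate_list_to_node(struct list_head *folio_list, int target_nid)
{
	struct migration_target_control mtc = {
		.nid = target_nid,
		.nmask = NULL,
		/* Illustrative flags; real callers pick their own policy. */
		.gfp_mask = GFP_KERNEL | __GFP_THISNODE,
	};
	unsigned int nr_succeeded = 0;
	int nr_failed;

	nr_failed = migrate_pages(folio_list, alloc_migration_target, NULL,
				  (unsigned long)&mtc, MIGRATE_SYNC,
				  MR_MEMORY_HOTPLUG, &nr_succeeded);
	if (nr_failed)
		putback_movable_pages(folio_list);

	return nr_failed;
}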
@@ -1685,11 +1685,10 @@ static void isolate_freepages(struct compact_control *cc)
  * This is a migrate-callback that "allocates" freepages by taking pages
  * from the isolated freelists in the block we are migrating to.
  */
-static struct page *compaction_alloc(struct page *migratepage,
-					unsigned long data)
+static struct folio *compaction_alloc(struct folio *src, unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
-	struct page *freepage;
+	struct folio *dst;
 
 	if (list_empty(&cc->freepages)) {
 		isolate_freepages(cc);
@@ -1698,11 +1697,11 @@ static struct page *compaction_alloc(struct page *migratepage,
 		return NULL;
 	}
 
-	freepage = list_entry(cc->freepages.next, struct page, lru);
-	list_del(&freepage->lru);
+	dst = list_entry(cc->freepages.next, struct folio, lru);
+	list_del(&dst->lru);
 	cc->nr_freepages--;
 
-	return freepage;
+	return dst;
 }
 
 /*
@@ -1710,11 +1709,11 @@ static struct page *compaction_alloc(struct page *migratepage,
  * freelist.  All pages on the freelist are from the same zone, so there is no
  * special handling needed for NUMA.
  */
-static void compaction_free(struct page *page, unsigned long data)
+static void compaction_free(struct folio *dst, unsigned long data)
 {
 	struct compact_control *cc = (struct compact_control *)data;
 
-	list_add(&page->lru, &cc->freepages);
+	list_add(&dst->lru, &cc->freepages);
 	cc->nr_freepages++;
 }
......
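Since compaction_alloc() and compaction_free() now take and return folios, they match the new new_folio_t/free_folio_t prototypes directly. The fragment below only sketches how such a callback pair is wired into migrate_pages(), with the compact_control threaded through the private cookie; run_migration() is a hypothetical name and the fragment assumes it sits in mm/compaction.c next to the callbacks above.

/*
 * Illustrative sketch, assumed to live in mm/compaction.c where
 * struct compact_control and the two callbacks above are visible.
 */
static int run_migration(struct compact_control *cc)
{
	unsigned int nr_succeeded = 0;

	/* cc doubles as the private cookie both callbacks recover. */
	return migrate_pages(&cc->migratepages, compaction_alloc,
			     compaction_free, (unsigned long)cc,
			     cc->mode, MR_COMPACTION, &nr_succeeded);
}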
@@ -1195,24 +1195,22 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_page(struct page *page, unsigned long start)
+static struct folio *new_folio(struct folio *src, unsigned long start)
 {
-	struct folio *dst, *src = page_folio(page);
 	struct vm_area_struct *vma;
 	unsigned long address;
 	VMA_ITERATOR(vmi, current->mm, start);
 	gfp_t gfp = GFP_HIGHUSER_MOVABLE | __GFP_RETRY_MAYFAIL;
 
 	for_each_vma(vmi, vma) {
-		address = page_address_in_vma(page, vma);
+		address = page_address_in_vma(&src->page, vma);
 		if (address != -EFAULT)
 			break;
 	}
 
 	if (folio_test_hugetlb(src)) {
-		dst = alloc_hugetlb_folio_vma(folio_hstate(src),
+		return alloc_hugetlb_folio_vma(folio_hstate(src),
 				vma, address);
-		return &dst->page;
 	}
 
 	if (folio_test_large(src))
@@ -1221,9 +1219,8 @@ static struct page *new_page(struct page *page, unsigned long start)
 	/*
 	 * if !vma, vma_alloc_folio() will use task or system default policy
 	 */
-	dst = vma_alloc_folio(gfp, folio_order(src), vma, address,
+	return vma_alloc_folio(gfp, folio_order(src), vma, address,
 			folio_test_large(src));
-	return &dst->page;
 }
 
 #else
@@ -1239,7 +1236,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
 	return -ENOSYS;
 }
 
-static struct page *new_page(struct page *page, unsigned long start)
+static struct folio *new_folio(struct folio *src, unsigned long start)
 {
 	return NULL;
 }
@@ -1334,7 +1331,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 	if (!list_empty(&pagelist)) {
 		WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-		nr_failed = migrate_pages(&pagelist, new_page, NULL,
+		nr_failed = migrate_pages(&pagelist, new_folio, NULL,
 			start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND, NULL);
 		if (nr_failed)
 			putback_movable_pages(&pagelist);
......
This diff is collapsed.
@@ -1621,9 +1621,10 @@ static void folio_check_dirty_writeback(struct folio *folio,
 		mapping->a_ops->is_dirty_writeback(folio, dirty, writeback);
 }
 
-static struct page *alloc_demote_page(struct page *page, unsigned long private)
+static struct folio *alloc_demote_folio(struct folio *src,
+		unsigned long private)
 {
-	struct page *target_page;
+	struct folio *dst;
 	nodemask_t *allowed_mask;
 	struct migration_target_control *mtc;
*/ */
mtc->nmask = NULL; mtc->nmask = NULL;
mtc->gfp_mask |= __GFP_THISNODE; mtc->gfp_mask |= __GFP_THISNODE;
target_page = alloc_migration_target(page, (unsigned long)mtc); dst = alloc_migration_target(src, (unsigned long)mtc);
if (target_page) if (dst)
return target_page; return dst;
mtc->gfp_mask &= ~__GFP_THISNODE; mtc->gfp_mask &= ~__GFP_THISNODE;
mtc->nmask = allowed_mask; mtc->nmask = allowed_mask;
return alloc_migration_target(page, (unsigned long)mtc); return alloc_migration_target(src, (unsigned long)mtc);
} }
/* /*
@@ -1683,7 +1684,7 @@ static unsigned int demote_folio_list(struct list_head *demote_folios,
 	node_get_allowed_targets(pgdat, &allowed_mask);
 
 	/* Demotion ignores all cpuset and mempolicy settings */
-	migrate_pages(demote_folios, alloc_demote_page, NULL,
+	migrate_pages(demote_folios, alloc_demote_folio, NULL,
 		(unsigned long)&mtc, MIGRATE_ASYNC, MR_DEMOTION,
 		&nr_succeeded);
......