mm/migrate: Use a folio in alloc_migration_target()

This removes an assumption that a large folio is HPAGE_PMD_ORDER
as well as letting us remove the call to prep_transhuge_page()
and a few hidden calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Zi Yan <ziy@nvidia.com>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
parent 83a8441f
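
Editor's note on why the explicit prep_transhuge_page() call can go away: __folio_alloc() forces __GFP_COMP and performs the large-folio preparation itself. As a point of reference only (an approximation of the page allocator around this kernel version, not part of this patch), the helper looks roughly like this:

/* Approximate shape of __folio_alloc() in this era (sketch, not from this
 * patch): __GFP_COMP is forced and the THP prep happens inside the helper,
 * so callers such as alloc_migration_target() no longer need to call
 * prep_transhuge_page() themselves. */
struct folio *__folio_alloc(gfp_t gfp, unsigned int order, int preferred_nid,
		nodemask_t *nodemask)
{
	struct page *page = __alloc_pages(gfp | __GFP_COMP, order,
			preferred_nid, nodemask);

	if (page && order > 1)
		prep_transhuge_page(page);
	return (struct folio *)page;
}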
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1520,10 +1520,11 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
 
 struct page *alloc_migration_target(struct page *page, unsigned long private)
 {
+	struct folio *folio = page_folio(page);
 	struct migration_target_control *mtc;
 	gfp_t gfp_mask;
 	unsigned int order = 0;
-	struct page *new_page = NULL;
+	struct folio *new_folio = NULL;
 	int nid;
 	int zidx;
 
@@ -1531,34 +1532,31 @@ struct page *alloc_migration_target(struct page *page, unsigned long private)
 	gfp_mask = mtc->gfp_mask;
 	nid = mtc->nid;
 	if (nid == NUMA_NO_NODE)
-		nid = page_to_nid(page);
+		nid = folio_nid(folio);
 
-	if (PageHuge(page)) {
-		struct hstate *h = page_hstate(compound_head(page));
+	if (folio_test_hugetlb(folio)) {
+		struct hstate *h = page_hstate(&folio->page);
 
 		gfp_mask = htlb_modify_alloc_mask(h, gfp_mask);
 		return alloc_huge_page_nodemask(h, nid, mtc->nmask, gfp_mask);
 	}
 
-	if (PageTransHuge(page)) {
+	if (folio_test_large(folio)) {
 		/*
 		 * clear __GFP_RECLAIM to make the migration callback
 		 * consistent with regular THP allocations.
 		 */
 		gfp_mask &= ~__GFP_RECLAIM;
 		gfp_mask |= GFP_TRANSHUGE;
-		order = HPAGE_PMD_ORDER;
+		order = folio_order(folio);
 	}
-	zidx = zone_idx(page_zone(page));
+	zidx = zone_idx(folio_zone(folio));
 	if (is_highmem_idx(zidx) || zidx == ZONE_MOVABLE)
 		gfp_mask |= __GFP_HIGHMEM;
 
-	new_page = __alloc_pages(gfp_mask, order, nid, mtc->nmask);
-
-	if (new_page && PageTransHuge(new_page))
-		prep_transhuge_page(new_page);
-
-	return new_page;
+	new_folio = __folio_alloc(gfp_mask, order, nid, mtc->nmask);
+
+	return &new_folio->page;
 }
 
 #ifdef CONFIG_NUMA
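
For context, a hedged sketch of how alloc_migration_target() is typically wired up as the migrate_pages() allocation callback, with the control structure travelling through the opaque 'private' argument. This is illustrative only (it mirrors the memory hot-remove path of this era) and is not part of the patch:

/* Illustrative only: assumes the migrate_pages() signature shown in the
 * hunk context above and the migration_target_control layout from
 * mm/internal.h. */
struct migration_target_control mtc = {
	.nid = NUMA_NO_NODE,	/* callback falls back to folio_nid() */
	.gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
};
LIST_HEAD(source);		/* pages already isolated for migration */

migrate_pages(&source, alloc_migration_target, NULL,
	      (unsigned long)&mtc, MIGRATE_SYNC, MR_MEMORY_HOTPLUG, NULL);

With nid left as NUMA_NO_NODE, the callback derives the target node from folio_nid(folio), so callers do not need to resolve the head page themselves.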