Commit 4a64981d authored by Vishal Moola (Oracle), committed by Andrew Morton

mm/mempolicy: convert migrate_page_add() to migrate_folio_add()

Replace migrate_page_add() with migrate_folio_add().  migrate_folio_add()
does the same as migrate_page_add() but takes in a folio instead of a page.
This removes a couple of calls to compound_head().
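As a minimal sketch of what the conversion buys (the call shape mirrors the queue_folios_* hunks below; qp and flags are as in mm/mempolicy.c):

	/* Before: the caller passes a page, and migrate_page_add()
	 * immediately calls compound_head() to find the head page.
	 */
	migrate_page_add(&folio->page, qp->pagelist, flags);

	/* After: the caller passes the folio it already has, so the
	 * callee no longer needs a compound_head() lookup.
	 */
	migrate_folio_add(folio, qp->pagelist, flags);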

Link: https://lkml.kernel.org/r/20230130201833.27042-7-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Yin Fengwei <fengwei.yin@intel.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d451b89d
@@ -414,7 +414,7 @@ static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
 	},
 };
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
 		unsigned long flags);
 struct queue_pages {
@@ -476,7 +476,7 @@ static int queue_folios_pmd(pmd_t *pmd, spinlock_t *ptl, unsigned long addr,
 	/* go to folio migration */
 	if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
 		if (!vma_migratable(walk->vma) ||
-		    migrate_page_add(&folio->page, qp->pagelist, flags)) {
+		    migrate_folio_add(folio, qp->pagelist, flags)) {
 			ret = 1;
 			goto unlock;
 		}
@@ -544,7 +544,7 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
 			 * temporary off LRU pages in the range.  Still
 			 * need migrate other LRU pages.
 			 */
-			if (migrate_page_add(&folio->page, qp->pagelist, flags))
+			if (migrate_folio_add(folio, qp->pagelist, flags))
				has_unmovable = true;
 		} else
			break;
@@ -1021,27 +1021,28 @@ static long do_get_mempolicy(int *policy, nodemask_t *nmask,
 }
 #ifdef CONFIG_MIGRATION
-/*
- * page migration, thp tail pages can be passed.
- */
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
 		unsigned long flags)
 {
-	struct page *head = compound_head(page);
 	/*
-	 * Avoid migrating a page that is shared with others.
+	 * We try to migrate only unshared folios. If it is shared it
+	 * is likely not worth migrating.
+	 *
+	 * To check if the folio is shared, ideally we want to make sure
+	 * every page is mapped to the same process. Doing that is very
+	 * expensive, so check the estimated mapcount of the folio instead.
 	 */
-	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(head) == 1) {
-		if (!isolate_lru_page(head)) {
-			list_add_tail(&head->lru, pagelist);
-			mod_node_page_state(page_pgdat(head),
-				NR_ISOLATED_ANON + page_is_file_lru(head),
-				thp_nr_pages(head));
+	if ((flags & MPOL_MF_MOVE_ALL) || folio_estimated_sharers(folio) == 1) {
+		if (!folio_isolate_lru(folio)) {
+			list_add_tail(&folio->lru, foliolist);
+			node_stat_mod_folio(folio,
+				NR_ISOLATED_ANON + folio_is_file_lru(folio),
+				folio_nr_pages(folio));
 		} else if (flags & MPOL_MF_STRICT) {
 			/*
-			 * Non-movable page may reach here.  And, there may be
-			 * temporary off LRU pages or non-LRU movable pages.
-			 * Treat them as unmovable pages since they can't be
+			 * Non-movable folio may reach here.  And, there may be
+			 * temporary off LRU folios or non-LRU movable folios.
+			 * Treat them as unmovable folios since they can't be
 			 * isolated, so they can't be moved at the moment.  It
 			 * should return -EIO for this case too.
 			 */
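For reference (a sketch, not part of this patch): folio_estimated_sharers(), added earlier in this series, estimates sharing cheaply by sampling the mapcount of a single page rather than walking every page of the folio, roughly:

	static inline int folio_estimated_sharers(struct folio *folio)
	{
		/* Sample one page's mapcount as a proxy for the whole folio. */
		return page_mapcount(folio_page(folio, 0));
	}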
@@ -1235,7 +1236,7 @@ static struct page *new_page(struct page *page, unsigned long start)
 }
 #else
-static int migrate_page_add(struct page *page, struct list_head *pagelist,
+static int migrate_folio_add(struct folio *folio, struct list_head *foliolist,
 		unsigned long flags)
 {
 	return -EIO;