mm/migrate: Convert migrate_page() to migrate_folio()

Convert all callers to pass a folio.  Most have the folio
already available.  Switch all users from aops->migratepage to
aops->migrate_folio.  Also turn the documentation into kerneldoc.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: David Sterba <dsterba@suse.com>
parent 4ae84a80
...@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj, ...@@ -216,8 +216,8 @@ i915_gem_userptr_put_pages(struct drm_i915_gem_object *obj,
* However...! * However...!
* *
* The mmu-notifier can be invalidated for a * The mmu-notifier can be invalidated for a
 * migrate_page, that is already holding the lock * migrate_folio, that is already holding the lock
* on the page. Such a try_to_unmap() will result * on the folio. Such a try_to_unmap() will result
* in us calling put_pages() and so recursively try * in us calling put_pages() and so recursively try
* to lock the page. We avoid that deadlock with * to lock the page. We avoid that deadlock with
* a trylock_page() and in exchange we risk missing * a trylock_page() and in exchange we risk missing
......
...@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space *mapping, ...@@ -968,7 +968,7 @@ static int btree_migrate_folio(struct address_space *mapping,
if (folio_get_private(src) && if (folio_get_private(src) &&
!filemap_release_folio(src, GFP_KERNEL)) !filemap_release_folio(src, GFP_KERNEL))
return -EAGAIN; return -EAGAIN;
return migrate_page(mapping, &dst->page, &src->page, mode); return migrate_folio(mapping, dst, src, mode);
} }
#else #else
#define btree_migrate_folio NULL #define btree_migrate_folio NULL
......
...@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, ...@@ -2139,7 +2139,7 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
folio_wait_fscache(src); folio_wait_fscache(src);
} }
return migrate_page(mapping, &dst->page, &src->page, mode); return migrate_folio(mapping, dst, src, mode);
} }
#endif #endif
......
...@@ -62,9 +62,8 @@ extern const char *migrate_reason_names[MR_TYPES]; ...@@ -62,9 +62,8 @@ extern const char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l); extern void putback_movable_pages(struct list_head *l);
extern int migrate_page(struct address_space *mapping, int migrate_folio(struct address_space *mapping, struct folio *dst,
struct page *newpage, struct page *page, struct folio *src, enum migrate_mode mode);
enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free, extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason, unsigned long private, enum migrate_mode mode, int reason,
unsigned int *ret_succeeded); unsigned int *ret_succeeded);
......
...@@ -593,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy); ...@@ -593,34 +593,37 @@ EXPORT_SYMBOL(folio_migrate_copy);
* Migration functions * Migration functions
***********************************************************/ ***********************************************************/
/* /**
* Common logic to directly migrate a single LRU page suitable for * migrate_folio() - Simple folio migration.
* pages that do not use PagePrivate/PagePrivate2. * @mapping: The address_space containing the folio.
* @dst: The folio to migrate the data to.
* @src: The folio containing the current data.
* @mode: How to migrate the page.
* *
* Pages are locked upon entry and exit. * Common logic to directly migrate a single LRU folio suitable for
* folios that do not use PagePrivate/PagePrivate2.
*
* Folios are locked upon entry and exit.
*/ */
int migrate_page(struct address_space *mapping, int migrate_folio(struct address_space *mapping, struct folio *dst,
struct page *newpage, struct page *page, struct folio *src, enum migrate_mode mode)
enum migrate_mode mode)
{ {
struct folio *newfolio = page_folio(newpage);
struct folio *folio = page_folio(page);
int rc; int rc;
BUG_ON(folio_test_writeback(folio)); /* Writeback must be complete */ BUG_ON(folio_test_writeback(src)); /* Writeback must be complete */
rc = folio_migrate_mapping(mapping, newfolio, folio, 0); rc = folio_migrate_mapping(mapping, dst, src, 0);
if (rc != MIGRATEPAGE_SUCCESS) if (rc != MIGRATEPAGE_SUCCESS)
return rc; return rc;
if (mode != MIGRATE_SYNC_NO_COPY) if (mode != MIGRATE_SYNC_NO_COPY)
folio_migrate_copy(newfolio, folio); folio_migrate_copy(dst, src);
else else
folio_migrate_flags(newfolio, folio); folio_migrate_flags(dst, src);
return MIGRATEPAGE_SUCCESS; return MIGRATEPAGE_SUCCESS;
} }
EXPORT_SYMBOL(migrate_page); EXPORT_SYMBOL(migrate_folio);
#ifdef CONFIG_BLOCK #ifdef CONFIG_BLOCK
/* Returns true if all buffers are successfully locked */ /* Returns true if all buffers are successfully locked */
...@@ -671,7 +674,7 @@ static int __buffer_migrate_folio(struct address_space *mapping, ...@@ -671,7 +674,7 @@ static int __buffer_migrate_folio(struct address_space *mapping,
head = folio_buffers(src); head = folio_buffers(src);
if (!head) if (!head)
return migrate_page(mapping, &dst->page, &src->page, mode); return migrate_folio(mapping, dst, src, mode);
/* Check whether page does not have extra refs before we do more work */ /* Check whether page does not have extra refs before we do more work */
expected_count = folio_expected_refs(mapping, src); expected_count = folio_expected_refs(mapping, src);
...@@ -848,7 +851,7 @@ static int fallback_migrate_folio(struct address_space *mapping, ...@@ -848,7 +851,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
!filemap_release_folio(src, GFP_KERNEL)) !filemap_release_folio(src, GFP_KERNEL))
return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY; return mode == MIGRATE_SYNC ? -EAGAIN : -EBUSY;
return migrate_page(mapping, &dst->page, &src->page, mode); return migrate_folio(mapping, dst, src, mode);
} }
/* /*
...@@ -875,7 +878,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src, ...@@ -875,7 +878,7 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
struct address_space *mapping = folio_mapping(src); struct address_space *mapping = folio_mapping(src);
if (!mapping) if (!mapping)
rc = migrate_page(mapping, &dst->page, &src->page, mode); rc = migrate_folio(mapping, dst, src, mode);
else if (mapping->a_ops->migrate_folio) else if (mapping->a_ops->migrate_folio)
/* /*
* Most folios have a mapping and most filesystems * Most folios have a mapping and most filesystems
......
...@@ -718,7 +718,8 @@ void migrate_vma_pages(struct migrate_vma *migrate) ...@@ -718,7 +718,8 @@ void migrate_vma_pages(struct migrate_vma *migrate)
continue; continue;
} }
r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); r = migrate_folio(mapping, page_folio(newpage),
page_folio(page), MIGRATE_SYNC_NO_COPY);
if (r != MIGRATEPAGE_SUCCESS) if (r != MIGRATEPAGE_SUCCESS)
migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; migrate->src[i] &= ~MIGRATE_PFN_MIGRATE;
} }
......
...@@ -3801,7 +3801,7 @@ const struct address_space_operations shmem_aops = { ...@@ -3801,7 +3801,7 @@ const struct address_space_operations shmem_aops = {
.write_end = shmem_write_end, .write_end = shmem_write_end,
#endif #endif
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
.migratepage = migrate_page, .migrate_folio = migrate_folio,
#endif #endif
.error_remove_page = shmem_error_remove_page, .error_remove_page = shmem_error_remove_page,
}; };
......
...@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = { ...@@ -33,7 +33,7 @@ static const struct address_space_operations swap_aops = {
.writepage = swap_writepage, .writepage = swap_writepage,
.dirty_folio = noop_dirty_folio, .dirty_folio = noop_dirty_folio,
#ifdef CONFIG_MIGRATION #ifdef CONFIG_MIGRATION
.migratepage = migrate_page, .migrate_folio = migrate_folio,
#endif #endif
}; };
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment