mm/migrate: Convert buffer_migrate_page() to buffer_migrate_folio()

Use a folio throughout __buffer_migrate_folio(), add kernel-doc for
buffer_migrate_folio() and buffer_migrate_folio_norefs(), move their
declarations to buffer.h and switch all filesystems that have wired
them up.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent commit: 2be7fa10
...@@ -417,7 +417,7 @@ const struct address_space_operations def_blk_aops = { ...@@ -417,7 +417,7 @@ const struct address_space_operations def_blk_aops = {
.write_end = blkdev_write_end, .write_end = blkdev_write_end,
.writepages = blkdev_writepages, .writepages = blkdev_writepages,
.direct_IO = blkdev_direct_IO, .direct_IO = blkdev_direct_IO,
.migratepage = buffer_migrate_page_norefs, .migrate_folio = buffer_migrate_folio_norefs,
.is_dirty_writeback = buffer_check_dirty_writeback, .is_dirty_writeback = buffer_check_dirty_writeback,
}; };
......
...@@ -973,7 +973,7 @@ const struct address_space_operations ext2_aops = { ...@@ -973,7 +973,7 @@ const struct address_space_operations ext2_aops = {
.bmap = ext2_bmap, .bmap = ext2_bmap,
.direct_IO = ext2_direct_IO, .direct_IO = ext2_direct_IO,
.writepages = ext2_writepages, .writepages = ext2_writepages,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
...@@ -989,7 +989,7 @@ const struct address_space_operations ext2_nobh_aops = { ...@@ -989,7 +989,7 @@ const struct address_space_operations ext2_nobh_aops = {
.bmap = ext2_bmap, .bmap = ext2_bmap,
.direct_IO = ext2_direct_IO, .direct_IO = ext2_direct_IO,
.writepages = ext2_writepages, .writepages = ext2_writepages,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
......
...@@ -3633,7 +3633,7 @@ static const struct address_space_operations ext4_aops = { ...@@ -3633,7 +3633,7 @@ static const struct address_space_operations ext4_aops = {
.invalidate_folio = ext4_invalidate_folio, .invalidate_folio = ext4_invalidate_folio,
.release_folio = ext4_release_folio, .release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO, .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
.swap_activate = ext4_iomap_swap_activate, .swap_activate = ext4_iomap_swap_activate,
...@@ -3668,7 +3668,7 @@ static const struct address_space_operations ext4_da_aops = { ...@@ -3668,7 +3668,7 @@ static const struct address_space_operations ext4_da_aops = {
.invalidate_folio = ext4_invalidate_folio, .invalidate_folio = ext4_invalidate_folio,
.release_folio = ext4_release_folio, .release_folio = ext4_release_folio,
.direct_IO = noop_direct_IO, .direct_IO = noop_direct_IO,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
.swap_activate = ext4_iomap_swap_activate, .swap_activate = ext4_iomap_swap_activate,
......
...@@ -1659,7 +1659,7 @@ const struct address_space_operations ntfs_normal_aops = { ...@@ -1659,7 +1659,7 @@ const struct address_space_operations ntfs_normal_aops = {
.dirty_folio = block_dirty_folio, .dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */ #endif /* NTFS_RW */
.bmap = ntfs_bmap, .bmap = ntfs_bmap,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
...@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_compressed_aops = { ...@@ -1673,7 +1673,7 @@ const struct address_space_operations ntfs_compressed_aops = {
.writepage = ntfs_writepage, .writepage = ntfs_writepage,
.dirty_folio = block_dirty_folio, .dirty_folio = block_dirty_folio,
#endif /* NTFS_RW */ #endif /* NTFS_RW */
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
...@@ -1688,7 +1688,7 @@ const struct address_space_operations ntfs_mst_aops = { ...@@ -1688,7 +1688,7 @@ const struct address_space_operations ntfs_mst_aops = {
.writepage = ntfs_writepage, /* Write dirty page to disk. */ .writepage = ntfs_writepage, /* Write dirty page to disk. */
.dirty_folio = filemap_dirty_folio, .dirty_folio = filemap_dirty_folio,
#endif /* NTFS_RW */ #endif /* NTFS_RW */
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
......
...@@ -2462,7 +2462,7 @@ const struct address_space_operations ocfs2_aops = { ...@@ -2462,7 +2462,7 @@ const struct address_space_operations ocfs2_aops = {
.direct_IO = ocfs2_direct_IO, .direct_IO = ocfs2_direct_IO,
.invalidate_folio = block_invalidate_folio, .invalidate_folio = block_invalidate_folio,
.release_folio = ocfs2_release_folio, .release_folio = ocfs2_release_folio,
.migratepage = buffer_migrate_page, .migrate_folio = buffer_migrate_folio,
.is_partially_uptodate = block_is_partially_uptodate, .is_partially_uptodate = block_is_partially_uptodate,
.error_remove_page = generic_error_remove_page, .error_remove_page = generic_error_remove_page,
}; };
...@@ -267,6 +267,16 @@ int nobh_truncate_page(struct address_space *, loff_t, get_block_t *); ...@@ -267,6 +267,16 @@ int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block, int nobh_writepage(struct page *page, get_block_t *get_block,
struct writeback_control *wbc); struct writeback_control *wbc);
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_folio(struct address_space *,
struct folio *dst, struct folio *src, enum migrate_mode);
extern int buffer_migrate_folio_norefs(struct address_space *,
struct folio *dst, struct folio *src, enum migrate_mode);
#else
#define buffer_migrate_folio NULL
#define buffer_migrate_folio_norefs NULL
#endif
void buffer_init(void); void buffer_init(void);
/* /*
......
...@@ -3215,18 +3215,6 @@ extern int generic_check_addressable(unsigned, u64); ...@@ -3215,18 +3215,6 @@ extern int generic_check_addressable(unsigned, u64);
extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry); extern void generic_set_encrypted_ci_d_ops(struct dentry *dentry);
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
struct page *, struct page *,
enum migrate_mode);
extern int buffer_migrate_page_norefs(struct address_space *,
struct page *, struct page *,
enum migrate_mode);
#else
#define buffer_migrate_page NULL
#define buffer_migrate_page_norefs NULL
#endif
int may_setattr(struct user_namespace *mnt_userns, struct inode *inode, int may_setattr(struct user_namespace *mnt_userns, struct inode *inode,
unsigned int ia_valid); unsigned int ia_valid);
int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *); int setattr_prepare(struct user_namespace *, struct dentry *, struct iattr *);
......
...@@ -656,23 +656,23 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head, ...@@ -656,23 +656,23 @@ static bool buffer_migrate_lock_buffers(struct buffer_head *head,
return true; return true;
} }
static int __buffer_migrate_page(struct address_space *mapping, static int __buffer_migrate_folio(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode, struct folio *dst, struct folio *src, enum migrate_mode mode,
bool check_refs) bool check_refs)
{ {
struct buffer_head *bh, *head; struct buffer_head *bh, *head;
int rc; int rc;
int expected_count; int expected_count;
if (!page_has_buffers(page)) head = folio_buffers(src);
return migrate_page(mapping, newpage, page, mode); if (!head)
return migrate_page(mapping, &dst->page, &src->page, mode);
/* Check whether page does not have extra refs before we do more work */ /* Check whether page does not have extra refs before we do more work */
expected_count = expected_page_refs(mapping, page); expected_count = expected_page_refs(mapping, &src->page);
if (page_count(page) != expected_count) if (folio_ref_count(src) != expected_count)
return -EAGAIN; return -EAGAIN;
head = page_buffers(page);
if (!buffer_migrate_lock_buffers(head, mode)) if (!buffer_migrate_lock_buffers(head, mode))
return -EAGAIN; return -EAGAIN;
...@@ -703,23 +703,22 @@ static int __buffer_migrate_page(struct address_space *mapping, ...@@ -703,23 +703,22 @@ static int __buffer_migrate_page(struct address_space *mapping,
} }
} }
rc = migrate_page_move_mapping(mapping, newpage, page, 0); rc = folio_migrate_mapping(mapping, dst, src, 0);
if (rc != MIGRATEPAGE_SUCCESS) if (rc != MIGRATEPAGE_SUCCESS)
goto unlock_buffers; goto unlock_buffers;
attach_page_private(newpage, detach_page_private(page)); folio_attach_private(dst, folio_detach_private(src));
bh = head; bh = head;
do { do {
set_bh_page(bh, newpage, bh_offset(bh)); set_bh_page(bh, &dst->page, bh_offset(bh));
bh = bh->b_this_page; bh = bh->b_this_page;
} while (bh != head); } while (bh != head);
if (mode != MIGRATE_SYNC_NO_COPY) if (mode != MIGRATE_SYNC_NO_COPY)
migrate_page_copy(newpage, page); folio_migrate_copy(dst, src);
else else
migrate_page_states(newpage, page); folio_migrate_flags(dst, src);
rc = MIGRATEPAGE_SUCCESS; rc = MIGRATEPAGE_SUCCESS;
unlock_buffers: unlock_buffers:
...@@ -729,34 +728,51 @@ static int __buffer_migrate_page(struct address_space *mapping, ...@@ -729,34 +728,51 @@ static int __buffer_migrate_page(struct address_space *mapping,
do { do {
unlock_buffer(bh); unlock_buffer(bh);
bh = bh->b_this_page; bh = bh->b_this_page;
} while (bh != head); } while (bh != head);
return rc; return rc;
} }
/* /**
* Migration function for pages with buffers. This function can only be used * buffer_migrate_folio() - Migration function for folios with buffers.
* if the underlying filesystem guarantees that no other references to "page" * @mapping: The address space containing @src.
* exist. For example attached buffer heads are accessed only under page lock. * @dst: The folio to migrate to.
* @src: The folio to migrate from.
* @mode: How to migrate the folio.
*
* This function can only be used if the underlying filesystem guarantees
* that no other references to @src exist. For example attached buffer
* heads are accessed only under the folio lock. If your filesystem cannot
* provide this guarantee, buffer_migrate_folio_norefs() may be more
* appropriate.
*
* Return: 0 on success or a negative errno on failure.
*/ */
int buffer_migrate_page(struct address_space *mapping, int buffer_migrate_folio(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode) struct folio *dst, struct folio *src, enum migrate_mode mode)
{ {
return __buffer_migrate_page(mapping, newpage, page, mode, false); return __buffer_migrate_folio(mapping, dst, src, mode, false);
} }
EXPORT_SYMBOL(buffer_migrate_page); EXPORT_SYMBOL(buffer_migrate_folio);
/* /**
* Same as above except that this variant is more careful and checks that there * buffer_migrate_folio_norefs() - Migration function for folios with buffers.
* are also no buffer head references. This function is the right one for * @mapping: The address space containing @src.
* mappings where buffer heads are directly looked up and referenced (such as * @dst: The folio to migrate to.
* block device mappings). * @src: The folio to migrate from.
* @mode: How to migrate the folio.
*
* Like buffer_migrate_folio() except that this variant is more careful
* and checks that there are also no buffer head references. This function
* is the right one for mappings where buffer heads are directly looked
* up and referenced (such as block device mappings).
*
* Return: 0 on success or a negative errno on failure.
*/ */
int buffer_migrate_page_norefs(struct address_space *mapping, int buffer_migrate_folio_norefs(struct address_space *mapping,
struct page *newpage, struct page *page, enum migrate_mode mode) struct folio *dst, struct folio *src, enum migrate_mode mode)
{ {
return __buffer_migrate_page(mapping, newpage, page, mode, true); return __buffer_migrate_folio(mapping, dst, src, mode, true);
} }
#endif #endif
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment