Commit e8dfc854 authored by Vishal Moola (Oracle), committed by Andrew Morton

ext4: convert mext_page_double_lock() to mext_folio_double_lock()

Convert mext_page_double_lock() to use folios.  This change saves 146
bytes of kernel text.  It also removes 6 calls to compound_head() and 2
calls to folio_file_page().

Link: https://lkml.kernel.org/r/20221207181009.4016-1-vishal.moola@gmail.com
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Theodore Ts'o <tytso@mit.edu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent f4d9139f
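Before the diff, a minimal sketch of the API swap this patch performs, based on the FGP flags visible in the hunks below (grab_cache_page_write_begin() historically wrapped this same flag combination; the surrounding variables here are illustrative, not taken from the patch):

	/*
	 * Old pattern: page-based lookup. Each later page helper
	 * (unlock_page(), put_page(), wait_on_page_writeback()) re-derives
	 * the head page via compound_head(), which is where the removed
	 * calls come from.
	 */
	struct page *page = grab_cache_page_write_begin(mapping, index);
	if (!page)
		return -ENOMEM;

	/*
	 * New pattern: folio-based lookup with the same FGP flags the old
	 * helper used internally; folio_unlock(), folio_put() and
	 * folio_wait_writeback() then operate on the folio directly.
	 */
	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
	struct folio *folio = __filemap_get_folio(mapping, index, fgp_flags,
						  mapping_gfp_mask(mapping));
	if (!folio)
		return -ENOMEM;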
@@ -110,22 +110,23 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
 }
 
 /**
- * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
+ * mext_folio_double_lock - Grab and lock folio on both @inode1 and @inode2
  *
  * @inode1:	the inode structure
  * @inode2:	the inode structure
- * @index1:	page index
- * @index2:	page index
- * @page:	result page vector
+ * @index1:	folio index
+ * @index2:	folio index
+ * @folio:	result folio vector
  *
- * Grab two locked pages for inode's by inode order
+ * Grab two locked folio for inode's by inode order
  */
 static int
-mext_page_double_lock(struct inode *inode1, struct inode *inode2,
-		      pgoff_t index1, pgoff_t index2, struct page *page[2])
+mext_folio_double_lock(struct inode *inode1, struct inode *inode2,
+		       pgoff_t index1, pgoff_t index2, struct folio *folio[2])
 {
 	struct address_space *mapping[2];
 	unsigned int flags;
+	unsigned fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
 
 	BUG_ON(!inode1 || !inode2);
 	if (inode1 < inode2) {
@@ -138,28 +139,30 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
 	}
 
 	flags = memalloc_nofs_save();
-	page[0] = grab_cache_page_write_begin(mapping[0], index1);
-	if (!page[0]) {
+	folio[0] = __filemap_get_folio(mapping[0], index1, fgp_flags,
+			mapping_gfp_mask(mapping[0]));
+	if (!folio[0]) {
 		memalloc_nofs_restore(flags);
 		return -ENOMEM;
 	}
 
-	page[1] = grab_cache_page_write_begin(mapping[1], index2);
+	folio[1] = __filemap_get_folio(mapping[1], index2, fgp_flags,
+			mapping_gfp_mask(mapping[1]));
 	memalloc_nofs_restore(flags);
-	if (!page[1]) {
-		unlock_page(page[0]);
-		put_page(page[0]);
+	if (!folio[1]) {
+		folio_unlock(folio[0]);
+		folio_put(folio[0]);
 		return -ENOMEM;
 	}
 	/*
-	 * grab_cache_page_write_begin() may not wait on page's writeback if
+	 * __filemap_get_folio() may not wait on folio's writeback if
 	 * BDI not demand that. But it is reasonable to be very conservative
-	 * here and explicitly wait on page's writeback
+	 * here and explicitly wait on folio's writeback
 	 */
-	wait_on_page_writeback(page[0]);
-	wait_on_page_writeback(page[1]);
+	folio_wait_writeback(folio[0]);
+	folio_wait_writeback(folio[1]);
 	if (inode1 > inode2)
-		swap(page[0], page[1]);
+		swap(folio[0], folio[1]);
 
 	return 0;
 }
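To show the contract of the converted helper, here is an illustrative caller sketch (not from the patch; the helper is static, so this only makes sense within the same file). The unlock/put pairing mirrors the helper's own error path above:

	/*
	 * Illustrative caller: lock the same index in two inodes, do work,
	 * then release both folios. On success, folio[0] always belongs to
	 * the first inode argument regardless of lock acquisition order.
	 */
	static int example_double_lock_user(struct inode *a, struct inode *b,
					    pgoff_t index)
	{
		struct folio *folio[2];
		int err;

		err = mext_folio_double_lock(a, b, index, index, folio);
		if (err < 0)
			return err;

		/* ... operate on folio[0] (inode a) and folio[1] (inode b) ... */

		folio_unlock(folio[0]);
		folio_put(folio[0]);
		folio_unlock(folio[1]);
		folio_put(folio[1]);
		return 0;
	}

Two design points visible in the hunk: the helper acquires the locks in inode-address order and swaps the result array back so folio[0] always matches inode1 (standard ABBA-deadlock avoidance), and memalloc_nofs_save()/memalloc_nofs_restore() scope the page-cache allocations so memory reclaim cannot re-enter the filesystem while locks are held.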
@@ -252,7 +255,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 			int block_len_in_page, int unwritten, int *err)
 {
 	struct inode *orig_inode = file_inode(o_filp);
-	struct page *pagep[2] = {NULL, NULL};
 	struct folio *folio[2] = {NULL, NULL};
 	handle_t *handle;
 	ext4_lblk_t orig_blk_offset, donor_blk_offset;
@@ -303,8 +305,8 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	replaced_size = data_size;
 
-	*err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
-				     donor_page_offset, pagep);
+	*err = mext_folio_double_lock(orig_inode, donor_inode, orig_page_offset,
+				      donor_page_offset, folio);
 	if (unlikely(*err < 0))
 		goto stop_journal;
 	/*
@@ -314,8 +316,6 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
 	 * hold page's lock, if it is still the case data copy is not
 	 * necessary, just swap data blocks between orig and donor.
 	 */
-	folio[0] = page_folio(pagep[0]);
-	folio[1] = page_folio(pagep[1]);
-
 	VM_BUG_ON_FOLIO(folio_test_large(folio[0]), folio[0]);
 	VM_BUG_ON_FOLIO(folio_test_large(folio[1]), folio[1]);
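For comparison, the caller-side change in the last two hunks, sketched from the removed lines (variable names as in the diff; the old flow is reconstructed for illustration, not verbatim source):

	/* Old shape: lock pages, then convert each to its folio by hand. */
	struct page *pagep[2] = {NULL, NULL};
	struct folio *folio[2] = {NULL, NULL};

	*err = mext_page_double_lock(orig_inode, donor_inode,
				     orig_page_offset, donor_page_offset, pagep);
	if (*err >= 0) {
		folio[0] = page_folio(pagep[0]); /* compound_head() under the hood */
		folio[1] = page_folio(pagep[1]);
	}

	/*
	 * New shape: the helper fills the folio array directly, so the
	 * intermediate page array and both page_folio() calls disappear.
	 */
	*err = mext_folio_double_lock(orig_inode, donor_inode,
				      orig_page_offset, donor_page_offset, folio);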