Commit 44f68575 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

reiserfs: convert writepage to use a folio

Convert the incoming page to a folio and then use it throughout the
writeback path.  This definitely isn't enough to support large folios, but
I don't expect reiserfs to gain support for those before it is removed.

Link: https://lkml.kernel.org/r/20231016201114.1928083-23-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Andreas Gruenbacher <agruenba@redhat.com>
Cc: Pankaj Raghav <p.raghav@samsung.com>
Cc: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 414ae0a4
...@@ -2507,10 +2507,10 @@ static int map_block_for_writepage(struct inode *inode, ...@@ -2507,10 +2507,10 @@ static int map_block_for_writepage(struct inode *inode,
* start/recovery path as __block_write_full_folio, along with special * start/recovery path as __block_write_full_folio, along with special
* code to handle reiserfs tails. * code to handle reiserfs tails.
*/ */
static int reiserfs_write_full_page(struct page *page, static int reiserfs_write_full_folio(struct folio *folio,
struct writeback_control *wbc) struct writeback_control *wbc)
{ {
struct inode *inode = page->mapping->host; struct inode *inode = folio->mapping->host;
unsigned long end_index = inode->i_size >> PAGE_SHIFT; unsigned long end_index = inode->i_size >> PAGE_SHIFT;
int error = 0; int error = 0;
unsigned long block; unsigned long block;
...@@ -2518,7 +2518,7 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2518,7 +2518,7 @@ static int reiserfs_write_full_page(struct page *page,
struct buffer_head *head, *bh; struct buffer_head *head, *bh;
int partial = 0; int partial = 0;
int nr = 0; int nr = 0;
int checked = PageChecked(page); int checked = folio_test_checked(folio);
struct reiserfs_transaction_handle th; struct reiserfs_transaction_handle th;
struct super_block *s = inode->i_sb; struct super_block *s = inode->i_sb;
int bh_per_page = PAGE_SIZE / s->s_blocksize; int bh_per_page = PAGE_SIZE / s->s_blocksize;
...@@ -2526,47 +2526,46 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2526,47 +2526,46 @@ static int reiserfs_write_full_page(struct page *page,
/* no logging allowed when nonblocking or from PF_MEMALLOC */ /* no logging allowed when nonblocking or from PF_MEMALLOC */
if (checked && (current->flags & PF_MEMALLOC)) { if (checked && (current->flags & PF_MEMALLOC)) {
redirty_page_for_writepage(wbc, page); folio_redirty_for_writepage(wbc, folio);
unlock_page(page); folio_unlock(folio);
return 0; return 0;
} }
/* /*
* The page dirty bit is cleared before writepage is called, which * The folio dirty bit is cleared before writepage is called, which
* means we have to tell create_empty_buffers to make dirty buffers * means we have to tell create_empty_buffers to make dirty buffers
* The page really should be up to date at this point, so tossing * The folio really should be up to date at this point, so tossing
* in the BH_Uptodate is just a sanity check. * in the BH_Uptodate is just a sanity check.
*/ */
if (!page_has_buffers(page)) { head = folio_buffers(folio);
create_empty_buffers(page, s->s_blocksize, if (!head)
head = folio_create_empty_buffers(folio, s->s_blocksize,
(1 << BH_Dirty) | (1 << BH_Uptodate)); (1 << BH_Dirty) | (1 << BH_Uptodate));
}
head = page_buffers(page);
/* /*
* last page in the file, zero out any contents past the * last folio in the file, zero out any contents past the
* last byte in the file * last byte in the file
*/ */
if (page->index >= end_index) { if (folio->index >= end_index) {
unsigned last_offset; unsigned last_offset;
last_offset = inode->i_size & (PAGE_SIZE - 1); last_offset = inode->i_size & (PAGE_SIZE - 1);
/* no file contents in this page */ /* no file contents in this folio */
if (page->index >= end_index + 1 || !last_offset) { if (folio->index >= end_index + 1 || !last_offset) {
unlock_page(page); folio_unlock(folio);
return 0; return 0;
} }
zero_user_segment(page, last_offset, PAGE_SIZE); folio_zero_segment(folio, last_offset, folio_size(folio));
} }
bh = head; bh = head;
block = page->index << (PAGE_SHIFT - s->s_blocksize_bits); block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits);
last_block = (i_size_read(inode) - 1) >> inode->i_blkbits; last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
/* first map all the buffers, logging any direct items we find */ /* first map all the buffers, logging any direct items we find */
do { do {
if (block > last_block) { if (block > last_block) {
/* /*
* This can happen when the block size is less than * This can happen when the block size is less than
* the page size. The corresponding bytes in the page * the folio size. The corresponding bytes in the folio
* were zero filled above * were zero filled above
*/ */
clear_buffer_dirty(bh); clear_buffer_dirty(bh);
...@@ -2593,7 +2592,7 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2593,7 +2592,7 @@ static int reiserfs_write_full_page(struct page *page,
* blocks we're going to log * blocks we're going to log
*/ */
if (checked) { if (checked) {
ClearPageChecked(page); folio_clear_checked(folio);
reiserfs_write_lock(s); reiserfs_write_lock(s);
error = journal_begin(&th, s, bh_per_page + 1); error = journal_begin(&th, s, bh_per_page + 1);
if (error) { if (error) {
...@@ -2602,7 +2601,7 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2602,7 +2601,7 @@ static int reiserfs_write_full_page(struct page *page,
} }
reiserfs_update_inode_transaction(inode); reiserfs_update_inode_transaction(inode);
} }
/* now go through and lock any dirty buffers on the page */ /* now go through and lock any dirty buffers on the folio */
do { do {
get_bh(bh); get_bh(bh);
if (!buffer_mapped(bh)) if (!buffer_mapped(bh))
...@@ -2623,7 +2622,7 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2623,7 +2622,7 @@ static int reiserfs_write_full_page(struct page *page,
lock_buffer(bh); lock_buffer(bh);
} else { } else {
if (!trylock_buffer(bh)) { if (!trylock_buffer(bh)) {
redirty_page_for_writepage(wbc, page); folio_redirty_for_writepage(wbc, folio);
continue; continue;
} }
} }
...@@ -2640,13 +2639,13 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2640,13 +2639,13 @@ static int reiserfs_write_full_page(struct page *page,
if (error) if (error)
goto fail; goto fail;
} }
BUG_ON(PageWriteback(page)); BUG_ON(folio_test_writeback(folio));
set_page_writeback(page); folio_start_writeback(folio);
unlock_page(page); folio_unlock(folio);
/* /*
* since any buffer might be the only dirty buffer on the page, * since any buffer might be the only dirty buffer on the folio,
* the first submit_bh can bring the page out of writeback. * the first submit_bh can bring the folio out of writeback.
* be careful with the buffers. * be careful with the buffers.
*/ */
do { do {
...@@ -2663,10 +2662,10 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2663,10 +2662,10 @@ static int reiserfs_write_full_page(struct page *page,
done: done:
if (nr == 0) { if (nr == 0) {
/* /*
* if this page only had a direct item, it is very possible for * if this folio only had a direct item, it is very possible for
* no io to be required without there being an error. Or, * no io to be required without there being an error. Or,
* someone else could have locked them and sent them down the * someone else could have locked them and sent them down the
* pipe without locking the page * pipe without locking the folio
*/ */
bh = head; bh = head;
do { do {
...@@ -2677,18 +2676,18 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2677,18 +2676,18 @@ static int reiserfs_write_full_page(struct page *page,
bh = bh->b_this_page; bh = bh->b_this_page;
} while (bh != head); } while (bh != head);
if (!partial) if (!partial)
SetPageUptodate(page); folio_mark_uptodate(folio);
end_page_writeback(page); folio_end_writeback(folio);
} }
return error; return error;
fail: fail:
/* /*
* catches various errors, we need to make sure any valid dirty blocks * catches various errors, we need to make sure any valid dirty blocks
* get to the media. The page is currently locked and not marked for * get to the media. The folio is currently locked and not marked for
* writeback * writeback
*/ */
ClearPageUptodate(page); folio_clear_uptodate(folio);
bh = head; bh = head;
do { do {
get_bh(bh); get_bh(bh);
...@@ -2698,16 +2697,16 @@ static int reiserfs_write_full_page(struct page *page, ...@@ -2698,16 +2697,16 @@ static int reiserfs_write_full_page(struct page *page,
} else { } else {
/* /*
* clear any dirty bits that might have come from * clear any dirty bits that might have come from
* getting attached to a dirty page * getting attached to a dirty folio
*/ */
clear_buffer_dirty(bh); clear_buffer_dirty(bh);
} }
bh = bh->b_this_page; bh = bh->b_this_page;
} while (bh != head); } while (bh != head);
SetPageError(page); folio_set_error(folio);
BUG_ON(PageWriteback(page)); BUG_ON(folio_test_writeback(folio));
set_page_writeback(page); folio_start_writeback(folio);
unlock_page(page); folio_unlock(folio);
do { do {
struct buffer_head *next = bh->b_this_page; struct buffer_head *next = bh->b_this_page;
if (buffer_async_write(bh)) { if (buffer_async_write(bh)) {
...@@ -2728,9 +2727,10 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio) ...@@ -2728,9 +2727,10 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio)
static int reiserfs_writepage(struct page *page, struct writeback_control *wbc) static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
{ {
struct inode *inode = page->mapping->host; struct folio *folio = page_folio(page);
struct inode *inode = folio->mapping->host;
reiserfs_wait_on_write_block(inode->i_sb); reiserfs_wait_on_write_block(inode->i_sb);
return reiserfs_write_full_page(page, wbc); return reiserfs_write_full_folio(folio, wbc);
} }
static void reiserfs_truncate_failed_write(struct inode *inode) static void reiserfs_truncate_failed_write(struct inode *inode)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment