Commit 9160cffd authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mpage: convert __mpage_writepage() to use a folio more fully

This is just a conversion to the folio API.  While there are some nods
towards supporting multi-page folios in here, the blocks array is still
sized for one page's worth of blocks, and there are other assumptions such
as the blocks_per_page variable.
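
Concretely, the single-page assumption shows up in the loop bound and the
on-stack blocks[] array; a fully folio-aware version would derive the count
from the folio instead.  A minimal sketch (blocks_per_folio() is a
hypothetical helper, not part of this patch; blkbits is inode->i_blkbits as
in the function below):

	/*
	 * Hypothetical helper, not in the patch: the block count a fully
	 * folio-aware __mpage_writepage() would iterate over.  Today the
	 * function still uses blocks_per_page (PAGE_SIZE >> blkbits) and
	 * sizes its on-stack blocks[] array for one page's worth.
	 */
	static inline unsigned int blocks_per_folio(struct folio *folio,
						    unsigned int blkbits)
	{
		return folio_size(folio) >> blkbits;
	}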

[willy@infradead.org: fix accidentally-triggering WARN_ON_ONCE]
  Link: https://lkml.kernel.org/r/Y9kuaBgXf9lKJ8b0@casper.infradead.org
Link: https://lkml.kernel.org/r/20230126201255.1681189-3-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Jan Kara <jack@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d585bdbe
fs/mpage.c
@@ -443,13 +443,11 @@ void clean_page_buffers(struct page *page)
 static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 		      void *data)
 {
-	struct page *page = &folio->page;
 	struct mpage_data *mpd = data;
 	struct bio *bio = mpd->bio;
-	struct address_space *mapping = page->mapping;
-	struct inode *inode = page->mapping->host;
+	struct address_space *mapping = folio->mapping;
+	struct inode *inode = mapping->host;
 	const unsigned blkbits = inode->i_blkbits;
-	unsigned long end_index;
 	const unsigned blocks_per_page = PAGE_SIZE >> blkbits;
 	sector_t last_block;
 	sector_t block_in_file;
@@ -460,13 +458,13 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	int boundary = 0;
 	sector_t boundary_block = 0;
 	struct block_device *boundary_bdev = NULL;
-	int length;
+	size_t length;
 	struct buffer_head map_bh;
 	loff_t i_size = i_size_read(inode);
 	int ret = 0;
+	struct buffer_head *head = folio_buffers(folio);
 
-	if (page_has_buffers(page)) {
-		struct buffer_head *head = page_buffers(page);
+	if (head) {
 		struct buffer_head *bh = head;
 
 		/* If they're all mapped and dirty, do it */
@@ -518,8 +516,8 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	/*
 	 * The page has no buffers: map it to disk
 	 */
-	BUG_ON(!PageUptodate(page));
-	block_in_file = (sector_t)page->index << (PAGE_SHIFT - blkbits);
+	BUG_ON(!folio_test_uptodate(folio));
+	block_in_file = (sector_t)folio->index << (PAGE_SHIFT - blkbits);
 	/*
 	 * Whole page beyond EOF? Skip allocating blocks to avoid leaking
 	 * space.
@@ -527,7 +525,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	if (block_in_file >= (i_size + (1 << blkbits) - 1) >> blkbits)
 		goto page_is_mapped;
 	last_block = (i_size - 1) >> blkbits;
-	map_bh.b_page = page;
+	map_bh.b_folio = folio;
 	for (page_block = 0; page_block < blocks_per_page; ) {
 
 		map_bh.b_state = 0;
@@ -556,8 +554,11 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	first_unmapped = page_block;
 
 page_is_mapped:
-	end_index = i_size >> PAGE_SHIFT;
-	if (page->index >= end_index) {
+	/* Don't bother writing beyond EOF, truncate will discard the folio */
+	if (folio_pos(folio) >= i_size)
+		goto confused;
+	length = folio_size(folio);
+	if (folio_pos(folio) + length > i_size) {
 		/*
 		 * The page straddles i_size. It must be zeroed out on each
 		 * and every writepage invocation because it may be mmapped.
@@ -566,11 +567,8 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 		 * is zeroed when mapped, and writes to that region are not
 		 * written out to the file."
 		 */
-		unsigned offset = i_size & (PAGE_SIZE - 1);
-
-		if (page->index > end_index || !offset)
-			goto confused;
-		zero_user_segment(page, offset, PAGE_SIZE);
+		length = i_size - folio_pos(folio);
+		folio_zero_segment(folio, length, folio_size(folio));
 	}
 
 	/*
@@ -593,18 +591,18 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	 * the confused fail path above (OOM) will be very confused when
 	 * it finds all bh marked clean (i.e. it will not write anything)
 	 */
-	wbc_account_cgroup_owner(wbc, page, PAGE_SIZE);
+	wbc_account_cgroup_owner(wbc, &folio->page, folio_size(folio));
 	length = first_unmapped << blkbits;
-	if (bio_add_page(bio, page, length, 0) < length) {
+	if (!bio_add_folio(bio, folio, length, 0)) {
 		bio = mpage_bio_submit(bio);
 		goto alloc_new;
 	}
 
-	clean_buffers(page, first_unmapped);
+	clean_buffers(&folio->page, first_unmapped);
 
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 	if (boundary || (first_unmapped != blocks_per_page)) {
 		bio = mpage_bio_submit(bio);
 		if (boundary_block) {
@@ -623,7 +621,7 @@ static int __mpage_writepage(struct folio *folio, struct writeback_control *wbc,
 	/*
 	 * The caller has a ref on the inode, so *mapping is stable
 	 */
-	ret = block_write_full_page(page, mpd->get_block, wbc);
+	ret = block_write_full_page(&folio->page, mpd->get_block, wbc);
 	mapping_set_error(mapping, ret);
 out:
 	mpd->bio = bio;
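
Two details in the hunks above are easy to miss.  The new EOF handling works
in byte offsets: folio_pos() is the folio's byte offset in the file, so for a
4KiB folio at index 3 (folio_pos() == 12288) with i_size == 13000, length
becomes 712 and folio_zero_segment(folio, 712, 4096) zeroes the tail beyond
EOF.  The bio attachment check also changes convention: bio_add_page()
returns the number of bytes added (hence the old "< length" test), while
bio_add_folio() returns a bool.  In isolation (a sketch lifted from the hunk
above, comment added):

	length = first_unmapped << blkbits;
	if (!bio_add_folio(bio, folio, length, 0)) {
		/* bio was full: submit it and allocate a new one */
		bio = mpage_bio_submit(bio);
		goto alloc_new;
	}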