Commit 9845e5dd authored by Christoph Hellwig's avatar Christoph Hellwig Committed by David Sterba

btrfs: merge end_write_bio and flush_write_bio

Merge end_write_bio and flush_write_bio into a single submit_write_bio
helper, that either submits the bio or ends it if a negative errno was
passed in.  This consolidates a lot of duplicated checks in the callers.
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2d5ac130
...@@ -201,39 +201,26 @@ static void submit_one_bio(struct bio *bio, int mirror_num, ...@@ -201,39 +201,26 @@ static void submit_one_bio(struct bio *bio, int mirror_num,
*/ */
} }
/* Cleanup unsubmitted bios */
static void end_write_bio(struct extent_page_data *epd, int ret)
{
struct bio *bio = epd->bio_ctrl.bio;
if (bio) {
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
epd->bio_ctrl.bio = NULL;
}
}
/* /*
* Submit bio from extent page data via submit_one_bio * Submit or fail the current bio in an extent_page_data structure.
*
* Return 0 if everything is OK.
* Return <0 for error.
*/ */
static void flush_write_bio(struct extent_page_data *epd) static void submit_write_bio(struct extent_page_data *epd, int ret)
{ {
struct bio *bio = epd->bio_ctrl.bio; struct bio *bio = epd->bio_ctrl.bio;
if (bio) { if (!bio)
return;
if (ret) {
ASSERT(ret < 0);
bio->bi_status = errno_to_blk_status(ret);
bio_endio(bio);
} else {
submit_one_bio(bio, 0, 0); submit_one_bio(bio, 0, 0);
/*
* Clean up of epd->bio is handled by its endio function.
* And endio is either triggered by successful bio execution
* or the error handler of submit bio hook.
* So at this point, no matter what happened, we don't need
* to clean up epd->bio.
*/
epd->bio_ctrl.bio = NULL;
} }
/* The bio is owned by the bi_end_io handler now */
epd->bio_ctrl.bio = NULL;
} }
int __init extent_state_cache_init(void) int __init extent_state_cache_init(void)
...@@ -4251,7 +4238,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb ...@@ -4251,7 +4238,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
int ret = 0; int ret = 0;
if (!btrfs_try_tree_write_lock(eb)) { if (!btrfs_try_tree_write_lock(eb)) {
flush_write_bio(epd); submit_write_bio(epd, 0);
flush = 1; flush = 1;
btrfs_tree_lock(eb); btrfs_tree_lock(eb);
} }
...@@ -4261,7 +4248,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb ...@@ -4261,7 +4248,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
if (!epd->sync_io) if (!epd->sync_io)
return 0; return 0;
if (!flush) { if (!flush) {
flush_write_bio(epd); submit_write_bio(epd, 0);
flush = 1; flush = 1;
} }
while (1) { while (1) {
...@@ -4308,7 +4295,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb ...@@ -4308,7 +4295,7 @@ static noinline_for_stack int lock_extent_buffer_for_io(struct extent_buffer *eb
if (!trylock_page(p)) { if (!trylock_page(p)) {
if (!flush) { if (!flush) {
flush_write_bio(epd); submit_write_bio(epd, 0);
flush = 1; flush = 1;
} }
lock_page(p); lock_page(p);
...@@ -4724,7 +4711,7 @@ static int submit_eb_subpage(struct page *page, ...@@ -4724,7 +4711,7 @@ static int submit_eb_subpage(struct page *page,
cleanup: cleanup:
/* We hit error, end bio for the submitted extent buffers */ /* We hit error, end bio for the submitted extent buffers */
end_write_bio(epd, ret); submit_write_bio(epd, ret);
return ret; return ret;
} }
...@@ -4903,10 +4890,6 @@ int btree_write_cache_pages(struct address_space *mapping, ...@@ -4903,10 +4890,6 @@ int btree_write_cache_pages(struct address_space *mapping,
index = 0; index = 0;
goto retry; goto retry;
} }
if (ret < 0) {
end_write_bio(&epd, ret);
goto out;
}
/* /*
* If something went wrong, don't allow any metadata write bio to be * If something went wrong, don't allow any metadata write bio to be
* submitted. * submitted.
...@@ -4933,21 +4916,17 @@ int btree_write_cache_pages(struct address_space *mapping, ...@@ -4933,21 +4916,17 @@ int btree_write_cache_pages(struct address_space *mapping,
* Now such dirty tree block will not be cleaned by any dirty * Now such dirty tree block will not be cleaned by any dirty
* extent io tree. Thus we don't want to submit such wild eb * extent io tree. Thus we don't want to submit such wild eb
* if the fs already has error. * if the fs already has error.
*/ *
if (!BTRFS_FS_ERROR(fs_info)) {
flush_write_bio(&epd);
} else {
ret = -EROFS;
end_write_bio(&epd, ret);
}
out:
btrfs_zoned_meta_io_unlock(fs_info);
/*
* We can get ret > 0 from submit_extent_page() indicating how many ebs * We can get ret > 0 from submit_extent_page() indicating how many ebs
* were submitted. Reset it to 0 to avoid false alerts for the caller. * were submitted. Reset it to 0 to avoid false alerts for the caller.
*/ */
if (ret > 0) if (ret > 0)
ret = 0; ret = 0;
if (!ret && BTRFS_FS_ERROR(fs_info))
ret = -EROFS;
submit_write_bio(&epd, ret);
btrfs_zoned_meta_io_unlock(fs_info);
return ret; return ret;
} }
...@@ -5049,7 +5028,7 @@ static int extent_write_cache_pages(struct address_space *mapping, ...@@ -5049,7 +5028,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
* tmpfs file mapping * tmpfs file mapping
*/ */
if (!trylock_page(page)) { if (!trylock_page(page)) {
flush_write_bio(epd); submit_write_bio(epd, 0);
lock_page(page); lock_page(page);
} }
...@@ -5060,7 +5039,7 @@ static int extent_write_cache_pages(struct address_space *mapping, ...@@ -5060,7 +5039,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
if (wbc->sync_mode != WB_SYNC_NONE) { if (wbc->sync_mode != WB_SYNC_NONE) {
if (PageWriteback(page)) if (PageWriteback(page))
flush_write_bio(epd); submit_write_bio(epd, 0);
wait_on_page_writeback(page); wait_on_page_writeback(page);
} }
...@@ -5100,7 +5079,7 @@ static int extent_write_cache_pages(struct address_space *mapping, ...@@ -5100,7 +5079,7 @@ static int extent_write_cache_pages(struct address_space *mapping,
* page in our current bio, and thus deadlock, so flush the * page in our current bio, and thus deadlock, so flush the
* write bio here. * write bio here.
*/ */
flush_write_bio(epd); submit_write_bio(epd, 0);
goto retry; goto retry;
} }
...@@ -5121,13 +5100,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc) ...@@ -5121,13 +5100,7 @@ int extent_write_full_page(struct page *page, struct writeback_control *wbc)
}; };
ret = __extent_writepage(page, wbc, &epd); ret = __extent_writepage(page, wbc, &epd);
ASSERT(ret <= 0); submit_write_bio(&epd, ret);
if (ret < 0) {
end_write_bio(&epd, ret);
return ret;
}
flush_write_bio(&epd);
return ret; return ret;
} }
...@@ -5188,10 +5161,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end) ...@@ -5188,10 +5161,7 @@ int extent_write_locked_range(struct inode *inode, u64 start, u64 end)
cur = cur_end + 1; cur = cur_end + 1;
} }
if (!found_error) submit_write_bio(&epd, found_error ? ret : 0);
flush_write_bio(&epd);
else
end_write_bio(&epd, ret);
wbc_detach_inode(&wbc_writepages); wbc_detach_inode(&wbc_writepages);
if (found_error) if (found_error)
...@@ -5216,13 +5186,7 @@ int extent_writepages(struct address_space *mapping, ...@@ -5216,13 +5186,7 @@ int extent_writepages(struct address_space *mapping,
*/ */
btrfs_zoned_data_reloc_lock(BTRFS_I(inode)); btrfs_zoned_data_reloc_lock(BTRFS_I(inode));
ret = extent_write_cache_pages(mapping, wbc, &epd); ret = extent_write_cache_pages(mapping, wbc, &epd);
ASSERT(ret <= 0); submit_write_bio(&epd, ret);
if (ret < 0) {
btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
end_write_bio(&epd, ret);
return ret;
}
flush_write_bio(&epd);
btrfs_zoned_data_reloc_unlock(BTRFS_I(inode)); btrfs_zoned_data_reloc_unlock(BTRFS_I(inode));
return ret; return ret;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment