Commit 917f32a2 authored by Christoph Hellwig's avatar Christoph Hellwig Committed by David Sterba

btrfs: give struct btrfs_bio a real end_io handler

Currently btrfs_bio end I/O handling is a bit of a mess.  The bi_end_io
handler and bi_private pointer of the embedded struct bio are both used
to handle the completion of the high-level btrfs_bio and for the I/O
completion for the low-level device that the embedded bio ends up being
sent to.

To support this, bi_end_io and bi_private are saved into the
btrfs_io_context structure and then restored after the bio sent to the
underlying device has completed the actual I/O.

Untangle this by adding an end I/O handler and private data to struct
btrfs_bio for the high-level btrfs_bio based completions, and leave the
actual bio bi_end_io handler and bi_private pointer entirely to the
low-level device I/O.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Reviewed-by: Anand Jain <anand.jain@oracle.com>
Tested-by: Nikolay Borisov <nborisov@suse.com>
Tested-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent f1c29379
...@@ -152,9 +152,7 @@ static void finish_compressed_bio_read(struct compressed_bio *cb) ...@@ -152,9 +152,7 @@ static void finish_compressed_bio_read(struct compressed_bio *cb)
} }
/* Do io completion on the original bio */ /* Do io completion on the original bio */
if (cb->status != BLK_STS_OK) btrfs_bio_end_io(btrfs_bio(cb->orig_bio), cb->status);
cb->orig_bio->bi_status = cb->status;
bio_endio(cb->orig_bio);
/* Finally free the cb struct */ /* Finally free the cb struct */
kfree(cb->compressed_pages); kfree(cb->compressed_pages);
...@@ -166,16 +164,15 @@ static void finish_compressed_bio_read(struct compressed_bio *cb) ...@@ -166,16 +164,15 @@ static void finish_compressed_bio_read(struct compressed_bio *cb)
* before decompressing it into the original bio and freeing the uncompressed * before decompressing it into the original bio and freeing the uncompressed
* pages. * pages.
*/ */
static void end_compressed_bio_read(struct bio *bio) static void end_compressed_bio_read(struct btrfs_bio *bbio)
{ {
struct compressed_bio *cb = bio->bi_private; struct compressed_bio *cb = bbio->private;
struct inode *inode = cb->inode; struct inode *inode = cb->inode;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_inode *bi = BTRFS_I(inode); struct btrfs_inode *bi = BTRFS_I(inode);
bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) && bool csum = !(bi->flags & BTRFS_INODE_NODATASUM) &&
!test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state); !test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state);
blk_status_t status = bio->bi_status; blk_status_t status = bbio->bio.bi_status;
struct btrfs_bio *bbio = btrfs_bio(bio);
struct bvec_iter iter; struct bvec_iter iter;
struct bio_vec bv; struct bio_vec bv;
u32 offset; u32 offset;
...@@ -209,7 +206,7 @@ static void end_compressed_bio_read(struct bio *bio) ...@@ -209,7 +206,7 @@ static void end_compressed_bio_read(struct bio *bio)
if (refcount_dec_and_test(&cb->pending_ios)) if (refcount_dec_and_test(&cb->pending_ios))
finish_compressed_bio_read(cb); finish_compressed_bio_read(cb);
btrfs_bio_free_csum(bbio); btrfs_bio_free_csum(bbio);
bio_put(bio); bio_put(&bbio->bio);
} }
/* /*
...@@ -301,20 +298,20 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work) ...@@ -301,20 +298,20 @@ static void btrfs_finish_compressed_write_work(struct work_struct *work)
* This also calls the writeback end hooks for the file pages so that metadata * This also calls the writeback end hooks for the file pages so that metadata
* and checksums can be updated in the file. * and checksums can be updated in the file.
*/ */
static void end_compressed_bio_write(struct bio *bio) static void end_compressed_bio_write(struct btrfs_bio *bbio)
{ {
struct compressed_bio *cb = bio->bi_private; struct compressed_bio *cb = bbio->private;
if (bio->bi_status) if (bbio->bio.bi_status)
cb->status = bio->bi_status; cb->status = bbio->bio.bi_status;
if (refcount_dec_and_test(&cb->pending_ios)) { if (refcount_dec_and_test(&cb->pending_ios)) {
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
btrfs_record_physical_zoned(cb->inode, cb->start, bio); btrfs_record_physical_zoned(cb->inode, cb->start, &bbio->bio);
queue_work(fs_info->compressed_write_workers, &cb->write_end_work); queue_work(fs_info->compressed_write_workers, &cb->write_end_work);
} }
bio_put(bio); bio_put(&bbio->bio);
} }
/* /*
...@@ -335,7 +332,8 @@ static void end_compressed_bio_write(struct bio *bio) ...@@ -335,7 +332,8 @@ static void end_compressed_bio_write(struct bio *bio)
static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr, static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_bytenr,
blk_opf_t opf, bio_end_io_t endio_func, blk_opf_t opf,
btrfs_bio_end_io_t endio_func,
u64 *next_stripe_start) u64 *next_stripe_start)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(cb->inode->i_sb);
...@@ -344,10 +342,8 @@ static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_byte ...@@ -344,10 +342,8 @@ static struct bio *alloc_compressed_bio(struct compressed_bio *cb, u64 disk_byte
struct bio *bio; struct bio *bio;
int ret; int ret;
bio = btrfs_bio_alloc(BIO_MAX_VECS, opf); bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, endio_func, cb);
bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT; bio->bi_iter.bi_sector = disk_bytenr >> SECTOR_SHIFT;
bio->bi_private = cb;
bio->bi_end_io = endio_func;
em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize); em = btrfs_get_chunk_map(fs_info, disk_bytenr, fs_info->sectorsize);
if (IS_ERR(em)) { if (IS_ERR(em)) {
...@@ -476,8 +472,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start, ...@@ -476,8 +472,7 @@ blk_status_t btrfs_submit_compressed_write(struct btrfs_inode *inode, u64 start,
if (!skip_sum) { if (!skip_sum) {
ret = btrfs_csum_one_bio(inode, bio, start, true); ret = btrfs_csum_one_bio(inode, bio, start, true);
if (ret) { if (ret) {
bio->bi_status = ret; btrfs_bio_end_io(btrfs_bio(bio), ret);
bio_endio(bio);
break; break;
} }
} }
...@@ -797,8 +792,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -797,8 +792,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL); ret = btrfs_lookup_bio_sums(inode, comp_bio, NULL);
if (ret) { if (ret) {
comp_bio->bi_status = ret; btrfs_bio_end_io(btrfs_bio(comp_bio), ret);
bio_endio(comp_bio);
break; break;
} }
...@@ -824,8 +818,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, ...@@ -824,8 +818,7 @@ void btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
kfree(cb); kfree(cb);
out: out:
free_extent_map(em); free_extent_map(em);
bio->bi_status = ret; btrfs_bio_end_io(btrfs_bio(bio), ret);
bio_endio(bio);
return; return;
} }
......
...@@ -647,16 +647,14 @@ static void run_one_async_start(struct btrfs_work *work) ...@@ -647,16 +647,14 @@ static void run_one_async_start(struct btrfs_work *work)
*/ */
static void run_one_async_done(struct btrfs_work *work) static void run_one_async_done(struct btrfs_work *work)
{ {
struct async_submit_bio *async; struct async_submit_bio *async =
struct inode *inode; container_of(work, struct async_submit_bio, work);
struct inode *inode = async->inode;
async = container_of(work, struct async_submit_bio, work); struct btrfs_bio *bbio = btrfs_bio(async->bio);
inode = async->inode;
/* If an error occurred we just want to clean up the bio and move on */ /* If an error occurred we just want to clean up the bio and move on */
if (async->status) { if (async->status) {
async->bio->bi_status = async->status; btrfs_bio_end_io(bbio, async->status);
bio_endio(async->bio);
return; return;
} }
...@@ -757,6 +755,7 @@ static bool should_async_write(struct btrfs_fs_info *fs_info, ...@@ -757,6 +755,7 @@ static bool should_async_write(struct btrfs_fs_info *fs_info,
void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num) void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_num)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t ret; blk_status_t ret;
bio->bi_opf |= REQ_META; bio->bi_opf |= REQ_META;
...@@ -776,8 +775,7 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_ ...@@ -776,8 +775,7 @@ void btrfs_submit_metadata_bio(struct inode *inode, struct bio *bio, int mirror_
ret = btree_csum_one_bio(bio); ret = btree_csum_one_bio(bio);
if (ret) { if (ret) {
bio->bi_status = ret; btrfs_bio_end_io(bbio, ret);
bio_endio(bio);
return; return;
} }
......
...@@ -206,7 +206,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl) ...@@ -206,7 +206,7 @@ static void submit_one_bio(struct btrfs_bio_ctrl *bio_ctrl)
btrfs_submit_data_read_bio(inode, bio, mirror_num, btrfs_submit_data_read_bio(inode, bio, mirror_num,
bio_ctrl->compress_type); bio_ctrl->compress_type);
/* The bio is owned by the bi_end_io handler now */ /* The bio is owned by the end_io handler now */
bio_ctrl->bio = NULL; bio_ctrl->bio = NULL;
} }
...@@ -222,9 +222,8 @@ static void submit_write_bio(struct extent_page_data *epd, int ret) ...@@ -222,9 +222,8 @@ static void submit_write_bio(struct extent_page_data *epd, int ret)
if (ret) { if (ret) {
ASSERT(ret < 0); ASSERT(ret < 0);
bio->bi_status = errno_to_blk_status(ret); btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
bio_endio(bio); /* The bio is owned by the end_io handler now */
/* The bio is owned by the bi_end_io handler now */
epd->bio_ctrl.bio = NULL; epd->bio_ctrl.bio = NULL;
} else { } else {
submit_one_bio(&epd->bio_ctrl); submit_one_bio(&epd->bio_ctrl);
...@@ -2626,12 +2625,11 @@ int btrfs_repair_one_sector(struct inode *inode, struct btrfs_bio *failed_bbio, ...@@ -2626,12 +2625,11 @@ int btrfs_repair_one_sector(struct inode *inode, struct btrfs_bio *failed_bbio,
return -EIO; return -EIO;
} }
repair_bio = btrfs_bio_alloc(1, REQ_OP_READ); repair_bio = btrfs_bio_alloc(1, REQ_OP_READ, failed_bbio->end_io,
failed_bbio->private);
repair_bbio = btrfs_bio(repair_bio); repair_bbio = btrfs_bio(repair_bio);
repair_bbio->file_offset = start; repair_bbio->file_offset = start;
repair_bio->bi_end_io = failed_bio->bi_end_io;
repair_bio->bi_iter.bi_sector = failrec->logical >> 9; repair_bio->bi_iter.bi_sector = failrec->logical >> 9;
repair_bio->bi_private = failed_bio->bi_private;
if (failed_bbio->csum) { if (failed_bbio->csum) {
const u32 csum_size = fs_info->csum_size; const u32 csum_size = fs_info->csum_size;
...@@ -2798,8 +2796,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end) ...@@ -2798,8 +2796,9 @@ void end_extent_writepage(struct page *page, int err, u64 start, u64 end)
* Scheduling is not allowed, so the extent state tree is expected * Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO. * to have one and only one object corresponding to this IO.
*/ */
static void end_bio_extent_writepage(struct bio *bio) static void end_bio_extent_writepage(struct btrfs_bio *bbio)
{ {
struct bio *bio = &bbio->bio;
int error = blk_status_to_errno(bio->bi_status); int error = blk_status_to_errno(bio->bi_status);
struct bio_vec *bvec; struct bio_vec *bvec;
u64 start; u64 start;
...@@ -2960,10 +2959,10 @@ static struct extent_buffer *find_extent_buffer_readpage( ...@@ -2960,10 +2959,10 @@ static struct extent_buffer *find_extent_buffer_readpage(
* Scheduling is not allowed, so the extent state tree is expected * Scheduling is not allowed, so the extent state tree is expected
* to have one and only one object corresponding to this IO. * to have one and only one object corresponding to this IO.
*/ */
static void end_bio_extent_readpage(struct bio *bio) static void end_bio_extent_readpage(struct btrfs_bio *bbio)
{ {
struct bio *bio = &bbio->bio;
struct bio_vec *bvec; struct bio_vec *bvec;
struct btrfs_bio *bbio = btrfs_bio(bio);
struct extent_io_tree *tree, *failure_tree; struct extent_io_tree *tree, *failure_tree;
struct processed_extent processed = { 0 }; struct processed_extent processed = { 0 };
/* /*
...@@ -3279,7 +3278,7 @@ static int alloc_new_bio(struct btrfs_inode *inode, ...@@ -3279,7 +3278,7 @@ static int alloc_new_bio(struct btrfs_inode *inode,
struct btrfs_bio_ctrl *bio_ctrl, struct btrfs_bio_ctrl *bio_ctrl,
struct writeback_control *wbc, struct writeback_control *wbc,
blk_opf_t opf, blk_opf_t opf,
bio_end_io_t end_io_func, btrfs_bio_end_io_t end_io_func,
u64 disk_bytenr, u32 offset, u64 file_offset, u64 disk_bytenr, u32 offset, u64 file_offset,
enum btrfs_compression_type compress_type) enum btrfs_compression_type compress_type)
{ {
...@@ -3287,7 +3286,7 @@ static int alloc_new_bio(struct btrfs_inode *inode, ...@@ -3287,7 +3286,7 @@ static int alloc_new_bio(struct btrfs_inode *inode,
struct bio *bio; struct bio *bio;
int ret; int ret;
bio = btrfs_bio_alloc(BIO_MAX_VECS, opf); bio = btrfs_bio_alloc(BIO_MAX_VECS, opf, end_io_func, NULL);
/* /*
* For compressed page range, its disk_bytenr is always @disk_bytenr * For compressed page range, its disk_bytenr is always @disk_bytenr
* passed in, no matter if we have added any range into previous bio. * passed in, no matter if we have added any range into previous bio.
...@@ -3298,7 +3297,6 @@ static int alloc_new_bio(struct btrfs_inode *inode, ...@@ -3298,7 +3297,6 @@ static int alloc_new_bio(struct btrfs_inode *inode,
bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT; bio->bi_iter.bi_sector = (disk_bytenr + offset) >> SECTOR_SHIFT;
bio_ctrl->bio = bio; bio_ctrl->bio = bio;
bio_ctrl->compress_type = compress_type; bio_ctrl->compress_type = compress_type;
bio->bi_end_io = end_io_func;
ret = calc_bio_boundaries(bio_ctrl, inode, file_offset); ret = calc_bio_boundaries(bio_ctrl, inode, file_offset);
if (ret < 0) if (ret < 0)
goto error; goto error;
...@@ -3337,8 +3335,7 @@ static int alloc_new_bio(struct btrfs_inode *inode, ...@@ -3337,8 +3335,7 @@ static int alloc_new_bio(struct btrfs_inode *inode,
return 0; return 0;
error: error:
bio_ctrl->bio = NULL; bio_ctrl->bio = NULL;
bio->bi_status = errno_to_blk_status(ret); btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
bio_endio(bio);
return ret; return ret;
} }
...@@ -3361,7 +3358,7 @@ static int submit_extent_page(blk_opf_t opf, ...@@ -3361,7 +3358,7 @@ static int submit_extent_page(blk_opf_t opf,
struct btrfs_bio_ctrl *bio_ctrl, struct btrfs_bio_ctrl *bio_ctrl,
struct page *page, u64 disk_bytenr, struct page *page, u64 disk_bytenr,
size_t size, unsigned long pg_offset, size_t size, unsigned long pg_offset,
bio_end_io_t end_io_func, btrfs_bio_end_io_t end_io_func,
enum btrfs_compression_type compress_type, enum btrfs_compression_type compress_type,
bool force_bio_submit) bool force_bio_submit)
{ {
...@@ -4345,8 +4342,9 @@ static struct extent_buffer *find_extent_buffer_nolock( ...@@ -4345,8 +4342,9 @@ static struct extent_buffer *find_extent_buffer_nolock(
* Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback() * Unlike end_bio_extent_buffer_writepage(), we only call end_page_writeback()
* after all extent buffers in the page has finished their writeback. * after all extent buffers in the page has finished their writeback.
*/ */
static void end_bio_subpage_eb_writepage(struct bio *bio) static void end_bio_subpage_eb_writepage(struct btrfs_bio *bbio)
{ {
struct bio *bio = &bbio->bio;
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
struct bio_vec *bvec; struct bio_vec *bvec;
struct bvec_iter_all iter_all; struct bvec_iter_all iter_all;
...@@ -4402,8 +4400,9 @@ static void end_bio_subpage_eb_writepage(struct bio *bio) ...@@ -4402,8 +4400,9 @@ static void end_bio_subpage_eb_writepage(struct bio *bio)
bio_put(bio); bio_put(bio);
} }
static void end_bio_extent_buffer_writepage(struct bio *bio) static void end_bio_extent_buffer_writepage(struct btrfs_bio *bbio)
{ {
struct bio *bio = &bbio->bio;
struct bio_vec *bvec; struct bio_vec *bvec;
struct extent_buffer *eb; struct extent_buffer *eb;
int done; int done;
......
...@@ -2700,8 +2700,10 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro ...@@ -2700,8 +2700,10 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro
if (bio_op(bio) == REQ_OP_ZONE_APPEND) { if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
ret = extract_ordered_extent(bi, bio, ret = extract_ordered_extent(bi, bio,
page_offset(bio_first_bvec_all(bio)->bv_page)); page_offset(bio_first_bvec_all(bio)->bv_page));
if (ret) if (ret) {
goto out; btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
}
} }
/* /*
...@@ -2721,16 +2723,12 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro ...@@ -2721,16 +2723,12 @@ void btrfs_submit_data_write_bio(struct inode *inode, struct bio *bio, int mirro
return; return;
ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false); ret = btrfs_csum_one_bio(bi, bio, (u64)-1, false);
if (ret) if (ret) {
goto out; btrfs_bio_end_io(btrfs_bio(bio), ret);
return;
}
} }
btrfs_submit_bio(fs_info, bio, mirror_num); btrfs_submit_bio(fs_info, bio, mirror_num);
return;
out:
if (ret) {
bio->bi_status = ret;
bio_endio(bio);
}
} }
void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio, void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
...@@ -2757,8 +2755,7 @@ void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio, ...@@ -2757,8 +2755,7 @@ void btrfs_submit_data_read_bio(struct inode *inode, struct bio *bio,
*/ */
ret = btrfs_lookup_bio_sums(inode, bio, NULL); ret = btrfs_lookup_bio_sums(inode, bio, NULL);
if (ret) { if (ret) {
bio->bi_status = ret; btrfs_bio_end_io(btrfs_bio(bio), ret);
bio_endio(bio);
return; return;
} }
...@@ -7984,7 +7981,7 @@ static void submit_dio_repair_bio(struct inode *inode, struct bio *bio, ...@@ -7984,7 +7981,7 @@ static void submit_dio_repair_bio(struct inode *inode, struct bio *bio,
int mirror_num, int mirror_num,
enum btrfs_compression_type compress_type) enum btrfs_compression_type compress_type)
{ {
struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
BUG_ON(bio_op(bio) == REQ_OP_WRITE); BUG_ON(bio_op(bio) == REQ_OP_WRITE);
...@@ -8037,10 +8034,10 @@ static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode, ...@@ -8037,10 +8034,10 @@ static blk_status_t btrfs_submit_bio_start_direct_io(struct inode *inode,
return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false); return btrfs_csum_one_bio(BTRFS_I(inode), bio, dio_file_offset, false);
} }
static void btrfs_end_dio_bio(struct bio *bio) static void btrfs_end_dio_bio(struct btrfs_bio *bbio)
{ {
struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_dio_private *dip = bbio->private;
struct btrfs_bio *bbio = btrfs_bio(bio); struct bio *bio = &bbio->bio;
blk_status_t err = bio->bi_status; blk_status_t err = bio->bi_status;
if (err) if (err)
...@@ -8066,7 +8063,7 @@ static void btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, ...@@ -8066,7 +8063,7 @@ static void btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
u64 file_offset, int async_submit) u64 file_offset, int async_submit)
{ {
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_dio_private *dip = bio->bi_private; struct btrfs_dio_private *dip = btrfs_bio(bio)->private;
blk_status_t ret; blk_status_t ret;
/* Save the original iter for read repair */ /* Save the original iter for read repair */
...@@ -8089,8 +8086,7 @@ static void btrfs_submit_dio_bio(struct bio *bio, struct inode *inode, ...@@ -8089,8 +8086,7 @@ static void btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
*/ */
ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false); ret = btrfs_csum_one_bio(BTRFS_I(inode), bio, file_offset, false);
if (ret) { if (ret) {
bio->bi_status = ret; btrfs_bio_end_io(btrfs_bio(bio), ret);
bio_endio(bio);
return; return;
} }
} else { } else {
...@@ -8173,9 +8169,8 @@ static void btrfs_submit_direct(const struct iomap_iter *iter, ...@@ -8173,9 +8169,8 @@ static void btrfs_submit_direct(const struct iomap_iter *iter,
* This will never fail as it's passing GPF_NOFS and * This will never fail as it's passing GPF_NOFS and
* the allocation is backed by btrfs_bioset. * the allocation is backed by btrfs_bioset.
*/ */
bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len); bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len,
bio->bi_private = dip; btrfs_end_dio_bio, dip);
bio->bi_end_io = btrfs_end_dio_bio;
btrfs_bio(bio)->file_offset = file_offset; btrfs_bio(bio)->file_offset = file_offset;
if (bio_op(bio) == REQ_OP_ZONE_APPEND) { if (bio_op(bio) == REQ_OP_ZONE_APPEND) {
...@@ -10380,7 +10375,7 @@ struct btrfs_encoded_read_private { ...@@ -10380,7 +10375,7 @@ struct btrfs_encoded_read_private {
static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode, static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
struct bio *bio, int mirror_num) struct bio *bio, int mirror_num)
{ {
struct btrfs_encoded_read_private *priv = bio->bi_private; struct btrfs_encoded_read_private *priv = btrfs_bio(bio)->private;
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
blk_status_t ret; blk_status_t ret;
...@@ -10398,7 +10393,7 @@ static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode, ...@@ -10398,7 +10393,7 @@ static blk_status_t submit_encoded_read_bio(struct btrfs_inode *inode,
static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio) static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
{ {
const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK); const bool uptodate = (bbio->bio.bi_status == BLK_STS_OK);
struct btrfs_encoded_read_private *priv = bbio->bio.bi_private; struct btrfs_encoded_read_private *priv = bbio->private;
struct btrfs_inode *inode = priv->inode; struct btrfs_inode *inode = priv->inode;
struct btrfs_fs_info *fs_info = inode->root->fs_info; struct btrfs_fs_info *fs_info = inode->root->fs_info;
u32 sectorsize = fs_info->sectorsize; u32 sectorsize = fs_info->sectorsize;
...@@ -10426,10 +10421,9 @@ static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio) ...@@ -10426,10 +10421,9 @@ static blk_status_t btrfs_encoded_read_verify_csum(struct btrfs_bio *bbio)
return BLK_STS_OK; return BLK_STS_OK;
} }
static void btrfs_encoded_read_endio(struct bio *bio) static void btrfs_encoded_read_endio(struct btrfs_bio *bbio)
{ {
struct btrfs_encoded_read_private *priv = bio->bi_private; struct btrfs_encoded_read_private *priv = bbio->private;
struct btrfs_bio *bbio = btrfs_bio(bio);
blk_status_t status; blk_status_t status;
status = btrfs_encoded_read_verify_csum(bbio); status = btrfs_encoded_read_verify_csum(bbio);
...@@ -10447,7 +10441,7 @@ static void btrfs_encoded_read_endio(struct bio *bio) ...@@ -10447,7 +10441,7 @@ static void btrfs_encoded_read_endio(struct bio *bio)
if (!atomic_dec_return(&priv->pending)) if (!atomic_dec_return(&priv->pending))
wake_up(&priv->wait); wake_up(&priv->wait);
btrfs_bio_free_csum(bbio); btrfs_bio_free_csum(bbio);
bio_put(bio); bio_put(&bbio->bio);
} }
int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
...@@ -10494,11 +10488,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode, ...@@ -10494,11 +10488,11 @@ int btrfs_encoded_read_regular_fill_pages(struct btrfs_inode *inode,
size_t bytes = min_t(u64, remaining, PAGE_SIZE); size_t bytes = min_t(u64, remaining, PAGE_SIZE);
if (!bio) { if (!bio) {
bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ); bio = btrfs_bio_alloc(BIO_MAX_VECS, REQ_OP_READ,
btrfs_encoded_read_endio,
&priv);
bio->bi_iter.bi_sector = bio->bi_iter.bi_sector =
(disk_bytenr + cur) >> SECTOR_SHIFT; (disk_bytenr + cur) >> SECTOR_SHIFT;
bio->bi_end_io = btrfs_encoded_read_endio;
bio->bi_private = &priv;
} }
if (!bytes || if (!bytes ||
......
...@@ -6617,9 +6617,12 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op, ...@@ -6617,9 +6617,12 @@ int btrfs_map_sblock(struct btrfs_fs_info *fs_info, enum btrfs_map_op op,
* Initialize a btrfs_bio structure. This skips the embedded bio itself as it * Initialize a btrfs_bio structure. This skips the embedded bio itself as it
* is already initialized by the block layer. * is already initialized by the block layer.
*/ */
static inline void btrfs_bio_init(struct btrfs_bio *bbio) static inline void btrfs_bio_init(struct btrfs_bio *bbio,
btrfs_bio_end_io_t end_io, void *private)
{ {
memset(bbio, 0, offsetof(struct btrfs_bio, bio)); memset(bbio, 0, offsetof(struct btrfs_bio, bio));
bbio->end_io = end_io;
bbio->private = private;
} }
/* /*
...@@ -6629,16 +6632,18 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio) ...@@ -6629,16 +6632,18 @@ static inline void btrfs_bio_init(struct btrfs_bio *bbio)
* Just like the underlying bio_alloc_bioset it will not fail as it is backed by * Just like the underlying bio_alloc_bioset it will not fail as it is backed by
* a mempool. * a mempool.
*/ */
struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf) struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
btrfs_bio_end_io_t end_io, void *private)
{ {
struct bio *bio; struct bio *bio;
bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset); bio = bio_alloc_bioset(NULL, nr_vecs, opf, GFP_NOFS, &btrfs_bioset);
btrfs_bio_init(btrfs_bio(bio)); btrfs_bio_init(btrfs_bio(bio), end_io, private);
return bio; return bio;
} }
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size) struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
btrfs_bio_end_io_t end_io, void *private)
{ {
struct bio *bio; struct bio *bio;
struct btrfs_bio *bbio; struct btrfs_bio *bbio;
...@@ -6647,7 +6652,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size) ...@@ -6647,7 +6652,7 @@ struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size)
bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset); bio = bio_alloc_clone(orig->bi_bdev, orig, GFP_NOFS, &btrfs_bioset);
bbio = btrfs_bio(bio); bbio = btrfs_bio(bio);
btrfs_bio_init(bbio); btrfs_bio_init(bbio, end_io, private);
bio_trim(bio, offset >> 9, size >> 9); bio_trim(bio, offset >> 9, size >> 9);
bbio->iter = bio->bi_iter; bbio->iter = bio->bi_iter;
...@@ -6681,7 +6686,7 @@ static void btrfs_end_bio_work(struct work_struct *work) ...@@ -6681,7 +6686,7 @@ static void btrfs_end_bio_work(struct work_struct *work)
struct btrfs_bio *bbio = struct btrfs_bio *bbio =
container_of(work, struct btrfs_bio, end_io_work); container_of(work, struct btrfs_bio, end_io_work);
bio_endio(&bbio->bio); bbio->end_io(bbio);
} }
static void btrfs_raid56_end_io(struct bio *bio) static void btrfs_raid56_end_io(struct bio *bio)
...@@ -6691,9 +6696,7 @@ static void btrfs_raid56_end_io(struct bio *bio) ...@@ -6691,9 +6696,7 @@ static void btrfs_raid56_end_io(struct bio *bio)
btrfs_bio_counter_dec(bioc->fs_info); btrfs_bio_counter_dec(bioc->fs_info);
bbio->mirror_num = bioc->mirror_num; bbio->mirror_num = bioc->mirror_num;
bio->bi_end_io = bioc->end_io; bbio->end_io(bbio);
bio->bi_private = bioc->private;
bio->bi_end_io(bio);
btrfs_put_bioc(bioc); btrfs_put_bioc(bioc);
} }
...@@ -6712,8 +6715,6 @@ static void btrfs_end_bio(struct bio *bio) ...@@ -6712,8 +6715,6 @@ static void btrfs_end_bio(struct bio *bio)
} }
bbio->mirror_num = bioc->mirror_num; bbio->mirror_num = bioc->mirror_num;
bio->bi_end_io = bioc->end_io;
bio->bi_private = bioc->private;
/* /*
* Only send an error to the higher layers if it is beyond the tolerance * Only send an error to the higher layers if it is beyond the tolerance
...@@ -6728,7 +6729,7 @@ static void btrfs_end_bio(struct bio *bio) ...@@ -6728,7 +6729,7 @@ static void btrfs_end_bio(struct bio *bio)
INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work); INIT_WORK(&bbio->end_io_work, btrfs_end_bio_work);
queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work); queue_work(btrfs_end_io_wq(bioc), &bbio->end_io_work);
} else { } else {
bio_endio(bio); bbio->end_io(bbio);
} }
btrfs_put_bioc(bioc); btrfs_put_bioc(bioc);
...@@ -6819,15 +6820,12 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror ...@@ -6819,15 +6820,12 @@ void btrfs_submit_bio(struct btrfs_fs_info *fs_info, struct bio *bio, int mirror
&map_length, &bioc, mirror_num, 1); &map_length, &bioc, mirror_num, 1);
if (ret) { if (ret) {
btrfs_bio_counter_dec(fs_info); btrfs_bio_counter_dec(fs_info);
bio->bi_status = errno_to_blk_status(ret); btrfs_bio_end_io(btrfs_bio(bio), errno_to_blk_status(ret));
bio_endio(bio);
return; return;
} }
total_devs = bioc->num_stripes; total_devs = bioc->num_stripes;
bioc->orig_bio = bio; bioc->orig_bio = bio;
bioc->private = bio->bi_private;
bioc->end_io = bio->bi_end_io;
if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) && if ((bioc->map_type & BTRFS_BLOCK_GROUP_RAID56_MASK) &&
((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) { ((btrfs_op(bio) == BTRFS_MAP_WRITE) || (mirror_num > 1))) {
......
...@@ -361,6 +361,8 @@ struct btrfs_fs_devices { ...@@ -361,6 +361,8 @@ struct btrfs_fs_devices {
*/ */
#define BTRFS_MAX_BIO_SECTORS (256) #define BTRFS_MAX_BIO_SECTORS (256)
typedef void (*btrfs_bio_end_io_t)(struct btrfs_bio *bbio);
/* /*
* Additional info to pass along bio. * Additional info to pass along bio.
* *
...@@ -378,6 +380,10 @@ struct btrfs_bio { ...@@ -378,6 +380,10 @@ struct btrfs_bio {
u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE]; u8 csum_inline[BTRFS_BIO_INLINE_CSUM_SIZE];
struct bvec_iter iter; struct bvec_iter iter;
/* End I/O information supplied to btrfs_bio_alloc */
btrfs_bio_end_io_t end_io;
void *private;
/* For read end I/O handling */ /* For read end I/O handling */
struct work_struct end_io_work; struct work_struct end_io_work;
...@@ -396,8 +402,16 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio) ...@@ -396,8 +402,16 @@ static inline struct btrfs_bio *btrfs_bio(struct bio *bio)
int __init btrfs_bioset_init(void); int __init btrfs_bioset_init(void);
void __cold btrfs_bioset_exit(void); void __cold btrfs_bioset_exit(void);
struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf); struct bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size); btrfs_bio_end_io_t end_io, void *private);
struct bio *btrfs_bio_clone_partial(struct bio *orig, u64 offset, u64 size,
btrfs_bio_end_io_t end_io, void *private);
static inline void btrfs_bio_end_io(struct btrfs_bio *bbio, blk_status_t status)
{
bbio->bio.bi_status = status;
bbio->end_io(bbio);
}
static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio) static inline void btrfs_bio_free_csum(struct btrfs_bio *bbio)
{ {
...@@ -459,9 +473,7 @@ struct btrfs_io_context { ...@@ -459,9 +473,7 @@ struct btrfs_io_context {
refcount_t refs; refcount_t refs;
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
u64 map_type; /* get from map_lookup->type */ u64 map_type; /* get from map_lookup->type */
bio_end_io_t *end_io;
struct bio *orig_bio; struct bio *orig_bio;
void *private;
atomic_t error; atomic_t error;
int max_errors; int max_errors;
int num_stripes; int num_stripes;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment