Commit 769b4f24 authored by Omar Sandoval, committed by David Sterba

btrfs: get rid of one layer of bios in direct I/O

In the worst case, there are _4_ layers of bios in the Btrfs direct I/O
path:

1. The bio created by the generic direct I/O code (dio_bio).
2. A clone of dio_bio we create in btrfs_submit_direct() to represent
   the entire direct I/O range (orig_bio).
3. A partial clone of orig_bio limited to the size of a RAID stripe that
   we create in btrfs_submit_direct_hook().
4. Clones of each of those split bios for each RAID stripe that we
   create in btrfs_map_bio().

As of the previous commit, the second layer (orig_bio) is no longer
needed for anything: we can split dio_bio instead, and complete dio_bio
directly when all of the cloned bios complete. This lets us clean up a
bunch of cruft, including dip->subio_endio and dip->errors (we can use
dio_bio->bi_status instead). It also enables the next big cleanup of
direct I/O read repair.
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 85879573
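
Editor's note: the completion scheme the message describes (split dio_bio into per-stripe clones, make errors sticky on dio_bio->bi_status, and finish dio_bio once the last clone drops its reference) is easiest to see in isolation. Below is a minimal userspace C sketch of that pattern; every type and helper in it is an illustrative stand-in, not the kernel's bio machinery.

/*
 * Userspace sketch of the refcounted split-and-complete pattern used
 * in this commit; all types and helpers here are illustrative.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct dio {
	atomic_int refs;	/* like dip->refs */
	int status;		/* like dio_bio->bi_status; sticky on error */
	size_t bytes;
};

/*
 * Drop one reference; the final put completes the parent I/O (the
 * analog of btrfs_dio_private_put() calling dio_end_io()).
 */
static void dio_put(struct dio *dio)
{
	if (atomic_fetch_sub(&dio->refs, 1) != 1)
		return;
	printf("dio complete: %zu bytes, status %d\n",
	       dio->bytes, dio->status);
	free(dio);
}

/*
 * Completion of one per-stripe piece: record the first error on the
 * parent, then drop the piece's reference.
 */
static void piece_endio(struct dio *dio, int err)
{
	if (err && !dio->status)
		dio->status = err;
	dio_put(dio);
}

int main(void)
{
	const size_t stripe = 64 * 1024;	/* pretend stripe length */
	size_t remaining = 200 * 1024;
	struct dio *dio = malloc(sizeof(*dio));

	if (!dio)
		return 1;
	dio->bytes = remaining;
	dio->status = 0;
	atomic_init(&dio->refs, 1);	/* initial ref, handed to last piece */

	while (remaining > 0) {
		size_t len = remaining < stripe ? remaining : stripe;

		remaining -= len;
		/*
		 * Extra ref for every piece except the last one, which
		 * inherits the initial reference.
		 */
		if (remaining > 0)
			atomic_fetch_add(&dio->refs, 1);
		piece_endio(dio, 0);	/* pretend the piece finished OK */
	}
	return 0;
}

The order of operations mirrors btrfs_dio_private_put(): the status is written before the final reference drop, so whichever piece completes last always observes it.
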
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -294,11 +294,8 @@ static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
 	return ret;
 }
 
-#define BTRFS_DIO_ORIG_BIO_SUBMITTED	0x1
-
 struct btrfs_dio_private {
 	struct inode *inode;
-	unsigned long flags;
 	u64 logical_offset;
 	u64 disk_bytenr;
 	u64 bytes;
@@ -309,22 +306,9 @@ struct btrfs_dio_private {
 	 */
 	refcount_t refs;
 
-	/* IO errors */
-	int errors;
-
-	/* orig_bio is our btrfs_io_bio */
-	struct bio *orig_bio;
-
 	/* dio_bio came from fs/direct-io.c */
 	struct bio *dio_bio;
 
-	/*
-	 * The original bio may be split to several sub-bios, this is
-	 * done during endio of sub-bios
-	 */
-	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
-			blk_status_t);
-
 	/* Array of checksums */
 	u8 csums[];
 };
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -7356,6 +7356,29 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
 	return ret;
 }
 
+static void btrfs_dio_private_put(struct btrfs_dio_private *dip)
+{
+	/*
+	 * This implies a barrier so that stores to dio_bio->bi_status before
+	 * this and loads of dio_bio->bi_status after this are fully ordered.
+	 */
+	if (!refcount_dec_and_test(&dip->refs))
+		return;
+
+	if (bio_op(dip->dio_bio) == REQ_OP_WRITE) {
+		__endio_write_update_ordered(dip->inode, dip->logical_offset,
+					     dip->bytes,
+					     !dip->dio_bio->bi_status);
+	} else {
+		unlock_extent(&BTRFS_I(dip->inode)->io_tree,
+			      dip->logical_offset,
+			      dip->logical_offset + dip->bytes - 1);
+	}
+
+	dio_end_io(dip->dio_bio);
+	kfree(dip);
+}
+
 static inline blk_status_t submit_dio_repair_bio(struct inode *inode,
 						 struct bio *bio,
 						 int mirror_num)
@@ -7678,8 +7701,9 @@ static blk_status_t __btrfs_subio_endio_read(struct inode *inode,
 	return err;
 }
 
-static blk_status_t btrfs_subio_endio_read(struct inode *inode,
-		struct btrfs_io_bio *io_bio, blk_status_t err)
+static blk_status_t btrfs_check_read_dio_bio(struct inode *inode,
+					     struct btrfs_io_bio *io_bio,
+					     blk_status_t err)
 {
 	bool skip_csum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;
 
@@ -7693,28 +7717,6 @@ static blk_status_t btrfs_subio_endio_read(struct inode *inode,
 	}
 }
 
-static void btrfs_endio_direct_read(struct bio *bio)
-{
-	struct btrfs_dio_private *dip = bio->bi_private;
-	struct inode *inode = dip->inode;
-	struct bio *dio_bio;
-	struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
-	blk_status_t err = bio->bi_status;
-
-	if (dip->flags & BTRFS_DIO_ORIG_BIO_SUBMITTED)
-		err = btrfs_subio_endio_read(inode, io_bio, err);
-
-	unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
-		      dip->logical_offset + dip->bytes - 1);
-	dio_bio = dip->dio_bio;
-
-	kfree(dip);
-
-	dio_bio->bi_status = err;
-	dio_end_io(dio_bio);
-	bio_put(bio);
-}
-
 static void __endio_write_update_ordered(struct inode *inode,
 					 const u64 offset, const u64 bytes,
 					 const bool uptodate)
@@ -7758,21 +7760,6 @@ static void __endio_write_update_ordered(struct inode *inode,
 	}
 }
 
-static void btrfs_endio_direct_write(struct bio *bio)
-{
-	struct btrfs_dio_private *dip = bio->bi_private;
-	struct bio *dio_bio = dip->dio_bio;
-
-	__endio_write_update_ordered(dip->inode, dip->logical_offset,
-				     dip->bytes, !bio->bi_status);
-
-	kfree(dip);
-
-	dio_bio->bi_status = bio->bi_status;
-	dio_end_io(dio_bio);
-	bio_put(bio);
-}
-
 static blk_status_t btrfs_submit_bio_start_direct_io(void *private_data,
 				    struct bio *bio, u64 offset)
 {
@@ -7796,31 +7783,16 @@ static void btrfs_end_dio_bio(struct bio *bio)
 			   (unsigned long long)bio->bi_iter.bi_sector,
 			   bio->bi_iter.bi_size, err);
 
-	if (dip->subio_endio)
-		err = dip->subio_endio(dip->inode, btrfs_io_bio(bio), err);
-
-	if (err) {
-		/*
-		 * We want to perceive the errors flag being set before
-		 * decrementing the reference count. We don't need a barrier
-		 * since atomic operations with a return value are fully
-		 * ordered as per atomic_t.txt
-		 */
-		dip->errors = 1;
+	if (bio_op(bio) == REQ_OP_READ) {
+		err = btrfs_check_read_dio_bio(dip->inode, btrfs_io_bio(bio),
+					       err);
 	}
 
-	/* if there are more bios still pending for this dio, just exit */
-	if (!refcount_dec_and_test(&dip->refs))
-		goto out;
-
-	if (dip->errors) {
-		bio_io_error(dip->orig_bio);
-	} else {
-		dip->dio_bio->bi_status = BLK_STS_OK;
-		bio_endio(dip->orig_bio);
-	}
-out:
+	if (err)
+		dip->dio_bio->bi_status = err;
+
 	bio_put(bio);
+	btrfs_dio_private_put(dip);
 }
 
 static inline blk_status_t btrfs_submit_dio_bio(struct bio *bio,
@@ -7883,7 +7855,6 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
 	size_t dip_size;
 	struct btrfs_dio_private *dip;
-	struct bio *bio;
 
 	dip_size = sizeof(*dip);
 	if (!write && csum) {
@@ -7899,15 +7870,10 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
 	if (!dip)
 		return NULL;
 
-	bio = btrfs_bio_clone(dio_bio);
-	bio->bi_private = dip;
-	btrfs_io_bio(bio)->logical = file_offset;
-
 	dip->inode = inode;
 	dip->logical_offset = file_offset;
 	dip->bytes = dio_bio->bi_iter.bi_size;
 	dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
-	dip->orig_bio = bio;
 	dip->dio_bio = dio_bio;
 	refcount_set(&dip->refs, 1);
 
@@ -7922,11 +7888,6 @@ static struct btrfs_dio_private *btrfs_create_dio_private(struct bio *dio_bio,
 			dip->bytes;
 		dio_data->unsubmitted_oe_range_start =
 			dio_data->unsubmitted_oe_range_end;
-
-		bio->bi_end_io = btrfs_endio_direct_write;
-	} else {
-		bio->bi_end_io = btrfs_endio_direct_read;
-		dip->subio_endio = btrfs_subio_endio_read;
 	}
 	return dip;
 }
@@ -7937,9 +7898,10 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
 	const bool write = (bio_op(dio_bio) == REQ_OP_WRITE);
 	const bool csum = !(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM);
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
+	const bool raid56 = (btrfs_data_alloc_profile(fs_info) &
+			     BTRFS_BLOCK_GROUP_RAID56_MASK);
 	struct btrfs_dio_private *dip;
 	struct bio *bio;
-	struct bio *orig_bio;
 	u64 start_sector;
 	int async_submit = 0;
 	u64 submit_len;
@@ -7971,89 +7933,72 @@ static void btrfs_submit_direct(struct bio *dio_bio, struct inode *inode,
 		goto out_err;
 	}
 
-	orig_bio = dip->orig_bio;
-	start_sector = orig_bio->bi_iter.bi_sector;
-	submit_len = orig_bio->bi_iter.bi_size;
-	ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
-				    start_sector << 9, submit_len, &geom);
-	if (ret)
-		goto out_err;
-
-	if (geom.len >= submit_len) {
-		bio = orig_bio;
-		dip->flags |= BTRFS_DIO_ORIG_BIO_SUBMITTED;
-		goto submit;
-	}
-
-	/* async crcs make it difficult to collect full stripe writes. */
-	if (btrfs_data_alloc_profile(fs_info) & BTRFS_BLOCK_GROUP_RAID56_MASK)
-		async_submit = 0;
-	else
-		async_submit = 1;
-
-	/* bio split */
-	ASSERT(geom.len <= INT_MAX);
+	start_sector = dio_bio->bi_iter.bi_sector;
+	submit_len = dio_bio->bi_iter.bi_size;
+
 	do {
+		ret = btrfs_get_io_geometry(fs_info, btrfs_op(dio_bio),
+					    start_sector << 9, submit_len,
+					    &geom);
+		if (ret) {
+			status = errno_to_blk_status(ret);
+			goto out_err;
+		}
+		ASSERT(geom.len <= INT_MAX);
+
 		clone_len = min_t(int, submit_len, geom.len);
 
 		/*
 		 * This will never fail as it's passing GPF_NOFS and
 		 * the allocation is backed by btrfs_bioset.
 		 */
-		bio = btrfs_bio_clone_partial(orig_bio, clone_offset,
-					      clone_len);
+		bio = btrfs_bio_clone_partial(dio_bio, clone_offset, clone_len);
 		bio->bi_private = dip;
 		bio->bi_end_io = btrfs_end_dio_bio;
 		btrfs_io_bio(bio)->logical = file_offset;
 
 		ASSERT(submit_len >= clone_len);
 		submit_len -= clone_len;
-		if (submit_len == 0)
-			break;
 
 		/*
 		 * Increase the count before we submit the bio so we know
 		 * the end IO handler won't happen before we increase the
 		 * count. Otherwise, the dip might get freed before we're
 		 * done setting it up.
+		 *
+		 * We transfer the initial reference to the last bio, so we
+		 * don't need to increment the reference count for the last one.
 		 */
-		refcount_inc(&dip->refs);
+		if (submit_len > 0) {
+			refcount_inc(&dip->refs);
+			/*
+			 * If we are submitting more than one bio, submit them
+			 * all asynchronously. The exception is RAID 5 or 6, as
+			 * asynchronous checksums make it difficult to collect
+			 * full stripe writes.
+			 */
+			if (!raid56)
+				async_submit = 1;
+		}
 
 		status = btrfs_submit_dio_bio(bio, inode, file_offset,
 					      async_submit);
 		if (status) {
 			bio_put(bio);
-			refcount_dec(&dip->refs);
+			if (submit_len > 0)
+				refcount_dec(&dip->refs);
 			goto out_err;
 		}
 
 		clone_offset += clone_len;
 		start_sector += clone_len >> 9;
 		file_offset += clone_len;
-
-		ret = btrfs_get_io_geometry(fs_info, btrfs_op(orig_bio),
-					    start_sector << 9, submit_len, &geom);
-		if (ret)
-			goto out_err;
 	} while (submit_len > 0);
+	return;
 
-submit:
-	status = btrfs_submit_dio_bio(bio, inode, file_offset, async_submit);
-	if (!status)
-		return;
-
-	if (bio != orig_bio)
-		bio_put(bio);
 out_err:
-	dip->errors = 1;
-	/*
-	 * Before atomic variable goto zero, we must make sure dip->errors is
-	 * perceived to be set. This ordering is ensured by the fact that an
-	 * atomic operations with a return value are fully ordered as per
-	 * atomic_t.txt
-	 */
-	if (refcount_dec_and_test(&dip->refs))
-		bio_io_error(dip->orig_bio);
+	dip->dio_bio->bi_status = status;
+	btrfs_dio_private_put(dip);
 }
 
 static ssize_t check_direct_IO(struct btrfs_fs_info *fs_info,
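
Editor's note on the reworked submission loop: btrfs_get_io_geometry() is now consulted at the top of every iteration, so each clone is capped at the stripe boundary containing the current sector, and the dip's initial reference is handed to the final clone instead of being dropped separately. A small sketch of that chunking arithmetic follows; the fixed STRIPE_LEN is an illustrative stand-in for the real geometry lookup, and geom_len plays the role of geom.len.

/*
 * Editor's sketch (not kernel code): stripe-bounded chunking in the
 * style of the submission loop above.
 */
#include <stdio.h>

#define STRIPE_LEN (64u * 1024u)

/* Bytes from 'offset' to the end of the stripe containing it. */
static unsigned int stripe_remaining(unsigned long long offset)
{
	return STRIPE_LEN - (unsigned int)(offset % STRIPE_LEN);
}

int main(void)
{
	unsigned long long offset = 48 * 1024;	/* unaligned start */
	unsigned int submit_len = 150 * 1024;	/* total I/O size */

	/* Mirror of the do/while: one clone per stripe-bounded piece. */
	do {
		unsigned int geom_len = stripe_remaining(offset);
		unsigned int clone_len =
			submit_len < geom_len ? submit_len : geom_len;

		printf("clone: offset %llu, len %u\n", offset, clone_len);
		offset += clone_len;
		submit_len -= clone_len;
	} while (submit_len > 0);
	return 0;
}

For a 150 KiB I/O starting 48 KiB into a 64 KiB stripe, this emits pieces of 16 KiB, 64 KiB, 64 KiB, and 6 KiB, which is the shape of the per-stripe clones the loop submits.
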