Commit 71df088c authored by Christoph Hellwig, committed by David Sterba

btrfs: defer splitting of ordered extents until I/O completion

The btrfs zoned completion code currently needs an ordered_extent and
extent_map per bio so that it can account for the non-predictable
write location from Zone Append.  To achieve that it currently splits
the ordered_extent and extent_map at I/O submission time, and then
records the actual physical address in the ->physical field of the
ordered_extent.

This patch instead switches to recording the "original" physical address
that the btrfs allocator assigned, in spare space in the btrfs_bio,
and then rewrites the logical address in the btrfs_ordered_sum
structure at I/O completion time.  This allows the ordered extent
completion handler to simply walk the list of ordered csums and
split the ordered extent as needed.  This removes an extra ordered
extent and extent_map lookup and manipulation from the I/O
submission path, and instead batches that work in the I/O completion
path, where these structures need to be touched anyway.
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 52b1fdca
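
The new btrfs_finish_ordered_zoned() added below walks the ordered csum list at
completion time, coalescing physically contiguous ranges and splitting off a
separate ordered extent whenever Zone Append placed a range elsewhere. As a
rough standalone illustration of that walk (a userspace sketch with made-up
types and helpers, not the kernel code), the same logic looks roughly like
this:

/*
 * Standalone sketch of the completion-time walk described above.
 * All types and helpers here are simplified stand-ins, not btrfs code.
 */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct sum {			/* stand-in for struct btrfs_ordered_sum */
	uint64_t logical;	/* logical address rewritten at bio completion */
	uint64_t len;
};

/* Pretend "split" that would carve [logical, logical + len) into its own
 * ordered extent; the kernel code calls btrfs_zoned_split_ordered() here. */
static void split_ordered(uint64_t logical, uint64_t len)
{
	printf("split: logical=%llu len=%llu\n",
	       (unsigned long long)logical, (unsigned long long)len);
}

/* Walk the csum list, merging physically contiguous ranges and splitting
 * whenever zone append placed the next range somewhere else. */
static void finish_ordered_zoned(const struct sum *sums, size_t nr,
				 uint64_t disk_num_bytes)
{
	uint64_t remaining = disk_num_bytes;	/* bytes still in the ordered extent */
	uint64_t logical = sums[0].logical;
	uint64_t len = sums[0].len;
	size_t i = 1;

	while (len < remaining && i < nr) {	/* i < nr is a sketch-only guard */
		const struct sum *s = &sums[i++];

		if (s->logical == logical + len) {
			len += s->len;		/* contiguous: extend current range */
			continue;
		}
		split_ordered(logical, len);	/* discontiguous: carve off the front */
		remaining -= len;
		logical = s->logical;
		len = s->len;
	}
	/* Whatever remains keeps the (possibly rewritten) start address. */
	printf("remainder: logical=%llu len=%llu\n",
	       (unsigned long long)logical, (unsigned long long)len);
}

int main(void)
{
	/* Two contiguous 64K writes followed by one that landed elsewhere. */
	const struct sum sums[] = {
		{ 1048576, 65536 },
		{ 1114112, 65536 },
		{ 4194304, 65536 },
	};

	finish_ordered_zoned(sums, 3, 3 * 65536);
	return 0;
}

Built with any C compiler, the sketch reports one split for the range that was
appended elsewhere and leaves the remainder at its rewritten start address,
mirroring what the real handler does per ordered extent.
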
@@ -61,20 +61,6 @@ struct btrfs_bio *btrfs_bio_alloc(unsigned int nr_vecs, blk_opf_t opf,
 	return bbio;
 }
 
-static blk_status_t btrfs_bio_extract_ordered_extent(struct btrfs_bio *bbio)
-{
-	struct btrfs_ordered_extent *ordered;
-	int ret;
-
-	ordered = btrfs_lookup_ordered_extent(bbio->inode, bbio->file_offset);
-	if (WARN_ON_ONCE(!ordered))
-		return BLK_STS_IOERR;
-	ret = btrfs_extract_ordered_extent(bbio, ordered);
-	btrfs_put_ordered_extent(ordered);
-
-	return errno_to_blk_status(ret);
-}
-
 static struct btrfs_bio *btrfs_split_bio(struct btrfs_fs_info *fs_info,
 					 struct btrfs_bio *orig_bbio,
 					 u64 map_length, bool use_append)
@@ -668,9 +654,6 @@ static bool btrfs_submit_chunk(struct btrfs_bio *bbio, int mirror_num)
 	if (use_append) {
 		bio->bi_opf &= ~REQ_OP_WRITE;
 		bio->bi_opf |= REQ_OP_ZONE_APPEND;
-		ret = btrfs_bio_extract_ordered_extent(bbio);
-		if (ret)
-			goto fail_put_bio;
 	}
 
 	/*
......
@@ -410,8 +410,6 @@ static inline bool btrfs_inode_can_compress(const struct btrfs_inode *inode)
 int btrfs_check_sector_csum(struct btrfs_fs_info *fs_info, struct page *page,
 			    u32 pgoff, u8 *csum, const u8 * const csum_expected);
-int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
-				 struct btrfs_ordered_extent *ordered);
 bool btrfs_data_csum_ok(struct btrfs_bio *bbio, struct btrfs_device *dev,
 			u32 bio_offset, struct bio_vec *bv);
 noinline int can_nocow_extent(struct inode *inode, u64 offset, u64 *len,
......
@@ -2714,8 +2714,8 @@ void btrfs_clear_delalloc_extent(struct btrfs_inode *inode,
 	}
 }
 
-int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
-				 struct btrfs_ordered_extent *ordered)
+static int btrfs_extract_ordered_extent(struct btrfs_bio *bbio,
+					struct btrfs_ordered_extent *ordered)
 {
 	u64 start = (u64)bbio->bio.bi_iter.bi_sector << SECTOR_SHIFT;
 	u64 len = bbio->bio.bi_iter.bi_size;
@@ -3180,7 +3180,7 @@ static int insert_ordered_extent_file_extent(struct btrfs_trans_handle *trans,
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
  */
-int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
+int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent)
 {
 	struct btrfs_inode *inode = BTRFS_I(ordered_extent->inode);
 	struct btrfs_root *root = inode->root;
@@ -3215,11 +3215,9 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		goto out;
 	}
 
-	if (btrfs_is_zoned(fs_info)) {
-		btrfs_rewrite_logical_zoned(ordered_extent);
+	if (btrfs_is_zoned(fs_info))
 		btrfs_zone_finish_endio(fs_info, ordered_extent->disk_bytenr,
 					ordered_extent->disk_num_bytes);
-	}
 
 	if (test_bit(BTRFS_ORDERED_TRUNCATED, &ordered_extent->flags)) {
 		truncated = true;
@@ -3387,6 +3385,14 @@ int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 	return ret;
 }
 
+int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered)
+{
+	if (btrfs_is_zoned(btrfs_sb(ordered->inode->i_sb)) &&
+	    !test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
+		btrfs_finish_ordered_zoned(ordered);
+	return btrfs_finish_one_ordered(ordered);
+}
+
 void btrfs_writepage_endio_finish_ordered(struct btrfs_inode *inode,
 					  struct page *page, u64 start,
 					  u64 end, bool uptodate)
......
@@ -161,6 +161,7 @@ btrfs_ordered_inode_tree_init(struct btrfs_ordered_inode_tree *t)
 	t->last = NULL;
 }
 
+int btrfs_finish_one_ordered(struct btrfs_ordered_extent *ordered_extent);
 int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
 void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry);
......
@@ -15,6 +15,7 @@
 #include "transaction.h"
 #include "dev-replace.h"
 #include "space-info.h"
+#include "super.h"
 #include "fs.h"
 #include "accessors.h"
 #include "bio.h"
@@ -1665,17 +1666,11 @@ void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
 	sum->logical += physical - bbio->orig_physical;
 }
 
-void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
+static void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered,
+					u64 logical)
 {
-	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
-	struct extent_map_tree *em_tree = &inode->extent_tree;
+	struct extent_map_tree *em_tree = &BTRFS_I(ordered->inode)->extent_tree;
 	struct extent_map *em;
-	struct btrfs_ordered_sum *sum =
-		list_first_entry(&ordered->list, typeof(*sum), list);
-	u64 logical = sum->logical;
-
-	if (ordered->disk_bytenr == logical)
-		goto out;
 
 	ordered->disk_bytenr = logical;
 
@@ -1685,6 +1680,54 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
 	em->block_start = logical;
 	free_extent_map(em);
 	write_unlock(&em_tree->lock);
+}
+
+static bool btrfs_zoned_split_ordered(struct btrfs_ordered_extent *ordered,
+				      u64 logical, u64 len)
+{
+	struct btrfs_ordered_extent *new;
+
+	if (!test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
+	    split_extent_map(BTRFS_I(ordered->inode), ordered->file_offset,
+			     ordered->num_bytes, len))
+		return false;
+
+	new = btrfs_split_ordered_extent(ordered, len);
+	if (IS_ERR(new))
+		return false;
+
+	if (new->disk_bytenr != logical)
+		btrfs_rewrite_logical_zoned(new, logical);
+	btrfs_finish_one_ordered(new);
+	return true;
+}
+
+void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered)
+{
+	struct btrfs_inode *inode = BTRFS_I(ordered->inode);
+	struct btrfs_fs_info *fs_info = inode->root->fs_info;
+	struct btrfs_ordered_sum *sum =
+		list_first_entry(&ordered->list, typeof(*sum), list);
+	u64 logical = sum->logical;
+	u64 len = sum->len;
+
+	while (len < ordered->disk_num_bytes) {
+		sum = list_next_entry(sum, list);
+		if (sum->logical == logical + len) {
+			len += sum->len;
+			continue;
+		}
+		if (!btrfs_zoned_split_ordered(ordered, logical, len)) {
+			set_bit(BTRFS_ORDERED_IOERR, &ordered->flags);
+			btrfs_err(fs_info, "failed to split ordered extent");
+			goto out;
+		}
+		logical = sum->logical;
+		len = sum->len;
+	}
+
+	if (ordered->disk_bytenr != logical)
+		btrfs_rewrite_logical_zoned(ordered, logical);
+
 out:
 	/*
@@ -1694,9 +1737,12 @@ void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered)
 	 * here so that we don't attempt to log the csums later.
 	 */
 	if ((inode->flags & BTRFS_INODE_NODATASUM) ||
-	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &inode->root->fs_info->fs_state)) {
-		list_del(&sum->list);
-		kfree(sum);
+	    test_bit(BTRFS_FS_STATE_NO_CSUMS, &fs_info->fs_state)) {
+		while ((sum = list_first_entry_or_null(&ordered->list,
+						       typeof(*sum), list))) {
+			list_del(&sum->list);
+			kfree(sum);
+		}
 	}
 }
......
@@ -30,6 +30,8 @@ struct btrfs_zoned_device_info {
 	struct blk_zone sb_zones[2 * BTRFS_SUPER_MIRROR_MAX];
 };
 
+void btrfs_finish_ordered_zoned(struct btrfs_ordered_extent *ordered);
+
 #ifdef CONFIG_BLK_DEV_ZONED
 int btrfs_get_dev_zone(struct btrfs_device *device, u64 pos,
 		       struct blk_zone *zone);
@@ -56,7 +58,6 @@ void btrfs_redirty_list_add(struct btrfs_transaction *trans,
 			    struct extent_buffer *eb);
 bool btrfs_use_zone_append(struct btrfs_bio *bbio);
 void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
-void btrfs_rewrite_logical_zoned(struct btrfs_ordered_extent *ordered);
 bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 				    struct extent_buffer *eb,
 				    struct btrfs_block_group **cache_ret);
@@ -188,9 +189,6 @@ static inline void btrfs_record_physical_zoned(struct btrfs_bio *bbio)
 {
 }
 
-static inline void btrfs_rewrite_logical_zoned(
-				struct btrfs_ordered_extent *ordered) { }
-
 static inline bool btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 						  struct extent_buffer *eb,
 						  struct btrfs_block_group **cache_ret)
......