Commit 0356ad41 authored by Naohiro Aota, committed by David Sterba

btrfs: zoned: defer advancing meta write pointer

We currently advance the meta_write_pointer in
btrfs_check_meta_write_pointer(). That makes it necessary to revert it
when locking the buffer fails. Instead, we can advance it just before
sending the buffer.

This is also necessary for the following commit, which needs to release
the zoned_meta_io_lock to allow IOs to come in and wait for them to fill
the currently active block group. If we advanced the meta_write_pointer
before locking the extent buffer, the following extent buffer could pass
the meta_write_pointer check during that window, resulting in an
unaligned write failure.

Advancing the pointer is still thread-safe as the extent buffer is locked.
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2ad8c051
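
To make the new ordering concrete, here is a minimal userspace sketch (plain C, compilable on its own) of the flow the message describes: the check becomes read-only and the pointer advances only once the buffer lock is held, so a failed lock has nothing to revert. This is a sketch, not kernel code; simple_bg, simple_eb, and try_lock_eb are hypothetical stand-ins for btrfs_block_group, extent_buffer, and lock_extent_buffer_for_io().

/*
 * Userspace sketch of the deferred write pointer advance. The names
 * simple_bg, simple_eb and try_lock_eb are hypothetical stand-ins,
 * not kernel APIs.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

struct simple_bg {
	unsigned long long meta_write_pointer;
};

struct simple_eb {
	unsigned long long start;
	unsigned long long len;
	bool locked;			/* stand-in for the eb lock */
};

/* New-style check: read-only, so failing it has nothing to undo. */
static int check_meta_write_pointer(const struct simple_bg *bg,
				    const struct simple_eb *eb)
{
	return bg->meta_write_pointer == eb->start ? 0 : -1; /* ~ -EBUSY */
}

static bool try_lock_eb(struct simple_eb *eb)
{
	if (eb->locked)
		return false;
	eb->locked = true;
	return true;
}

/* Submit path: check, lock, and only then advance the pointer. */
static int submit_eb(struct simple_bg *bg, struct simple_eb *eb)
{
	if (check_meta_write_pointer(bg, eb))
		return -1;
	if (!try_lock_eb(eb))
		return 0;	/* lock failed: pointer untouched, no revert */

	/* Safe: the buffer lock serializes writers of this buffer. */
	bg->meta_write_pointer += eb->len;
	printf("write eb @%llu, wp now %llu\n", eb->start,
	       bg->meta_write_pointer);
	eb->locked = false;
	return 0;
}

int main(void)
{
	struct simple_bg bg = { .meta_write_pointer = 4096 };
	struct simple_eb eb1 = { .start = 4096, .len = 16384, .locked = false };
	struct simple_eb eb2 = { .start = 20480, .len = 16384, .locked = false };

	assert(submit_eb(&bg, &eb1) == 0);	/* advances wp to 20480 */
	assert(submit_eb(&bg, &eb2) == 0);	/* advances wp to 36864 */
	assert(bg.meta_write_pointer == 36864);
	return 0;
}

Compiled with e.g. gcc -Wall sketch.c, both submits run in order and the program exits cleanly; moving the advance back before try_lock_eb() would require restoring the pointer on the failure path, which is exactly the revert this commit removes.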
fs/btrfs/extent_io.c
@@ -1855,15 +1855,14 @@ static int submit_eb_page(struct page *page, struct btrfs_eb_write_context *ctx)
 	}
 
 	if (!lock_extent_buffer_for_io(eb, wbc)) {
-		btrfs_revert_meta_write_pointer(ctx->zoned_bg, eb);
 		free_extent_buffer(eb);
 		return 0;
 	}
+	/* Implies write in zoned mode. */
 	if (ctx->zoned_bg) {
-		/*
-		 * Implies write in zoned mode. Mark the last eb in a block group.
-		 */
+		/* Mark the last eb in the block group. */
 		btrfs_schedule_zone_finish_bg(ctx->zoned_bg, eb);
+		ctx->zoned_bg->meta_write_pointer += eb->len;
 	}
 	write_one_eb(eb, wbc);
 	free_extent_buffer(eb);
fs/btrfs/zoned.c
@@ -1781,11 +1781,8 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 		ctx->zoned_bg = block_group;
 	}
 
-	if (block_group->meta_write_pointer == eb->start) {
-		block_group->meta_write_pointer = eb->start + eb->len;
-
+	if (block_group->meta_write_pointer == eb->start)
 		return 0;
-	}
 
 	/* If for_sync, this hole will be filled with transaction commit. */
 	if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync)
@@ -1793,16 +1790,6 @@ int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 	return -EBUSY;
 }
 
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
-				     struct extent_buffer *eb)
-{
-	if (!btrfs_is_zoned(eb->fs_info) || !cache)
-		return;
-
-	ASSERT(cache->meta_write_pointer == eb->start + eb->len);
-	cache->meta_write_pointer = eb->start;
-}
-
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length)
 {
 	if (!btrfs_dev_is_sequential(device, physical))
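A note on the design choice visible above: since btrfs_check_meta_write_pointer() no longer mutates the write pointer, a caller that fails to lock the extent buffer can simply retry the check later, and btrfs_revert_meta_write_pointer() loses its only caller. Both the zoned implementation and the !CONFIG_BLK_DEV_ZONED stub in zoned.h below can therefore be dropped.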
fs/btrfs/zoned.h
@@ -60,8 +60,6 @@ bool btrfs_use_zone_append(struct btrfs_bio *bbio);
 void btrfs_record_physical_zoned(struct btrfs_bio *bbio);
 int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 				   struct btrfs_eb_write_context *ctx);
-void btrfs_revert_meta_write_pointer(struct btrfs_block_group *cache,
-				     struct extent_buffer *eb);
 int btrfs_zoned_issue_zeroout(struct btrfs_device *device, u64 physical, u64 length);
 int btrfs_sync_zone_write_pointer(struct btrfs_device *tgt_dev, u64 logical,
 				  u64 physical_start, u64 physical_pos);
@@ -194,12 +192,6 @@ static inline int btrfs_check_meta_write_pointer(struct btrfs_fs_info *fs_info,
 	return 0;
 }
 
-static inline void btrfs_revert_meta_write_pointer(
-				struct btrfs_block_group *cache,
-				struct extent_buffer *eb)
-{
-}
-
 static inline int btrfs_zoned_issue_zeroout(struct btrfs_device *device,
 					    u64 physical, u64 length)
 {