Commit b4ccace8 authored by Qu Wenruo, committed by David Sterba

btrfs: refactor submit_compressed_extents()

We have a big chunk of code inside a while() loop, with tons of strange
jumps for error handling.  It is definitely not up to today's coding
standards.  Move the code into a new function, submit_one_async_extent().

Since we're here, also do the following changes:

- Comment style change
  To follow the current scheme

- Don't fall back to non-compressed write when hitting ENOSPC
  If we hit ENOSPC for a compressed write, how could we reserve more space
  for a non-compressed write, which needs even more space?
  Thus we go down the error path directly.
  This removes the retry: label.

- Add more comments for the super long parameter lists
  Explain what each parameter is for, so we don't need to check the
  prototype.

- Move the error handling into submit_one_async_extent()
  Thus there is no more strange code like:

  out_free:
	...
	goto again;
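
  The resulting shape, as a simplified sketch with made-up names (struct item,
  reserve_space(), cleanup_range(), free_item() are all hypothetical stand-ins,
  relying on the usual <linux/list.h> and <linux/printk.h> helpers; this is not
  the actual btrfs code): the per-extent work and its error handling live in a
  helper that returns an error code, so the caller's loop needs no retry:/again:
  labels.

  struct item {
          struct list_head list;
          u64 start;
          u64 len;
  };

  /* One unit of work; all error handling is local to this function. */
  static int submit_one_item(struct item *it, u64 *hint)
  {
          int ret;

          ret = reserve_space(it, *hint); /* may fail, e.g. with -ENOSPC */
          if (ret)
                  goto out_free;          /* no fallback or retry, error out */

          *hint = it->start + it->len;
          free_item(it);
          return 0;
  out_free:
          cleanup_range(it);
          free_item(it);
          return ret;
  }

  /* The caller is reduced to a plain loop over the pending items. */
  static void submit_all(struct list_head *items)
  {
          u64 hint = 0;

          while (!list_empty(items)) {
                  struct item *it = list_first_entry(items, struct item, list);

                  list_del(&it->list);
                  if (submit_one_item(it, &hint))
                          pr_debug("submission failed\n");
          }
  }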
Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 6aabd858
@@ -839,163 +839,121 @@ static void free_async_extent_pages(struct async_extent *async_extent)
         async_extent->pages = NULL;
 }
 
-/*
- * phase two of compressed writeback.  This is the ordered portion
- * of the code, which only gets called in the order the work was
- * queued. We walk all the async extents created by compress_file_range
- * and send them down to the disk.
- */
-static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
+static int submit_one_async_extent(struct btrfs_inode *inode,
+                                   struct async_chunk *async_chunk,
+                                   struct async_extent *async_extent,
+                                   u64 *alloc_hint)
 {
-        struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
-        struct btrfs_fs_info *fs_info = inode->root->fs_info;
-        struct async_extent *async_extent;
-        u64 alloc_hint = 0;
+        struct extent_io_tree *io_tree = &inode->io_tree;
+        struct btrfs_root *root = inode->root;
+        struct btrfs_fs_info *fs_info = root->fs_info;
         struct btrfs_key ins;
         struct extent_map *em;
-        struct btrfs_root *root = inode->root;
-        struct extent_io_tree *io_tree = &inode->io_tree;
         int ret = 0;
+        u64 start = async_extent->start;
+        u64 end = async_extent->start + async_extent->ram_size - 1;
 
-again:
-        while (!list_empty(&async_chunk->extents)) {
-                async_extent = list_entry(async_chunk->extents.next,
-                                          struct async_extent, list);
-                list_del(&async_extent->list);
+        lock_extent(io_tree, start, end);
 
-retry:
-                lock_extent(io_tree, async_extent->start,
-                            async_extent->start + async_extent->ram_size - 1);
-                /* did the compression code fall back to uncompressed IO? */
-                if (!async_extent->pages) {
-                        int page_started = 0;
-                        unsigned long nr_written = 0;
+        /* We have fallback to uncompressed write */
+        if (!async_extent->pages) {
+                int page_started = 0;
+                unsigned long nr_written = 0;
 
-                        /* allocate blocks */
-                        ret = cow_file_range(inode, async_chunk->locked_page,
-                                             async_extent->start,
-                                             async_extent->start +
-                                             async_extent->ram_size - 1,
-                                             &page_started, &nr_written, 0);
-
-                        /* JDM XXX */
-
-                        /*
-                         * if page_started, cow_file_range inserted an
-                         * inline extent and took care of all the unlocking
-                         * and IO for us.  Otherwise, we need to submit
-                         * all those pages down to the drive.
-                         */
-                        if (!page_started && !ret)
-                                extent_write_locked_range(&inode->vfs_inode,
-                                                  async_extent->start,
-                                                  async_extent->start +
-                                                  async_extent->ram_size - 1,
-                                                  WB_SYNC_ALL);
-                        else if (ret && async_chunk->locked_page)
-                                unlock_page(async_chunk->locked_page);
-                        kfree(async_extent);
-                        cond_resched();
-                        continue;
-                }
-
-                ret = btrfs_reserve_extent(root, async_extent->ram_size,
-                                           async_extent->compressed_size,
-                                           async_extent->compressed_size,
-                                           0, alloc_hint, &ins, 1, 1);
-                if (ret) {
-                        free_async_extent_pages(async_extent);
-
-                        if (ret == -ENOSPC) {
-                                unlock_extent(io_tree, async_extent->start,
-                                              async_extent->start +
-                                              async_extent->ram_size - 1);
-
-                                /*
-                                 * we need to redirty the pages if we decide to
-                                 * fallback to uncompressed IO, otherwise we
-                                 * will not submit these pages down to lower
-                                 * layers.
-                                 */
-                                extent_range_redirty_for_io(&inode->vfs_inode,
-                                                async_extent->start,
-                                                async_extent->start +
-                                                async_extent->ram_size - 1);
-
-                                goto retry;
-                        }
-                        goto out_free;
-                }
+                /*
+                 * Call cow_file_range() to run the delalloc range directly,
+                 * since we won't go to nocow or async path again.
+                 */
+                ret = cow_file_range(inode, async_chunk->locked_page,
+                                     start, end, &page_started, &nr_written, 0);
                 /*
-                 * here we're doing allocation and writeback of the
-                 * compressed pages
+                 * If @page_started, cow_file_range() inserted an inline extent
+                 * and took care of all the unlocking and IO for us.
+                 * Otherwise, we need to submit all those pages down to the
+                 * drive.
                  */
-                em = create_io_em(inode, async_extent->start,
-                                  async_extent->ram_size, /* len */
-                                  async_extent->start, /* orig_start */
-                                  ins.objectid, /* block_start */
-                                  ins.offset, /* block_len */
-                                  ins.offset, /* orig_block_len */
-                                  async_extent->ram_size, /* ram_bytes */
-                                  async_extent->compress_type,
-                                  BTRFS_ORDERED_COMPRESSED);
-                if (IS_ERR(em))
-                        /* ret value is not necessary due to void function */
-                        goto out_free_reserve;
-                free_extent_map(em);
+                if (!page_started && !ret)
+                        extent_write_locked_range(&inode->vfs_inode, start,
+                                                  end, WB_SYNC_ALL);
+                else if (ret && async_chunk->locked_page)
+                        unlock_page(async_chunk->locked_page);
+                kfree(async_extent);
+                return ret;
+        }
+
+        ret = btrfs_reserve_extent(root, async_extent->ram_size,
+                                   async_extent->compressed_size,
+                                   async_extent->compressed_size,
+                                   0, *alloc_hint, &ins, 1, 1);
+        if (ret) {
+                free_async_extent_pages(async_extent);
+                /*
+                 * Here we used to try again by going back to non-compressed
+                 * path for ENOSPC.  But we can't reserve space even for
+                 * compressed size, how could it work for uncompressed size
+                 * which requires larger size?  So here we directly go error
+                 * path.
+                 */
+                goto out_free;
+        }
+
+        /* Here we're doing allocation and writeback of the compressed pages */
+        em = create_io_em(inode, start,
+                          async_extent->ram_size,       /* len */
+                          start,                        /* orig_start */
+                          ins.objectid,                 /* block_start */
+                          ins.offset,                   /* block_len */
+                          ins.offset,                   /* orig_block_len */
+                          async_extent->ram_size,       /* ram_bytes */
+                          async_extent->compress_type,
+                          BTRFS_ORDERED_COMPRESSED);
+        if (IS_ERR(em)) {
+                ret = PTR_ERR(em);
+                goto out_free_reserve;
+        }
+        free_extent_map(em);
 
-                ret = btrfs_add_ordered_extent_compress(inode,
-                                async_extent->start,
-                                ins.objectid,
-                                async_extent->ram_size,
-                                ins.offset,
-                                async_extent->compress_type);
-                if (ret) {
-                        btrfs_drop_extent_cache(inode, async_extent->start,
-                                                async_extent->start +
-                                                async_extent->ram_size - 1, 0);
-                        goto out_free_reserve;
-                }
-                btrfs_dec_block_group_reservations(fs_info, ins.objectid);
+        ret = btrfs_add_ordered_extent_compress(inode, start,  /* file_offset */
+                                        ins.objectid,           /* disk_bytenr */
+                                        async_extent->ram_size, /* num_bytes */
+                                        ins.offset,             /* disk_num_bytes */
+                                        async_extent->compress_type);
+        if (ret) {
+                btrfs_drop_extent_cache(inode, start, end, 0);
+                goto out_free_reserve;
+        }
+        btrfs_dec_block_group_reservations(fs_info, ins.objectid);
 
-                /*
-                 * clear dirty, set writeback and unlock the pages.
-                 */
-                extent_clear_unlock_delalloc(inode, async_extent->start,
-                                async_extent->start +
-                                async_extent->ram_size - 1,
-                                NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
-                                PAGE_UNLOCK | PAGE_START_WRITEBACK);
-                if (btrfs_submit_compressed_write(inode, async_extent->start,
-                                    async_extent->ram_size,
-                                    ins.objectid,
-                                    ins.offset, async_extent->pages,
-                                    async_extent->nr_pages,
-                                    async_chunk->write_flags,
-                                    async_chunk->blkcg_css)) {
-                        const u64 start = async_extent->start;
-                        const u64 end = start + async_extent->ram_size - 1;
+        /* Clear dirty, set writeback and unlock the pages. */
+        extent_clear_unlock_delalloc(inode, start, end,
+                        NULL, EXTENT_LOCKED | EXTENT_DELALLOC,
+                        PAGE_UNLOCK | PAGE_START_WRITEBACK);
+        if (btrfs_submit_compressed_write(inode, start, /* file_offset */
+                            async_extent->ram_size,     /* num_bytes */
+                            ins.objectid,               /* disk_bytenr */
+                            ins.offset,                 /* compressed_len */
+                            async_extent->pages,        /* compressed_pages */
+                            async_extent->nr_pages,
+                            async_chunk->write_flags,
+                            async_chunk->blkcg_css)) {
+                const u64 start = async_extent->start;
+                const u64 end = start + async_extent->ram_size - 1;
 
-                        btrfs_writepage_endio_finish_ordered(inode, NULL, start,
-                                                             end, false);
+                btrfs_writepage_endio_finish_ordered(inode, NULL, start, end, 0);
 
-                        extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
-                                                     PAGE_END_WRITEBACK |
-                                                     PAGE_SET_ERROR);
-                        free_async_extent_pages(async_extent);
-                }
-                alloc_hint = ins.objectid + ins.offset;
-                kfree(async_extent);
-                cond_resched();
+                extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
+                                             PAGE_END_WRITEBACK | PAGE_SET_ERROR);
+                free_async_extent_pages(async_extent);
         }
-        return;
+        *alloc_hint = ins.objectid + ins.offset;
+        kfree(async_extent);
+        return ret;
+
 out_free_reserve:
         btrfs_dec_block_group_reservations(fs_info, ins.objectid);
         btrfs_free_reserved_extent(fs_info, ins.objectid, ins.offset, 1);
 out_free:
-        extent_clear_unlock_delalloc(inode, async_extent->start,
-                                     async_extent->start +
-                                     async_extent->ram_size - 1,
+        extent_clear_unlock_delalloc(inode, start, end,
                                      NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
                                      EXTENT_DELALLOC_NEW |
                                      EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
@@ -1003,7 +961,39 @@ static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
                                      PAGE_END_WRITEBACK | PAGE_SET_ERROR);
         free_async_extent_pages(async_extent);
         kfree(async_extent);
-        goto again;
+        return ret;
+}
+
+/*
+ * Phase two of compressed writeback. This is the ordered portion of the code,
+ * which only gets called in the order the work was queued. We walk all the
+ * async extents created by compress_file_range and send them down to the disk.
+ */
+static noinline void submit_compressed_extents(struct async_chunk *async_chunk)
+{
+        struct btrfs_inode *inode = BTRFS_I(async_chunk->inode);
+        struct btrfs_fs_info *fs_info = inode->root->fs_info;
+        struct async_extent *async_extent;
+        u64 alloc_hint = 0;
+        int ret = 0;
+
+        while (!list_empty(&async_chunk->extents)) {
+                u64 extent_start;
+                u64 ram_size;
+
+                async_extent = list_entry(async_chunk->extents.next,
+                                          struct async_extent, list);
+                list_del(&async_extent->list);
+                extent_start = async_extent->start;
+                ram_size = async_extent->ram_size;
+
+                ret = submit_one_async_extent(inode, async_chunk, async_extent,
+                                              &alloc_hint);
+                btrfs_debug(fs_info,
+"async extent submission failed root=%lld inode=%llu start=%llu len=%llu ret=%d",
+                            inode->root->root_key.objectid,
+                            btrfs_ino(inode), extent_start, ram_size, ret);
+        }
 }
 
 static u64 get_extent_allocation_hint(struct btrfs_inode *inode, u64 start,