Commit 1728366e authored by Josef Bacik

Btrfs: stop using write_one_page

While looking for a performance regression a user was complaining about, I
noticed that we had a regression with the varmail test of filebench.  This was
introduced by

0d10ee2e

which keeps us from calling writepages in writepage.  That is a correct change,
but the old behavior happened to help the varmail test because it wrote out in
larger chunks.  This is largely to do with how we write out dirty pages for each
transaction.  If you run filebench with

load varmail
set $dir=/mnt/btrfs-test
run 60

prior to 0d10ee2e you would get ~1420 ops/second, but with that patch you get
~1200 ops/second.  This is a 16% decrease.  Since we know the range of dirty
pages we want to write out, don't write them out in one-page chunks; write them
out in ranges.  To do this we call filemap_fdatawrite_range() on the range of bytes.
Then we convert the DIRTY extents to NEED_WAIT extents.  When we then call
btrfs_wait_marked_extents() we only have to filemap_fdatawait_range() on that
range and clear the NEED_WAIT extents.  This doesn't get us back to our original
speeds, but I've been seeing ~1380 ops/second, which is a <5% regression as
opposed to a >15% regression.  That is acceptable given that the original commit
greatly reduces our latency to begin with.  Thanks,
Signed-off-by: Josef Bacik <josef@redhat.com>
parent 462d6fac
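
In outline, the patch pairs the write loop and the wait loop like this. This is a condensed restatement of the diff below, not the literal patch: the declarations, error bookkeeping, and cond_resched() calls are trimmed.

	/* Write side: flush each marked dirty range with one call, retagging it for the wait pass. */
	while (!find_first_extent_bit(dirty_pages, start, &start, &end, mark)) {
		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark, GFP_NOFS);
		filemap_fdatawrite_range(mapping, start, end);
		start = end + 1;
	}

	/* Wait side: find the tagged ranges, clear the tag, and wait on just those bytes. */
	while (!find_first_extent_bit(dirty_pages, start, &start, &end, EXTENT_NEED_WAIT)) {
		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
		filemap_fdatawait_range(mapping, start, end);
		start = end + 1;
	}
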
@@ -17,6 +17,7 @@
 #define EXTENT_NODATASUM (1 << 10)
 #define EXTENT_DO_ACCOUNTING (1 << 11)
 #define EXTENT_FIRST_DELALLOC (1 << 12)
+#define EXTENT_NEED_WAIT (1 << 13)
 #define EXTENT_IOBITS (EXTENT_LOCKED | EXTENT_WRITEBACK)
 #define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING | EXTENT_FIRST_DELALLOC)
...
@@ -572,50 +572,21 @@ int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans,
 int btrfs_write_marked_extents(struct btrfs_root *root,
 			       struct extent_io_tree *dirty_pages, int mark)
 {
-	int ret;
 	int err = 0;
 	int werr = 0;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
 	u64 start = 0;
 	u64 end;
-	unsigned long index;
 
-	while (1) {
-		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-					    mark);
-		if (ret)
-			break;
-		while (start <= end) {
-			cond_resched();
-
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
-				continue;
-
-			btree_lock_page_hook(page);
-			if (!page->mapping) {
-				unlock_page(page);
-				page_cache_release(page);
-				continue;
-			}
-
-			if (PageWriteback(page)) {
-				if (PageDirty(page))
-					wait_on_page_writeback(page);
-				else {
-					unlock_page(page);
-					page_cache_release(page);
-					continue;
-				}
-			}
-			err = write_one_page(page, 0);
-			if (err)
-				werr = err;
-			page_cache_release(page);
-		}
+	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
+				      mark)) {
+		convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, mark,
+				   GFP_NOFS);
+		err = filemap_fdatawrite_range(mapping, start, end);
+		if (err)
+			werr = err;
+		cond_resched();
+		start = end + 1;
 	}
 	if (err)
 		werr = err;
@@ -631,39 +602,20 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
 int btrfs_wait_marked_extents(struct btrfs_root *root,
 			      struct extent_io_tree *dirty_pages, int mark)
 {
-	int ret;
 	int err = 0;
 	int werr = 0;
-	struct page *page;
-	struct inode *btree_inode = root->fs_info->btree_inode;
+	struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
 	u64 start = 0;
 	u64 end;
-	unsigned long index;
 
-	while (1) {
-		ret = find_first_extent_bit(dirty_pages, start, &start, &end,
-					    mark);
-		if (ret)
-			break;
-
-		clear_extent_bits(dirty_pages, start, end, mark, GFP_NOFS);
-		while (start <= end) {
-			index = start >> PAGE_CACHE_SHIFT;
-			start = (u64)(index + 1) << PAGE_CACHE_SHIFT;
-			page = find_get_page(btree_inode->i_mapping, index);
-			if (!page)
-				continue;
-			if (PageDirty(page)) {
-				btree_lock_page_hook(page);
-				wait_on_page_writeback(page);
-				err = write_one_page(page, 0);
-				if (err)
-					werr = err;
-			}
-			wait_on_page_writeback(page);
-			page_cache_release(page);
-			cond_resched();
-		}
+	while (!find_first_extent_bit(dirty_pages, start, &start, &end,
+				      EXTENT_NEED_WAIT)) {
+		clear_extent_bits(dirty_pages, start, end, EXTENT_NEED_WAIT, GFP_NOFS);
+		err = filemap_fdatawait_range(mapping, start, end);
+		if (err)
+			werr = err;
+		cond_resched();
+		start = end + 1;
 	}
 	if (err)
 		werr = err;
...