Commit dcc3eb96 authored by Nikolay Borisov, committed by David Sterba

btrfs: convert snapshot/nocow exclusion to drew lock

This patch removes all haphazard code implementing nocow writers
exclusion from pending snapshot creation and switches to using the drew
lock to ensure this invariant still holds.

'Readers' are snapshot creators (create_snapshot) and 'writers' are nocow
writers (the buffered write path and btrfs_setsize). This locking scheme
allows multiple snapshots to run concurrently while any nocow writers are
blocked, since writes to the page cache in the nocow path would make the
snapshots inconsistent.
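
To make the mapping concrete, here is a minimal sketch of how the two
roles use the lock, using only the btrfs_drew_* calls visible in the
diff below (error handling elided):

	/* Nocow writer (check_can_nocow): must not block on pending snapshots */
	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
		return -EAGAIN;	/* a snapshot is pending, fall back to COW */
	/* ... perform the nocow write ... */
	btrfs_drew_write_unlock(&root->snapshot_lock);

	/* Snapshot creator (create_snapshot): waits for nocow writers to drain */
	btrfs_drew_read_lock(&root->snapshot_lock);
	/* ... create the snapshot; other readers may hold the lock concurrently ... */
	btrfs_drew_read_unlock(&root->snapshot_lock);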

For performance reasons we'd like the ability to run multiple concurrent
snapshots, so the lock favors readers in this case. And in case there
aren't pending snapshots (which will be the majority of cases) we rely on
the lock's percpu writers counter to avoid cacheline contention.
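
As a rough sketch of that fast path, based on the DREW lock implementation
added in the parent commit (2992df73) — field names 'readers' and 'writers'
follow that commit and may differ in detail — the writer side only touches
the percpu counter when no reader is pending:

	bool btrfs_drew_try_write_lock(struct btrfs_drew_lock *lock)
	{
		if (atomic_read(&lock->readers))
			return false;

		percpu_counter_inc(&lock->writers);

		/* Make the writer count visible before re-checking for readers */
		smp_mb();
		if (atomic_read(&lock->readers)) {
			btrfs_drew_write_unlock(lock);
			return false;
		}

		return true;
	}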

The main gain from using the drew lock is that it's now a lot easier to
reason about the guarantees of the locking scheme and whether there is
some silent breakage lurking.
Signed-off-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 2992df73
@@ -957,11 +957,6 @@ static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
 	return sb->s_fs_info;
 }
 
-struct btrfs_subvolume_writers {
-	struct percpu_counter counter;
-	wait_queue_head_t wait;
-};
-
 /*
  * The state of btrfs root
  */
@@ -1133,8 +1128,9 @@ struct btrfs_root {
 	 * root_item_lock.
 	 */
 	int dedupe_in_progress;
-	struct btrfs_subvolume_writers *subv_writers;
-	atomic_t will_be_snapshotted;
+	/* For exclusion of snapshot creation and nocow writes */
+	struct btrfs_drew_lock snapshot_lock;
+
 	atomic_t snapshot_force_cow;
 	/* For qgroup metadata reserved space */
...
@@ -1104,32 +1104,6 @@ void btrfs_clean_tree_block(struct extent_buffer *buf)
 	}
 }
 
-static struct btrfs_subvolume_writers *btrfs_alloc_subvolume_writers(void)
-{
-	struct btrfs_subvolume_writers *writers;
-	int ret;
-
-	writers = kmalloc(sizeof(*writers), GFP_NOFS);
-	if (!writers)
-		return ERR_PTR(-ENOMEM);
-
-	ret = percpu_counter_init(&writers->counter, 0, GFP_NOFS);
-	if (ret < 0) {
-		kfree(writers);
-		return ERR_PTR(ret);
-	}
-
-	init_waitqueue_head(&writers->wait);
-	return writers;
-}
-
-static void
-btrfs_free_subvolume_writers(struct btrfs_subvolume_writers *writers)
-{
-	percpu_counter_destroy(&writers->counter);
-	kfree(writers);
-}
-
 static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 			 u64 objectid)
 {
@@ -1178,7 +1152,6 @@ static void __setup_root(struct btrfs_root *root, struct btrfs_fs_info *fs_info,
 	atomic_set(&root->log_writers, 0);
 	atomic_set(&root->log_batch, 0);
 	refcount_set(&root->refs, 1);
-	atomic_set(&root->will_be_snapshotted, 0);
 	atomic_set(&root->snapshot_force_cow, 0);
 	atomic_set(&root->nr_swapfiles, 0);
 	root->log_transid = 0;
@@ -1450,7 +1423,7 @@ struct btrfs_root *btrfs_read_tree_root(struct btrfs_root *tree_root,
 static int btrfs_init_fs_root(struct btrfs_root *root)
 {
 	int ret;
-	struct btrfs_subvolume_writers *writers;
+	unsigned int nofs_flag;
 
 	root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS);
 	root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned),
@@ -1460,12 +1433,15 @@ static int btrfs_init_fs_root(struct btrfs_root *root)
 		goto fail;
 	}
 
-	writers = btrfs_alloc_subvolume_writers();
-	if (IS_ERR(writers)) {
-		ret = PTR_ERR(writers);
+	/*
+	 * We might be called under a transaction (e.g. indirect backref
+	 * resolution) which could deadlock if it triggers memory reclaim
+	 */
+	nofs_flag = memalloc_nofs_save();
+	ret = btrfs_drew_lock_init(&root->snapshot_lock);
+	memalloc_nofs_restore(nofs_flag);
+	if (ret)
 		goto fail;
-	}
-	root->subv_writers = writers;
 
 	if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
 		set_bit(BTRFS_ROOT_REF_COWS, &root->state);
@@ -3961,8 +3937,7 @@ void btrfs_free_fs_root(struct btrfs_root *root)
 	WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree));
 	if (root->anon_dev)
 		free_anon_bdev(root->anon_dev);
-	if (root->subv_writers)
-		btrfs_free_subvolume_writers(root->subv_writers);
+	btrfs_drew_lock_destroy(&root->snapshot_lock);
 	free_extent_buffer(root->node);
 	free_extent_buffer(root->commit_root);
 	kfree(root->free_ino_ctl);
...
@@ -5740,47 +5740,3 @@ int btrfs_trim_fs(struct btrfs_fs_info *fs_info, struct fstrim_range *range)
 		return bg_ret;
 	return dev_ret;
 }
-
-/*
- * btrfs_{start,end}_write_no_snapshotting() are similar to
- * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
- * data into the page cache through nocow before the subvolume is snapshoted,
- * but flush the data into disk after the snapshot creation, or to prevent
- * operations while snapshotting is ongoing and that cause the snapshot to be
- * inconsistent (writes followed by expanding truncates for example).
- */
-void btrfs_end_write_no_snapshotting(struct btrfs_root *root)
-{
-	percpu_counter_dec(&root->subv_writers->counter);
-	cond_wake_up(&root->subv_writers->wait);
-}
-
-int btrfs_start_write_no_snapshotting(struct btrfs_root *root)
-{
-	if (atomic_read(&root->will_be_snapshotted))
-		return 0;
-
-	percpu_counter_inc(&root->subv_writers->counter);
-	/*
-	 * Make sure counter is updated before we check for snapshot creation.
-	 */
-	smp_mb();
-	if (atomic_read(&root->will_be_snapshotted)) {
-		btrfs_end_write_no_snapshotting(root);
-		return 0;
-	}
-	return 1;
-}
-
-void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
-{
-	while (true) {
-		int ret;
-
-		ret = btrfs_start_write_no_snapshotting(root);
-		if (ret)
-			break;
-		wait_var_event(&root->will_be_snapshotted,
-			       !atomic_read(&root->will_be_snapshotted));
-	}
-}
...
@@ -1553,8 +1553,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 	u64 num_bytes;
 	int ret;
 
-	ret = btrfs_start_write_no_snapshotting(root);
-	if (!ret)
+	if (!btrfs_drew_try_write_lock(&root->snapshot_lock))
 		return -EAGAIN;
 
 	lockstart = round_down(pos, fs_info->sectorsize);
@@ -1569,7 +1568,7 @@ static noinline int check_can_nocow(struct btrfs_inode *inode, loff_t pos,
 			NULL, NULL, NULL);
 	if (ret <= 0) {
 		ret = 0;
-		btrfs_end_write_no_snapshotting(root);
+		btrfs_drew_write_unlock(&root->snapshot_lock);
 	} else {
 		*write_bytes = min_t(size_t, *write_bytes ,
 				     num_bytes - pos + lockstart);
@@ -1675,7 +1674,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 						data_reserved, pos,
 						write_bytes);
 			else
-				btrfs_end_write_no_snapshotting(root);
+				btrfs_drew_write_unlock(&root->snapshot_lock);
 			break;
 		}
@@ -1779,7 +1778,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 			release_bytes = 0;
 			if (only_release_metadata)
-				btrfs_end_write_no_snapshotting(root);
+				btrfs_drew_write_unlock(&root->snapshot_lock);
 
 			if (only_release_metadata && copied > 0) {
 				lockstart = round_down(pos,
@@ -1808,7 +1807,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 	if (release_bytes) {
 		if (only_release_metadata) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drew_write_unlock(&root->snapshot_lock);
 			btrfs_delalloc_release_metadata(BTRFS_I(inode),
 					release_bytes, true);
 		} else {
...
@@ -4726,16 +4726,16 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		 * truncation, it must capture all writes that happened before
 		 * this truncation.
 		 */
-		btrfs_wait_for_snapshot_creation(root);
+		btrfs_drew_write_lock(&root->snapshot_lock);
 		ret = btrfs_cont_expand(inode, oldsize, newsize);
 		if (ret) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drew_write_unlock(&root->snapshot_lock);
 			return ret;
 		}
 
 		trans = btrfs_start_transaction(root, 1);
 		if (IS_ERR(trans)) {
-			btrfs_end_write_no_snapshotting(root);
+			btrfs_drew_write_unlock(&root->snapshot_lock);
 			return PTR_ERR(trans);
 		}
@@ -4743,7 +4743,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
 		btrfs_inode_safe_disk_i_size_write(inode, 0);
 		pagecache_isize_extended(inode, oldsize, newsize);
 		ret = btrfs_update_inode(trans, root, inode);
-		btrfs_end_write_no_snapshotting(root);
+		btrfs_drew_write_unlock(&root->snapshot_lock);
 		btrfs_end_transaction(trans);
 	} else {
...
@@ -791,11 +791,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 	 * possible. This is to avoid later writeback (running dealloc) to
 	 * fallback to COW mode and unexpectedly fail with ENOSPC.
 	 */
-	atomic_inc(&root->will_be_snapshotted);
-	smp_mb__after_atomic();
-	/* wait for no snapshot writes */
-	wait_event(root->subv_writers->wait,
-		   percpu_counter_sum(&root->subv_writers->counter) == 0);
+	btrfs_drew_read_lock(&root->snapshot_lock);
 
 	ret = btrfs_start_delalloc_snapshot(root);
 	if (ret)
@@ -876,8 +872,8 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
 dec_and_free:
 	if (snapshot_force_cow)
 		atomic_dec(&root->snapshot_force_cow);
-	if (atomic_dec_and_test(&root->will_be_snapshotted))
-		wake_up_var(&root->will_be_snapshotted);
+	btrfs_drew_read_unlock(&root->snapshot_lock);
+
 free_pending:
 	kfree(pending_snapshot->root_item);
 	btrfs_free_path(pending_snapshot->path);