Commit e64df3eb authored by Linus Torvalds

Merge branch 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs updates from Chris Mason:
 "These are all fixes I'd like to get out to a broader audience.

  The biggest of the bunch is Mark's quota fix, which is also in the
  SUSE kernel, and makes our subvolume quotas dramatically more
  accurate.

  I've been running xfstests with these against your current git
  overnight, but I'm queueing up longer tests as well"

* 'for-linus2' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  btrfs: disable strict file flushes for renames and truncates
  Btrfs: fix csum tree corruption, duplicate and outdated checksums
  Btrfs: Fix memory corruption by ulist_add_merge() on 32bit arch
  Btrfs: fix compressed write corruption on enospc
  btrfs: correctly handle return from ulist_add
  btrfs: qgroup: account shared subtrees during snapshot delete
  Btrfs: read lock extent buffer while walking backrefs
  Btrfs: __btrfs_mod_ref should always use no_quota
  btrfs: adjust statfs calculations according to raid profiles
parents 53b95d63 8d875f95
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -276,9 +276,8 @@ static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
         }
         if (ret > 0)
             goto next;
-        ret = ulist_add_merge(parents, eb->start,
-                      (uintptr_t)eie,
-                      (u64 *)&old, GFP_NOFS);
+        ret = ulist_add_merge_ptr(parents, eb->start,
+                      eie, (void **)&old, GFP_NOFS);
         if (ret < 0)
             break;
         if (!ret && extent_item_pos) {
@@ -1001,16 +1000,19 @@ static int find_parent_nodes(struct btrfs_trans_handle *trans,
                     ret = -EIO;
                     goto out;
                 }
+                btrfs_tree_read_lock(eb);
+                btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
                 ret = find_extent_in_eb(eb, bytenr,
                             *extent_item_pos, &eie);
+                btrfs_tree_read_unlock_blocking(eb);
                 free_extent_buffer(eb);
                 if (ret < 0)
                     goto out;
                 ref->inode_list = eie;
             }
-            ret = ulist_add_merge(refs, ref->parent,
-                          (uintptr_t)ref->inode_list,
-                          (u64 *)&eie, GFP_NOFS);
+            ret = ulist_add_merge_ptr(refs, ref->parent,
+                          ref->inode_list,
+                          (void **)&eie, GFP_NOFS);
             if (ret < 0)
                 goto out;
             if (!ret && extent_item_pos) {
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -84,12 +84,6 @@ struct btrfs_inode {
      */
     struct list_head delalloc_inodes;
-    /*
-     * list for tracking inodes that must be sent to disk before a
-     * rename or truncate commit
-     */
-    struct list_head ordered_operations;
     /* node for the red-black tree that links inodes in subvolume root */
     struct rb_node rb_node;
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -280,9 +280,9 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
     WARN_ON(btrfs_header_generation(buf) > trans->transid);
     if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
-        ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+        ret = btrfs_inc_ref(trans, root, cow, 1);
     else
-        ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+        ret = btrfs_inc_ref(trans, root, cow, 0);
     if (ret)
         return ret;
@@ -1035,14 +1035,14 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
         if ((owner == root->root_key.objectid ||
              root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
             !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
-            ret = btrfs_inc_ref(trans, root, buf, 1, 1);
+            ret = btrfs_inc_ref(trans, root, buf, 1);
             BUG_ON(ret); /* -ENOMEM */
             if (root->root_key.objectid ==
                 BTRFS_TREE_RELOC_OBJECTID) {
-                ret = btrfs_dec_ref(trans, root, buf, 0, 1);
+                ret = btrfs_dec_ref(trans, root, buf, 0);
                 BUG_ON(ret); /* -ENOMEM */
-                ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+                ret = btrfs_inc_ref(trans, root, cow, 1);
                 BUG_ON(ret); /* -ENOMEM */
             }
             new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
@@ -1050,9 +1050,9 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
             if (root->root_key.objectid ==
                 BTRFS_TREE_RELOC_OBJECTID)
-                ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+                ret = btrfs_inc_ref(trans, root, cow, 1);
             else
-                ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+                ret = btrfs_inc_ref(trans, root, cow, 0);
             BUG_ON(ret); /* -ENOMEM */
         }
         if (new_flags != 0) {
@@ -1069,11 +1069,11 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
         if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
             if (root->root_key.objectid ==
                 BTRFS_TREE_RELOC_OBJECTID)
-                ret = btrfs_inc_ref(trans, root, cow, 1, 1);
+                ret = btrfs_inc_ref(trans, root, cow, 1);
             else
-                ret = btrfs_inc_ref(trans, root, cow, 0, 1);
+                ret = btrfs_inc_ref(trans, root, cow, 0);
             BUG_ON(ret); /* -ENOMEM */
-            ret = btrfs_dec_ref(trans, root, buf, 1, 1);
+            ret = btrfs_dec_ref(trans, root, buf, 1);
             BUG_ON(ret); /* -ENOMEM */
         }
         clean_tree_block(trans, root, buf);
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3326,9 +3326,9 @@ int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
              u64 min_alloc_size, u64 empty_size, u64 hint_byte,
              struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-          struct extent_buffer *buf, int full_backref, int no_quota);
+          struct extent_buffer *buf, int full_backref);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
-          struct extent_buffer *buf, int full_backref, int no_quota);
+          struct extent_buffer *buf, int full_backref);
 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
                 struct btrfs_root *root,
                 u64 bytenr, u64 num_bytes, u64 flags,
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -60,8 +60,6 @@ static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
                    int read_only);
-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
-                         struct btrfs_root *root);
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root);
 static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
                       struct btrfs_root *root);
@@ -3829,34 +3827,6 @@ static void btrfs_error_commit_super(struct btrfs_root *root)
     btrfs_cleanup_transaction(root);
 }
-static void btrfs_destroy_ordered_operations(struct btrfs_transaction *t,
-                         struct btrfs_root *root)
-{
-    struct btrfs_inode *btrfs_inode;
-    struct list_head splice;
-    INIT_LIST_HEAD(&splice);
-    mutex_lock(&root->fs_info->ordered_operations_mutex);
-    spin_lock(&root->fs_info->ordered_root_lock);
-    list_splice_init(&t->ordered_operations, &splice);
-    while (!list_empty(&splice)) {
-        btrfs_inode = list_entry(splice.next, struct btrfs_inode,
-                     ordered_operations);
-        list_del_init(&btrfs_inode->ordered_operations);
-        spin_unlock(&root->fs_info->ordered_root_lock);
-        btrfs_invalidate_inodes(btrfs_inode->root);
-        spin_lock(&root->fs_info->ordered_root_lock);
-    }
-    spin_unlock(&root->fs_info->ordered_root_lock);
-    mutex_unlock(&root->fs_info->ordered_operations_mutex);
-}
 static void btrfs_destroy_ordered_extents(struct btrfs_root *root)
 {
     struct btrfs_ordered_extent *ordered;
@@ -4093,8 +4063,6 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
                    struct btrfs_root *root)
 {
-    btrfs_destroy_ordered_operations(cur_trans, root);
     btrfs_destroy_delayed_refs(cur_trans, root);
     cur_trans->state = TRANS_STATE_COMMIT_START;
[fs/btrfs/extent-tree.c: diff collapsed in the original page]
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -756,7 +756,7 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                 found_next = 1;
             if (ret != 0)
                 goto insert;
-            slot = 0;
+            slot = path->slots[0];
         }
         btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
         if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
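The one-line fix above hinges on a btree convention: when the search misses, btrfs_search_slot() returns a value greater than zero and leaves path->slots[0] at the position where the key would be inserted. Resetting slot to 0, as the old code did, made the following btrfs_item_key_to_cpu() examine an unrelated item, which is how duplicate and stale checksum items could survive. A minimal standalone sketch of that return convention (hypothetical search_slot() helper, not btrfs code):

#include <stdio.h>

/* Hypothetical stand-in for btrfs_search_slot(): returns 0 on an exact
 * match, 1 on a miss, and always leaves *slot at the position where the
 * key lives or would be inserted. */
static int search_slot(const int *keys, int n, int target, int *slot)
{
    int lo = 0, hi = n;

    while (lo < hi) {
        int mid = lo + (hi - lo) / 2;

        if (keys[mid] < target)
            lo = mid + 1;
        else
            hi = mid;
    }
    *slot = lo; /* insertion position */
    return (lo < n && keys[lo] == target) ? 0 : 1;
}

int main(void)
{
    int keys[] = { 10, 20, 40 }; /* a sorted "leaf" */
    int slot;
    int ret = search_slot(keys, 3, 30, &slot);

    /* ret == 1 (miss), slot == 2: the item the caller must examine next.
     * Using slot 0 instead would read key 10, an unrelated item -- the
     * shape of the csum bug fixed above. */
    printf("ret=%d slot=%d key=%d\n", ret, slot, keys[slot]);
    return 0;
}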
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1838,33 +1838,9 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 int btrfs_release_file(struct inode *inode, struct file *filp)
 {
-    /*
-     * ordered_data_close is set by setattr when we are about to truncate
-     * a file from a non-zero size to a zero size. This tries to
-     * flush down new bytes that may have been written if the
-     * application were using truncate to replace a file in place.
-     */
-    if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
-                   &BTRFS_I(inode)->runtime_flags)) {
-        struct btrfs_trans_handle *trans;
-        struct btrfs_root *root = BTRFS_I(inode)->root;
-        /*
-         * We need to block on a committing transaction to keep us from
-         * throwing an ordered operation on to the list and causing
-         * something like sync to deadlock trying to flush out this
-         * inode.
-         */
-        trans = btrfs_start_transaction(root, 0);
-        if (IS_ERR(trans))
-            return PTR_ERR(trans);
-        btrfs_add_ordered_operation(trans, BTRFS_I(inode)->root, inode);
-        btrfs_end_transaction(trans, root);
-        if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
-            filemap_flush(inode->i_mapping);
-    }
     if (filp->private_data)
         btrfs_ioctl_trans_end(filp);
+    filemap_flush(inode->i_mapping);
     return 0;
 }
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -709,6 +709,18 @@ static noinline int submit_compressed_extents(struct inode *inode,
                 unlock_extent(io_tree, async_extent->start,
                           async_extent->start +
                           async_extent->ram_size - 1);
+                /*
+                 * we need to redirty the pages if we decide to
+                 * fall back to uncompressed IO, otherwise we
+                 * will not submit these pages down to lower
+                 * layers.
+                 */
+                extent_range_redirty_for_io(inode,
+                        async_extent->start,
+                        async_extent->start +
+                        async_extent->ram_size - 1);
                 goto retry;
             }
             goto out_free;
@@ -7938,27 +7950,6 @@ static int btrfs_truncate(struct inode *inode)
                       min_size);
     BUG_ON(ret);
-    /*
-     * setattr is responsible for setting the ordered_data_close flag,
-     * but that is only tested during the last file release. That
-     * could happen well after the next commit, leaving a great big
-     * window where new writes may get lost if someone chooses to write
-     * to this file after truncating to zero
-     *
-     * The inode doesn't have any dirty data here, and so if we commit
-     * this is a noop. If someone immediately starts writing to the inode
-     * it is very likely we'll catch some of their writes in this
-     * transaction, and the commit will find this file on the ordered
-     * data list with good things to send down.
-     *
-     * This is a best effort solution, there is still a window where
-     * using truncate to replace the contents of the file will
-     * end up with a zero length file after a crash.
-     */
-    if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
-                       &BTRFS_I(inode)->runtime_flags))
-        btrfs_add_ordered_operation(trans, root, inode);
     /*
      * So if we truncate and then write and fsync we normally would just
      * write the extents that changed, which is a problem if we need to
@@ -8106,7 +8097,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
     mutex_init(&ei->delalloc_mutex);
     btrfs_ordered_inode_tree_init(&ei->ordered_tree);
     INIT_LIST_HEAD(&ei->delalloc_inodes);
-    INIT_LIST_HEAD(&ei->ordered_operations);
     RB_CLEAR_NODE(&ei->rb_node);
     return inode;
@@ -8146,17 +8136,6 @@ void btrfs_destroy_inode(struct inode *inode)
     if (!root)
         goto free;
-    /*
-     * Make sure we're properly removed from the ordered operation
-     * lists.
-     */
-    smp_mb();
-    if (!list_empty(&BTRFS_I(inode)->ordered_operations)) {
-        spin_lock(&root->fs_info->ordered_root_lock);
-        list_del_init(&BTRFS_I(inode)->ordered_operations);
-        spin_unlock(&root->fs_info->ordered_root_lock);
-    }
     if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
              &BTRFS_I(inode)->runtime_flags)) {
         btrfs_info(root->fs_info, "inode %llu still on the orphan list",
@@ -8338,12 +8317,10 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
     ret = 0;
     /*
-     * we're using rename to replace one file with another.
-     * and the replacement file is large. Start IO on it now so
-     * we don't add too much work to the end of the transaction
+     * we're using rename to replace one file with another. Start IO on it
+     * now so we don't add too much work to the end of the transaction
      */
-    if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size &&
-        old_inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
+    if (new_inode && S_ISREG(old_inode->i_mode) && new_inode->i_size)
         filemap_flush(old_inode->i_mapping);
     /* close the racy window with snapshot create/destroy ioctl */
@@ -8391,12 +8368,6 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
          */
         btrfs_pin_log_trans(root);
     }
-    /*
-     * make sure the inode gets flushed if it is replacing
-     * something.
-     */
-    if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
-        btrfs_add_ordered_operation(trans, root, old_inode);
     inode_inc_iversion(old_dir);
     inode_inc_iversion(new_dir);
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -571,18 +571,6 @@ void btrfs_remove_ordered_extent(struct inode *inode,
     trace_btrfs_ordered_extent_remove(inode, entry);
-    /*
-     * we have no more ordered extents for this inode and
-     * no dirty pages. We can safely remove it from the
-     * list of ordered extents
-     */
-    if (RB_EMPTY_ROOT(&tree->tree) &&
-        !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
-        spin_lock(&root->fs_info->ordered_root_lock);
-        list_del_init(&BTRFS_I(inode)->ordered_operations);
-        spin_unlock(&root->fs_info->ordered_root_lock);
-    }
     if (!root->nr_ordered_extents) {
         spin_lock(&root->fs_info->ordered_root_lock);
         BUG_ON(list_empty(&root->ordered_root));
@@ -686,81 +674,6 @@ void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr)
     mutex_unlock(&fs_info->ordered_operations_mutex);
 }
-/*
- * this is used during transaction commit to write all the inodes
- * added to the ordered operation list. These files must be fully on
- * disk before the transaction commits.
- *
- * we have two modes here, one is to just start the IO via filemap_flush
- * and the other is to wait for all the io. When we wait, we have an
- * extra check to make sure the ordered operation list really is empty
- * before we return
- */
-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
-                 struct btrfs_root *root, int wait)
-{
-    struct btrfs_inode *btrfs_inode;
-    struct inode *inode;
-    struct btrfs_transaction *cur_trans = trans->transaction;
-    struct list_head splice;
-    struct list_head works;
-    struct btrfs_delalloc_work *work, *next;
-    int ret = 0;
-    INIT_LIST_HEAD(&splice);
-    INIT_LIST_HEAD(&works);
-    mutex_lock(&root->fs_info->ordered_extent_flush_mutex);
-    spin_lock(&root->fs_info->ordered_root_lock);
-    list_splice_init(&cur_trans->ordered_operations, &splice);
-    while (!list_empty(&splice)) {
-        btrfs_inode = list_entry(splice.next, struct btrfs_inode,
-                     ordered_operations);
-        inode = &btrfs_inode->vfs_inode;
-        list_del_init(&btrfs_inode->ordered_operations);
-        /*
-         * the inode may be getting freed (in sys_unlink path).
-         */
-        inode = igrab(inode);
-        if (!inode)
-            continue;
-        if (!wait)
-            list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                      &cur_trans->ordered_operations);
-        spin_unlock(&root->fs_info->ordered_root_lock);
-        work = btrfs_alloc_delalloc_work(inode, wait, 1);
-        if (!work) {
-            spin_lock(&root->fs_info->ordered_root_lock);
-            if (list_empty(&BTRFS_I(inode)->ordered_operations))
-                list_add_tail(&btrfs_inode->ordered_operations,
-                          &splice);
-            list_splice_tail(&splice,
-                     &cur_trans->ordered_operations);
-            spin_unlock(&root->fs_info->ordered_root_lock);
-            ret = -ENOMEM;
-            goto out;
-        }
-        list_add_tail(&work->list, &works);
-        btrfs_queue_work(root->fs_info->flush_workers,
-                 &work->work);
-        cond_resched();
-        spin_lock(&root->fs_info->ordered_root_lock);
-    }
-    spin_unlock(&root->fs_info->ordered_root_lock);
-out:
-    list_for_each_entry_safe(work, next, &works, list) {
-        list_del_init(&work->list);
-        btrfs_wait_and_free_delalloc_work(work);
-    }
-    mutex_unlock(&root->fs_info->ordered_extent_flush_mutex);
-    return ret;
-}
 /*
  * Used to start IO or wait for a given ordered extent to finish.
  *
@@ -1120,42 +1033,6 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
     return index;
 }
-/*
- * add a given inode to the list of inodes that must be fully on
- * disk before a transaction commit finishes.
- *
- * This basically gives us the ext3 style data=ordered mode, and it is mostly
- * used to make sure renamed files are fully on disk.
- *
- * It is a noop if the inode is already fully on disk.
- *
- * If trans is not null, we'll do a friendly check for a transaction that
- * is already flushing things and force the IO down ourselves.
- */
-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
-                 struct btrfs_root *root, struct inode *inode)
-{
-    struct btrfs_transaction *cur_trans = trans->transaction;
-    u64 last_mod;
-    last_mod = max(BTRFS_I(inode)->generation, BTRFS_I(inode)->last_trans);
-    /*
-     * if this file hasn't been changed since the last transaction
-     * commit, we can safely return without doing anything
-     */
-    if (last_mod <= root->fs_info->last_trans_committed)
-        return;
-    spin_lock(&root->fs_info->ordered_root_lock);
-    if (list_empty(&BTRFS_I(inode)->ordered_operations)) {
-        list_add_tail(&BTRFS_I(inode)->ordered_operations,
-                  &cur_trans->ordered_operations);
-    }
-    spin_unlock(&root->fs_info->ordered_root_lock);
-}
 int __init ordered_data_init(void)
 {
     btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -190,11 +190,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
                 struct btrfs_ordered_extent *ordered);
 int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
                u32 *sum, int len);
-int btrfs_run_ordered_operations(struct btrfs_trans_handle *trans,
-                 struct btrfs_root *root, int wait);
-void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
-                 struct btrfs_root *root,
-                 struct inode *inode);
 int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
 void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
 void btrfs_get_logged_extents(struct inode *inode,
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1201,6 +1201,50 @@ int btrfs_limit_qgroup(struct btrfs_trans_handle *trans,
     mutex_unlock(&fs_info->qgroup_ioctl_lock);
     return ret;
 }
+static int comp_oper_exist(struct btrfs_qgroup_operation *oper1,
+               struct btrfs_qgroup_operation *oper2)
+{
+    /*
+     * Ignore seq and type here, we're looking for any operation
+     * at all related to this extent on that root.
+     */
+    if (oper1->bytenr < oper2->bytenr)
+        return -1;
+    if (oper1->bytenr > oper2->bytenr)
+        return 1;
+    if (oper1->ref_root < oper2->ref_root)
+        return -1;
+    if (oper1->ref_root > oper2->ref_root)
+        return 1;
+    return 0;
+}
+static int qgroup_oper_exists(struct btrfs_fs_info *fs_info,
+                  struct btrfs_qgroup_operation *oper)
+{
+    struct rb_node *n;
+    struct btrfs_qgroup_operation *cur;
+    int cmp;
+    spin_lock(&fs_info->qgroup_op_lock);
+    n = fs_info->qgroup_op_tree.rb_node;
+    while (n) {
+        cur = rb_entry(n, struct btrfs_qgroup_operation, n);
+        cmp = comp_oper_exist(cur, oper);
+        if (cmp < 0) {
+            n = n->rb_right;
+        } else if (cmp) {
+            n = n->rb_left;
+        } else {
+            spin_unlock(&fs_info->qgroup_op_lock);
+            return -EEXIST;
+        }
+    }
+    spin_unlock(&fs_info->qgroup_op_lock);
+    return 0;
+}
 static int comp_oper(struct btrfs_qgroup_operation *oper1,
              struct btrfs_qgroup_operation *oper2)
 {
@@ -1290,6 +1334,23 @@ int btrfs_qgroup_record_ref(struct btrfs_trans_handle *trans,
     oper->seq = atomic_inc_return(&fs_info->qgroup_op_seq);
     INIT_LIST_HEAD(&oper->elem.list);
     oper->elem.seq = 0;
+    if (type == BTRFS_QGROUP_OPER_SUB_SUBTREE) {
+        /*
+         * If any operation for this bytenr/ref_root combo
+         * exists, then we know it's not exclusively owned and
+         * shouldn't be queued up.
+         *
+         * This also catches the case where we have a cloned
+         * extent that gets queued up multiple times during
+         * drop snapshot.
+         */
+        if (qgroup_oper_exists(fs_info, oper)) {
+            kfree(oper);
+            return 0;
+        }
+    }
     ret = insert_qgroup_oper(fs_info, oper);
     if (ret) {
         /* Shouldn't happen so have an assert for developers */
@@ -1883,6 +1944,111 @@ static int qgroup_shared_accounting(struct btrfs_trans_handle *trans,
     return ret;
 }
+/*
+ * Process a reference to a shared subtree. This type of operation is
+ * queued during snapshot removal when we encounter extents which are
+ * shared between more than one root.
+ */
+static int qgroup_subtree_accounting(struct btrfs_trans_handle *trans,
+                     struct btrfs_fs_info *fs_info,
+                     struct btrfs_qgroup_operation *oper)
+{
+    struct ulist *roots = NULL;
+    struct ulist_node *unode;
+    struct ulist_iterator uiter;
+    struct btrfs_qgroup_list *glist;
+    struct ulist *parents;
+    int ret = 0;
+    int err;
+    struct btrfs_qgroup *qg;
+    u64 root_obj = 0;
+    struct seq_list elem = {};
+    parents = ulist_alloc(GFP_NOFS);
+    if (!parents)
+        return -ENOMEM;
+    btrfs_get_tree_mod_seq(fs_info, &elem);
+    ret = btrfs_find_all_roots(trans, fs_info, oper->bytenr,
+                   elem.seq, &roots);
+    btrfs_put_tree_mod_seq(fs_info, &elem);
+    if (ret < 0)
+        return ret;
+    if (roots->nnodes != 1)
+        goto out;
+    ULIST_ITER_INIT(&uiter);
+    unode = ulist_next(roots, &uiter); /* Only want 1 so no need to loop */
+    /*
+     * If we find our ref root then that means all refs
+     * this extent has to the root have not yet been
+     * deleted. In that case, we do nothing and let the
+     * last ref for this bytenr drive our update.
+     *
+     * This can happen for example if an extent is
+     * referenced multiple times in a snapshot (clone,
+     * etc). If we are in the middle of snapshot removal,
+     * queued updates for such an extent will find the
+     * root if we have not yet finished removing the
+     * snapshot.
+     */
+    if (unode->val == oper->ref_root)
+        goto out;
+    root_obj = unode->val;
+    BUG_ON(!root_obj);
+    spin_lock(&fs_info->qgroup_lock);
+    qg = find_qgroup_rb(fs_info, root_obj);
+    if (!qg)
+        goto out_unlock;
+    qg->excl += oper->num_bytes;
+    qg->excl_cmpr += oper->num_bytes;
+    qgroup_dirty(fs_info, qg);
+    /*
+     * Adjust counts for parent groups. First we find all
+     * parents, then in the 2nd loop we do the adjustment
+     * while adding parents of the parents to our ulist.
+     */
+    list_for_each_entry(glist, &qg->groups, next_group) {
+        err = ulist_add(parents, glist->group->qgroupid,
+                ptr_to_u64(glist->group), GFP_ATOMIC);
+        if (err < 0) {
+            ret = err;
+            goto out_unlock;
+        }
+    }
+    ULIST_ITER_INIT(&uiter);
+    while ((unode = ulist_next(parents, &uiter))) {
+        qg = u64_to_ptr(unode->aux);
+        qg->excl += oper->num_bytes;
+        qg->excl_cmpr += oper->num_bytes;
+        qgroup_dirty(fs_info, qg);
+        /* Add any parents of the parents */
+        list_for_each_entry(glist, &qg->groups, next_group) {
+            err = ulist_add(parents, glist->group->qgroupid,
+                    ptr_to_u64(glist->group), GFP_ATOMIC);
+            if (err < 0) {
+                ret = err;
+                goto out_unlock;
+            }
+        }
+    }
+out_unlock:
+    spin_unlock(&fs_info->qgroup_lock);
+out:
+    ulist_free(roots);
+    ulist_free(parents);
+    return ret;
+}
 /*
  * btrfs_qgroup_account_ref is called for every ref that is added to or deleted
  * from the fs. First, all roots referencing the extent are searched, and
@@ -1920,6 +2086,9 @@ static int btrfs_qgroup_account(struct btrfs_trans_handle *trans,
     case BTRFS_QGROUP_OPER_SUB_SHARED:
         ret = qgroup_shared_accounting(trans, fs_info, oper);
         break;
+    case BTRFS_QGROUP_OPER_SUB_SUBTREE:
+        ret = qgroup_subtree_accounting(trans, fs_info, oper);
+        break;
     default:
         ASSERT(0);
     }
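The effect of the new BTRFS_QGROUP_OPER_SUB_SUBTREE case is easiest to see in isolation: once btrfs_find_all_roots() reports exactly one remaining root for the extent, qgroup_subtree_accounting() credits the bytes as exclusive to that root's qgroup and to every ancestor group. A minimal userspace sketch of that rule, assuming a simple parent chain in place of the kernel's ulist walk over the qgroup DAG (all names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical, flattened model of a qgroup: real btrfs qgroups form a
 * DAG and are walked with a ulist; a single parent pointer is enough to
 * show the accounting rule. */
struct qgroup {
    uint64_t qgroupid;
    uint64_t excl;          /* bytes counted as exclusively owned */
    struct qgroup *parent;
};

/* Once an extent is referenced by exactly one root, its bytes become
 * exclusive to that root's qgroup and to every parent group (the kernel
 * also bumps excl_cmpr and calls qgroup_dirty() at each step). */
static void account_subtree(struct qgroup *owner, uint64_t num_bytes)
{
    struct qgroup *qg;

    for (qg = owner; qg; qg = qg->parent)
        qg->excl += num_bytes;
}

int main(void)
{
    struct qgroup top = { 1, 0, NULL };
    struct qgroup sub = { 257, 0, &top };

    /* a 16 KiB tree block became singly referenced during snapshot delete */
    account_subtree(&sub, 16384);
    printf("qgroup %llu excl=%llu, qgroup %llu excl=%llu\n",
           (unsigned long long)sub.qgroupid, (unsigned long long)sub.excl,
           (unsigned long long)top.qgroupid, (unsigned long long)top.excl);
    return 0;
}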
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -44,6 +44,7 @@ enum btrfs_qgroup_operation_type {
     BTRFS_QGROUP_OPER_ADD_SHARED,
     BTRFS_QGROUP_OPER_SUB_EXCL,
     BTRFS_QGROUP_OPER_SUB_SHARED,
+    BTRFS_QGROUP_OPER_SUB_SUBTREE,
 };
 struct btrfs_qgroup_operation {
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1665,6 +1665,21 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
     return 0;
 }
+/*
+ * Calculate numbers for 'df', pessimistic in case of mixed raid profiles.
+ *
+ * If there's a redundant raid level at DATA block groups, use the respective
+ * multiplier to scale the sizes.
+ *
+ * Unused device space usage is based on simulating the chunk allocator
+ * algorithm that respects the device sizes, order of allocations and the
+ * 'alloc_start' value, this is a close approximation of the actual use but
+ * there are other factors that may change the result (like a new metadata
+ * chunk).
+ *
+ * FIXME: not accurate for mixed block groups, total and free/used are ok,
+ * available appears slightly larger.
+ */
 static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
 {
     struct btrfs_fs_info *fs_info = btrfs_sb(dentry->d_sb);
@@ -1675,6 +1690,8 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
     u64 total_free_data = 0;
     int bits = dentry->d_sb->s_blocksize_bits;
     __be32 *fsid = (__be32 *)fs_info->fsid;
+    unsigned factor = 1;
+    struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
     int ret;
     /* holding chunk_mutex to avoid allocating new chunks */
@@ -1682,30 +1699,52 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
     rcu_read_lock();
     list_for_each_entry_rcu(found, head, list) {
         if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
+            int i;
             total_free_data += found->disk_total - found->disk_used;
             total_free_data -=
                 btrfs_account_ro_block_groups_free_space(found);
+            for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
+                if (!list_empty(&found->block_groups[i])) {
+                    switch (i) {
+                    case BTRFS_RAID_DUP:
+                    case BTRFS_RAID_RAID1:
+                    case BTRFS_RAID_RAID10:
+                        factor = 2;
+                    }
+                }
+            }
         }
         total_used += found->disk_used;
     }
     rcu_read_unlock();
-    buf->f_namelen = BTRFS_NAME_LEN;
-    buf->f_blocks = btrfs_super_total_bytes(disk_super) >> bits;
-    buf->f_bfree = buf->f_blocks - (total_used >> bits);
-    buf->f_bsize = dentry->d_sb->s_blocksize;
-    buf->f_type = BTRFS_SUPER_MAGIC;
+    buf->f_blocks = div_u64(btrfs_super_total_bytes(disk_super), factor);
+    buf->f_blocks >>= bits;
+    buf->f_bfree = buf->f_blocks - (div_u64(total_used, factor) >> bits);
+    /* Account global block reserve as used, it's in logical size already */
+    spin_lock(&block_rsv->lock);
+    buf->f_bfree -= block_rsv->size >> bits;
+    spin_unlock(&block_rsv->lock);
     buf->f_bavail = total_free_data;
     ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
     if (ret) {
         mutex_unlock(&fs_info->chunk_mutex);
         return ret;
     }
-    buf->f_bavail += total_free_data;
+    buf->f_bavail += div_u64(total_free_data, factor);
     buf->f_bavail = buf->f_bavail >> bits;
     mutex_unlock(&fs_info->chunk_mutex);
+    buf->f_type = BTRFS_SUPER_MAGIC;
+    buf->f_bsize = dentry->d_sb->s_blocksize;
+    buf->f_namelen = BTRFS_NAME_LEN;
     /* We treat it as constant endianness (it doesn't matter _which_)
        because we want the fsid to come out the same whether mounted
        on a big-endian or little-endian host */
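The adjusted math is easy to verify by hand: with a redundant data profile, both the raw device capacity and the raw used bytes are divided by the profile multiplier before the shift into blocks, so 'df' reports logical rather than raw space. A small standalone sketch with made-up numbers (the global block reserve subtraction is omitted for brevity):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* hypothetical filesystem: 2 TiB of raw device space, RAID1 data */
    uint64_t total_bytes = 2ULL << 40;   /* raw bytes across all devices */
    uint64_t total_used  = 512ULL << 30; /* raw bytes used */
    unsigned factor = 2;                 /* RAID1/DUP/RAID10: stored twice */
    int bits = 12;                       /* 4 KiB block size */

    /* mirrors the f_blocks/f_bfree computation in btrfs_statfs() above */
    uint64_t f_blocks = (total_bytes / factor) >> bits;
    uint64_t f_bfree  = f_blocks - ((total_used / factor) >> bits);

    /* 1 TiB of logical blocks, 768 GiB of them free */
    printf("f_blocks=%llu f_bfree=%llu\n",
           (unsigned long long)f_blocks, (unsigned long long)f_bfree);
    return 0;
}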
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -218,7 +218,6 @@ static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
     spin_lock_init(&cur_trans->delayed_refs.lock);
     INIT_LIST_HEAD(&cur_trans->pending_snapshots);
-    INIT_LIST_HEAD(&cur_trans->ordered_operations);
     INIT_LIST_HEAD(&cur_trans->pending_chunks);
     INIT_LIST_HEAD(&cur_trans->switch_commits);
     list_add_tail(&cur_trans->list, &fs_info->trans_list);
@@ -1612,27 +1611,6 @@ static void cleanup_transaction(struct btrfs_trans_handle *trans,
     kmem_cache_free(btrfs_trans_handle_cachep, trans);
 }
-static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
-                      struct btrfs_root *root)
-{
-    int ret;
-    ret = btrfs_run_delayed_items(trans, root);
-    if (ret)
-        return ret;
-    /*
-     * rename doesn't use btrfs_join_transaction, so, once we
-     * set the transaction to blocked above, we aren't going
-     * to get any new ordered operations. We can safely run
-     * it here and know for sure that nothing new will be added
-     * to the list
-     */
-    ret = btrfs_run_ordered_operations(trans, root, 1);
-    return ret;
-}
 static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
 {
     if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
@@ -1653,13 +1631,6 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
     struct btrfs_transaction *prev_trans = NULL;
     int ret;
-    ret = btrfs_run_ordered_operations(trans, root, 0);
-    if (ret) {
-        btrfs_abort_transaction(trans, root, ret);
-        btrfs_end_transaction(trans, root);
-        return ret;
-    }
     /* Stop the commit early if ->aborted is set */
     if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
         ret = cur_trans->aborted;
@@ -1740,7 +1711,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
     if (ret)
         goto cleanup_transaction;
-    ret = btrfs_flush_all_pending_stuffs(trans, root);
+    ret = btrfs_run_delayed_items(trans, root);
     if (ret)
         goto cleanup_transaction;
@@ -1748,7 +1719,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
            extwriter_counter_read(cur_trans) == 0);
     /* some pending stuffs might be added after the previous flush. */
-    ret = btrfs_flush_all_pending_stuffs(trans, root);
+    ret = btrfs_run_delayed_items(trans, root);
     if (ret)
         goto cleanup_transaction;
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -55,7 +55,6 @@ struct btrfs_transaction {
     wait_queue_head_t writer_wait;
     wait_queue_head_t commit_wait;
     struct list_head pending_snapshots;
-    struct list_head ordered_operations;
     struct list_head pending_chunks;
     struct list_head switch_commits;
     struct btrfs_delayed_ref_root delayed_refs;
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -57,6 +57,21 @@ void ulist_free(struct ulist *ulist);
 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
 int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
             u64 *old_aux, gfp_t gfp_mask);
+/* just like ulist_add_merge() but take a pointer for the aux data */
+static inline int ulist_add_merge_ptr(struct ulist *ulist, u64 val, void *aux,
+                      void **old_aux, gfp_t gfp_mask)
+{
+#if BITS_PER_LONG == 32
+    u64 old64 = (uintptr_t)*old_aux;
+    int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask);
+    *old_aux = (void *)((uintptr_t)old64);
+    return ret;
+#else
+    return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask);
+#endif
+}
 struct ulist_node *ulist_next(struct ulist *ulist,
                   struct ulist_iterator *uiter);
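The helper exists because ulist_add_merge() always stores a full u64 through its old_aux argument. The old callers in backref.c passed (u64 *)&old where old is a pointer, so on 32-bit hosts the 8-byte store clobbered the 4 bytes adjacent to the pointer slot; that is the memory corruption fixed in this pull. ulist_add_merge_ptr() avoids it by bouncing through a properly sized u64 on 32-bit builds. A standalone userspace sketch of the hazard (hypothetical struct frame; the memcpy stands in for the u64 store):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct frame {
    void *old;      /* 8 bytes on a 64-bit host, only 4 on a 32-bit one */
    uint32_t guard; /* the neighbouring memory that gets overwritten */
};

int main(void)
{
    struct frame f = { NULL, 0xdeadbeef };
    uint64_t merged = 0x1122334455667788ULL;

    /* what ulist_add_merge() effectively did through (u64 *)&old */
    memcpy(&f.old, &merged, sizeof(merged));

    /* on a 32-bit build the 8-byte store spills into 'guard' */
    printf("pointer slot is %zu bytes, guard is 0x%x (%s)\n",
           sizeof(f.old), f.guard,
           f.guard == 0xdeadbeef ? "intact" : "clobbered");
    return 0;
}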