Commit 84399bb0 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "Outside of misc fixes, Filipe has a few fsync corners and we're
  pulling in one more of Josef's fixes from production use here"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs:__add_inode_ref: out of bounds memory read when looking for extended ref.
  Btrfs: fix data loss in the fast fsync path
  Btrfs: remove extra run_delayed_refs in update_cowonly_root
  Btrfs: incremental send, don't rename a directory too soon
  btrfs: fix lost return value due to variable shadowing
  Btrfs: do not ignore errors from btrfs_lookup_xattr in do_setxattr
  Btrfs: fix off-by-one logic error in btrfs_realloc_node
  Btrfs: add missing inode update when punching hole
  Btrfs: abort the transaction if we fail to update the free space cache inode
  Btrfs: fix fsync race leading to ordered extent memory leaks
parents 0d9b9c16 dd9ef135
...@@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, ...@@ -1645,14 +1645,14 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
parent_nritems = btrfs_header_nritems(parent); parent_nritems = btrfs_header_nritems(parent);
blocksize = root->nodesize; blocksize = root->nodesize;
end_slot = parent_nritems; end_slot = parent_nritems - 1;
if (parent_nritems == 1) if (parent_nritems <= 1)
return 0; return 0;
btrfs_set_lock_blocking(parent); btrfs_set_lock_blocking(parent);
for (i = start_slot; i < end_slot; i++) { for (i = start_slot; i <= end_slot; i++) {
int close = 1; int close = 1;
btrfs_node_key(parent, &disk_key, i); btrfs_node_key(parent, &disk_key, i);
...@@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, ...@@ -1669,7 +1669,7 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
other = btrfs_node_blockptr(parent, i - 1); other = btrfs_node_blockptr(parent, i - 1);
close = close_blocks(blocknr, other, blocksize); close = close_blocks(blocknr, other, blocksize);
} }
if (!close && i < end_slot - 2) { if (!close && i < end_slot) {
other = btrfs_node_blockptr(parent, i + 1); other = btrfs_node_blockptr(parent, i + 1);
close = close_blocks(blocknr, other, blocksize); close = close_blocks(blocknr, other, blocksize);
} }
......
...@@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, ...@@ -3208,6 +3208,8 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
return 0; return 0;
} }
if (trans->aborted)
return 0;
again: again:
inode = lookup_free_space_inode(root, block_group, path); inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
...@@ -3243,6 +3245,20 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group, ...@@ -3243,6 +3245,20 @@ static int cache_save_setup(struct btrfs_block_group_cache *block_group,
*/ */
BTRFS_I(inode)->generation = 0; BTRFS_I(inode)->generation = 0;
ret = btrfs_update_inode(trans, root, inode); ret = btrfs_update_inode(trans, root, inode);
if (ret) {
/*
* So theoretically we could recover from this, simply set the
* super cache generation to 0 so we know to invalidate the
* cache, but then we'd have to keep track of the block groups
* that fail this way so we know we _have_ to reset this cache
* before the next commit or risk reading stale cache. So to
* limit our exposure to horrible edge cases lets just abort the
* transaction, this only happens in really bad situations
* anyway.
*/
btrfs_abort_transaction(trans, root, ret);
goto out_put;
}
WARN_ON(ret); WARN_ON(ret);
if (i_size_read(inode) > 0) { if (i_size_read(inode) > 0) {
......
...@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb, ...@@ -1811,22 +1811,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
/* /*
* we want to make sure fsync finds this change
* but we haven't joined a transaction running right now.
*
* Later on, someone is sure to update the inode and get the
* real transid recorded.
*
* We set last_trans now to the fs_info generation + 1,
* this will either be one more than the running transaction
* or the generation used for the next transaction if there isn't
* one running right now.
*
* We also have to set last_sub_trans to the current log transid, * We also have to set last_sub_trans to the current log transid,
* otherwise subsequent syncs to a file that's been synced in this * otherwise subsequent syncs to a file that's been synced in this
transaction will appear to have already occurred. * transaction will appear to have already occurred.
*/ */
BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
BTRFS_I(inode)->last_sub_trans = root->log_transid; BTRFS_I(inode)->last_sub_trans = root->log_transid;
if (num_written > 0) { if (num_written > 0) {
err = generic_write_sync(file, pos, num_written); err = generic_write_sync(file, pos, num_written);
...@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync) ...@@ -1959,25 +1947,37 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
atomic_inc(&root->log_batch); atomic_inc(&root->log_batch);
/* /*
* check the transaction that last modified this inode * If the last transaction that changed this file was before the current
* and see if its already been committed * transaction and we have the full sync flag set in our inode, we can
*/ * bail out now without any syncing.
if (!BTRFS_I(inode)->last_trans) { *
mutex_unlock(&inode->i_mutex); * Note that we can't bail out if the full sync flag isn't set. This is
goto out; * because when the full sync flag is set we start all ordered extents
} * and wait for them to fully complete - when they complete they update
* the inode's last_trans field through:
/* *
* if the last transaction that changed this file was before * btrfs_finish_ordered_io() ->
* the current transaction, we can bail out now without any * btrfs_update_inode_fallback() ->
* syncing * btrfs_update_inode() ->
* btrfs_set_inode_last_trans()
*
* So we are sure that last_trans is up to date and can do this check to
* bail out safely. For the fast path, when the full sync flag is not
* set in our inode, we can not do it because we start only our ordered
* extents and don't wait for them to complete (that is when
* btrfs_finish_ordered_io runs), so here at this point their last_trans
* value might be less than or equal to fs_info->last_trans_committed,
* and setting a speculative last_trans for an inode when a buffered
* write is made (such as fs_info->generation + 1 for example) would not
* be reliable since after setting the value and before fsync is called
* any number of transactions can start and commit (transaction kthread
* commits the current transaction periodically), and a transaction
* commit does not start nor wait for ordered extents to complete.
*/ */
smp_mb(); smp_mb();
if (btrfs_inode_in_log(inode, root->fs_info->generation) || if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
BTRFS_I(inode)->last_trans <= (full_sync && BTRFS_I(inode)->last_trans <=
root->fs_info->last_trans_committed) { root->fs_info->last_trans_committed)) {
BTRFS_I(inode)->last_trans = 0;
/* /*
* We've had everything committed since the last time we were * We've had everything committed since the last time we were
* modified so clear this flag in case it was set for whatever * modified so clear this flag in case it was set for whatever
...@@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2275,6 +2275,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
bool same_page; bool same_page;
bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
u64 ino_size; u64 ino_size;
bool truncated_page = false;
bool updated_inode = false;
ret = btrfs_wait_ordered_range(inode, offset, len); ret = btrfs_wait_ordered_range(inode, offset, len);
if (ret) if (ret)
...@@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2306,13 +2308,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
* entire page. * entire page.
*/ */
if (same_page && len < PAGE_CACHE_SIZE) { if (same_page && len < PAGE_CACHE_SIZE) {
if (offset < ino_size) if (offset < ino_size) {
truncated_page = true;
ret = btrfs_truncate_page(inode, offset, len, 0); ret = btrfs_truncate_page(inode, offset, len, 0);
} else {
ret = 0;
}
goto out_only_mutex; goto out_only_mutex;
} }
/* zero back part of the first page */ /* zero back part of the first page */
if (offset < ino_size) { if (offset < ino_size) {
truncated_page = true;
ret = btrfs_truncate_page(inode, offset, 0, 0); ret = btrfs_truncate_page(inode, offset, 0, 0);
if (ret) { if (ret) {
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
...@@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2348,6 +2355,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (!ret) { if (!ret) {
/* zero the front end of the last page */ /* zero the front end of the last page */
if (tail_start + tail_len < ino_size) { if (tail_start + tail_len < ino_size) {
truncated_page = true;
ret = btrfs_truncate_page(inode, ret = btrfs_truncate_page(inode,
tail_start + tail_len, 0, 1); tail_start + tail_len, 0, 1);
if (ret) if (ret)
...@@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2357,8 +2365,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
} }
if (lockend < lockstart) { if (lockend < lockstart) {
mutex_unlock(&inode->i_mutex); ret = 0;
return 0; goto out_only_mutex;
} }
while (1) { while (1) {
...@@ -2506,6 +2514,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2506,6 +2514,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
trans->block_rsv = &root->fs_info->trans_block_rsv; trans->block_rsv = &root->fs_info->trans_block_rsv;
ret = btrfs_update_inode(trans, root, inode); ret = btrfs_update_inode(trans, root, inode);
updated_inode = true;
btrfs_end_transaction(trans, root); btrfs_end_transaction(trans, root);
btrfs_btree_balance_dirty(root); btrfs_btree_balance_dirty(root);
out_free: out_free:
...@@ -2515,6 +2524,22 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2515,6 +2524,22 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS); &cached_state, GFP_NOFS);
out_only_mutex: out_only_mutex:
if (!updated_inode && truncated_page && !ret && !err) {
/*
* If we only end up zeroing part of a page, we still need to
* update the inode item, so that all the time fields are
* updated as well as the necessary btrfs inode in memory fields
* for detecting, at fsync time, if the inode isn't yet in the
* log tree or it's there but not up to date.
*/
trans = btrfs_start_transaction(root, 1);
if (IS_ERR(trans)) {
err = PTR_ERR(trans);
} else {
err = btrfs_update_inode(trans, root, inode);
ret = btrfs_end_transaction(trans, root);
}
}
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
if (ret && !err) if (ret && !err)
err = ret; err = ret;
......
...@@ -7285,7 +7285,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock, ...@@ -7285,7 +7285,6 @@ static int btrfs_get_blocks_direct(struct inode *inode, sector_t iblock,
((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) && ((BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) &&
em->block_start != EXTENT_MAP_HOLE)) { em->block_start != EXTENT_MAP_HOLE)) {
int type; int type;
int ret;
u64 block_start, orig_start, orig_block_len, ram_bytes; u64 block_start, orig_start, orig_block_len, ram_bytes;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
......
...@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode, ...@@ -452,9 +452,7 @@ void btrfs_get_logged_extents(struct inode *inode,
continue; continue;
if (entry_end(ordered) <= start) if (entry_end(ordered) <= start)
break; break;
if (!list_empty(&ordered->log_list)) if (test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
continue;
if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
continue; continue;
list_add(&ordered->log_list, logged_list); list_add(&ordered->log_list, logged_list);
atomic_inc(&ordered->refs); atomic_inc(&ordered->refs);
...@@ -511,7 +509,6 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans, ...@@ -511,7 +509,6 @@ void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
&ordered->flags)); &ordered->flags));
if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
list_add_tail(&ordered->trans_list, &trans->ordered); list_add_tail(&ordered->trans_list, &trans->ordered);
spin_lock_irq(&log->log_extents_lock[index]); spin_lock_irq(&log->log_extents_lock[index]);
} }
......
...@@ -230,6 +230,7 @@ struct pending_dir_move { ...@@ -230,6 +230,7 @@ struct pending_dir_move {
u64 parent_ino; u64 parent_ino;
u64 ino; u64 ino;
u64 gen; u64 gen;
bool is_orphan;
struct list_head update_refs; struct list_head update_refs;
}; };
...@@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx, ...@@ -2984,7 +2985,8 @@ static int add_pending_dir_move(struct send_ctx *sctx,
u64 ino_gen, u64 ino_gen,
u64 parent_ino, u64 parent_ino,
struct list_head *new_refs, struct list_head *new_refs,
struct list_head *deleted_refs) struct list_head *deleted_refs,
const bool is_orphan)
{ {
struct rb_node **p = &sctx->pending_dir_moves.rb_node; struct rb_node **p = &sctx->pending_dir_moves.rb_node;
struct rb_node *parent = NULL; struct rb_node *parent = NULL;
...@@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx, ...@@ -2999,6 +3001,7 @@ static int add_pending_dir_move(struct send_ctx *sctx,
pm->parent_ino = parent_ino; pm->parent_ino = parent_ino;
pm->ino = ino; pm->ino = ino;
pm->gen = ino_gen; pm->gen = ino_gen;
pm->is_orphan = is_orphan;
INIT_LIST_HEAD(&pm->list); INIT_LIST_HEAD(&pm->list);
INIT_LIST_HEAD(&pm->update_refs); INIT_LIST_HEAD(&pm->update_refs);
RB_CLEAR_NODE(&pm->node); RB_CLEAR_NODE(&pm->node);
...@@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) ...@@ -3131,16 +3134,20 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
rmdir_ino = dm->rmdir_ino; rmdir_ino = dm->rmdir_ino;
free_waiting_dir_move(sctx, dm); free_waiting_dir_move(sctx, dm);
if (pm->is_orphan) {
ret = gen_unique_name(sctx, pm->ino,
pm->gen, from_path);
} else {
ret = get_first_ref(sctx->parent_root, pm->ino, ret = get_first_ref(sctx->parent_root, pm->ino,
&parent_ino, &parent_gen, name); &parent_ino, &parent_gen, name);
if (ret < 0) if (ret < 0)
goto out; goto out;
ret = get_cur_path(sctx, parent_ino, parent_gen, ret = get_cur_path(sctx, parent_ino, parent_gen,
from_path); from_path);
if (ret < 0) if (ret < 0)
goto out; goto out;
ret = fs_path_add_path(from_path, name); ret = fs_path_add_path(from_path, name);
}
if (ret < 0) if (ret < 0)
goto out; goto out;
...@@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm) ...@@ -3150,7 +3157,8 @@ static int apply_dir_move(struct send_ctx *sctx, struct pending_dir_move *pm)
LIST_HEAD(deleted_refs); LIST_HEAD(deleted_refs);
ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID); ASSERT(ancestor > BTRFS_FIRST_FREE_OBJECTID);
ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor, ret = add_pending_dir_move(sctx, pm->ino, pm->gen, ancestor,
&pm->update_refs, &deleted_refs); &pm->update_refs, &deleted_refs,
pm->is_orphan);
if (ret < 0) if (ret < 0)
goto out; goto out;
if (rmdir_ino) { if (rmdir_ino) {
...@@ -3283,6 +3291,127 @@ static int apply_children_dir_moves(struct send_ctx *sctx) ...@@ -3283,6 +3291,127 @@ static int apply_children_dir_moves(struct send_ctx *sctx)
return ret; return ret;
} }
/*
* We might need to delay a directory rename even when no ancestor directory
* (in the send root) with a higher inode number than ours (sctx->cur_ino) was
* renamed. This happens when we rename a directory to the old name (the name
* in the parent root) of some other unrelated directory that got its rename
* delayed due to some ancestor with higher number that got renamed.
*
* Example:
*
* Parent snapshot:
* . (ino 256)
* |---- a/ (ino 257)
* | |---- file (ino 260)
* |
* |---- b/ (ino 258)
* |---- c/ (ino 259)
*
* Send snapshot:
* . (ino 256)
* |---- a/ (ino 258)
* |---- x/ (ino 259)
* |---- y/ (ino 257)
* |----- file (ino 260)
*
* Here we can not rename 258 from 'b' to 'a' without the rename of inode 257
* from 'a' to 'x/y' happening first, which in turn depends on the rename of
* inode 259 from 'c' to 'x'. So the order of rename commands the send stream
* must issue is:
*
* 1 - rename 259 from 'c' to 'x'
* 2 - rename 257 from 'a' to 'x/y'
* 3 - rename 258 from 'b' to 'a'
*
* Returns 1 if the rename of sctx->cur_ino needs to be delayed, 0 if it can
* be done right away and < 0 on error.
*/
/*
 * Check whether the rename of sctx->cur_ino must be delayed because the name
 * it is being renamed to matches, in the parent snapshot, the old name of
 * another directory that itself has a pending (delayed) rename.  See the
 * detailed example in the comment block preceding this function.
 *
 * Returns 1 if the rename was queued as a pending dir move (must be delayed),
 * 0 if it can be done right away, and < 0 on error.
 */
static int wait_for_dest_dir_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref,
const bool is_orphan)
{
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_key di_key;
struct btrfs_dir_item *di;
u64 left_gen;
u64 right_gen;
int ret = 0;
/* No directory renames are pending at all, so nothing can conflict. */
if (RB_EMPTY_ROOT(&sctx->waiting_dir_moves))
return 0;
path = alloc_path_for_send();
if (!path)
return -ENOMEM;
/*
 * Look up, in the parent snapshot, a dir item in our destination parent
 * directory with the same name that sctx->cur_ino is being renamed to.
 */
key.objectid = parent_ref->dir;
key.type = BTRFS_DIR_ITEM_KEY;
key.offset = btrfs_name_hash(parent_ref->name, parent_ref->name_len);
ret = btrfs_search_slot(NULL, sctx->parent_root, &key, path, 0, 0);
if (ret < 0) {
goto out;
} else if (ret > 0) {
/* No dir item with that hash: no name collision, no delay. */
ret = 0;
goto out;
}
di = btrfs_match_dir_item_name(sctx->parent_root, path,
parent_ref->name, parent_ref->name_len);
if (!di) {
/* Hash collision only — the actual names differ, no delay needed. */
ret = 0;
goto out;
}
/*
 * di_key.objectid has the number of the inode that has a dentry in the
 * parent directory with the same name that sctx->cur_ino is being
 * renamed to. We need to check if that inode is in the send root as
 * well and if it is currently marked as an inode with a pending rename,
 * if it is, we need to delay the rename of sctx->cur_ino as well, so
 * that it happens after that other inode is renamed.
 */
btrfs_dir_item_key_to_cpu(path->nodes[0], di, &di_key);
if (di_key.type != BTRFS_INODE_ITEM_KEY) {
/* Dir item does not point at an inode item; nothing to wait for. */
ret = 0;
goto out;
}
/* Generation of the colliding inode in the parent snapshot. */
ret = get_inode_info(sctx->parent_root, di_key.objectid, NULL,
&left_gen, NULL, NULL, NULL, NULL);
if (ret < 0)
goto out;
/*
 * Generation of the same inode number in the send snapshot; -ENOENT
 * means it no longer exists there, so there is nothing to wait for.
 */
ret = get_inode_info(sctx->send_root, di_key.objectid, NULL,
&right_gen, NULL, NULL, NULL, NULL);
if (ret < 0) {
if (ret == -ENOENT)
ret = 0;
goto out;
}
/* Different inode, no need to delay the rename of sctx->cur_ino */
if (right_gen != left_gen) {
ret = 0;
goto out;
}
if (is_waiting_for_move(sctx, di_key.objectid)) {
/*
 * The colliding inode has a delayed rename; queue ours so it is
 * applied only after that one completes.
 */
ret = add_pending_dir_move(sctx,
sctx->cur_ino,
sctx->cur_inode_gen,
di_key.objectid,
&sctx->new_refs,
&sctx->deleted_refs,
is_orphan);
if (!ret)
ret = 1;
}
out:
btrfs_free_path(path);
return ret;
}
static int wait_for_parent_move(struct send_ctx *sctx, static int wait_for_parent_move(struct send_ctx *sctx,
struct recorded_ref *parent_ref) struct recorded_ref *parent_ref)
{ {
...@@ -3349,7 +3478,8 @@ static int wait_for_parent_move(struct send_ctx *sctx, ...@@ -3349,7 +3478,8 @@ static int wait_for_parent_move(struct send_ctx *sctx,
sctx->cur_inode_gen, sctx->cur_inode_gen,
ino, ino,
&sctx->new_refs, &sctx->new_refs,
&sctx->deleted_refs); &sctx->deleted_refs,
false);
if (!ret) if (!ret)
ret = 1; ret = 1;
} }
...@@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move) ...@@ -3372,6 +3502,7 @@ static int process_recorded_refs(struct send_ctx *sctx, int *pending_move)
int did_overwrite = 0; int did_overwrite = 0;
int is_orphan = 0; int is_orphan = 0;
u64 last_dir_ino_rm = 0; u64 last_dir_ino_rm = 0;
bool can_rename = true;
verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
...@@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); ...@@ -3490,12 +3621,22 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
} }
} }
if (S_ISDIR(sctx->cur_inode_mode) && sctx->parent_root) {
ret = wait_for_dest_dir_move(sctx, cur, is_orphan);
if (ret < 0)
goto out;
if (ret == 1) {
can_rename = false;
*pending_move = 1;
}
}
/* /*
* link/move the ref to the new place. If we have an orphan * link/move the ref to the new place. If we have an orphan
* inode, move it and update valid_path. If not, link or move * inode, move it and update valid_path. If not, link or move
* it depending on the inode mode. * it depending on the inode mode.
*/ */
if (is_orphan) { if (is_orphan && can_rename) {
ret = send_rename(sctx, valid_path, cur->full_path); ret = send_rename(sctx, valid_path, cur->full_path);
if (ret < 0) if (ret < 0)
goto out; goto out;
...@@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino); ...@@ -3503,7 +3644,7 @@ verbose_printk("btrfs: process_recorded_refs %llu\n", sctx->cur_ino);
ret = fs_path_copy(valid_path, cur->full_path); ret = fs_path_copy(valid_path, cur->full_path);
if (ret < 0) if (ret < 0)
goto out; goto out;
} else { } else if (can_rename) {
if (S_ISDIR(sctx->cur_inode_mode)) { if (S_ISDIR(sctx->cur_inode_mode)) {
/* /*
* Dirs can't be linked, so move it. For moved * Dirs can't be linked, so move it. For moved
......
...@@ -1052,9 +1052,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans, ...@@ -1052,9 +1052,6 @@ static int update_cowonly_root(struct btrfs_trans_handle *trans,
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret) if (ret)
return ret; return ret;
ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
if (ret)
return ret;
} }
return 0; return 0;
......
...@@ -1012,7 +1012,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans, ...@@ -1012,7 +1012,7 @@ static inline int __add_inode_ref(struct btrfs_trans_handle *trans,
base = btrfs_item_ptr_offset(leaf, path->slots[0]); base = btrfs_item_ptr_offset(leaf, path->slots[0]);
while (cur_offset < item_size) { while (cur_offset < item_size) {
extref = (struct btrfs_inode_extref *)base + cur_offset; extref = (struct btrfs_inode_extref *)(base + cur_offset);
victim_name_len = btrfs_inode_extref_name_len(leaf, extref); victim_name_len = btrfs_inode_extref_name_len(leaf, extref);
......
...@@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans, ...@@ -111,6 +111,8 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
name, name_len, -1); name, name_len, -1);
if (!di && (flags & XATTR_REPLACE)) if (!di && (flags & XATTR_REPLACE))
ret = -ENODATA; ret = -ENODATA;
else if (IS_ERR(di))
ret = PTR_ERR(di);
else if (di) else if (di)
ret = btrfs_delete_one_dir_name(trans, root, path, di); ret = btrfs_delete_one_dir_name(trans, root, path, di);
goto out; goto out;
...@@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans, ...@@ -127,10 +129,12 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
ASSERT(mutex_is_locked(&inode->i_mutex)); ASSERT(mutex_is_locked(&inode->i_mutex));
di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
name, name_len, 0); name, name_len, 0);
if (!di) { if (!di)
ret = -ENODATA; ret = -ENODATA;
else if (IS_ERR(di))
ret = PTR_ERR(di);
if (ret)
goto out; goto out;
}
btrfs_release_path(path); btrfs_release_path(path);
di = NULL; di = NULL;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment