Commit f8779876 authored by Linus Torvalds

Merge tag 'for-5.4-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs fixes from David Sterba:
 "A few more stability fixes, one build warning fix.

   - fix inode allocation under NOFS context

   - fix leak in fiemap due to concurrent append writes

   - fix log-root tree updates

   - fix balance convert of single profile on 32bit architectures

   - silence false positive warning on old GCCs (code moved in rc1)"

* tag 'for-5.4-rc2-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux:
  btrfs: silence maybe-uninitialized warning in clone_range
  btrfs: fix uninitialized ret in ref-verify
  btrfs: allocate new inode in NOFS context
  btrfs: fix balance convert to single on 32-bit host CPUs
  btrfs: fix incorrect updating of log root tree
  Btrfs: fix memory leak due to concurrent append writes with fiemap
parents ad338d05 431d3988
...@@ -1591,7 +1591,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, ...@@ -1591,7 +1591,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb); struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
struct page **pages = NULL; struct page **pages = NULL;
struct extent_state *cached_state = NULL;
struct extent_changeset *data_reserved = NULL; struct extent_changeset *data_reserved = NULL;
u64 release_bytes = 0; u64 release_bytes = 0;
u64 lockstart; u64 lockstart;
...@@ -1611,6 +1610,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, ...@@ -1611,6 +1610,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
return -ENOMEM; return -ENOMEM;
while (iov_iter_count(i) > 0) { while (iov_iter_count(i) > 0) {
struct extent_state *cached_state = NULL;
size_t offset = offset_in_page(pos); size_t offset = offset_in_page(pos);
size_t sector_offset; size_t sector_offset;
size_t write_bytes = min(iov_iter_count(i), size_t write_bytes = min(iov_iter_count(i),
...@@ -1758,9 +1758,20 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb, ...@@ -1758,9 +1758,20 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
if (copied > 0) if (copied > 0)
ret = btrfs_dirty_pages(inode, pages, dirty_pages, ret = btrfs_dirty_pages(inode, pages, dirty_pages,
pos, copied, &cached_state); pos, copied, &cached_state);
/*
* If we have not locked the extent range, because the range's
* start offset is >= i_size, we might still have a non-NULL
* cached extent state, acquired while marking the extent range
* as delalloc through btrfs_dirty_pages(). Therefore free any
* possible cached extent state to avoid a memory leak.
*/
if (extents_locked) if (extents_locked)
unlock_extent_cached(&BTRFS_I(inode)->io_tree, unlock_extent_cached(&BTRFS_I(inode)->io_tree,
lockstart, lockend, &cached_state); lockstart, lockend, &cached_state);
else
free_extent_state(cached_state);
btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes, btrfs_delalloc_release_extents(BTRFS_I(inode), reserve_bytes,
true); true);
if (ret) { if (ret) {
......
...@@ -6305,13 +6305,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, ...@@ -6305,13 +6305,16 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans,
u32 sizes[2]; u32 sizes[2];
int nitems = name ? 2 : 1; int nitems = name ? 2 : 1;
unsigned long ptr; unsigned long ptr;
unsigned int nofs_flag;
int ret; int ret;
path = btrfs_alloc_path(); path = btrfs_alloc_path();
if (!path) if (!path)
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
nofs_flag = memalloc_nofs_save();
inode = new_inode(fs_info->sb); inode = new_inode(fs_info->sb);
memalloc_nofs_restore(nofs_flag);
if (!inode) { if (!inode) {
btrfs_free_path(path); btrfs_free_path(path);
return ERR_PTR(-ENOMEM); return ERR_PTR(-ENOMEM);
......
...@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root, ...@@ -500,7 +500,7 @@ static int process_leaf(struct btrfs_root *root,
struct btrfs_extent_data_ref *dref; struct btrfs_extent_data_ref *dref;
struct btrfs_shared_data_ref *sref; struct btrfs_shared_data_ref *sref;
u32 count; u32 count;
int i = 0, tree_block_level = 0, ret; int i = 0, tree_block_level = 0, ret = 0;
struct btrfs_key key; struct btrfs_key key;
int nritems = btrfs_header_nritems(leaf); int nritems = btrfs_header_nritems(leaf);
......
...@@ -5085,7 +5085,7 @@ static int clone_range(struct send_ctx *sctx, ...@@ -5085,7 +5085,7 @@ static int clone_range(struct send_ctx *sctx,
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_key key; struct btrfs_key key;
int ret; int ret;
u64 clone_src_i_size; u64 clone_src_i_size = 0;
/* /*
* Prevent cloning from a zero offset with a length matching the sector * Prevent cloning from a zero offset with a length matching the sector
......
...@@ -2932,7 +2932,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans, ...@@ -2932,7 +2932,8 @@ static int walk_log_tree(struct btrfs_trans_handle *trans,
* in the tree of log roots * in the tree of log roots
*/ */
static int update_log_root(struct btrfs_trans_handle *trans, static int update_log_root(struct btrfs_trans_handle *trans,
struct btrfs_root *log) struct btrfs_root *log,
struct btrfs_root_item *root_item)
{ {
struct btrfs_fs_info *fs_info = log->fs_info; struct btrfs_fs_info *fs_info = log->fs_info;
int ret; int ret;
...@@ -2940,10 +2941,10 @@ static int update_log_root(struct btrfs_trans_handle *trans, ...@@ -2940,10 +2941,10 @@ static int update_log_root(struct btrfs_trans_handle *trans,
if (log->log_transid == 1) { if (log->log_transid == 1) {
/* insert root item on the first sync */ /* insert root item on the first sync */
ret = btrfs_insert_root(trans, fs_info->log_root_tree, ret = btrfs_insert_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item); &log->root_key, root_item);
} else { } else {
ret = btrfs_update_root(trans, fs_info->log_root_tree, ret = btrfs_update_root(trans, fs_info->log_root_tree,
&log->root_key, &log->root_item); &log->root_key, root_item);
} }
return ret; return ret;
} }
...@@ -3041,6 +3042,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, ...@@ -3041,6 +3042,7 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
struct btrfs_fs_info *fs_info = root->fs_info; struct btrfs_fs_info *fs_info = root->fs_info;
struct btrfs_root *log = root->log_root; struct btrfs_root *log = root->log_root;
struct btrfs_root *log_root_tree = fs_info->log_root_tree; struct btrfs_root *log_root_tree = fs_info->log_root_tree;
struct btrfs_root_item new_root_item;
int log_transid = 0; int log_transid = 0;
struct btrfs_log_ctx root_log_ctx; struct btrfs_log_ctx root_log_ctx;
struct blk_plug plug; struct blk_plug plug;
...@@ -3104,17 +3106,25 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, ...@@ -3104,17 +3106,25 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
goto out; goto out;
} }
/*
* We _must_ update under the root->log_mutex in order to make sure we
* have a consistent view of the log root we are trying to commit at
* this moment.
*
* We _must_ copy this into a local copy, because we are not holding the
* log_root_tree->log_mutex yet. This is important because when we
* commit the log_root_tree we must have a consistent view of the
* log_root_tree when we update the super block to point at the
* log_root_tree bytenr. If we update the log_root_tree here we'll race
* with the commit and possibly point at the new block which we may not
* have written out.
*/
btrfs_set_root_node(&log->root_item, log->node); btrfs_set_root_node(&log->root_item, log->node);
memcpy(&new_root_item, &log->root_item, sizeof(new_root_item));
root->log_transid++; root->log_transid++;
log->log_transid = root->log_transid; log->log_transid = root->log_transid;
root->log_start_pid = 0; root->log_start_pid = 0;
/*
* Update or create log root item under the root's log_mutex to prevent
* races with concurrent log syncs that can lead to failure to update
* log root item because it was not created yet.
*/
ret = update_log_root(trans, log);
/* /*
* IO has been started, blocks of the log tree have WRITTEN flag set * IO has been started, blocks of the log tree have WRITTEN flag set
* in their headers. new modifications of the log will be written to * in their headers. new modifications of the log will be written to
...@@ -3135,6 +3145,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, ...@@ -3135,6 +3145,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
mutex_unlock(&log_root_tree->log_mutex); mutex_unlock(&log_root_tree->log_mutex);
mutex_lock(&log_root_tree->log_mutex); mutex_lock(&log_root_tree->log_mutex);
/*
* Now we are safe to update the log_root_tree because we're under the
* log_mutex, and we're a current writer so we're holding the commit
* open until we drop the log_mutex.
*/
ret = update_log_root(trans, log, &new_root_item);
if (atomic_dec_and_test(&log_root_tree->log_writers)) { if (atomic_dec_and_test(&log_root_tree->log_writers)) {
/* atomic_dec_and_test implies a barrier */ /* atomic_dec_and_test implies a barrier */
cond_wake_up_nomb(&log_root_tree->log_writer_wait); cond_wake_up_nomb(&log_root_tree->log_writer_wait);
......
...@@ -3845,7 +3845,11 @@ static int alloc_profile_is_valid(u64 flags, int extended) ...@@ -3845,7 +3845,11 @@ static int alloc_profile_is_valid(u64 flags, int extended)
return !extended; /* "0" is valid for usual profiles */ return !extended; /* "0" is valid for usual profiles */
/* true if exactly one bit set */ /* true if exactly one bit set */
return is_power_of_2(flags); /*
* Don't use is_power_of_2(unsigned long) because it won't work
* for the single profile (1ULL << 48) on 32-bit CPUs.
*/
return flags != 0 && (flags & (flags - 1)) == 0;
} }
static inline int balance_need_close(struct btrfs_fs_info *fs_info) static inline int balance_need_close(struct btrfs_fs_info *fs_info)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment