Commit e13d100b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "This fixes some lockups in btrfs reported with rc1.  It probably has
  some performance impact because it is backing off our spinning locks
  more often and switching to a blocking lock.  I'll be able to nail
  that down next week, but for now I want to get the lockups taken care
  of.

  Otherwise some more stack reduction and assorted fixes"
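
The spin-versus-block tradeoff described above usually takes one common shape: spin briefly with a trylock while the hold is expected to be short, then fall back to a sleeping acquisition. A generic userspace sketch of that shape with a POSIX mutex (illustrative only, not the btrfs tree-lock code):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void lock_adaptive(void)
    {
        /* Spin a bounded number of times for the short-hold case... */
        for (int i = 0; i < 100; i++)
            if (pthread_mutex_trylock(&m) == 0)
                return;
        /* ...then back off to a blocking (sleeping) acquisition. */
        pthread_mutex_lock(&m);
    }

    int main(void)
    {
        lock_adaptive();
        pthread_mutex_unlock(&m);
        puts("acquired");
        return 0;
    }

Backing off to the blocking path sooner costs some throughput under light contention, which is the performance impact being measured above, but it removes the lockups.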

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: fix wrong error handle when the device is missing or is not writeable
  Btrfs: fix deadlock when mounting a degraded fs
  Btrfs: use bio_endio_nodec instead of open code
  Btrfs: fix NULL pointer crash when running balance and scrub concurrently
  btrfs: Skip scrubbing removed chunks to avoid -ENOENT.
  Btrfs: fix broken free space cache after the system crashed
  Btrfs: make free space cache write out functions more readable
  Btrfs: remove unused wait queue in struct extent_buffer
  Btrfs: fix deadlocks with trylock on tree nodes
parents 147f1404 8408c716

fs/btrfs/ctree.h
@@ -1259,11 +1259,19 @@ struct btrfs_block_group_cache {
 	spinlock_t lock;
 	u64 pinned;
 	u64 reserved;
+	u64 delalloc_bytes;
 	u64 bytes_super;
 	u64 flags;
 	u64 sectorsize;
 	u64 cache_generation;

+	/*
+	 * It is just used for the delayed data space allocation because
+	 * only the data space allocation and the relative metadata update
+	 * can be done cross the transaction.
+	 */
+	struct rw_semaphore data_rwsem;
+
 	/* for raid56, this is a full stripe, without parity */
 	unsigned long full_stripe_len;
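
The new data_rwsem pairs with the delalloc_bytes counter added above: data allocators hold the semaphore shared, while the free-space-cache writer takes it exclusive and bails out if delalloc is still outstanding in the block group. A rough userspace sketch of that coordination, with POSIX locks standing in for the kernel's spinlock_t and rw_semaphore (the function names here are illustrative, not btrfs code):

    #include <pthread.h>
    #include <stdio.h>

    /* Illustrative stand-in for btrfs_block_group_cache. */
    struct block_group {
        pthread_rwlock_t data_rwsem;    /* kernel: struct rw_semaphore */
        pthread_mutex_t lock;           /* kernel: spinlock_t */
        unsigned long long delalloc_bytes;
    };

    /* Data allocation path: many allocators may run concurrently. */
    static void alloc_data_extent(struct block_group *bg, unsigned long long len)
    {
        pthread_rwlock_rdlock(&bg->data_rwsem);
        pthread_mutex_lock(&bg->lock);
        bg->delalloc_bytes += len;      /* the allocation outlives this call */
        pthread_mutex_unlock(&bg->lock);
        pthread_rwlock_unlock(&bg->data_rwsem);
    }

    /* Cache writer: excludes all allocators, then checks for in-flight delalloc. */
    static int write_free_space_cache(struct block_group *bg)
    {
        int skip;

        pthread_rwlock_wrlock(&bg->data_rwsem);
        pthread_mutex_lock(&bg->lock);
        skip = (bg->delalloc_bytes != 0);   /* cache would be stale: bail out */
        pthread_mutex_unlock(&bg->lock);
        pthread_rwlock_unlock(&bg->data_rwsem);
        return skip ? -1 : 0;
    }

    int main(void)
    {
        struct block_group bg = {
            .data_rwsem = PTHREAD_RWLOCK_INITIALIZER,
            .lock = PTHREAD_MUTEX_INITIALIZER,
        };
        alloc_data_extent(&bg, 4096);
        printf("cache write %s\n", write_free_space_cache(&bg) ? "skipped" : "done");
        return 0;
    }
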
@@ -3316,7 +3324,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
 			 struct btrfs_key *ins);
 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
 			 u64 min_alloc_size, u64 empty_size, u64 hint_byte,
-			 struct btrfs_key *ins, int is_data);
+			 struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		  struct extent_buffer *buf, int full_backref, int no_quota);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3330,7 +3338,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
 		      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
 		      u64 owner, u64 offset, int no_quota);
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
+			       int delalloc);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
 				       u64 start, u64 len);
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
This diff is collapsed.

fs/btrfs/extent_io.h
@@ -158,7 +158,6 @@ struct extent_buffer {
 	 * to unlock
 	 */
 	wait_queue_head_t read_lock_wq;
-	wait_queue_head_t lock_wq;
 	struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 #ifdef CONFIG_BTRFS_DEBUG
 	struct list_head leak_list;

fs/btrfs/extent_map.c
@@ -75,6 +75,8 @@ void free_extent_map(struct extent_map *em)
 	if (atomic_dec_and_test(&em->refs)) {
 		WARN_ON(extent_map_in_tree(em));
 		WARN_ON(!list_empty(&em->list));
+		if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
+			kfree(em->bdev);
 		kmem_cache_free(extent_map_cache, em);
 	}
 }

fs/btrfs/extent_map.h
@@ -15,6 +15,7 @@
 #define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
 #define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
 #define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
+#define EXTENT_FLAG_FS_MAPPING 6 /* filesystem extent mapping type */

 struct extent_map {
 	struct rb_node rb_node;
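
In the two hunks above, em->bdev is overloaded: for ordinary extents it points at a block device, while for chunk mappings it carries the chunk's map_lookup. The new EXTENT_FLAG_FS_MAPPING bit tells free_extent_map() when it owns that pointer, so the mapping is freed exactly once, with the last reference, instead of in several error and teardown paths. A hedged userspace sketch of the same last-reference-owns-cleanup idea (all types and names below are invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdatomic.h>

    #define FLAG_FS_MAPPING (1u << 6)   /* mirrors EXTENT_FLAG_FS_MAPPING */

    struct extent_map_sketch {
        atomic_int refs;
        unsigned int flags;
        void *bdev;                     /* device handle, or an owned mapping */
    };

    static void free_extent_map_sketch(struct extent_map_sketch *em)
    {
        if (atomic_fetch_sub(&em->refs, 1) == 1) {
            /* Only free bdev when it is an fs mapping we own. */
            if (em->flags & FLAG_FS_MAPPING)
                free(em->bdev);
            free(em);
        }
    }

    int main(void)
    {
        struct extent_map_sketch *em = calloc(1, sizeof(*em));
        atomic_init(&em->refs, 2);      /* one ref for the tree, one for us */
        em->flags |= FLAG_FS_MAPPING;
        em->bdev = malloc(64);          /* stands in for struct map_lookup */

        free_extent_map_sketch(em);     /* tree drops its ref */
        free_extent_map_sketch(em);     /* our ref: mapping freed here */
        puts("mapping freed with last reference");
        return 0;
    }
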

fs/btrfs/free-space-cache.c
@@ -274,18 +274,32 @@ struct io_ctl {
 };

 static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
-		       struct btrfs_root *root)
+		       struct btrfs_root *root, int write)
 {
+	int num_pages;
+	int check_crcs = 0;
+
+	num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+		    PAGE_CACHE_SHIFT;
+
+	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
+		check_crcs = 1;
+
+	/* Make sure we can fit our crcs into the first page */
+	if (write && check_crcs &&
+	    (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+		return -ENOSPC;
+
 	memset(io_ctl, 0, sizeof(struct io_ctl));
-	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-		PAGE_CACHE_SHIFT;
-	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
-				GFP_NOFS);
+
+	io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
 	if (!io_ctl->pages)
 		return -ENOMEM;
+
+	io_ctl->num_pages = num_pages;
 	io_ctl->root = root;
-	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
-		io_ctl->check_crcs = 1;
+	io_ctl->check_crcs = check_crcs;
+
 	return 0;
 }
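
The new -ENOSPC check in io_ctl_init() caps the cache file before any pages are locked: one u32 crc per page has to fit in the first page. Assuming 4 KiB pages (the actual PAGE_CACHE_SIZE depends on the architecture, not on this diff), that allows at most 1023 pages of cache; a quick check of the arithmetic:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const size_t page_size = 4096;  /* assumed PAGE_CACHE_SIZE */
        const size_t crcs_per_page = page_size / sizeof(uint32_t);

        /* io_ctl_init() rejects writes when num_pages * sizeof(u32) >= page_size,
         * so the largest accepted num_pages is crcs_per_page - 1. */
        printf("max cache pages: %zu (%zu KiB cache file)\n",
               crcs_per_page - 1, (crcs_per_page - 1) * page_size / 1024);
        return 0;
    }
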
@@ -666,6 +680,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	generation = btrfs_free_space_generation(leaf, header);
 	btrfs_release_path(path);

+	if (!BTRFS_I(inode)->generation) {
+		btrfs_info(root->fs_info,
+			   "The free space cache file (%llu) is invalid. skip it\n",
+			   offset);
+		return 0;
+	}
+
 	if (BTRFS_I(inode)->generation != generation) {
 		btrfs_err(root->fs_info,
 			  "free space inode generation (%llu) "
@@ -677,7 +698,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
 	if (!num_entries)
 		return 0;

-	ret = io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root, 0);
 	if (ret)
 		return ret;
@@ -957,19 +978,18 @@ update_cache_item(struct btrfs_trans_handle *trans,
 }

 static noinline_for_stack int
-add_ioctl_entries(struct btrfs_root *root,
-		  struct inode *inode,
-		  struct btrfs_block_group_cache *block_group,
-		  struct io_ctl *io_ctl,
-		  struct extent_state **cached_state,
-		  struct list_head *bitmap_list,
-		  int *entries)
+write_pinned_extent_entries(struct btrfs_root *root,
+			    struct btrfs_block_group_cache *block_group,
+			    struct io_ctl *io_ctl,
+			    int *entries)
 {
 	u64 start, extent_start, extent_end, len;
-	struct list_head *pos, *n;
 	struct extent_io_tree *unpin = NULL;
 	int ret;

+	if (!block_group)
+		return 0;
+
 	/*
 	 * We want to add any pinned extents to our free space cache
 	 * so we don't leak the space
@@ -979,23 +999,19 @@ add_ioctl_entries(struct btrfs_root *root,
 	 */
 	unpin = root->fs_info->pinned_extents;

-	if (block_group)
-		start = block_group->key.objectid;
+	start = block_group->key.objectid;

-	while (block_group && (start < block_group->key.objectid +
-	       block_group->key.offset)) {
+	while (start < block_group->key.objectid + block_group->key.offset) {
 		ret = find_first_extent_bit(unpin, start,
 					    &extent_start, &extent_end,
 					    EXTENT_DIRTY, NULL);
-		if (ret) {
-			ret = 0;
-			break;
-		}
+		if (ret)
+			return 0;

 		/* This pinned extent is out of our range */
 		if (extent_start >= block_group->key.objectid +
 		    block_group->key.offset)
-			break;
+			return 0;

 		extent_start = max(extent_start, start);
 		extent_end = min(block_group->key.objectid +
@@ -1005,11 +1021,20 @@ add_ioctl_entries(struct btrfs_root *root,
 		*entries += 1;
 		ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
 		if (ret)
-			goto out_nospc;
+			return -ENOSPC;

 		start = extent_end;
 	}

+	return 0;
+}
+
+static noinline_for_stack int
+write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+{
+	struct list_head *pos, *n;
+	int ret;
+
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, bitmap_list) {
 		struct btrfs_free_space *entry =
@@ -1017,36 +1042,24 @@ add_ioctl_entries(struct btrfs_root *root,
 		ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
 		if (ret)
-			goto out_nospc;
+			return -ENOSPC;
 		list_del_init(&entry->list);
 	}

-	/* Zero out the rest of the pages just to make sure */
-	io_ctl_zero_remaining_pages(io_ctl);
-
-	ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
-				0, i_size_read(inode), cached_state);
-	io_ctl_drop_pages(io_ctl);
-	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-			     i_size_read(inode) - 1, cached_state, GFP_NOFS);
+	return 0;
+}

-	if (ret)
-		goto fail;
+static int flush_dirty_cache(struct inode *inode)
+{
+	int ret;

 	ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
-	if (ret) {
+	if (ret)
 		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
 				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
 				 GFP_NOFS);
-		goto fail;
-	}
-
-	return 0;

-fail:
-	return -1;
-
-out_nospc:
-	return -ENOSPC;
+	return ret;
 }
static void noinline_for_stack static void noinline_for_stack
...@@ -1056,6 +1069,7 @@ cleanup_write_cache_enospc(struct inode *inode, ...@@ -1056,6 +1069,7 @@ cleanup_write_cache_enospc(struct inode *inode,
struct list_head *bitmap_list) struct list_head *bitmap_list)
{ {
struct list_head *pos, *n; struct list_head *pos, *n;
list_for_each_safe(pos, n, bitmap_list) { list_for_each_safe(pos, n, bitmap_list) {
struct btrfs_free_space *entry = struct btrfs_free_space *entry =
list_entry(pos, struct btrfs_free_space, list); list_entry(pos, struct btrfs_free_space, list);
@@ -1088,64 +1102,104 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 {
 	struct extent_state *cached_state = NULL;
 	struct io_ctl io_ctl;
-	struct list_head bitmap_list;
+	LIST_HEAD(bitmap_list);
 	int entries = 0;
 	int bitmaps = 0;
 	int ret;
-	int err = -1;
-
-	INIT_LIST_HEAD(&bitmap_list);

 	if (!i_size_read(inode))
 		return -1;

-	ret = io_ctl_init(&io_ctl, inode, root);
+	ret = io_ctl_init(&io_ctl, inode, root, 1);
 	if (ret)
 		return -1;

+	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
+		down_write(&block_group->data_rwsem);
+		spin_lock(&block_group->lock);
+		if (block_group->delalloc_bytes) {
+			block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+			spin_unlock(&block_group->lock);
+			up_write(&block_group->data_rwsem);
+			BTRFS_I(inode)->generation = 0;
+			ret = 0;
+			goto out;
+		}
+		spin_unlock(&block_group->lock);
+	}
+
 	/* Lock all pages first so we can lock the extent safely. */
 	io_ctl_prepare_pages(&io_ctl, inode, 0);

 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state);

-	/* Make sure we can fit our crcs into the first page */
-	if (io_ctl.check_crcs &&
-	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
-		goto out_nospc;
-
 	io_ctl_set_generation(&io_ctl, trans->transid);

+	/* Write out the extent entries in the free space cache */
 	ret = write_cache_extent_entries(&io_ctl, ctl,
 					 block_group, &entries, &bitmaps,
 					 &bitmap_list);
 	if (ret)
 		goto out_nospc;

-	ret = add_ioctl_entries(root, inode, block_group, &io_ctl,
-				&cached_state, &bitmap_list, &entries);
+	/*
+	 * Some spaces that are freed in the current transaction are pinned,
+	 * they will be added into free space cache after the transaction is
+	 * committed, we shouldn't lose them.
+	 */
+	ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
+	if (ret)
+		goto out_nospc;

-	if (ret == -ENOSPC)
+	/* At last, we write out all the bitmaps. */
+	ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+	if (ret)
 		goto out_nospc;
-	else if (ret)
+
+	/* Zero out the rest of the pages just to make sure */
+	io_ctl_zero_remaining_pages(&io_ctl);
+
+	/* Everything is written out, now we dirty the pages in the file. */
+	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+				0, i_size_read(inode), &cached_state);
+	if (ret)
+		goto out_nospc;
+
+	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+		up_write(&block_group->data_rwsem);
+	/*
+	 * Release the pages and unlock the extent, we will flush
+	 * them out later
+	 */
+	io_ctl_drop_pages(&io_ctl);
+
+	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+
+	/* Flush the dirty pages in the cache file. */
+	ret = flush_dirty_cache(inode);
+	if (ret)
 		goto out;

-	err = update_cache_item(trans, root, inode, path, offset,
+	/* Update the cache item to tell everyone this cache file is valid. */
+	ret = update_cache_item(trans, root, inode, path, offset,
 				entries, bitmaps);
 out:
 	io_ctl_free(&io_ctl);
-	if (err) {
+	if (ret) {
 		invalidate_inode_pages2(inode->i_mapping);
 		BTRFS_I(inode)->generation = 0;
 	}
 	btrfs_update_inode(trans, root, inode);
-	return err;
+	return ret;

 out_nospc:
 	cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+
+	if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+		up_write(&block_group->data_rwsem);
+
 	goto out;
 }
@@ -1165,6 +1219,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		spin_unlock(&block_group->lock);
 		return 0;
 	}
+
+	if (block_group->delalloc_bytes) {
+		block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+		spin_unlock(&block_group->lock);
+		return 0;
+	}
 	spin_unlock(&block_group->lock);

 	inode = lookup_free_space_inode(root, block_group, path);

fs/btrfs/inode.c
@@ -693,7 +693,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 		ret = btrfs_reserve_extent(root,
 					   async_extent->compressed_size,
 					   async_extent->compressed_size,
-					   0, alloc_hint, &ins, 1);
+					   0, alloc_hint, &ins, 1, 1);
 		if (ret) {
 			int i;
@@ -794,7 +794,7 @@ static noinline int submit_compressed_extents(struct inode *inode,
 out:
 	return ret;
 out_free_reserve:
-	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_free:
 	extent_clear_unlock_delalloc(inode, async_extent->start,
 				     async_extent->start +
@@ -917,7 +917,7 @@ static noinline int cow_file_range(struct inode *inode,
 		cur_alloc_size = disk_num_bytes;
 		ret = btrfs_reserve_extent(root, cur_alloc_size,
 					   root->sectorsize, 0, alloc_hint,
-					   &ins, 1);
+					   &ins, 1, 1);
 		if (ret < 0)
 			goto out_unlock;
@@ -995,7 +995,7 @@ static noinline int cow_file_range(struct inode *inode,
 	return ret;

 out_reserve:
-	btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+	btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
 	extent_clear_unlock_delalloc(inode, start, end, locked_page,
 				     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
@@ -2599,6 +2599,21 @@ record_old_file_extents(struct inode *inode,
 	return NULL;
 }

+static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+					 u64 start, u64 len)
+{
+	struct btrfs_block_group_cache *cache;
+
+	cache = btrfs_lookup_block_group(root->fs_info, start);
+	ASSERT(cache);
+
+	spin_lock(&cache->lock);
+	cache->delalloc_bytes -= len;
+	spin_unlock(&cache->lock);
+
+	btrfs_put_block_group(cache);
+}
+
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -2698,6 +2713,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 						logical_len, logical_len,
 						compress_type, 0, 0,
 						BTRFS_FILE_EXTENT_REG);
+		if (!ret)
+			btrfs_release_delalloc_bytes(root,
+						     ordered_extent->start,
+						     ordered_extent->disk_len);
 	}
 	unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
 			   ordered_extent->file_offset, ordered_extent->len,
@@ -2750,7 +2769,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
 		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
 			btrfs_free_reserved_extent(root, ordered_extent->start,
-						   ordered_extent->disk_len);
+						   ordered_extent->disk_len, 1);
 	}
@@ -6535,21 +6554,21 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 	alloc_hint = get_extent_allocation_hint(inode, start, len);
 	ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
-				   alloc_hint, &ins, 1);
+				   alloc_hint, &ins, 1, 1);
 	if (ret)
 		return ERR_PTR(ret);

 	em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
 			      ins.offset, ins.offset, ins.offset, 0);
 	if (IS_ERR(em)) {
-		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+		btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 		return em;
 	}

 	ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
 					   ins.offset, ins.offset, 0);
 	if (ret) {
-		btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+		btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 		free_extent_map(em);
 		return ERR_PTR(ret);
 	}
@@ -7437,7 +7456,7 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
 		if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
 		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
 			btrfs_free_reserved_extent(root, ordered->start,
-						   ordered->disk_len);
+						   ordered->disk_len, 1);
 		btrfs_put_ordered_extent(ordered);
 		btrfs_put_ordered_extent(ordered);
 	}
@@ -8808,7 +8827,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 		cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
 		cur_bytes = max(cur_bytes, min_size);
 		ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
-					   *alloc_hint, &ins, 1);
+					   *alloc_hint, &ins, 1, 0);
 		if (ret) {
 			if (own_trans)
 				btrfs_end_transaction(trans, root);
@@ -8822,7 +8841,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
 						  BTRFS_FILE_EXTENT_PREALLOC);
 		if (ret) {
 			btrfs_free_reserved_extent(root, ins.objectid,
-						   ins.offset);
+						   ins.offset, 0);
 			btrfs_abort_transaction(trans, root, ret);
 			if (own_trans)
 				btrfs_end_transaction(trans, root);

fs/btrfs/locking.c
@@ -33,14 +33,14 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-	if (eb->lock_nested) {
-		read_lock(&eb->lock);
-		if (eb->lock_nested && current->pid == eb->lock_owner) {
-			read_unlock(&eb->lock);
-			return;
-		}
-		read_unlock(&eb->lock);
-	}
+	/*
+	 * no lock is required.  The lock owner may change if
+	 * we have a read lock, but it won't change to or away
+	 * from us.  If we have the write lock, we are the owner
+	 * and it'll never change.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner)
+		return;
 	if (rw == BTRFS_WRITE_LOCK) {
 		if (atomic_read(&eb->blocking_writers) == 0) {
 			WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -65,14 +65,15 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-	if (eb->lock_nested) {
-		read_lock(&eb->lock);
-		if (eb->lock_nested && current->pid == eb->lock_owner) {
-			read_unlock(&eb->lock);
-			return;
-		}
-		read_unlock(&eb->lock);
-	}
+	/*
+	 * no lock is required.  The lock owner may change if
+	 * we have a read lock, but it won't change to or away
+	 * from us.  If we have the write lock, we are the owner
+	 * and it'll never change.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner)
+		return;
+
 	if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
 		BUG_ON(atomic_read(&eb->blocking_writers) != 1);
 		write_lock(&eb->lock);
@@ -99,6 +100,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+	BUG_ON(!atomic_read(&eb->blocking_writers) &&
+	       current->pid == eb->lock_owner);
+
 	read_lock(&eb->lock);
 	if (atomic_read(&eb->blocking_writers) &&
 	    current->pid == eb->lock_owner) {
@@ -132,7 +136,9 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
 	if (atomic_read(&eb->blocking_writers))
 		return 0;

-	read_lock(&eb->lock);
+	if (!read_trylock(&eb->lock))
+		return 0;
+
 	if (atomic_read(&eb->blocking_writers)) {
 		read_unlock(&eb->lock);
 		return 0;
@@ -151,7 +157,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers))
 		return 0;
-	write_lock(&eb->lock);
+
+	if (!write_trylock(&eb->lock))
+		return 0;
+
 	if (atomic_read(&eb->blocking_writers) ||
 	    atomic_read(&eb->blocking_readers)) {
 		write_unlock(&eb->lock);
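
Both btrfs_try_tree_read_lock() and btrfs_try_tree_write_lock() previously took eb->lock unconditionally after peeking at the blocking counters, so a "try" lock could still sleep on a contended rwlock and deadlock against a holder that had gone blocking. The trylock variants fail fast instead. The shape of the fix in a standalone sketch, with POSIX rwlocks and invented names standing in for the extent_buffer lock:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct eb_sketch {
        pthread_rwlock_t lock;
        atomic_int blocking_writers;
        atomic_int blocking_readers;
    };

    /* Returns 1 on success, 0 if the lock (or a blocking holder) is in the way. */
    static int try_tree_write_lock(struct eb_sketch *eb)
    {
        if (atomic_load(&eb->blocking_writers) ||
            atomic_load(&eb->blocking_readers))
            return 0;

        /* Never sleep here: failing fast lets the caller back off. */
        if (pthread_rwlock_trywrlock(&eb->lock) != 0)
            return 0;

        /* Re-check after acquiring: a holder may have gone blocking meanwhile. */
        if (atomic_load(&eb->blocking_writers) ||
            atomic_load(&eb->blocking_readers)) {
            pthread_rwlock_unlock(&eb->lock);
            return 0;
        }
        return 1;
    }

    int main(void)
    {
        struct eb_sketch eb = { .lock = PTHREAD_RWLOCK_INITIALIZER };
        printf("write lock %s\n", try_tree_write_lock(&eb) ? "taken" : "skipped");
        return 0;
    }
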
@@ -168,14 +177,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
-	if (eb->lock_nested) {
-		read_lock(&eb->lock);
-		if (eb->lock_nested && current->pid == eb->lock_owner) {
-			eb->lock_nested = 0;
-			read_unlock(&eb->lock);
-			return;
-		}
-		read_unlock(&eb->lock);
+	/*
+	 * if we're nested, we have the write lock.  No new locking
+	 * is needed as long as we are the lock owner.
+	 * The write unlock will do a barrier for us, and the lock_nested
+	 * field only matters to the lock owner.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner) {
+		eb->lock_nested = 0;
+		return;
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->spinning_readers) == 0);
@@ -189,14 +199,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-	if (eb->lock_nested) {
-		read_lock(&eb->lock);
-		if (eb->lock_nested && current->pid == eb->lock_owner) {
-			eb->lock_nested = 0;
-			read_unlock(&eb->lock);
-			return;
-		}
-		read_unlock(&eb->lock);
+	/*
+	 * if we're nested, we have the write lock.  No new locking
+	 * is needed as long as we are the lock owner.
+	 * The write unlock will do a barrier for us, and the lock_nested
+	 * field only matters to the lock owner.
+	 */
+	if (eb->lock_nested && current->pid == eb->lock_owner) {
+		eb->lock_nested = 0;
+		return;
 	}
 	btrfs_assert_tree_read_locked(eb);
 	WARN_ON(atomic_read(&eb->blocking_readers) == 0);
@@ -244,6 +255,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
 	BUG_ON(blockers > 1);

 	btrfs_assert_tree_locked(eb);
+	eb->lock_owner = 0;
 	atomic_dec(&eb->write_locks);

 	if (blockers) {

fs/btrfs/scrub.c
@@ -2725,11 +2725,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
 		length = btrfs_dev_extent_length(l, dev_extent);

-		if (found_key.offset + length <= start) {
-			key.offset = found_key.offset + length;
-			btrfs_release_path(path);
-			continue;
-		}
+		if (found_key.offset + length <= start)
+			goto skip;

 		chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
 		chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2740,10 +2737,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		 * the chunk from going away while we scrub it
 		 */
 		cache = btrfs_lookup_block_group(fs_info, chunk_offset);
-		if (!cache) {
-			ret = -ENOENT;
-			break;
-		}
+
+		/* some chunks are removed but not committed to disk yet,
+		 * continue scrubbing */
+		if (!cache)
+			goto skip;
+
 		dev_replace->cursor_right = found_key.offset + length;
 		dev_replace->cursor_left = found_key.offset;
 		dev_replace->item_needs_writeback = 1;
@@ -2802,7 +2801,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 		dev_replace->cursor_left = dev_replace->cursor_right;
 		dev_replace->item_needs_writeback = 1;
-
+skip:
 		key.offset = found_key.offset + length;
 		btrfs_release_path(path);
 	}
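
Both early-exit cases in this loop now funnel through a single skip: label at the bottom, so the key advance and btrfs_release_path() are written once instead of being duplicated in each branch. The control-flow shape, reduced to a generic sketch:

    #include <stdio.h>

    int main(void)
    {
        for (int key = 0; key < 8; ) {
            int length = 2;

            if (key == 2)               /* e.g. a stale or already-removed chunk */
                goto skip;

            printf("scrub chunk at %d\n", key);
    skip:
            key += length;              /* advance exactly once, in one place */
        }
        return 0;
    }
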

fs/btrfs/volumes.c
@@ -2543,9 +2543,6 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
 	remove_extent_mapping(em_tree, em);
 	write_unlock(&em_tree->lock);

-	kfree(map);
-	em->bdev = NULL;
-
 	/* once for the tree */
 	free_extent_map(em);
 	/* once for us */
@@ -4301,9 +4298,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	em = alloc_extent_map();
 	if (!em) {
+		kfree(map);
 		ret = -ENOMEM;
 		goto error;
 	}
+	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
 	em->bdev = (struct block_device *)map;
 	em->start = start;
 	em->len = num_bytes;
@@ -4346,7 +4345,6 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	/* One for the tree reference */
 	free_extent_map(em);
 error:
-	kfree(map);
 	kfree(devices_info);
 	return ret;
 }
@@ -4558,7 +4556,6 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
 		write_unlock(&tree->map_tree.lock);
 		if (!em)
 			break;
-		kfree(em->bdev);
 		/* once for us */
 		free_extent_map(em);
 		/* once for the tree */
@@ -5362,6 +5359,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
 	return 0;
 }

+static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
+{
+	if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
+		bio_endio_nodec(bio, err);
+	else
+		bio_endio(bio, err);
+	kfree(bbio);
+}
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
 	struct btrfs_bio *bbio = bio->bi_private;
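
btrfs_end_bbio() centralizes the choice the open-coded paths used to get wrong: when the original bio was itself submitted as one of the stripe bios (BTRFS_BIO_ORIG_BIO_SUBMITTED), no clone ever incremented its bi_remaining count, so completion has to go through bio_endio_nodec() rather than bio_endio() to avoid an underflow. A userspace sketch of that split, with an invented completion type standing in for struct bio:

    #include <stdio.h>
    #include <stdatomic.h>

    #define ORIG_SUBMITTED 0x1          /* mirrors BTRFS_BIO_ORIG_BIO_SUBMITTED */

    struct completion_sketch {
        atomic_int remaining;           /* stands in for bio->bi_remaining */
    };

    static void complete_final(struct completion_sketch *c, int err)
    {
        printf("I/O complete, err=%d\n", err);
    }

    /* Like bio_endio(): drops one "remaining" count, completes on the last one. */
    static void endio(struct completion_sketch *c, int err)
    {
        if (atomic_fetch_sub(&c->remaining, 1) == 1)
            complete_final(c, err);
    }

    /* Like bio_endio_nodec(): completes without consuming a "remaining" count. */
    static void endio_nodec(struct completion_sketch *c, int err)
    {
        complete_final(c, err);
    }

    /* Pick the right variant based on how the original was submitted. */
    static void end_bbio(struct completion_sketch *c, unsigned flags, int err)
    {
        if (flags & ORIG_SUBMITTED)
            endio_nodec(c, err);        /* no clone ever bumped remaining */
        else
            endio(c, err);
    }

    int main(void)
    {
        struct completion_sketch c;
        atomic_init(&c.remaining, 1);
        end_bbio(&c, ORIG_SUBMITTED, 0);
        return 0;
    }
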
@@ -5402,12 +5408,6 @@ static void btrfs_end_bio(struct bio *bio, int err)
 			bio = bbio->orig_bio;
 		}

-		/*
-		 * We have original bio now. So increment bi_remaining to
-		 * account for it in endio
-		 */
-		atomic_inc(&bio->bi_remaining);
-
 		bio->bi_private = bbio->private;
 		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5424,9 +5424,8 @@ static void btrfs_end_bio(struct bio *bio, int err)
 			set_bit(BIO_UPTODATE, &bio->bi_flags);
 			err = 0;
 		}
-		kfree(bbio);

-		bio_endio(bio, err);
+		btrfs_end_bbio(bbio, bio, err);
 	} else if (!is_orig_bio) {
 		bio_put(bio);
 	}
@@ -5589,12 +5588,15 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
 	atomic_inc(&bbio->error);
 	if (atomic_dec_and_test(&bbio->stripes_pending)) {
+		/* Should be the original bio. */
+		WARN_ON(bio != bbio->orig_bio);
+
 		bio->bi_private = bbio->private;
 		bio->bi_end_io = bbio->end_io;
 		btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
 		bio->bi_iter.bi_sector = logical >> 9;
-		kfree(bbio);

-		bio_endio(bio, -EIO);
+		btrfs_end_bbio(bbio, bio, -EIO);
 	}
 }
@@ -5681,6 +5683,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 			BUG_ON(!bio); /* -ENOMEM */
 		} else {
 			bio = first_bio;
+			bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
 		}

 		submit_stripe_bio(root, bbio, bio,
@@ -5822,6 +5825,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		return -ENOMEM;
 	}

+	set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
 	em->bdev = (struct block_device *)map;
 	em->start = logical;
 	em->len = length;
@@ -5846,7 +5850,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 		map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
 							uuid, NULL);
 		if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
-			kfree(map);
 			free_extent_map(em);
 			return -EIO;
 		}
@@ -5854,7 +5857,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
 			map->stripes[i].dev =
 				add_missing_dev(root, devid, uuid);
 			if (!map->stripes[i].dev) {
-				kfree(map);
 				free_extent_map(em);
 				return -EIO;
 			}

fs/btrfs/volumes.h
@@ -190,11 +190,14 @@ struct btrfs_bio_stripe {
 struct btrfs_bio;
 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);

+#define BTRFS_BIO_ORIG_BIO_SUBMITTED	0x1
+
 struct btrfs_bio {
 	atomic_t stripes_pending;
 	struct btrfs_fs_info *fs_info;
 	bio_end_io_t *end_io;
 	struct bio *orig_bio;
+	unsigned long flags;
 	void *private;
 	atomic_t error;
 	int max_errors;