Commit b971712a authored by Linus Torvalds

Merge branch 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs fixes from Chris Mason:
 "I have a two part pull this time because one of the patches Dave
  Sterba collected needed to be against v4.7-rc2 or higher (we used
  rc4).  I try to make my for-linus-xx branch testable on top of the
  last major so we can hand fixes to people on the list more easily, so
  I've split this pull in two.

  This first part has some fixes and two performance improvements that
  we've been testing for some time.

  Josef's two performance fixes are most notable.  The transid tracking
  patch makes a big improvement on pretty much every workload"

* 'for-linus-4.7' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  Btrfs: Force stripesize to the value of sectorsize
  btrfs: fix disk_i_size update bug when fallocate() fails
  Btrfs: fix error handling in map_private_extent_buffer
  Btrfs: fix error return code in btrfs_init_test_fs()
  Btrfs: don't do nocow check unless we have to
  btrfs: fix deadlock in delayed_ref_async_start
  Btrfs: track transid for delayed ref flushing
parents ca83a55c b7f67055
@@ -1786,10 +1786,12 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
 			if (!err) {
 				tmp = (struct btrfs_disk_key *)(kaddr + offset -
 							map_start);
-			} else {
+			} else if (err == 1) {
 				read_extent_buffer(eb, &unaligned,
 						   offset, sizeof(unaligned));
 				tmp = &unaligned;
+			} else {
+				return err;
 			}
 		} else {
@@ -2830,6 +2832,8 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
 		}
 		ret = key_search(b, key, level, &prev_cmp, &slot);
+		if (ret < 0)
+			goto done;
 		if (level != 0) {
 			int dec = 0;
......
@@ -2518,7 +2518,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
 			   struct btrfs_root *root, unsigned long count);
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-				 unsigned long count, int wait);
+				 unsigned long count, u64 transid, int wait);
 int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
 int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
 			     struct btrfs_root *root, u64 bytenr,
......
@@ -2806,7 +2806,7 @@ int open_ctree(struct super_block *sb,
 	nodesize = btrfs_super_nodesize(disk_super);
 	sectorsize = btrfs_super_sectorsize(disk_super);
-	stripesize = btrfs_super_stripesize(disk_super);
+	stripesize = sectorsize;
 	fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
 	fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
@@ -4133,9 +4133,7 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 			  btrfs_super_bytes_used(sb));
 		ret = -EINVAL;
 	}
-	if (!is_power_of_2(btrfs_super_stripesize(sb)) ||
-	    ((btrfs_super_stripesize(sb) != sectorsize) &&
-	     (btrfs_super_stripesize(sb) != 4096))) {
+	if (!is_power_of_2(btrfs_super_stripesize(sb))) {
 		btrfs_err(fs_info, "invalid stripesize %u",
 			  btrfs_super_stripesize(sb));
 		ret = -EINVAL;
......
@@ -2835,6 +2835,7 @@ int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
 struct async_delayed_refs {
 	struct btrfs_root *root;
+	u64 transid;
 	int count;
 	int error;
 	int sync;
@@ -2850,6 +2851,10 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	async = container_of(work, struct async_delayed_refs, work);
+	/* if the commit is already started, we don't need to wait here */
+	if (btrfs_transaction_blocked(async->root->fs_info))
+		goto done;
 	trans = btrfs_join_transaction(async->root);
 	if (IS_ERR(trans)) {
 		async->error = PTR_ERR(trans);
@@ -2861,10 +2866,15 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 	 * wait on delayed refs
 	 */
 	trans->sync = true;
+	/* Don't bother flushing if we got into a different transaction */
+	if (trans->transid > async->transid)
+		goto end;
 	ret = btrfs_run_delayed_refs(trans, async->root, async->count);
 	if (ret)
 		async->error = ret;
+end:
 	ret = btrfs_end_transaction(trans, async->root);
 	if (ret && !async->error)
 		async->error = ret;
@@ -2876,7 +2886,7 @@ static void delayed_ref_async_start(struct btrfs_work *work)
 }
 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
-				 unsigned long count, int wait)
+				 unsigned long count, u64 transid, int wait)
 {
 	struct async_delayed_refs *async;
 	int ret;
@@ -2888,6 +2898,7 @@ int btrfs_async_run_delayed_refs(struct btrfs_root *root,
 	async->root = root->fs_info->tree_root;
 	async->count = count;
 	async->error = 0;
+	async->transid = transid;
 	if (wait)
 		async->sync = 1;
 	else
......
@@ -5342,6 +5342,11 @@ int read_extent_buffer_to_user(struct extent_buffer *eb, void __user *dstv,
 	return ret;
 }
+/*
+ * return 0 if the item is found within a page.
+ * return 1 if the item spans two pages.
+ * return -EINVAL otherwise.
+ */
 int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 			      unsigned long min_len, char **map,
 			      unsigned long *map_start,
@@ -5356,7 +5361,7 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long start,
 		PAGE_SHIFT;
 	if (i != end_i)
-		return -EINVAL;
+		return 1;
 	if (i == 0) {
 		offset = start_offset;
......
@@ -1534,30 +1534,30 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
 		reserve_bytes = round_up(write_bytes + sector_offset,
 				root->sectorsize);
-		if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
-					      BTRFS_INODE_PREALLOC)) &&
-		    check_can_nocow(inode, pos, &write_bytes) > 0) {
-			/*
-			 * For nodata cow case, no need to reserve
-			 * data space.
-			 */
-			only_release_metadata = true;
-			/*
-			 * our prealloc extent may be smaller than
-			 * write_bytes, so scale down.
-			 */
-			num_pages = DIV_ROUND_UP(write_bytes + offset,
-						 PAGE_SIZE);
-			reserve_bytes = round_up(write_bytes + sector_offset,
-					root->sectorsize);
-			goto reserve_metadata;
-		}
 		ret = btrfs_check_data_free_space(inode, pos, write_bytes);
-		if (ret < 0)
-			break;
+		if (ret < 0) {
+			if ((BTRFS_I(inode)->flags & (BTRFS_INODE_NODATACOW |
+						      BTRFS_INODE_PREALLOC)) &&
+			    check_can_nocow(inode, pos, &write_bytes) > 0) {
+				/*
+				 * For nodata cow case, no need to reserve
+				 * data space.
+				 */
+				only_release_metadata = true;
+				/*
+				 * our prealloc extent may be smaller than
+				 * write_bytes, so scale down.
+				 */
+				num_pages = DIV_ROUND_UP(write_bytes + offset,
+							 PAGE_SIZE);
+				reserve_bytes = round_up(write_bytes +
							 sector_offset,
							 root->sectorsize);
+			} else {
+				break;
+			}
+		}
-reserve_metadata:
 		ret = btrfs_delalloc_reserve_metadata(inode, reserve_bytes);
 		if (ret) {
 			if (!only_release_metadata)
......
@@ -4558,6 +4558,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 			BUG_ON(ret);
 			if (btrfs_should_throttle_delayed_refs(trans, root))
 				btrfs_async_run_delayed_refs(root,
+					trans->transid,
 					trans->delayed_ref_updates * 2, 0);
 			if (be_nice) {
 				if (truncate_space_check(trans, root,
......
@@ -968,6 +968,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	struct rb_node *prev = NULL;
 	struct btrfs_ordered_extent *test;
 	int ret = 1;
+	u64 orig_offset = offset;
 	spin_lock_irq(&tree->lock);
 	if (ordered) {
@@ -983,7 +984,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	/* truncate file */
 	if (disk_i_size > i_size) {
-		BTRFS_I(inode)->disk_i_size = i_size;
+		BTRFS_I(inode)->disk_i_size = orig_offset;
 		ret = 0;
 		goto out;
 	}
......
@@ -68,7 +68,7 @@ int btrfs_init_test_fs(void)
 	if (IS_ERR(test_mnt)) {
 		printk(KERN_ERR "btrfs: cannot mount test file system\n");
 		unregister_filesystem(&test_type);
-		return ret;
+		return PTR_ERR(test_mnt);
 	}
 	return 0;
 }
......
@@ -818,6 +818,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 {
 	struct btrfs_transaction *cur_trans = trans->transaction;
 	struct btrfs_fs_info *info = root->fs_info;
+	u64 transid = trans->transid;
 	unsigned long cur = trans->delayed_ref_updates;
 	int lock = (trans->type != TRANS_JOIN_NOLOCK);
 	int err = 0;
@@ -905,7 +906,7 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
 	kmem_cache_free(btrfs_trans_handle_cachep, trans);
 	if (must_run_delayed_refs) {
-		btrfs_async_run_delayed_refs(root, cur,
+		btrfs_async_run_delayed_refs(root, cur, transid,
 					     must_run_delayed_refs == 1);
 	}
 	return err;
......
@@ -4694,12 +4694,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 	if (type & BTRFS_BLOCK_GROUP_RAID5) {
 		raid_stripe_len = find_raid56_stripe_len(ndevs - 1,
-				 btrfs_super_stripesize(info->super_copy));
+						extent_root->stripesize);
 		data_stripes = num_stripes - 1;
 	}
 	if (type & BTRFS_BLOCK_GROUP_RAID6) {
 		raid_stripe_len = find_raid56_stripe_len(ndevs - 2,
-				 btrfs_super_stripesize(info->super_copy));
+						extent_root->stripesize);
 		data_stripes = num_stripes - 2;
 	}
......