Commit d7781546 authored by Qu Wenruo's avatar Qu Wenruo Committed by Chris Mason

btrfs: Avoid truncating page or punching hole in an already existing hole.

btrfs_punch_hole() will truncate unaligned pages or punch a hole in an
already existing hole.
This causes unneeded page zeroing, or splits the original large hole
with extra sub-holes.

This patch will skip already existing holes before doing any page
truncating or hole punching.
Signed-off-by: default avatarQu Wenruo <quwenruo@cn.fujitsu.com>
Signed-off-by: default avatarChris Mason <clm@fb.com>
parent 3821f348
...@@ -2184,6 +2184,37 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode, ...@@ -2184,6 +2184,37 @@ static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
return 0; return 0;
} }
/*
 * Skip past a hole/vacuum extent covering @*start, if there is one.
 *
 * Looks up the extent map that contains @*start (i.e. the extent with
 * em->start <= *start < em->start + em->len).  If that extent is a hole
 * (or a vacuum extent, which only exists in no-hole mode), advance
 * @*start to the end of the hole and shrink @*len accordingly.
 *
 * Returns 1 when a hole was found and @*start/@*len were updated,
 * 0 when the range starts in a regular extent, or a negative errno
 * on lookup failure.
 */
static int find_first_non_hole(struct inode *inode, u64 *start, u64 *len)
{
	struct extent_map *em;
	int ret = 0;

	em = btrfs_get_extent(inode, NULL, 0, *start, *len, 0);
	if (IS_ERR_OR_NULL(em)) {
		/* A NULL extent map means the lookup ran out of memory */
		return em ? PTR_ERR(em) : -ENOMEM;
	}

	if (em->block_start == EXTENT_MAP_HOLE) {
		u64 hole_end = em->start + em->len;
		u64 range_end = *start + *len;

		ret = 1;
		/* If the hole swallows the whole range, nothing is left */
		*len = (hole_end > range_end) ? 0 : range_end - hole_end;
		*start = hole_end;
	}
	free_extent_map(em);
	return ret;
}
static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{ {
struct btrfs_root *root = BTRFS_I(inode)->root; struct btrfs_root *root = BTRFS_I(inode)->root;
...@@ -2191,17 +2222,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2191,17 +2222,18 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
struct btrfs_path *path; struct btrfs_path *path;
struct btrfs_block_rsv *rsv; struct btrfs_block_rsv *rsv;
struct btrfs_trans_handle *trans; struct btrfs_trans_handle *trans;
u64 lockstart = round_up(offset, BTRFS_I(inode)->root->sectorsize); u64 lockstart;
u64 lockend = round_down(offset + len, u64 lockend;
BTRFS_I(inode)->root->sectorsize) - 1; u64 tail_start;
u64 cur_offset = lockstart; u64 tail_len;
u64 orig_start = offset;
u64 cur_offset;
u64 min_size = btrfs_calc_trunc_metadata_size(root, 1); u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
u64 drop_end; u64 drop_end;
int ret = 0; int ret = 0;
int err = 0; int err = 0;
int rsv_count; int rsv_count;
bool same_page = ((offset >> PAGE_CACHE_SHIFT) == bool same_page;
((offset + len - 1) >> PAGE_CACHE_SHIFT));
bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES); bool no_holes = btrfs_fs_incompat(root->fs_info, NO_HOLES);
u64 ino_size; u64 ino_size;
...@@ -2211,6 +2243,21 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2211,6 +2243,21 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
mutex_lock(&inode->i_mutex); mutex_lock(&inode->i_mutex);
ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE); ino_size = round_up(inode->i_size, PAGE_CACHE_SIZE);
ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0)
goto out_only_mutex;
if (ret && !len) {
/* Already in a large hole */
ret = 0;
goto out_only_mutex;
}
lockstart = round_up(offset , BTRFS_I(inode)->root->sectorsize);
lockend = round_down(offset + len,
BTRFS_I(inode)->root->sectorsize) - 1;
same_page = ((offset >> PAGE_CACHE_SHIFT) ==
((offset + len - 1) >> PAGE_CACHE_SHIFT));
/* /*
* We needn't truncate any page which is beyond the end of the file * We needn't truncate any page which is beyond the end of the file
* because we are sure there is no data there. * because we are sure there is no data there.
...@@ -2222,8 +2269,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2222,8 +2269,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
if (same_page && len < PAGE_CACHE_SIZE) { if (same_page && len < PAGE_CACHE_SIZE) {
if (offset < ino_size) if (offset < ino_size)
ret = btrfs_truncate_page(inode, offset, len, 0); ret = btrfs_truncate_page(inode, offset, len, 0);
mutex_unlock(&inode->i_mutex); goto out_only_mutex;
return ret;
} }
/* zero back part of the first page */ /* zero back part of the first page */
...@@ -2235,12 +2281,39 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2235,12 +2281,39 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
} }
} }
/* zero the front end of the last page */ /* Check the aligned pages after the first unaligned page,
if (offset + len < ino_size) { * if offset != orig_start, which means the first unaligned page
ret = btrfs_truncate_page(inode, offset + len, 0, 1); * including several following pages are already in holes,
if (ret) { * the extra check can be skipped */
mutex_unlock(&inode->i_mutex); if (offset == orig_start) {
return ret; /* after truncate page, check hole again */
len = offset + len - lockstart;
offset = lockstart;
ret = find_first_non_hole(inode, &offset, &len);
if (ret < 0)
goto out_only_mutex;
if (ret && !len) {
ret = 0;
goto out_only_mutex;
}
lockstart = offset;
}
/* Check the tail unaligned part is in a hole */
tail_start = lockend + 1;
tail_len = offset + len - tail_start;
if (tail_len) {
ret = find_first_non_hole(inode, &tail_start, &tail_len);
if (unlikely(ret < 0))
goto out_only_mutex;
if (!ret) {
/* zero the front end of the last page */
if (tail_start + tail_len < ino_size) {
ret = btrfs_truncate_page(inode,
tail_start + tail_len, 0, 1);
if (ret)
goto out_only_mutex;
}
} }
} }
...@@ -2314,6 +2387,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2314,6 +2387,8 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
BUG_ON(ret); BUG_ON(ret);
trans->block_rsv = rsv; trans->block_rsv = rsv;
cur_offset = lockstart;
len = lockend - cur_offset;
while (cur_offset < lockend) { while (cur_offset < lockend) {
ret = __btrfs_drop_extents(trans, root, inode, path, ret = __btrfs_drop_extents(trans, root, inode, path,
cur_offset, lockend + 1, cur_offset, lockend + 1,
...@@ -2354,6 +2429,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2354,6 +2429,14 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
rsv, min_size); rsv, min_size);
BUG_ON(ret); /* shouldn't happen */ BUG_ON(ret); /* shouldn't happen */
trans->block_rsv = rsv; trans->block_rsv = rsv;
ret = find_first_non_hole(inode, &cur_offset, &len);
if (unlikely(ret < 0))
break;
if (ret && !len) {
ret = 0;
break;
}
} }
if (ret) { if (ret) {
...@@ -2392,6 +2475,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len) ...@@ -2392,6 +2475,7 @@ static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
out: out:
unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend, unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
&cached_state, GFP_NOFS); &cached_state, GFP_NOFS);
out_only_mutex:
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
if (ret && !err) if (ret && !err)
err = ret; err = ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment