Commit 70b99e69 authored by Chris Mason

Btrfs: Compression corner fixes

Make sure we keep page->mapping NULL on the pages we get via alloc_page.
It gets set briefly so a few of the callbacks can do the right thing,
but in general these pages don't have a mapping.

Don't try to truncate compressed inline items in btrfs_drop_extents.
The whole compressed item must be preserved.
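In practice btrfs_drop_extents now records the new ram size but only
shrinks the item itself for plain inline extents; a sketch condensed
from the file.c hunks below:

    btrfs_set_file_extent_ram_bytes(leaf, extent, new_size);
    /* a compressed (or encrypted) inline item must stay whole on disk */
    if (!compression && !encryption)
        btrfs_truncate_item(trans, root, path, new_size, 1);

The caller in inode.c likewise passes start instead of aligned_end as
the inline limit, so the old item is dropped whole rather than
partially truncated.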

Don't try to create multipage inline compressed items.  If we allowed
them, overwriting just the first page of the file would force us to read
in and recow all the pages after it in the same compressed inline item.
For now, only create single page inline items.
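In cow_file_range_inline the eligibility check now bails out on
anything that would spill past the first page; a sketch with the other
conditions elided (see the inode.c hunk below):

    if (start > 0 ||
        actual_end >= PAGE_CACHE_SIZE ||   /* would need a second page */
        data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root))
        return 1;   /* don't inline; use a regular extent */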

Make sure we lock pages in the correct order during delalloc.  The
search into the state tree for delalloc bytes can return bytes before
the page we already have locked.
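Condensed from the find_lock_delalloc_range hunk below: bail out if the
found range ends before locked_page, and clamp its start so locking
always begins at the page we already hold.

    if (!found || delalloc_end <= *start)
        return found;             /* nothing usable at or past locked_page */
    if (delalloc_start < *start)
        delalloc_start = *start;  /* never lock pages behind locked_page */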
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent d899e052
@@ -220,10 +220,12 @@ static void end_compressed_bio_write(struct bio *bio, int err)
 	 */
 	inode = cb->inode;
 	tree = &BTRFS_I(inode)->io_tree;
+	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
 	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 					 cb->start,
 					 cb->start + cb->len - 1,
 					 NULL, 1);
+	cb->compressed_pages[0]->mapping = NULL;
 	end_compressed_writeback(inode, cb->start, cb->len);
 	/* note, our inode could be gone now */
@@ -306,6 +308,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
 		else
 			ret = 0;
+		page->mapping = NULL;
 		if (ret || bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(bio);
@@ -423,6 +426,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 		else
 			ret = 0;
+		page->mapping = NULL;
 		if (ret || bio_add_page(comp_bio, page, PAGE_CACHE_SIZE, 0) <
 		    PAGE_CACHE_SIZE) {
 			bio_get(comp_bio);
@@ -1242,12 +1242,21 @@ static noinline u64 find_lock_delalloc_range(struct inode *inode,
 	delalloc_end = 0;
 	found = find_delalloc_range(tree, &delalloc_start, &delalloc_end,
 				    max_bytes);
-	if (!found) {
+	if (!found || delalloc_end <= *start) {
 		*start = delalloc_start;
 		*end = delalloc_end;
 		return found;
 	}
+
+	/*
+	 * start comes from the offset of locked_page.  We have to lock
+	 * pages in order, so we can't process delalloc bytes before
+	 * locked_page
+	 */
+	if (delalloc_start < *start) {
+		delalloc_start = *start;
+	}
 	/*
 	 * make sure to limit the number of pages we try to lock down
 	 * if we're looping.
@@ -368,8 +368,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 	u64 search_start = start;
 	u64 leaf_start;
 	u64 ram_bytes = 0;
-	u8 compression = 0;
-	u8 encryption = 0;
+	u8 compression;
+	u8 encryption;
 	u16 other_encoding = 0;
 	u64 root_gen;
 	u64 root_owner;
@@ -415,6 +415,8 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 		leaf_start = 0;
 		root_gen = 0;
 		root_owner = 0;
+		compression = 0;
+		encryption = 0;
 		extent = NULL;
 		leaf = path->nodes[0];
 		slot = path->slots[0];
@@ -546,8 +548,12 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 						inline_limit - key.offset);
 				inode_sub_bytes(inode, extent_end -
 						inline_limit);
-				btrfs_truncate_item(trans, root, path,
-						    new_size, 1);
+				btrfs_set_file_extent_ram_bytes(leaf, extent,
+								new_size);
+				if (!compression && !encryption) {
+					btrfs_truncate_item(trans, root, path,
+							    new_size, 1);
+				}
 			}
 		}
 		/* delete the entire extent */
@@ -567,8 +573,11 @@ int noinline btrfs_drop_extents(struct btrfs_trans_handle *trans,
 			new_size = btrfs_file_extent_calc_inline_size(
 						   extent_end - end);
 			inode_sub_bytes(inode, end - key.offset);
-			ret = btrfs_truncate_item(trans, root, path,
-						  new_size, 0);
+			btrfs_set_file_extent_ram_bytes(leaf, extent,
+							new_size);
+			if (!compression && !encryption)
+				ret = btrfs_truncate_item(trans, root, path,
+							  new_size, 0);
 			BUG_ON(ret);
 		}
 		/* create bookend, splitting the extent in two */
@@ -239,6 +239,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	data_len = compressed_size;
 	if (start > 0 ||
+	    actual_end >= PAGE_CACHE_SIZE ||
 	    data_len >= BTRFS_MAX_INLINE_DATA_SIZE(root) ||
 	    (!compressed_size &&
 	    (actual_end & (root->sectorsize - 1)) == 0) ||
@@ -248,7 +249,7 @@ static int cow_file_range_inline(struct btrfs_trans_handle *trans,
 	}
 	ret = btrfs_drop_extents(trans, root, inode, start,
-				 aligned_end, aligned_end, &hint_byte);
+				 aligned_end, start, &hint_byte);
 	BUG_ON(ret);
 	if (isize > actual_end)
@@ -423,6 +424,7 @@ static int cow_file_range(struct inode *inode, struct page *locked_page,
 	 * free any pages it allocated and our page pointer array
 	 */
 	for (i = 0; i < nr_pages_ret; i++) {
+		WARN_ON(pages[i]->mapping);
 		page_cache_release(pages[i]);
 	}
 	kfree(pages);
@@ -572,8 +574,10 @@ static int cow_file_range(struct inode *inode, struct page *locked_page,
 	extent_clear_unlock_delalloc(inode, &BTRFS_I(inode)->io_tree,
 				     start, end, locked_page, 0, 0, 0);
 free_pages_out:
-	for (i = 0; i < nr_pages_ret; i++)
+	for (i = 0; i < nr_pages_ret; i++) {
+		WARN_ON(pages[i]->mapping);
 		page_cache_release(pages[i]);
+	}
 	if (pages)
 		kfree(pages);