Commit 689f9346 authored by Yan's avatar Yan Committed by Chris Mason

Fix inline extent handling in btrfs_get_extent

1.  Reorder kmap and the test for 'page != NULL'
2.  Zero-fill rest area of a block when inline extent isn't big enough.
3.  Do not insert extent_map into the map tree when page == NULL.
(If the extent_map is inserted into the map tree, subsequent read requests
will find it in the map tree directly and the corresponding inline
extent data aren't copied into the page by the get_extent function.
extent_read_full_page can't handle that case.)
Signed-off-by: default avatarChris Mason <chris.mason@oracle.com>
parent c67cda17
...@@ -1596,8 +1596,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, ...@@ -1596,8 +1596,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf, size = btrfs_file_extent_inline_len(leaf, btrfs_item_nr(leaf,
path->slots[0])); path->slots[0]));
extent_end = (extent_start + size - 1) |
extent_end = (extent_start + size) |
((u64)root->sectorsize - 1); ((u64)root->sectorsize - 1);
if (start < extent_start || start >= extent_end) { if (start < extent_start || start >= extent_end) {
em->start = start; em->start = start;
...@@ -1610,29 +1609,32 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, ...@@ -1610,29 +1609,32 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page,
} }
goto not_found_em; goto not_found_em;
} }
em->block_start = EXTENT_MAP_INLINE;
em->block_end = EXTENT_MAP_INLINE;
if (!page) {
em->start = extent_start;
em->end = extent_start + size - 1;
goto out;
}
extent_offset = (page->index << PAGE_CACHE_SHIFT) - extent_offset = (page->index << PAGE_CACHE_SHIFT) -
extent_start; extent_start + page_offset;
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
map = kmap(page);
copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset, copy_size = min_t(u64, PAGE_CACHE_SIZE - page_offset,
size - extent_offset); size - extent_offset);
em->block_start = EXTENT_MAP_INLINE;
em->block_end = EXTENT_MAP_INLINE;
em->start = extent_start + extent_offset; em->start = extent_start + extent_offset;
em->end = (em->start + copy_size -1) | em->end = (em->start + copy_size -1) |
((u64)root->sectorsize -1); ((u64)root->sectorsize -1);
map = kmap(page);
ptr = btrfs_file_extent_inline_start(item) + extent_offset;
read_extent_buffer(leaf, map + page_offset, ptr, copy_size);
if (!page) { if (em->start + copy_size <= em->end) {
goto insert; size = min_t(u64, em->end + 1 - em->start,
PAGE_CACHE_SIZE - page_offset) - copy_size;
memset(map + page_offset + copy_size, 0, size);
} }
read_extent_buffer(leaf, map + page_offset, ptr, copy_size);
/*
memset(map + page_offset + copy_size, 0,
PAGE_CACHE_SIZE - copy_size - page_offset);
*/
flush_dcache_page(page); flush_dcache_page(page);
kunmap(page); kunmap(page);
set_extent_uptodate(em_tree, em->start, em->end, GFP_NOFS); set_extent_uptodate(em_tree, em->start, em->end, GFP_NOFS);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.