Commit 6a404910 authored by Qu Wenruo, committed by David Sterba

btrfs: subpage: make add_ra_bio_pages() compatible

[BUG]
If we remove the subpage limitation in add_ra_bio_pages() and then read
a compressed extent which has part of its range in the next page, like
the following inode layout:

	0	32K	64K	96K	128K
	|<--------------|-------------->|

Btrfs will trigger the ASSERT() in the endio function:

  assertion failed: atomic_read(&subpage->readers) >= nbits
  ------------[ cut here ]------------
  kernel BUG at fs/btrfs/ctree.h:3431!
  Internal error: Oops - BUG: 0 [#1] SMP
  Workqueue: btrfs-endio btrfs_work_helper [btrfs]
  Call trace:
   assertfail.constprop.0+0x28/0x2c [btrfs]
   btrfs_subpage_end_reader+0x148/0x14c [btrfs]
   end_page_read+0x8c/0x100 [btrfs]
   end_bio_extent_readpage+0x320/0x6b0 [btrfs]
   bio_endio+0x15c/0x1dc
   end_workqueue_fn+0x44/0x64 [btrfs]
   btrfs_work_helper+0x74/0x250 [btrfs]
   process_one_work+0x1d4/0x47c
   worker_thread+0x180/0x400
   kthread+0x11c/0x120
   ret_from_fork+0x10/0x30
  ---[ end trace c8b7b552d3bb408c ]---

[CAUSE]
When we read the page range [0, 64K), we find it's a compressed extent,
and we will try to add extra pages in add_ra_bio_pages() to avoid
reading the same compressed extent again and again.

But when we add such a page into the read bio, it doesn't follow the
behavior of btrfs_do_readpage() and properly set subpage::readers.

This means that for page [64K, 128K), its subpage::readers is still 0.

And when endio is executed on both pages, since page [64K, 128K) has 0
subpage::readers, it triggers the above ASSERT().
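
For reference, below is a simplified sketch of the reader accounting
that the failing ASSERT() protects, loosely based on the
btrfs_subpage_{start,end}_reader() helpers in fs/btrfs/subpage.c of
this era (locking, bitmap handling and error paths omitted):

  /* Sketch only: one reader is tracked per sector in [start, start + len) */
  static void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
					 struct page *page, u64 start, u32 len)
  {
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	atomic_add(nbits, &subpage->readers);
  }

  static void btrfs_subpage_end_reader(const struct btrfs_fs_info *fs_info,
				       struct page *page, u64 start, u32 len)
  {
	struct btrfs_subpage *subpage = (struct btrfs_subpage *)page->private;
	const int nbits = len >> fs_info->sectorsize_bits;

	/*
	 * The assertion from the crash above: endio must never drop more
	 * readers than were started.  Pages added by the unfixed
	 * add_ra_bio_pages() still have readers == 0 at this point.
	 */
	ASSERT(atomic_read(&subpage->readers) >= nbits);
	if (atomic_sub_and_test(nbits, &subpage->readers))
		unlock_page(page);
  }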

[FIX]
The function add_ra_bio_pages() is far from subpage compatible; it
always assumes PAGE_SIZE == sectorsize, thus when it skips to the next
range it always just skips PAGE_SIZE bytes.

Make it subpage compatible by:

- Skip to the next page properly when needed
  If we find a page already present in the page cache, we need to skip
  to the next page. In that case we shouldn't just skip PAGE_SIZE bytes,
  but use @pg_index to calculate the next page-aligned bytenr and
  continue.

- Only add the page range covered by the current extent map
  We need to calculate which range is covered by the current extent map
  and only add that part into the read bio (see the worked sketch after
  this list).

- Update subpage::readers before submitting the bio

- Use a proper cursor (@cur) instead of the confusing @last_offset

- Calculate the missed threshold based on sector size
  It no longer counts missed pages, as for a 64K page size we have at
  most 3 pages to skip (only 2 if the range is aligned).

- Add ASSERT() to make sure our bytenr is always aligned

- Add a comment for the function
  Add a special note for the subpage case, as the function won't really
  work well for subpage.
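
To make the new cursor math concrete, here is a small standalone
illustration (a compile-and-run sketch, not the patched function; the
constants mimic a machine with 64K pages and reuse the 96K compressed
extent from the layout above):

  #include <stdio.h>

  #define PAGE_SHIFT	16			/* 64K pages */
  #define PAGE_SIZE	(1UL << PAGE_SHIFT)
  #define SZ_1K		1024UL

  int main(void)
  {
	unsigned long em_start = 0;		/* extent map start */
	unsigned long em_len = 96 * SZ_1K;	/* extent map length */
	unsigned long cur = 64 * SZ_1K;		/* bio_end_offset(cb->orig_bio) */
	unsigned long pg_index = cur >> PAGE_SHIFT;
	unsigned long page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
	unsigned long em_end = em_start + em_len;

	/* Only the part of the page covered by the extent map is added */
	unsigned long add_size = (em_end < page_end + 1 ?
				  em_end : page_end + 1) - cur;

	/* Prints "add 32768 bytes at in-page offset 0", i.e. only [64K, 96K) */
	printf("add %lu bytes at in-page offset %lu\n",
	       add_size, cur & (PAGE_SIZE - 1));
	return 0;
  }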

Signed-off-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 58469174
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -28,6 +28,7 @@
 #include "compression.h"
 #include "extent_io.h"
 #include "extent_map.h"
+#include "subpage.h"
 #include "zoned.h"
 
 static const char* const btrfs_compress_types[] = { "", "zlib", "lzo", "zstd" };
@@ -541,13 +542,24 @@ static u64 bio_end_offset(struct bio *bio)
 	return page_offset(last->bv_page) + last->bv_len + last->bv_offset;
 }
 
+/*
+ * Add extra pages in the same compressed file extent so that we don't need to
+ * re-read the same extent again and again.
+ *
+ * NOTE: this won't work well for subpage, as for subpage read, we lock the
+ * full page then submit bio for each compressed/regular extent.
+ *
+ * This means, if we have several sectors in the same page pointing to the
+ * same on-disk compressed data, we will re-read the same extent many times
+ * and this function can only help for the next page.
+ */
 static noinline int add_ra_bio_pages(struct inode *inode,
 				     u64 compressed_end,
 				     struct compressed_bio *cb)
 {
+	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	unsigned long end_index;
-	unsigned long pg_index;
-	u64 last_offset;
+	u64 cur = bio_end_offset(cb->orig_bio);
 	u64 isize = i_size_read(inode);
 	int ret;
 	struct page *page;
@@ -555,10 +567,8 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	struct address_space *mapping = inode->i_mapping;
 	struct extent_map_tree *em_tree;
 	struct extent_io_tree *tree;
-	u64 end;
-	int misses = 0;
+	int sectors_missed = 0;
 
-	last_offset = bio_end_offset(cb->orig_bio);
 	em_tree = &BTRFS_I(inode)->extent_tree;
 	tree = &BTRFS_I(inode)->io_tree;
 
@@ -577,18 +587,29 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;
 
-	while (last_offset < compressed_end) {
-		pg_index = last_offset >> PAGE_SHIFT;
+	while (cur < compressed_end) {
+		u64 page_end;
+		u64 pg_index = cur >> PAGE_SHIFT;
+		u32 add_size;
 
 		if (pg_index > end_index)
 			break;
 
 		page = xa_load(&mapping->i_pages, pg_index);
 		if (page && !xa_is_value(page)) {
-			misses++;
-			if (misses > 4)
+			sectors_missed += (PAGE_SIZE - offset_in_page(cur)) >>
+					  fs_info->sectorsize_bits;
+
+			/* Beyond threshold, no need to continue */
+			if (sectors_missed > 4)
 				break;
-			goto next;
+
+			/*
+			 * Jump to next page start as we already have a page
+			 * for the current offset.
+			 */
+			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+			continue;
 		}
 
 		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
@@ -598,14 +619,11 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
 			put_page(page);
-			goto next;
+			/* There is already a page, skip to page end */
+			cur = (pg_index << PAGE_SHIFT) + PAGE_SIZE;
+			continue;
 		}
 
-		/*
-		 * at this point, we have a locked page in the page cache
-		 * for these bytes in the file.  But, we have to make
-		 * sure they map to this compressed extent on disk.
-		 */
 		ret = set_page_extent_mapped(page);
 		if (ret < 0) {
 			unlock_page(page);
@@ -613,18 +631,22 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			break;
 		}
 
-		end = last_offset + PAGE_SIZE - 1;
-		lock_extent(tree, last_offset, end);
+		page_end = (pg_index << PAGE_SHIFT) + PAGE_SIZE - 1;
+		lock_extent(tree, cur, page_end);
 		read_lock(&em_tree->lock);
-		em = lookup_extent_mapping(em_tree, last_offset,
-					   PAGE_SIZE);
+		em = lookup_extent_mapping(em_tree, cur, page_end + 1 - cur);
 		read_unlock(&em_tree->lock);
 
-		if (!em || last_offset < em->start ||
-		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
+		/*
+		 * At this point, we have a locked page in the page cache for
+		 * these bytes in the file.  But, we have to make sure they map
+		 * to this compressed extent on disk.
+		 */
+		if (!em || cur < em->start ||
+		    (cur + fs_info->sectorsize > extent_map_end(em)) ||
 		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
 			free_extent_map(em);
-			unlock_extent(tree, last_offset, end);
+			unlock_extent(tree, cur, page_end);
 			unlock_page(page);
 			put_page(page);
 			break;
@@ -642,19 +664,23 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 			}
 		}
 
-		ret = bio_add_page(cb->orig_bio, page,
-				   PAGE_SIZE, 0);
-
-		if (ret == PAGE_SIZE) {
-			put_page(page);
-		} else {
-			unlock_extent(tree, last_offset, end);
+		add_size = min(em->start + em->len, page_end + 1) - cur;
+		ret = bio_add_page(cb->orig_bio, page, add_size, offset_in_page(cur));
+		if (ret != add_size) {
+			unlock_extent(tree, cur, page_end);
 			unlock_page(page);
 			put_page(page);
 			break;
 		}
-next:
-		last_offset += PAGE_SIZE;
+		/*
+		 * If it's subpage, we also need to increase its
+		 * subpage::readers number, as at endio we will decrease
+		 * subpage::readers and then unlock the page.
+		 */
+		if (fs_info->sectorsize < PAGE_SIZE)
+			btrfs_subpage_start_reader(fs_info, page, cur, add_size);
+		put_page(page);
+		cur += add_size;
 	}
 	return 0;
 }
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -3590,6 +3590,7 @@ int btrfs_do_readpage(struct page *page, struct extent_map **em_cached,
 		bool force_bio_submit = false;
 		u64 disk_bytenr;
 
+		ASSERT(IS_ALIGNED(cur, fs_info->sectorsize));
 		if (cur >= last_byte) {
 			struct extent_state *cached = NULL;
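
For completeness, the endio side that consumes the readers started
above: a lightly trimmed sketch of end_page_read() as it looks in
fs/btrfs/extent_io.c around this commit (error accounting and metadata
details simplified):

  static void end_page_read(struct page *page, bool uptodate, u64 start, u32 len)
  {
	struct btrfs_fs_info *fs_info = btrfs_sb(page->mapping->host->i_sb);

	ASSERT(page_offset(page) <= start &&
	       start + len <= page_offset(page) + PAGE_SIZE);

	if (uptodate)
		btrfs_page_set_uptodate(fs_info, page, start, len);
	else
		btrfs_page_clear_uptodate(fs_info, page, start, len);

	if (fs_info->sectorsize == PAGE_SIZE)
		unlock_page(page);
	else
		btrfs_subpage_end_reader(fs_info, page, start, len);
  }

With the fix, every range that add_ra_bio_pages() adds to the bio has a
matching subpage::readers increment, so this decrement path balances out
and the page is unlocked exactly once.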