Commit 13df3775 authored by Qu Wenruo, committed by David Sterba

btrfs: cleanup metadata page pointer usage

Although we have migrated extent_buffer::pages[] to folios[], we're
still mostly using the folio_page() helper to grab the page.

This patch does the following cleanups for metadata:

- Introduce num_extent_folios() helper
  This is to replace most num_extent_pages() callers.

- Use num_extent_folios() to iterate future large folios
  This allows us to use helpers like bio_add_folio()/bio_add_folio_nofail()
  and to set the needed flags only once per folio instead of once per page,
  which reduces the loop to a single iteration for large folios (see the
  illustrative sketch after the commit metadata below).

- Change metadata related functions to take folio pointers
  This includes renaming the functions themselves, involving:
  * attach_extent_buffer_page()
  * detach_extent_buffer_page()
  * page_range_has_eb()
  * btrfs_release_extent_buffer_pages()
  * btree_clear_page_dirty()
  * btrfs_page_inc_eb_refs()
  * btrfs_page_dec_eb_refs()

- Change btrfs_is_subpage() to accept an address_space pointer
  This allows both page->mapping and folio->mapping to be passed in, since
  the data path still uses the old per-page code and may keep doing so for
  a while.

- Add a placeholder for a future corner case: an order mismatch between the
  extent buffer and the inode's filemap
  For now it's just a block of comments and a dead ASSERT(), with no real
  handling yet.

The subpage code still works on pages, simply because subpage and large
folios are mutually exclusive conditions, so there is no need to bother the
subpage code with higher order folios at all; folio_page(folio, 0) is
enough.
Signed-off-by: Qu Wenruo <wqu@suse.com>
Reviewed-by: David Sterba <dsterba@suse.com>
[ minor styling tweaks ]
Signed-off-by: David Sterba <dsterba@suse.com>
parent 082d5bb9
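
As a rough illustration of the iteration pattern described above (this is not
code from the patch; example_add_eb_to_bio() is a made-up name, error handling
is omitted, and eb->start is assumed to be block aligned), a submission loop
over an extent buffer's folios could look like this:

static void example_add_eb_to_bio(struct bio *bio, struct extent_buffer *eb)
{
        int num_folios = num_extent_folios(eb);
        int i;

        for (i = 0; i < num_folios; i++) {
                struct folio *folio = eb->folios[i];
                /*
                 * A single (possibly large) folio covers the whole eb, so one
                 * call adds eb->len bytes; otherwise each folio is one page.
                 */
                size_t len = (num_folios == 1) ? eb->len : PAGE_SIZE;

                bio_add_folio_nofail(bio, folio, len,
                                     offset_in_folio(folio, eb->start + i * PAGE_SIZE));
        }
}

With a large folio, num_folios is 1, so the bio vector and any needed flags
are set up once for the whole buffer instead of once per page.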
@@ -97,6 +97,12 @@ static void csum_tree_block(struct extent_buffer *buf, u8 *result)
         crypto_shash_update(shash, kaddr + BTRFS_CSUM_SIZE,
                             first_page_part - BTRFS_CSUM_SIZE);
 
+        /*
+         * Multiple single-page folios case would reach here.
+         *
+         * nodesize <= PAGE_SIZE and large folio all handled by above
+         * crypto_shash_update() already.
+         */
         for (i = 1; i < num_pages && INLINE_EXTENT_BUFFER_PAGES > 1; i++) {
                 kaddr = folio_address(buf->folios[i]);
                 crypto_shash_update(shash, kaddr, PAGE_SIZE);
@@ -243,6 +243,20 @@ static inline int num_extent_pages(const struct extent_buffer *eb)
         return (eb->len >> PAGE_SHIFT) ?: 1;
 }
 
+/*
+ * This can only be determined at runtime by checking eb::folios[0].
+ *
+ * As we can have either one large folio covering the whole eb
+ * (either nodesize <= PAGE_SIZE, or high order folio), or multiple
+ * single-paged folios.
+ */
+static inline int num_extent_folios(const struct extent_buffer *eb)
+{
+        if (folio_order(eb->folios[0]))
+                return 1;
+        return num_extent_pages(eb);
+}
+
 static inline int extent_buffer_uptodate(const struct extent_buffer *eb)
 {
         return test_bit(EXTENT_BUFFER_UPTODATE, &eb->bflags);
@@ -7867,7 +7867,7 @@ static void wait_subpage_spinlock(struct page *page)
         struct folio *folio = page_folio(page);
         struct btrfs_subpage *subpage;
 
-        if (!btrfs_is_subpage(fs_info, page))
+        if (!btrfs_is_subpage(fs_info, page->mapping))
                 return;
 
         ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -64,7 +64,7 @@
  * This means a slightly higher tree locking latency.
  */
 
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
+bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping)
 {
         if (fs_info->sectorsize >= PAGE_SIZE)
                 return false;
@@ -74,8 +74,7 @@ bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page)
          * mapping. And if page->mapping->host is data inode, it's subpage.
          * As we have ruled our sectorsize >= PAGE_SIZE case already.
          */
-        if (!page->mapping || !page->mapping->host ||
-            is_data_inode(page->mapping->host))
+        if (!mapping || !mapping->host || is_data_inode(mapping->host))
                 return true;
 
         /*
@@ -129,7 +128,7 @@ int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
         ASSERT(PageLocked(page));
 
         /* Either not subpage, or the folio already has private attached. */
-        if (!btrfs_is_subpage(fs_info, page) || folio_test_private(folio))
+        if (!btrfs_is_subpage(fs_info, page->mapping) || folio_test_private(folio))
                 return 0;
 
         subpage = btrfs_alloc_subpage(fs_info, type);
@@ -147,7 +146,7 @@ void btrfs_detach_subpage(const struct btrfs_fs_info *fs_info,
         struct btrfs_subpage *subpage;
 
         /* Either not subpage, or the folio already has private attached. */
-        if (!btrfs_is_subpage(fs_info, page) || !folio_test_private(folio))
+        if (!btrfs_is_subpage(fs_info, page->mapping) || !folio_test_private(folio))
                 return;
 
         subpage = folio_detach_private(folio);
@@ -193,33 +192,29 @@ void btrfs_free_subpage(struct btrfs_subpage *subpage)
  * detach_extent_buffer_page() won't detach the folio private while we're still
  * allocating the extent buffer.
  */
-void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
-                            struct page *page)
+void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
 {
-        struct folio *folio = page_folio(page);
         struct btrfs_subpage *subpage;
 
-        if (!btrfs_is_subpage(fs_info, page))
+        if (!btrfs_is_subpage(fs_info, folio->mapping))
                 return;
 
-        ASSERT(folio_test_private(folio) && page->mapping);
-        lockdep_assert_held(&page->mapping->private_lock);
+        ASSERT(folio_test_private(folio) && folio->mapping);
+        lockdep_assert_held(&folio->mapping->private_lock);
 
         subpage = folio_get_private(folio);
         atomic_inc(&subpage->eb_refs);
 }
 
-void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
-                            struct page *page)
+void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio)
 {
-        struct folio *folio = page_folio(page);
         struct btrfs_subpage *subpage;
 
-        if (!btrfs_is_subpage(fs_info, page))
+        if (!btrfs_is_subpage(fs_info, folio->mapping))
                 return;
 
-        ASSERT(folio_test_private(folio) && page->mapping);
-        lockdep_assert_held(&page->mapping->private_lock);
+        ASSERT(folio_test_private(folio) && folio->mapping);
+        lockdep_assert_held(&folio->mapping->private_lock);
 
         subpage = folio_get_private(folio);
         ASSERT(atomic_read(&subpage->eb_refs));
@@ -352,7 +347,7 @@ int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
 {
         struct folio *folio = page_folio(page);
 
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) {
+        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page->mapping)) {
                 lock_page(page);
                 return 0;
         }
@@ -369,7 +364,7 @@ int btrfs_page_start_writer_lock(const struct btrfs_fs_info *fs_info,
 void btrfs_page_end_writer_lock(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len)
 {
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page))
+        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page->mapping))
                 return unlock_page(page);
         btrfs_subpage_clamp_range(page, &start, &len);
         if (btrfs_subpage_end_and_test_writer(fs_info, page, start, len))
@@ -612,7 +607,8 @@ IMPLEMENT_BTRFS_SUBPAGE_TEST_OP(checked);
 void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
                            struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) { \
                 set_page_func(page); \
                 return; \
         } \
@@ -621,7 +617,8 @@ void btrfs_page_set_##name(const struct btrfs_fs_info *fs_info, \
 void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
                              struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) { \
                 clear_page_func(page); \
                 return; \
         } \
@@ -630,14 +627,16 @@ void btrfs_page_clear_##name(const struct btrfs_fs_info *fs_info, \
 bool btrfs_page_test_##name(const struct btrfs_fs_info *fs_info, \
                             struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) \
                 return test_page_func(page); \
         return btrfs_subpage_test_##name(fs_info, page, start, len); \
 } \
 void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
                                  struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) { \
                 set_page_func(page); \
                 return; \
         } \
@@ -647,7 +646,8 @@ void btrfs_page_clamp_set_##name(const struct btrfs_fs_info *fs_info, \
 void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
                                    struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) { \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) { \
                 clear_page_func(page); \
                 return; \
         } \
@@ -657,7 +657,8 @@ void btrfs_page_clamp_clear_##name(const struct btrfs_fs_info *fs_info, \
 bool btrfs_page_clamp_test_##name(const struct btrfs_fs_info *fs_info, \
                                   struct page *page, u64 start, u32 len) \
 { \
-        if (unlikely(!fs_info) || !btrfs_is_subpage(fs_info, page)) \
+        if (unlikely(!fs_info) || \
+            !btrfs_is_subpage(fs_info, page->mapping)) \
                 return test_page_func(page); \
         btrfs_subpage_clamp_range(page, &start, &len); \
         return btrfs_subpage_test_##name(fs_info, page, start, len); \
@@ -686,7 +687,7 @@ void btrfs_page_assert_not_dirty(const struct btrfs_fs_info *fs_info,
                 return;
 
         ASSERT(!PageDirty(page));
-        if (!btrfs_is_subpage(fs_info, page))
+        if (!btrfs_is_subpage(fs_info, page->mapping))
                 return;
 
         ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -716,7 +717,7 @@ void btrfs_page_unlock_writer(struct btrfs_fs_info *fs_info, struct page *page,
         ASSERT(PageLocked(page));
 
         /* For non-subpage case, we just unlock the page */
-        if (!btrfs_is_subpage(fs_info, page))
+        if (!btrfs_is_subpage(fs_info, page->mapping))
                 return unlock_page(page);
 
         ASSERT(folio_test_private(folio) && folio_get_private(folio));
@@ -73,7 +73,7 @@ enum btrfs_subpage_type {
         BTRFS_SUBPAGE_DATA,
 };
 
-bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct page *page);
+bool btrfs_is_subpage(const struct btrfs_fs_info *fs_info, struct address_space *mapping);
 
 void btrfs_init_subpage_info(struct btrfs_subpage_info *subpage_info, u32 sectorsize);
 int btrfs_attach_subpage(const struct btrfs_fs_info *fs_info,
@@ -86,10 +86,8 @@ struct btrfs_subpage *btrfs_alloc_subpage(const struct btrfs_fs_info *fs_info,
                           enum btrfs_subpage_type type);
 void btrfs_free_subpage(struct btrfs_subpage *subpage);
 
-void btrfs_page_inc_eb_refs(const struct btrfs_fs_info *fs_info,
-                            struct page *page);
-void btrfs_page_dec_eb_refs(const struct btrfs_fs_info *fs_info,
-                            struct page *page);
+void btrfs_folio_inc_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
+void btrfs_folio_dec_eb_refs(const struct btrfs_fs_info *fs_info, struct folio *folio);
 
 void btrfs_subpage_start_reader(const struct btrfs_fs_info *fs_info,
                                 struct page *page, u64 start, u32 len);
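
As a usage illustration only (not part of this patch), the renamed eb_refs
helpers take the folio directly and derive the mapping from it; callers are
expected to hold the mapping's private_lock, matching the lockdep assertion
in the functions above. The surrounding context here is assumed:

        /* "folio" is assumed to belong to a metadata (btree) inode. */
        spin_lock(&folio->mapping->private_lock);
        btrfs_folio_inc_eb_refs(fs_info, folio);
        spin_unlock(&folio->mapping->private_lock);

        /* ... and the matching drop once the eb releases this folio: */
        spin_lock(&folio->mapping->private_lock);
        btrfs_folio_dec_eb_refs(fs_info, folio);
        spin_unlock(&folio->mapping->private_lock);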