Commit b8ae2bfa authored by Li Zetao, committed by David Sterba

btrfs: convert try_release_extent_buffer() to take a folio

The old page API is being gradually replaced and converted to use folio
to improve code readability and avoid repeated conversion between page
and folio.
Signed-off-by: Li Zetao <lizetao1@huawei.com>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent 0145aa38
...@@ -525,7 +525,7 @@ static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags) ...@@ -525,7 +525,7 @@ static bool btree_release_folio(struct folio *folio, gfp_t gfp_flags)
if (folio_test_writeback(folio) || folio_test_dirty(folio)) if (folio_test_writeback(folio) || folio_test_dirty(folio))
return false; return false;
return try_release_extent_buffer(&folio->page); return try_release_extent_buffer(folio);
} }
static void btree_invalidate_folio(struct folio *folio, size_t offset, static void btree_invalidate_folio(struct folio *folio, size_t offset,
......
...@@ -4147,21 +4147,20 @@ static int try_release_subpage_extent_buffer(struct folio *folio) ...@@ -4147,21 +4147,20 @@ static int try_release_subpage_extent_buffer(struct folio *folio)
} }
int try_release_extent_buffer(struct page *page) int try_release_extent_buffer(struct folio *folio)
{ {
struct folio *folio = page_folio(page);
struct extent_buffer *eb; struct extent_buffer *eb;
if (page_to_fs_info(page)->nodesize < PAGE_SIZE) if (folio_to_fs_info(folio)->nodesize < PAGE_SIZE)
return try_release_subpage_extent_buffer(page_folio(page)); return try_release_subpage_extent_buffer(folio);
/* /*
* We need to make sure nobody is changing folio private, as we rely on * We need to make sure nobody is changing folio private, as we rely on
* folio private as the pointer to extent buffer. * folio private as the pointer to extent buffer.
*/ */
spin_lock(&page->mapping->i_private_lock); spin_lock(&folio->mapping->i_private_lock);
if (!folio_test_private(folio)) { if (!folio_test_private(folio)) {
spin_unlock(&page->mapping->i_private_lock); spin_unlock(&folio->mapping->i_private_lock);
return 1; return 1;
} }
...@@ -4176,10 +4175,10 @@ int try_release_extent_buffer(struct page *page) ...@@ -4176,10 +4175,10 @@ int try_release_extent_buffer(struct page *page)
spin_lock(&eb->refs_lock); spin_lock(&eb->refs_lock);
if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) { if (atomic_read(&eb->refs) != 1 || extent_buffer_under_io(eb)) {
spin_unlock(&eb->refs_lock); spin_unlock(&eb->refs_lock);
spin_unlock(&page->mapping->i_private_lock); spin_unlock(&folio->mapping->i_private_lock);
return 0; return 0;
} }
spin_unlock(&page->mapping->i_private_lock); spin_unlock(&folio->mapping->i_private_lock);
/* /*
* If tree ref isn't set then we know the ref on this eb is a real ref, * If tree ref isn't set then we know the ref on this eb is a real ref,
......
...@@ -237,7 +237,7 @@ static inline void extent_changeset_free(struct extent_changeset *changeset) ...@@ -237,7 +237,7 @@ static inline void extent_changeset_free(struct extent_changeset *changeset)
} }
bool try_release_extent_mapping(struct page *page, gfp_t mask); bool try_release_extent_mapping(struct page *page, gfp_t mask);
int try_release_extent_buffer(struct page *page); int try_release_extent_buffer(struct folio *folio);
int btrfs_read_folio(struct file *file, struct folio *folio); int btrfs_read_folio(struct file *file, struct folio *folio);
void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio, void extent_write_locked_range(struct inode *inode, const struct folio *locked_folio,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment