Commit ff5710c3 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

nilfs2: convert nilfs_segctor_prepare_write to use folios

Use the new folio APIs, saving 17 hidden calls to compound_head().

Link: https://lkml.kernel.org/r/20231114084436.2755-11-konishi.ryusuke@gmail.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6609e235
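
To illustrate the saving mentioned in the commit message: each page-flag helper such as PageWriteback() must first resolve a possibly-tail page to its head page via compound_head(), whereas the folio helpers operate on the folio directly, since a folio is never a tail page. The sketch below is a simplified illustration of that distinction, not the actual kernel macro definitions; the sketch_* names are hypothetical.

static inline bool sketch_PageWriteback(struct page *page)
{
	/* hidden cost: resolve a possible tail page to its head page first */
	return test_bit(PG_writeback, &compound_head(page)->flags);
}

static inline bool sketch_folio_test_writeback(struct folio *folio)
{
	/* a folio always refers to a head page, so no extra lookup is needed */
	return test_bit(PG_writeback, &folio->flags);
}

nilfs_segctor_prepare_write() performs many such flag operations while walking the buffer lists, which is where the 17 avoided compound_head() calls come from.
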
@@ -1665,39 +1665,39 @@ static int nilfs_segctor_assign(struct nilfs_sc_info *sci, int mode)
 	return 0;
 }
 
-static void nilfs_begin_page_io(struct page *page)
+static void nilfs_begin_folio_io(struct folio *folio)
 {
-	if (!page || PageWriteback(page))
+	if (!folio || folio_test_writeback(folio))
 		/*
 		 * For split b-tree node pages, this function may be called
 		 * twice.  We ignore the 2nd or later calls by this check.
 		 */
 		return;
 
-	lock_page(page);
-	clear_page_dirty_for_io(page);
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_lock(folio);
+	folio_clear_dirty_for_io(folio);
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 }
 
 static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 {
 	struct nilfs_segment_buffer *segbuf;
-	struct page *bd_page = NULL, *fs_page = NULL;
+	struct folio *bd_folio = NULL, *fs_folio = NULL;
 
 	list_for_each_entry(segbuf, &sci->sc_segbufs, sb_list) {
 		struct buffer_head *bh;
 
 		list_for_each_entry(bh, &segbuf->sb_segsum_buffers,
 				    b_assoc_buffers) {
-			if (bh->b_page != bd_page) {
-				if (bd_page) {
-					lock_page(bd_page);
-					clear_page_dirty_for_io(bd_page);
-					set_page_writeback(bd_page);
-					unlock_page(bd_page);
+			if (bh->b_folio != bd_folio) {
+				if (bd_folio) {
+					folio_lock(bd_folio);
+					folio_clear_dirty_for_io(bd_folio);
+					folio_start_writeback(bd_folio);
+					folio_unlock(bd_folio);
 				}
-				bd_page = bh->b_page;
+				bd_folio = bh->b_folio;
 			}
 		}
 
@@ -1705,28 +1705,28 @@ static void nilfs_segctor_prepare_write(struct nilfs_sc_info *sci)
 				    b_assoc_buffers) {
 			set_buffer_async_write(bh);
 			if (bh == segbuf->sb_super_root) {
-				if (bh->b_page != bd_page) {
-					lock_page(bd_page);
-					clear_page_dirty_for_io(bd_page);
-					set_page_writeback(bd_page);
-					unlock_page(bd_page);
-					bd_page = bh->b_page;
+				if (bh->b_folio != bd_folio) {
+					folio_lock(bd_folio);
+					folio_clear_dirty_for_io(bd_folio);
+					folio_start_writeback(bd_folio);
+					folio_unlock(bd_folio);
+					bd_folio = bh->b_folio;
 				}
 				break;
 			}
-			if (bh->b_page != fs_page) {
-				nilfs_begin_page_io(fs_page);
-				fs_page = bh->b_page;
+			if (bh->b_folio != fs_folio) {
+				nilfs_begin_folio_io(fs_folio);
+				fs_folio = bh->b_folio;
 			}
 		}
 	}
 
-	if (bd_page) {
-		lock_page(bd_page);
-		clear_page_dirty_for_io(bd_page);
-		set_page_writeback(bd_page);
-		unlock_page(bd_page);
+	if (bd_folio) {
+		folio_lock(bd_folio);
+		folio_clear_dirty_for_io(bd_folio);
+		folio_start_writeback(bd_folio);
+		folio_unlock(bd_folio);
 	}
 
-	nilfs_begin_page_io(fs_page);
+	nilfs_begin_folio_io(fs_folio);
 }
 
 static int nilfs_segctor_write(struct nilfs_sc_info *sci,