Commit 49bd2bf9 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

vmscan: convert dirty page handling to folios

Mostly this just eliminates calls to compound_head(), but
NR_VMSCAN_IMMEDIATE was being incremented by 1 instead of by nr_pages.

Link: https://lkml.kernel.org/r/20220504182857.4013401-10-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 09c02e56
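For context, a minimal sketch (not taken from the commit itself) of the two points the message above makes. The pairing of the dirty and reclaim checks here is simplified relative to shrink_page_list(), but the helpers are the real page/folio APIs that appear in the diff below: the page-flag calls resolve compound_head() on their argument, and the old stat update added 1 even when the page belonged to a large folio, whereas the folio calls operate on the folio directly and account its full page count via folio_nr_pages().

	/* Old pattern: each Page*() test/set goes through compound_head(page),
	 * and a large folio bumps NR_VMSCAN_IMMEDIATE by only 1. */
	if (PageDirty(page) && PageReclaim(page)) {
		inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
		SetPageReclaim(page);
	}

	/* New pattern: the folio is already the head, so there are no repeated
	 * compound_head() lookups, and the counter moves by the folio's
	 * actual size in pages. */
	if (folio_test_dirty(folio) && folio_test_reclaim(folio)) {
		node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
				    folio_nr_pages(folio));
		folio_set_reclaim(folio);
	}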
mm/vmscan.c
@@ -1787,28 +1787,31 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			}
 		}
 
-		if (PageDirty(page)) {
+		if (folio_test_dirty(folio)) {
 			/*
-			 * Only kswapd can writeback filesystem pages
+			 * Only kswapd can writeback filesystem folios
 			 * to avoid risk of stack overflow. But avoid
-			 * injecting inefficient single-page IO into
+			 * injecting inefficient single-folio I/O into
 			 * flusher writeback as much as possible: only
-			 * write pages when we've encountered many
-			 * dirty pages, and when we've already scanned
-			 * the rest of the LRU for clean pages and see
-			 * the same dirty pages again (PageReclaim).
-			 */
-			if (page_is_file_lru(page) &&
-			    (!current_is_kswapd() || !PageReclaim(page) ||
+			 * write folios when we've encountered many
+			 * dirty folios, and when we've already scanned
+			 * the rest of the LRU for clean folios and see
+			 * the same dirty folios again (with the reclaim
+			 * flag set).
+			 */
+			if (folio_is_file_lru(folio) &&
+			    (!current_is_kswapd() ||
+			     !folio_test_reclaim(folio) ||
 			     !test_bit(PGDAT_DIRTY, &pgdat->flags))) {
 				/*
 				 * Immediately reclaim when written back.
-				 * Similar in principal to deactivate_page()
-				 * except we already have the page isolated
+				 * Similar in principle to deactivate_page()
+				 * except we already have the folio isolated
 				 * and know it's dirty
 				 */
-				inc_node_page_state(page, NR_VMSCAN_IMMEDIATE);
-				SetPageReclaim(page);
+				node_stat_mod_folio(folio, NR_VMSCAN_IMMEDIATE,
+						nr_pages);
+				folio_set_reclaim(folio);
 
 				goto activate_locked;
 			}
@@ -1821,8 +1824,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 				goto keep_locked;
 
 			/*
-			 * Page is dirty. Flush the TLB if a writable entry
-			 * potentially exists to avoid CPU writes after IO
+			 * Folio is dirty. Flush the TLB if a writable entry
+			 * potentially exists to avoid CPU writes after I/O
 			 * starts and then write it out here.
 			 */
 			try_to_unmap_flush_dirty();
@@ -1834,23 +1837,24 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			case PAGE_SUCCESS:
 				stat->nr_pageout += nr_pages;
 
-				if (PageWriteback(page))
+				if (folio_test_writeback(folio))
 					goto keep;
-				if (PageDirty(page))
+				if (folio_test_dirty(folio))
 					goto keep;
 
 				/*
 				 * A synchronous write - probably a ramdisk. Go
-				 * ahead and try to reclaim the page.
+				 * ahead and try to reclaim the folio.
 				 */
-				if (!trylock_page(page))
+				if (!folio_trylock(folio))
 					goto keep;
-				if (PageDirty(page) || PageWriteback(page))
+				if (folio_test_dirty(folio) ||
+				    folio_test_writeback(folio))
 					goto keep_locked;
-				mapping = page_mapping(page);
+				mapping = folio_mapping(folio);
 				fallthrough;
 			case PAGE_CLEAN:
-				; /* try to free the page below */
+				; /* try to free the folio below */
 			}
 		}