Commit b5612c36 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: return void from folio_start_writeback() and related functions

Nobody now checks the return value from any of these functions, so
add an assertion at the beginning of the function and return void.

Link: https://lkml.kernel.org/r/20231108204605.745109-5-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Steve French <sfrench@samba.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent a9540e35
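
For context on the caller-visible change before reading the diff: the old API returned the folio's prior writeback state (true if writeback was already set), while the new API asserts that state cannot occur and returns nothing, so remaining callers invoke it purely for its side effects. A minimal hypothetical sketch of a post-commit call site (illustrative only; example_writeout() is a made-up helper, though folio_clear_dirty_for_io() and folio_end_writeback() are real kernel APIs):

	/* Hypothetical caller sketch, not part of this commit. */
	static void example_writeout(struct folio *folio)
	{
		/* Caller holds the folio lock here. */
		if (folio_clear_dirty_for_io(folio))
			folio_start_writeback(folio);	/* no return value to check */
		/* ... submit the I/O; completion calls folio_end_writeback() ... */
	}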
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -772,8 +772,8 @@ static __always_inline void SetPageUptodate(struct page *page)
 CLEARPAGEFLAG(Uptodate, uptodate, PF_NO_TAIL)
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write);
-bool set_page_writeback(struct page *page);
+void __folio_start_writeback(struct folio *folio, bool keep_write);
+void set_page_writeback(struct page *page);
 
 #define folio_start_writeback(folio) \
 	__folio_start_writeback(folio, false)
--- a/mm/folio-compat.c
+++ b/mm/folio-compat.c
@@ -46,9 +46,9 @@ void mark_page_accessed(struct page *page)
 }
 EXPORT_SYMBOL(mark_page_accessed);
 
-bool set_page_writeback(struct page *page)
+void set_page_writeback(struct page *page)
 {
-	return folio_start_writeback(page_folio(page));
+	folio_start_writeback(page_folio(page));
 }
 EXPORT_SYMBOL(set_page_writeback);
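
The folio-compat wrapper above keeps the legacy entry point working: a caller that still holds a struct page reaches the folio path through page_folio(). A hypothetical legacy call site (illustrative only, not from this commit):

	struct page *page = ...;	/* older code still passing pages around */
	set_page_writeback(page);	/* now folio_start_writeback(page_folio(page)) */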
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2982,28 +2982,27 @@ bool __folio_end_writeback(struct folio *folio)
 	return ret;
 }
 
-bool __folio_start_writeback(struct folio *folio, bool keep_write)
+void __folio_start_writeback(struct folio *folio, bool keep_write)
 {
 	long nr = folio_nr_pages(folio);
 	struct address_space *mapping = folio_mapping(folio);
-	bool ret;
 	int access_ret;
 
+	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
+
 	folio_memcg_lock(folio);
 	if (mapping && mapping_use_writeback_tags(mapping)) {
 		XA_STATE(xas, &mapping->i_pages, folio_index(folio));
 		struct inode *inode = mapping->host;
 		struct backing_dev_info *bdi = inode_to_bdi(inode);
 		unsigned long flags;
+		bool on_wblist;
 
 		xas_lock_irqsave(&xas, flags);
 		xas_load(&xas);
-		ret = folio_test_set_writeback(folio);
-		if (!ret) {
-			bool on_wblist;
+		folio_test_set_writeback(folio);
 
-			on_wblist = mapping_tagged(mapping,
-						   PAGECACHE_TAG_WRITEBACK);
+		on_wblist = mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
 
 		xas_set_mark(&xas, PAGECACHE_TAG_WRITEBACK);
 		if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
@@ -3015,34 +3014,31 @@ bool __folio_start_writeback(struct folio *folio, bool keep_write)
 		}
 
 		/*
-		 * We can come through here when swapping
-		 * anonymous folios, so we don't necessarily
-		 * have an inode to track for sync.
+		 * We can come through here when swapping anonymous
+		 * folios, so we don't necessarily have an inode to
+		 * track for sync.
 		 */
 		if (mapping->host && !on_wblist)
 			sb_mark_inode_writeback(mapping->host);
-		}
 
 		if (!folio_test_dirty(folio))
 			xas_clear_mark(&xas, PAGECACHE_TAG_DIRTY);
 		if (!keep_write)
 			xas_clear_mark(&xas, PAGECACHE_TAG_TOWRITE);
 		xas_unlock_irqrestore(&xas, flags);
 	} else {
-		ret = folio_test_set_writeback(folio);
+		folio_test_set_writeback(folio);
 	}
-	if (!ret) {
 	lruvec_stat_mod_folio(folio, NR_WRITEBACK, nr);
 	zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, nr);
-	}
 	folio_memcg_unlock(folio);
 	access_ret = arch_make_folio_accessible(folio);
 	/*
 	 * If writeback has been triggered on a page that cannot be made
 	 * accessible, it is too late to recover here.
 	 */
 	VM_BUG_ON_FOLIO(access_ret != 0, folio);
-
-	return ret;
 }
 EXPORT_SYMBOL(__folio_start_writeback);
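
The net effect is a stricter contract: the new VM_BUG_ON_FOLIO() at the top of __folio_start_writeback() means a folio must never already be under writeback when the function is called, so the "already set" case the old return value reported can no longer happen, and the NR_WRITEBACK/NR_ZONE_WRITE_PENDING accounting now runs unconditionally. A hedged sketch of how a write path satisfies that precondition (illustrative only; prepare_folio_for_writeback() is a made-up helper, though the folio_* calls it uses are real kernel APIs):

	/* Hypothetical sketch of the precondition, not part of this commit. */
	static void prepare_folio_for_writeback(struct folio *folio)
	{
		folio_lock(folio);
		/* Wait out any writeback already in flight so the new
		 * assertion in __folio_start_writeback() cannot trip. */
		folio_wait_writeback(folio);
		if (folio_clear_dirty_for_io(folio))
			folio_start_writeback(folio);
		folio_unlock(folio);
	}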