Commit 75fa68a5 authored by Matthew Wilcox (Oracle), committed by Andrew Morton (akpm)

mm/swap: convert delete_from_swap_cache() to take a folio

All but one caller already has a folio, so convert it to use a folio.

Link: https://lkml.kernel.org/r/20220617175020.717127-22-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent b98c359f
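In sketch form, the call-site change looks as follows. This is an illustrative condensation of the hunks below, not code from the patch itself:

	/* A caller that already holds a folio passes it directly: */
	delete_from_swap_cache(folio);	/* was: delete_from_swap_cache(&folio->page); */

	/* The one page-only caller (me_swapcache_clean() in
	 * mm/memory-failure.c) converts first; page_folio() locates the
	 * page's folio and takes no extra reference: */
	delete_from_swap_cache(page_folio(page));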
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1007,12 +1007,13 @@ static int me_swapcache_dirty(struct page_state *ps, struct page *p)
 static int me_swapcache_clean(struct page_state *ps, struct page *p)
 {
+	struct folio *folio = page_folio(p);
 	int ret;
 
-	delete_from_swap_cache(p);
+	delete_from_swap_cache(folio);
 
 	ret = delete_from_lru_cache(p) ? MF_FAILED : MF_RECOVERED;
-	unlock_page(p);
+	folio_unlock(folio);
 
 	if (has_extra_refcount(ps, p, false))
 		ret = MF_FAILED;
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1691,7 +1691,7 @@ static void shmem_set_folio_swapin_error(struct inode *inode, pgoff_t index,
 		return;
 
 	folio_wait_writeback(folio);
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	spin_lock_irq(&info->lock);
 	/*
 	 * Don't treat swapin error folio as alloced. Otherwise inode->i_blocks won't
@@ -1789,7 +1789,7 @@ static int shmem_swapin_folio(struct inode *inode, pgoff_t index,
 	if (sgp == SGP_WRITE)
 		folio_mark_accessed(folio);
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_mark_dirty(folio);
 	swap_free(swap);
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -38,7 +38,7 @@ int add_to_swap_cache(struct page *page, swp_entry_t entry,
 		      gfp_t gfp, void **shadowp);
 void __delete_from_swap_cache(struct page *page,
 			      swp_entry_t entry, void *shadow);
-void delete_from_swap_cache(struct page *page);
+void delete_from_swap_cache(struct folio *folio);
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
 				  unsigned long end);
 void free_swap_cache(struct page *page);
@@ -140,7 +140,7 @@ static inline void __delete_from_swap_cache(struct page *page,
 {
 }
 
-static inline void delete_from_swap_cache(struct page *page)
+static inline void delete_from_swap_cache(struct folio *folio)
 {
 }
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -222,22 +222,22 @@ bool add_to_swap(struct folio *folio)
 }
 
 /*
- * This must be called only on pages that have
+ * This must be called only on folios that have
  * been verified to be in the swap cache and locked.
- * It will never put the page into the free list,
- * the caller has a reference on the page.
+ * It will never put the folio into the free list,
+ * the caller has a reference on the folio.
  */
-void delete_from_swap_cache(struct page *page)
+void delete_from_swap_cache(struct folio *folio)
 {
-	swp_entry_t entry = { .val = page_private(page) };
+	swp_entry_t entry = folio_swap_entry(folio);
 	struct address_space *address_space = swap_address_space(entry);
 
 	xa_lock_irq(&address_space->i_pages);
-	__delete_from_swap_cache(page, entry, NULL);
+	__delete_from_swap_cache(&folio->page, entry, NULL);
 	xa_unlock_irq(&address_space->i_pages);
 
-	put_swap_page(page, entry);
-	page_ref_sub(page, thp_nr_pages(page));
+	put_swap_page(&folio->page, entry);
+	folio_ref_sub(folio, folio_nr_pages(folio));
 }
 
 void clear_shadow_from_swap_cache(int type, unsigned long begin,
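The body rewrite above is behaviour-preserving: folio_swap_entry() yields the same swap entry that the old { .val = page_private(page) } initializer read, __delete_from_swap_cache() still takes a struct page at this point in the series (hence the &folio->page argument, matching its prototype in mm/swap.h above), and folio_ref_sub(folio, folio_nr_pages(folio)) drops the same number of references that page_ref_sub(page, thp_nr_pages(page)) did.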
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1617,7 +1617,7 @@ int try_to_free_swap(struct page *page)
 	if (pm_suspended_storage())
 		return 0;
 
-	delete_from_swap_cache(&folio->page);
+	delete_from_swap_cache(folio);
 	folio_set_dirty(folio);
 	return 1;
 }