Commit cb29e794 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: remove PageUnevictable

There is only one caller of PageUnevictable() left; convert it to call
folio_test_unevictable() and remove all the page accessors.

Link: https://lkml.kernel.org/r/20240821193445.2294269-6-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 32f51ead
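
The caller-side change follows the usual page-to-folio conversion pattern: obtain the folio with page_folio() and use the folio accessor instead of the page one. A minimal sketch of that pattern is below; the function names are illustrative only and do not appear in this patch (the one real caller is updated in the huge_memory.c hunk further down):

/*
 * Illustrative only (hypothetical helpers, not part of this patch):
 * how a remaining PageUnevictable() caller would be converted.
 */
static bool page_is_unevictable_old(struct page *page)
{
	return PageUnevictable(page);	/* page accessor, removed below */
}

static bool page_is_unevictable_new(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_test_unevictable(folio);
}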
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -580,9 +580,9 @@ FOLIO_CLEAR_FLAG(swapcache, FOLIO_HEAD_PAGE)
 FOLIO_FLAG_FALSE(swapcache)
 #endif
 
-PAGEFLAG(Unevictable, unevictable, PF_HEAD)
-	__CLEARPAGEFLAG(Unevictable, unevictable, PF_HEAD)
-	TESTCLEARFLAG(Unevictable, unevictable, PF_HEAD)
+FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE)
+	__FOLIO_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
+	FOLIO_TEST_CLEAR_FLAG(unevictable, FOLIO_HEAD_PAGE)
 
 #ifdef CONFIG_MMU
 PAGEFLAG(Mlocked, mlocked, PF_NO_TAIL)
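
For reference, FOLIO_FLAG(unevictable, FOLIO_HEAD_PAGE) generates folio_test_unevictable(), folio_set_unevictable() and folio_clear_unevictable(); unlike the PAGEFLAG() line it replaces, it no longer emits the PageUnevictable()/SetPageUnevictable()/ClearPageUnevictable() page wrappers. Roughly, the generated test helper expands to something like the sketch below (simplified; the real macros in page-flags.h go through FOLIO_TEST_FLAG()):

static __always_inline bool folio_test_unevictable(const struct folio *folio)
{
	/* FOLIO_HEAD_PAGE selects the flags word of the folio's first page */
	return test_bit(PG_unevictable, const_folio_flags(folio, FOLIO_HEAD_PAGE));
}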
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2932,25 +2932,25 @@ static void remap_page(struct folio *folio, unsigned long nr)
 	}
 }
 
-static void lru_add_page_tail(struct page *head, struct page *tail,
+static void lru_add_page_tail(struct folio *folio, struct page *tail,
 		struct lruvec *lruvec, struct list_head *list)
 {
-	VM_BUG_ON_PAGE(!PageHead(head), head);
-	VM_BUG_ON_PAGE(PageLRU(tail), head);
+	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
+	VM_BUG_ON_FOLIO(PageLRU(tail), folio);
 	lockdep_assert_held(&lruvec->lru_lock);
 
 	if (list) {
 		/* page reclaim is reclaiming a huge page */
-		VM_WARN_ON(PageLRU(head));
+		VM_WARN_ON(folio_test_lru(folio));
 		get_page(tail);
 		list_add_tail(&tail->lru, list);
 	} else {
 		/* head is still on lru (and we have it frozen) */
-		VM_WARN_ON(!PageLRU(head));
-		if (PageUnevictable(tail))
+		VM_WARN_ON(!folio_test_lru(folio));
+		if (folio_test_unevictable(folio))
 			tail->mlock_count = 0;
 		else
-			list_add_tail(&tail->lru, &head->lru);
+			list_add_tail(&tail->lru, &folio->lru);
 		SetPageLRU(tail);
 	}
 }
@@ -3049,7 +3049,7 @@ static void __split_huge_page_tail(struct folio *folio, int tail,
 	 * pages to show after the currently processed elements - e.g.
 	 * migrate_pages
 	 */
-	lru_add_page_tail(folio, page_tail, lruvec, list);
+	lru_add_page_tail(folio, page_tail, lruvec, list);
 }
 
 static void __split_huge_page(struct page *page, struct list_head *list,
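
Note that switching the test from the tail page to the folio is behaviour-preserving: PG_unevictable is declared with the PF_HEAD (now FOLIO_HEAD_PAGE) policy, so PageUnevictable(tail) already resolved to the head page's flag via compound_head(). Testing the folio directly is equivalent and skips that indirection.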