mm/rmap: Turn page_referenced() into folio_referenced()

Both its callers pass a page which was previously on an LRU list,
so they were passing a folio by definition.  Use the type system to
enforce that and remove a few calls to compound_head().
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent dcc5d337
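
The calling convention after this change, sketched below for illustration: a caller that still holds a struct page (as page_check_references() does in the vmscan.c hunk further down) looks up the enclosing folio with page_folio() and passes that to folio_referenced(). The helper name page_was_referenced() is hypothetical and not part of the patch; only page_folio() and the new folio_referenced() signature are taken from the diff.

/*
 * Illustrative sketch only -- not part of this patch.
 * Shows the new calling convention: derive the folio from a page
 * and ask how many mappings referenced it.
 */
static inline bool page_was_referenced(struct page *page, int is_locked,
				       struct mem_cgroup *memcg)
{
	struct folio *folio = page_folio(page);	/* always the head of the compound page */
	unsigned long vm_flags;

	/* folio_referenced() returns the number of mappings which referenced the folio */
	return folio_referenced(folio, is_locked, memcg, &vm_flags) > 0;
}

Note that rmap_walk() still takes a struct page at this point in the series, which is why folio_referenced() below passes &folio->page to it.
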
@@ -190,7 +190,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 /*
  * Called from mm/vmscan.c to handle paging out
  */
-int page_referenced(struct page *, int is_locked,
+int folio_referenced(struct folio *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
 void try_to_migrate(struct page *page, enum ttu_flags flags);
@@ -301,7 +301,7 @@ void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
 #define anon_vma_prepare(vma)	(0)
 #define anon_vma_link(vma)	do {} while (0)
 
-static inline int page_referenced(struct page *page, int is_locked,
+static inline int folio_referenced(struct folio *folio, int is_locked,
 				  struct mem_cgroup *memcg,
 				  unsigned long *vm_flags)
 {
...
@@ -77,7 +77,7 @@ static bool page_idle_clear_pte_refs_one(struct page *page,
 		/*
 		 * We cleared the referenced bit in a mapping to this page. To
 		 * avoid interference with page reclaim, mark it young so that
-		 * page_referenced() will return > 0.
+		 * folio_referenced() will return > 0.
 		 */
 		folio_set_young(folio);
 	}
...
@@ -789,29 +789,30 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
 	return pmd;
 }
 
-struct page_referenced_arg {
+struct folio_referenced_arg {
 	int mapcount;
 	int referenced;
 	unsigned long vm_flags;
 	struct mem_cgroup *memcg;
 };
 /*
- * arg: page_referenced_arg will be passed
+ * arg: folio_referenced_arg will be passed
  */
-static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
+static bool folio_referenced_one(struct page *page, struct vm_area_struct *vma,
 			unsigned long address, void *arg)
 {
-	struct page_referenced_arg *pra = arg;
-	DEFINE_PAGE_VMA_WALK(pvmw, page, vma, address, 0);
+	struct folio *folio = page_folio(page);
+	struct folio_referenced_arg *pra = arg;
+	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, address, 0);
 	int referenced = 0;
 
 	while (page_vma_mapped_walk(&pvmw)) {
 		address = pvmw.address;
 
 		if ((vma->vm_flags & VM_LOCKED) &&
-		    (!PageTransCompound(page) || !pvmw.pte)) {
+		    (!folio_test_large(folio) || !pvmw.pte)) {
 			/* Restore the mlock which got missed */
-			mlock_vma_page(page, vma, !pvmw.pte);
+			mlock_vma_folio(folio, vma, !pvmw.pte);
 			page_vma_mapped_walk_done(&pvmw);
 			pra->vm_flags |= VM_LOCKED;
 			return false; /* To break the loop */
@@ -823,10 +824,10 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 			/*
 			 * Don't treat a reference through
 			 * a sequentially read mapping as such.
-			 * If the page has been used in another mapping,
+			 * If the folio has been used in another mapping,
 			 * we will catch it; if this other mapping is
 			 * already gone, the unmap path will have set
-			 * PG_referenced or activated the page.
+			 * the referenced flag or activated the folio.
 			 */
 			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
 				referenced++;
@@ -836,7 +837,7 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 						pvmw.pmd))
 				referenced++;
 		} else {
-			/* unexpected pmd-mapped page? */
+			/* unexpected pmd-mapped folio? */
 			WARN_ON_ONCE(1);
 		}
@@ -844,8 +845,8 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	}
 
 	if (referenced)
-		clear_page_idle(page);
-	if (test_and_clear_page_young(page))
+		folio_clear_idle(folio);
+	if (folio_test_clear_young(folio))
 		referenced++;
 
 	if (referenced) {
@@ -859,9 +860,9 @@ static bool page_referenced_one(struct page *page, struct vm_area_struct *vma,
 	return true;
 }
 
-static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
+static bool invalid_folio_referenced_vma(struct vm_area_struct *vma, void *arg)
 {
-	struct page_referenced_arg *pra = arg;
+	struct folio_referenced_arg *pra = arg;
 	struct mem_cgroup *memcg = pra->memcg;
 
 	if (!mm_match_cgroup(vma->vm_mm, memcg))
@@ -871,27 +872,26 @@ static bool invalid_page_referenced_vma(struct vm_area_struct *vma, void *arg)
 }
 
 /**
- * page_referenced - test if the page was referenced
- * @page: the page to test
- * @is_locked: caller holds lock on the page
+ * folio_referenced() - Test if the folio was referenced.
+ * @folio: The folio to test.
+ * @is_locked: Caller holds lock on the folio.
  * @memcg: target memory cgroup
- * @vm_flags: collect encountered vma->vm_flags who actually referenced the page
+ * @vm_flags: A combination of all the vma->vm_flags which referenced the folio.
  *
- * Quick test_and_clear_referenced for all mappings to a page,
- * returns the number of ptes which referenced the page.
+ * Quick test_and_clear_referenced for all mappings of a folio,
+ *
+ * Return: The number of mappings which referenced the folio.
  */
-int page_referenced(struct page *page,
-		    int is_locked,
-		    struct mem_cgroup *memcg,
-		    unsigned long *vm_flags)
+int folio_referenced(struct folio *folio, int is_locked,
+		struct mem_cgroup *memcg, unsigned long *vm_flags)
 {
 	int we_locked = 0;
-	struct page_referenced_arg pra = {
-		.mapcount = total_mapcount(page),
+	struct folio_referenced_arg pra = {
+		.mapcount = folio_mapcount(folio),
 		.memcg = memcg,
 	};
 	struct rmap_walk_control rwc = {
-		.rmap_one = page_referenced_one,
+		.rmap_one = folio_referenced_one,
 		.arg = (void *)&pra,
 		.anon_lock = page_lock_anon_vma_read,
 	};
@@ -900,11 +900,11 @@ int page_referenced(struct page *page,
 	if (!pra.mapcount)
 		return 0;
 
-	if (!page_rmapping(page))
+	if (!folio_raw_mapping(folio))
 		return 0;
 
-	if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
-		we_locked = trylock_page(page);
+	if (!is_locked && (!folio_test_anon(folio) || folio_test_ksm(folio))) {
+		we_locked = folio_trylock(folio);
 		if (!we_locked)
 			return 1;
 	}
@@ -915,14 +915,14 @@ int page_referenced(struct page *page,
 	 * cgroups
 	 */
 	if (memcg) {
-		rwc.invalid_vma = invalid_page_referenced_vma;
+		rwc.invalid_vma = invalid_folio_referenced_vma;
 	}
 
-	rmap_walk(page, &rwc);
+	rmap_walk(&folio->page, &rwc);
 	*vm_flags = pra.vm_flags;
 
 	if (we_locked)
-		unlock_page(page);
+		folio_unlock(folio);
 
 	return pra.referenced;
 }
@@ -1052,8 +1052,8 @@ void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma)
 	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
 	/*
 	 * Ensure that anon_vma and the PAGE_MAPPING_ANON bit are written
-	 * simultaneously, so a concurrent reader (eg page_referenced()'s
-	 * PageAnon()) will not see one without the other.
+	 * simultaneously, so a concurrent reader (eg folio_referenced()'s
+	 * folio_test_anon()) will not see one without the other.
 	 */
 	WRITE_ONCE(page->mapping, (struct address_space *) anon_vma);
 }
...
@@ -1386,11 +1386,12 @@ enum page_references {
 static enum page_references page_check_references(struct page *page,
 						  struct scan_control *sc)
 {
+	struct folio *folio = page_folio(page);
 	int referenced_ptes, referenced_page;
 	unsigned long vm_flags;
 
-	referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
+	referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup,
 					  &vm_flags);
 	referenced_page = TestClearPageReferenced(page);
 
 	/*
@@ -2490,7 +2491,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
  *
  * If the pages are mostly unmapped, the processing is fast and it is
  * appropriate to hold lru_lock across the whole operation. But if
- * the pages are mapped, the processing is slow (page_referenced()), so
+ * the pages are mapped, the processing is slow (folio_referenced()), so
  * we should drop lru_lock around each page. It's impossible to balance
  * this, so instead we remove the pages from the LRU while processing them.
  * It is safe to rely on PG_active against the non-LRU pages in here because
@@ -2510,7 +2511,6 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	LIST_HEAD(l_hold);	/* The pages which were snipped off */
 	LIST_HEAD(l_active);
 	LIST_HEAD(l_inactive);
-	struct page *page;
 	unsigned nr_deactivate, nr_activate;
 	unsigned nr_rotated = 0;
 	int file = is_file_lru(lru);
@@ -2532,9 +2532,13 @@ static void shrink_active_list(unsigned long nr_to_scan,
 	spin_unlock_irq(&lruvec->lru_lock);
 
 	while (!list_empty(&l_hold)) {
+		struct folio *folio;
+		struct page *page;
+
 		cond_resched();
-		page = lru_to_page(&l_hold);
-		list_del(&page->lru);
+		folio = lru_to_folio(&l_hold);
+		list_del(&folio->lru);
+		page = &folio->page;
 
 		if (unlikely(!page_evictable(page))) {
 			putback_lru_page(page);
@@ -2549,8 +2553,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
 			}
 		}
 
-		if (page_referenced(page, 0, sc->target_mem_cgroup,
+		if (folio_referenced(folio, 0, sc->target_mem_cgroup,
 				    &vm_flags)) {
 			/*
 			 * Identify referenced, file-backed active pages and
 			 * give them one more trip around the active list. So
...