mm/rmap: Convert rmap_walk() to take a folio

This ripples all the way through to every calling and called function
from rmap.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
parent e05b3453
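
For orientation, here is a minimal sketch, not part of the patch, of what a caller looks like once rmap_walk() takes a folio: the rmap_one callback receives the folio directly, anon_lock points at folio_lock_anon_vma_read(), and the walk is started with the folio itself rather than &folio->page. The example_* names are hypothetical; the pattern mirrors the mm/damon/paddr.c changes below.

#include <linux/mm.h>
#include <linux/rmap.h>

/*
 * Illustrative only (hypothetical helper): count how many VMAs still
 * map this folio.  Returning true keeps the walk going over the
 * remaining VMAs.
 */
static bool example_rmap_one(struct folio *folio, struct vm_area_struct *vma,
		unsigned long addr, void *arg)
{
	DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
	unsigned long *mappings = arg;

	while (page_vma_mapped_walk(&pvmw))
		(*mappings)++;

	return true;
}

/* Caller is expected to hold the folio lock for anon/KSM folios. */
static unsigned long example_count_mappings(struct folio *folio)
{
	unsigned long mappings = 0;
	struct rmap_walk_control rwc = {
		.arg = &mappings,
		.rmap_one = example_rmap_one,
		.anon_lock = folio_lock_anon_vma_read,
	};

	rmap_walk(folio, &rwc);		/* was rmap_walk(&folio->page, &rwc) */
	return mappings;
}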
@@ -51,7 +51,7 @@ static inline void ksm_exit(struct mm_struct *mm)
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc);
void folio_migrate_ksm(struct folio *newfolio, struct folio *folio);
#else /* !CONFIG_KSM */
@@ -78,7 +78,7 @@ static inline struct page *ksm_might_need_to_copy(struct page *page,
return page;
}
static inline void rmap_walk_ksm(struct page *page,
static inline void rmap_walk_ksm(struct folio *folio,
struct rmap_walk_control *rwc)
{
}
......
@@ -266,7 +266,6 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked);
/*
* Called by memory-failure.c to kill processes.
*/
struct anon_vma *page_lock_anon_vma_read(struct page *page);
struct anon_vma *folio_lock_anon_vma_read(struct folio *folio);
void page_unlock_anon_vma_read(struct anon_vma *anon_vma);
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
@@ -286,15 +285,15 @@ struct rmap_walk_control {
* Return false if page table scanning in rmap_walk should be stopped.
* Otherwise, return true.
*/
bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
bool (*rmap_one)(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
struct anon_vma *(*anon_lock)(struct page *page);
int (*done)(struct folio *folio);
struct anon_vma *(*anon_lock)(struct folio *folio);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
void rmap_walk(struct folio *folio, struct rmap_walk_control *rwc);
void rmap_walk_locked(struct folio *folio, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
......
@@ -16,10 +16,10 @@
#include "../internal.h"
#include "prmtv-common.h"
static bool __damon_pa_mkold(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_mkold(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
DEFINE_PAGE_VMA_WALK(pvmw, page, vma, addr, 0);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
while (page_vma_mapped_walk(&pvmw)) {
addr = pvmw.address;
@@ -37,7 +37,7 @@ static void damon_pa_mkold(unsigned long paddr)
struct page *page = damon_get_page(PHYS_PFN(paddr));
struct rmap_walk_control rwc = {
.rmap_one = __damon_pa_mkold,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -54,7 +54,7 @@ static void damon_pa_mkold(unsigned long paddr)
if (need_lock && !folio_trylock(folio))
goto out;
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
@@ -87,10 +87,9 @@ struct damon_pa_access_chk_result {
bool accessed;
};
static bool __damon_pa_young(struct page *page, struct vm_area_struct *vma,
static bool __damon_pa_young(struct folio *folio, struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
struct damon_pa_access_chk_result *result = arg;
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
@@ -133,7 +132,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
struct rmap_walk_control rwc = {
.arg = &result,
.rmap_one = __damon_pa_young,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -156,7 +155,7 @@ static bool damon_pa_young(unsigned long paddr, unsigned long *page_sz)
return NULL;
}
rmap_walk(&folio->page, &rwc);
rmap_walk(folio, &rwc);
if (need_lock)
folio_unlock(folio);
......
@@ -164,10 +164,3 @@ void putback_lru_page(struct page *page)
{
folio_putback_lru(page_folio(page));
}
#ifdef CONFIG_MMU
struct anon_vma *page_lock_anon_vma_read(struct page *page)
{
return folio_lock_anon_vma_read(page_folio(page));
}
#endif
@@ -2572,7 +2572,7 @@ int split_huge_page_to_list(struct page *page, struct list_head *list)
* The caller does not necessarily hold an mmap_lock that would
* prevent the anon_vma disappearing so we first we take a
* reference to it and then lock the anon_vma for write. This
* is similar to page_lock_anon_vma_read except the write lock
* is similar to folio_lock_anon_vma_read except the write lock
* is taken to serialise against parallel split or collapse
* operations.
*/
......
@@ -2588,21 +2588,21 @@ struct page *ksm_might_need_to_copy(struct page *page,
return new_page;
}
void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
void rmap_walk_ksm(struct folio *folio, struct rmap_walk_control *rwc)
{
struct stable_node *stable_node;
struct rmap_item *rmap_item;
int search_new_forks = 0;
VM_BUG_ON_PAGE(!PageKsm(page), page);
VM_BUG_ON_FOLIO(!folio_test_ksm(folio), folio);
/*
* Rely on the page lock to protect against concurrent modifications
* to that page's node of the stable tree.
*/
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
stable_node = page_stable_node(page);
stable_node = folio_stable_node(folio);
if (!stable_node)
return;
again:
@@ -2637,11 +2637,11 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg))
continue;
if (!rwc->rmap_one(page, vma, addr, rwc->arg)) {
if (!rwc->rmap_one(folio, vma, addr, rwc->arg)) {
anon_vma_unlock_read(anon_vma);
return;
}
if (rwc->done && rwc->done(page)) {
if (rwc->done && rwc->done(folio)) {
anon_vma_unlock_read(anon_vma);
return;
}
......
@@ -171,13 +171,11 @@ void putback_movable_pages(struct list_head *l)
/*
* Restore a potential migration pte to a working pte entry
*/
static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *old)
static bool remove_migration_pte(struct folio *folio,
struct vm_area_struct *vma, unsigned long addr, void *old)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, old, vma, addr, PVMW_SYNC | PVMW_MIGRATION);
VM_BUG_ON_PAGE(PageTail(page), page);
while (page_vma_mapped_walk(&pvmw)) {
pte_t pte;
swp_entry_t entry;
@@ -269,9 +267,9 @@ void remove_migration_ptes(struct folio *src, struct folio *dst, bool locked)
};
if (locked)
rmap_walk_locked(&dst->page, &rwc);
rmap_walk_locked(dst, &rwc);
else
rmap_walk(&dst->page, &rwc);
rmap_walk(dst, &rwc);
}
/*
......
@@ -46,11 +46,10 @@ static struct page *page_idle_get_page(unsigned long pfn)
return page;
}
static bool page_idle_clear_pte_refs_one(struct page *page,
static bool page_idle_clear_pte_refs_one(struct folio *folio,
struct vm_area_struct *vma,
unsigned long addr, void *arg)
{
struct folio *folio = page_folio(page);
DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, addr, 0);
bool referenced = false;
@@ -93,7 +92,7 @@ static void page_idle_clear_pte_refs(struct page *page)
*/
static const struct rmap_walk_control rwc = {
.rmap_one = page_idle_clear_pte_refs_one,
.anon_lock = page_lock_anon_vma_read,
.anon_lock = folio_lock_anon_vma_read,
};
bool need_lock;
@@ -104,7 +103,7 @@ static void page_idle_clear_pte_refs(struct page *page)
if (need_lock && !folio_trylock(folio))
return;
rmap_walk(&folio->page, (struct rmap_walk_control *)&rwc);
rmap_walk(folio, (struct rmap_walk_control *)&rwc);
if (need_lock)
folio_unlock(folio);
......