Commit e8351ac9 authored by Joonsoo Kim, committed by Linus Torvalds

mm/rmap: use rmap_walk() in try_to_munlock()

Now that rmap_walk() has the infrastructure to handle the differences
among the variants of the rmap traversing functions, just use it in
try_to_munlock().

This patch changes the following things:

1. remove some variants of the rmap traversing functions
	(try_to_unmap_ksm, try_to_unmap_anon, try_to_unmap_file)
2. mechanically convert try_to_munlock() to use rmap_walk()
3. copy the relevant comments over to the rmap_walk helpers
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 52629506
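
For context: this patch builds on the rmap_walk_control interface added earlier in the same series (see parent commit 52629506). Below is a rough sketch of that control structure, approximating include/linux/rmap.h as it stood at this point in the series; treat it as an illustration, not the authoritative header.

/*
 * Rough sketch of the walk-control interface consumed by rmap_walk(),
 * approximating include/linux/rmap.h at this point in the series.
 */
struct rmap_walk_control {
	void *arg;				/* handed to each rmap_one() call */
	/* per-VMA action applied at each mapping of the page */
	int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
					unsigned long addr, void *arg);
	int (*done)(struct page *page);		/* walk stops early when this returns true */
	/* nonlinear file-mapping handler; NULL means skip nonlinears */
	int (*file_nonlinear)(struct page *, struct address_space *,
					struct vm_area_struct *vma);
	/* custom anon_vma locking, e.g. page_lock_anon_vma_read */
	struct anon_vma *(*anon_lock)(struct page *page);
	bool (*invalid_vma)(struct vm_area_struct *vma, void *arg); /* VMAs to skip */
};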
include/linux/ksm.h
@@ -75,7 +75,6 @@ struct page *ksm_might_need_to_copy(struct page *page,
 int page_referenced_ksm(struct page *page,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags);
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
 void ksm_migrate_page(struct page *newpage, struct page *oldpage);
@@ -114,11 +113,6 @@ static inline int page_referenced_ksm(struct page *page,
 	return 0;
 }
 
-static inline int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
-{
-	return 0;
-}
-
 static inline int rmap_walk_ksm(struct page *page,
 		struct rmap_walk_control *rwc)
 {
...
mm/ksm.c
@@ -1946,56 +1946,6 @@ int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg,
 	return referenced;
 }
 
-int try_to_unmap_ksm(struct page *page, enum ttu_flags flags)
-{
-	struct stable_node *stable_node;
-	struct rmap_item *rmap_item;
-	int ret = SWAP_AGAIN;
-	int search_new_forks = 0;
-
-	VM_BUG_ON(!PageKsm(page));
-	VM_BUG_ON(!PageLocked(page));
-
-	stable_node = page_stable_node(page);
-	if (!stable_node)
-		return SWAP_FAIL;
-again:
-	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
-		struct anon_vma *anon_vma = rmap_item->anon_vma;
-		struct anon_vma_chain *vmac;
-		struct vm_area_struct *vma;
-
-		anon_vma_lock_read(anon_vma);
-		anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root,
-					       0, ULONG_MAX) {
-			vma = vmac->vma;
-			if (rmap_item->address < vma->vm_start ||
-			    rmap_item->address >= vma->vm_end)
-				continue;
-			/*
-			 * Initially we examine only the vma which covers this
-			 * rmap_item; but later, if there is still work to do,
-			 * we examine covering vmas in other mms: in case they
-			 * were forked from the original since ksmd passed.
-			 */
-			if ((rmap_item->mm == vma->vm_mm) == search_new_forks)
-				continue;
-
-			ret = try_to_unmap_one(page, vma,
-					rmap_item->address, (void *)flags);
-			if (ret != SWAP_AGAIN || !page_mapped(page)) {
-				anon_vma_unlock_read(anon_vma);
-				goto out;
-			}
-		}
-		anon_vma_unlock_read(anon_vma);
-	}
-	if (!search_new_forks++)
-		goto again;
-out:
-	return ret;
-}
-
 int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct stable_node *stable_node;
...
mm/rmap.c
@@ -1177,9 +1177,6 @@ void page_remove_rmap(struct page *page)
 }
 
 /*
- * Subfunctions of try_to_unmap: try_to_unmap_one called
- * repeatedly from try_to_unmap_ksm, try_to_unmap_anon or try_to_unmap_file.
- *
  * @arg: enum ttu_flags will be passed to this argument
  */
 int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
@@ -1521,107 +1518,6 @@ static bool invalid_migration_vma(struct vm_area_struct *vma, void *arg)
 	return is_vma_temporary_stack(vma);
 }
 
-/**
- * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
- * rmap method
- * @page: the page to unmap/unlock
- * @flags: action and flags
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the anon_vma struct it points to.
- *
- * This function is only called from try_to_unmap/try_to_munlock for
- * anonymous pages.
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * 'LOCKED.
- */
-static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
-{
-	struct anon_vma *anon_vma;
-	pgoff_t pgoff;
-	struct anon_vma_chain *avc;
-	int ret = SWAP_AGAIN;
-
-	anon_vma = page_lock_anon_vma_read(page);
-	if (!anon_vma)
-		return ret;
-
-	pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root, pgoff, pgoff) {
-		struct vm_area_struct *vma = avc->vma;
-		unsigned long address;
-
-		/*
-		 * During exec, a temporary VMA is setup and later moved.
-		 * The VMA is moved under the anon_vma lock but not the
-		 * page tables leading to a race where migration cannot
-		 * find the migration ptes. Rather than increasing the
-		 * locking requirements of exec(), migration skips
-		 * temporary VMAs until after exec() completes.
-		 */
-		if (IS_ENABLED(CONFIG_MIGRATION) && (flags & TTU_MIGRATION) &&
-				is_vma_temporary_stack(vma))
-			continue;
-
-		address = vma_address(page, vma);
-		ret = try_to_unmap_one(page, vma, address, (void *)flags);
-		if (ret != SWAP_AGAIN || !page_mapped(page))
-			break;
-	}
-
-	page_unlock_anon_vma_read(anon_vma);
-	return ret;
-}
-
-/**
- * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
- * @page: the page to unmap/unlock
- * @flags: action and flags
- *
- * Find all the mappings of a page using the mapping pointer and the vma chains
- * contained in the address_space struct it points to.
- *
- * This function is only called from try_to_unmap/try_to_munlock for
- * object-based pages.
- * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
- * where the page was found will be held for write. So, we won't recheck
- * vm_flags for that VMA. That should be OK, because that vma shouldn't be
- * 'LOCKED.
- */
-static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
-{
-	struct address_space *mapping = page->mapping;
-	pgoff_t pgoff = page->index << compound_order(page);
-	struct vm_area_struct *vma;
-	int ret = SWAP_AGAIN;
-
-	mutex_lock(&mapping->i_mmap_mutex);
-	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
-		unsigned long address = vma_address(page, vma);
-
-		ret = try_to_unmap_one(page, vma, address, (void *)flags);
-		if (ret != SWAP_AGAIN || !page_mapped(page))
-			goto out;
-	}
-
-	if (list_empty(&mapping->i_mmap_nonlinear))
-		goto out;
-
-	/*
-	 * We don't bother to try to find the munlocked page in nonlinears.
-	 * It's costly. Instead, later, page reclaim logic may call
-	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
-	 */
-	if (TTU_ACTION(flags) == TTU_MUNLOCK)
-		goto out;
-
-	ret = try_to_unmap_nonlinear(page, mapping, vma);
-
-out:
-	mutex_unlock(&mapping->i_mmap_mutex);
-	return ret;
-}
-
 static int page_not_mapped(struct page *page)
 {
 	return !page_mapped(page);
@@ -1689,14 +1585,25 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
  */
 int try_to_munlock(struct page *page)
 {
+	int ret;
+	struct rmap_walk_control rwc = {
+		.rmap_one = try_to_unmap_one,
+		.arg = (void *)TTU_MUNLOCK,
+		.done = page_not_mapped,
+		/*
+		 * We don't bother to try to find the munlocked page in
+		 * nonlinears. It's costly. Instead, later, page reclaim logic
+		 * may call try_to_unmap() and recover PG_mlocked lazily.
+		 */
+		.file_nonlinear = NULL,
+		.anon_lock = page_lock_anon_vma_read,
+	};
+
 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
 
-	if (unlikely(PageKsm(page)))
-		return try_to_unmap_ksm(page, TTU_MUNLOCK);
-	else if (PageAnon(page))
-		return try_to_unmap_anon(page, TTU_MUNLOCK);
-	else
-		return try_to_unmap_file(page, TTU_MUNLOCK);
+	ret = rmap_walk(page, &rwc);
+	return ret;
 }
 
 void __put_anon_vma(struct anon_vma *anon_vma)
@@ -1732,8 +1639,18 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 }
 
 /*
- * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
- * Called by migrate.c to remove migration ptes, but might be used more later.
+ * rmap_walk_anon - do something to anonymous page using the object-based
+ * rmap method
+ * @page: the page to be handled
+ * @rwc: control variable according to each walk type
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the anon_vma struct it points to.
+ *
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write. So, we won't recheck
+ * vm_flags for that VMA. That should be OK, because that vma shouldn't be
+ * LOCKED.
 */
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
@@ -1763,6 +1680,19 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 	return ret;
 }
 
+/*
+ * rmap_walk_file - do something to file page using the object-based rmap method
+ * @page: the page to be handled
+ * @rwc: control variable according to each walk type
+ *
+ * Find all the mappings of a page using the mapping pointer and the vma chains
+ * contained in the address_space struct it points to.
+ *
+ * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
+ * where the page was found will be held for write. So, we won't recheck
+ * vm_flags for that VMA. That should be OK, because that vma shouldn't be
+ * LOCKED.
+ */
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
 	struct address_space *mapping = page->mapping;
...
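
With this change, try_to_munlock() no longer dispatches on the page type itself; rmap_walk() does. For reference, a rough sketch of the dispatcher as it already existed in mm/rmap.c at this point in the series:

/*
 * Rough sketch of the rmap_walk() dispatcher that try_to_munlock() now
 * relies on: pick the traversal helper that matches the page type.
 */
int rmap_walk(struct page *page, struct rmap_walk_control *rwc)
{
	if (unlikely(PageKsm(page)))
		return rmap_walk_ksm(page, rwc);	/* KSM stable-tree walk */
	else if (PageAnon(page))
		return rmap_walk_anon(page, rwc);	/* anon_vma interval tree */
	else
		return rmap_walk_file(page, rwc);	/* address_space i_mmap tree */
}

Each caller now supplies only its per-VMA action (rmap_one), its early-exit test (done) and its locking choice (anon_lock); the traversal itself is shared, which is what allows try_to_unmap_ksm/anon/file to be deleted here.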