Commit 1fb08ac6 authored by Yang Shi, committed by Linus Torvalds

mm: rmap: make try_to_unmap() void function

Currently try_to_unmap() returns a bool by checking page_mapcount(); however,
this may produce a false positive, since page_mapcount() does not check all
subpages of a compound page.  total_mapcount() could be used instead, but it
costs more because it traverses all subpages.

Actually, most callers of try_to_unmap() do not care about the return value
at all.  The few that do just need to check whether the page is still mapped,
via page_mapped(), when necessary; page_mapped() bails out early as soon as
it finds a mapped subpage.
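
The callers that do care can follow this pattern (a minimal sketch of the
new calling convention, mirroring the mm/vmscan.c hunk below rather than a
literal excerpt):

	try_to_unmap(page, flags);	/* now returns void */
	if (page_mapped(page)) {
		/* page is still mapped: treat the unmap as failed */
	}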

Link: https://lkml.kernel.org/r/bb27e3fe-6036-b637-5086-272befbfe3da@google.com
Suggested-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Yang Shi <shy828301@gmail.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Acked-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Cc: Alistair Popple <apopple@nvidia.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jue Wang <juew@google.com>
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Peter Xu <peterx@redhat.com>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Wang Yugui <wangyugui@e16-tech.com>
Cc: Zi Yan <ziy@nvidia.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent cebc774f
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -195,7 +195,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
 
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+void try_to_unmap(struct page *, enum ttu_flags flags);
 
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1269,7 +1269,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success = true;
+	bool unmap_success;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1332,7 +1332,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
 	if (!PageHuge(hpage)) {
-		unmap_success = try_to_unmap(hpage, ttu);
+		try_to_unmap(hpage, ttu);
 	} else {
 		if (!PageAnon(hpage)) {
 			/*
@@ -1344,17 +1344,16 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 			 */
 			mapping = hugetlb_page_mapping_lock_write(hpage);
 			if (mapping) {
-				unmap_success = try_to_unmap(hpage,
-						ttu|TTU_RMAP_LOCKED);
+				try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
 				i_mmap_unlock_write(mapping);
-			} else {
+			} else
 				pr_info("Memory failure: %#lx: could not lock mapping for mapped huge page\n", pfn);
-				unmap_success = false;
-			}
 		} else {
-			unmap_success = try_to_unmap(hpage, ttu);
+			try_to_unmap(hpage, ttu);
 		}
 	}
+
+	unmap_success = !page_mapped(hpage);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1405,7 +1405,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 	/*
 	 * When racing against e.g. zap_pte_range() on another cpu,
 	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
-	 * try_to_unmap() may return false when it is about to become true,
+	 * try_to_unmap() may return before page_mapped() has become false,
 	 * if page table locking is skipped: use TTU_SYNC to wait for that.
 	 */
 	if (flags & TTU_SYNC)
@@ -1756,9 +1756,10 @@ static int page_not_mapped(struct page *page)
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path.  Caller must hold the page lock.
  *
- * If unmap is successful, return true. Otherwise, false.
+ * It is the caller's responsibility to check if the page is still
+ * mapped when needed (use TTU_SYNC to prevent accounting races).
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+void try_to_unmap(struct page *page, enum ttu_flags flags)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
@@ -1783,14 +1784,6 @@ bool try_to_unmap(struct page *page, enum ttu_flags flags)
 		rmap_walk_locked(page, &rwc);
 	else
 		rmap_walk(page, &rwc);
-
-	/*
-	 * When racing against e.g. zap_pte_range() on another cpu,
-	 * in between its ptep_get_and_clear_full() and page_remove_rmap(),
-	 * try_to_unmap() may return false when it is about to become true,
-	 * if page table locking is skipped: use TTU_SYNC to wait for that.
-	 */
-	return !page_mapcount(page);
 }
 
 /**
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1499,7 +1499,8 @@ static unsigned int shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
 
-			if (!try_to_unmap(page, flags)) {
+			try_to_unmap(page, flags);
+			if (page_mapped(page)) {
 				stat->nr_unmap_fail += nr_pages;
 				if (!was_swapbacked && PageSwapBacked(page))
 					stat->nr_lazyfree_fail += nr_pages;