Commit 0964730b authored by Hugh Dickins, committed by Linus Torvalds

mlock: fix unevictable_pgs event counts on THP

5.8 commit 5d91f31f ("mm: swap: fix vmstats for huge page") has
established that vm_events should count every subpage of a THP, including
unevictable_pgs_culled and unevictable_pgs_rescued; but
lru_cache_add_inactive_or_unevictable() was not doing so for
unevictable_pgs_mlocked, and mm/mlock.c was not doing so for
unevictable_pgs mlocked, munlocked, cleared and stranded.

Fix them; but THPs don't go the pagevec way in mlock.c, so no fixes needed
on that path.

Fixes: 5d91f31f ("mm: swap: fix vmstats for huge page")
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Yang Shi <shy828301@gmail.com>
Cc: Alex Shi <alex.shi@linux.alibaba.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Qian Cai <cai@lca.pw>
Link: http://lkml.kernel.org/r/alpine.LSU.2.11.2008301408230.5954@eggly.anvils
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 8d8869ca
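
The recurring pattern in the diff below: read thp_nr_pages() once and pass it to the plural event helpers, so a THP bumps the counter once per subpage instead of once per folio. A minimal sketch of that pattern (illustrative only, not part of the commit):

	/*
	 * Illustrative sketch of the fix pattern, not part of the commit.
	 * thp_nr_pages() returns 1 for a base page and HPAGE_PMD_NR
	 * (e.g. 512 on x86_64 with 4K pages) for a PMD-mapped THP.
	 */
	int nr_pages = thp_nr_pages(page);

	/* Before: one event regardless of page size - undercounts a THP. */
	count_vm_event(UNEVICTABLE_PGMLOCKED);

	/* After: one event per subpage, as 5d91f31f established. */
	count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
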
diff --git a/mm/mlock.c b/mm/mlock.c
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -58,11 +58,14 @@ EXPORT_SYMBOL(can_do_mlock);
  */
 void clear_page_mlock(struct page *page)
 {
+	int nr_pages;
+
 	if (!TestClearPageMlocked(page))
 		return;
 
-	mod_zone_page_state(page_zone(page), NR_MLOCK, -thp_nr_pages(page));
-	count_vm_event(UNEVICTABLE_PGCLEARED);
+	nr_pages = thp_nr_pages(page);
+	mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
+	count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
 	/*
 	 * The previous TestClearPageMlocked() corresponds to the smp_mb()
 	 * in __pagevec_lru_add_fn().
@@ -76,7 +79,7 @@ void clear_page_mlock(struct page *page)
 		 * We lost the race. the page already moved to evictable list.
 		 */
 		if (PageUnevictable(page))
-			count_vm_event(UNEVICTABLE_PGSTRANDED);
+			count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	}
 }
 
@@ -93,9 +96,10 @@ void mlock_vma_page(struct page *page)
 	VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);
 
 	if (!TestSetPageMlocked(page)) {
-		mod_zone_page_state(page_zone(page), NR_MLOCK,
-				    thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		int nr_pages = thp_nr_pages(page);
+
+		mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 		if (!isolate_lru_page(page))
 			putback_lru_page(page);
 	}
@@ -138,7 +142,7 @@ static void __munlock_isolated_page(struct page *page)
 
 	/* Did try_to_unlock() succeed or punt? */
 	if (!PageMlocked(page))
-		count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));
 
 	putback_lru_page(page);
 }
@@ -154,10 +158,12 @@ static void __munlock_isolated_page(struct page *page)
  */
 static void __munlock_isolation_failed(struct page *page)
 {
+	int nr_pages = thp_nr_pages(page);
+
 	if (PageUnevictable(page))
-		__count_vm_event(UNEVICTABLE_PGSTRANDED);
+		__count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);
 	else
-		__count_vm_event(UNEVICTABLE_PGMUNLOCKED);
+		__count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);
 }
 
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -494,14 +494,14 @@ void lru_cache_add_inactive_or_unevictable(struct page *page,
 
 	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
 	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
+		int nr_pages = thp_nr_pages(page);
 		/*
 		 * We use the irq-unsafe __mod_zone_page_stat because this
 		 * counter is not modified from interrupt context, and the pte
 		 * lock is held(spinlock), which implies preemption disabled.
 		 */
-		__mod_zone_page_state(page_zone(page), NR_MLOCK,
-				      thp_nr_pages(page));
-		count_vm_event(UNEVICTABLE_PGMLOCKED);
+		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
+		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
 	}
 	lru_cache_add(page);
 }
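
One way to observe the effect of this fix from userspace is to watch the unevictable_pgs_* counters in /proc/vmstat before and after mlock()ing a THP-backed range; with the fix, mlocking one 2M THP should move unevictable_pgs_mlocked by 512 (on x86_64 with 4K pages) rather than by 1. A small sketch, assuming only the /proc/vmstat format and the counter-name prefix:

	/* Print the unevictable_pgs_* event counters from /proc/vmstat. */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char line[128];
		FILE *f = fopen("/proc/vmstat", "r");

		if (!f) {
			perror("fopen");
			return 1;
		}
		/* Each line is "<counter_name> <value>"; keep the mlock ones. */
		while (fgets(line, sizeof(line), f)) {
			if (strncmp(line, "unevictable_pgs_", 16) == 0)
				fputs(line, stdout);
		}
		fclose(f);
		return 0;
	}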