Commit 7479df6d authored by Kirill A. Shutemov, committed by Linus Torvalds

thp, mlock: do not allow huge pages in mlocked area

With the new refcounting, a THP page can belong to several VMAs.  This
makes it tricky to track THP pages when they are partially mlocked: it
can lead to leaking mlocked pages into non-VM_LOCKED vmas, among other
problems.
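
For illustration only (not part of this commit), a minimal userspace
sketch of the scenario above, assuming an x86-64 machine with 2MB
PMD-sized huge pages and THP enabled: half of a THP-backed region is
mlocked, and a fork() leaves the same huge page mapped through both a
VM_LOCKED vma (parent) and a non-VM_LOCKED vma (child), because memory
locks are not inherited across fork.

#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumption: 2MB PMD-sized huge pages */

int main(void)
{
	char *buf;

	/* THP-aligned, THP-eligible anonymous memory. */
	if (posix_memalign((void **)&buf, HPAGE_SIZE, HPAGE_SIZE))
		return 1;
	madvise(buf, HPAGE_SIZE, MADV_HUGEPAGE);
	memset(buf, 1, HPAGE_SIZE);	/* fault in, possibly as one huge page */

	/* Lock only the first half: the huge page is now partially mlocked. */
	mlock(buf, HPAGE_SIZE / 2);

	/*
	 * Locks are not inherited across fork(), so the child maps the
	 * same huge page through a non-VM_LOCKED vma while the parent's
	 * vma is VM_LOCKED.
	 */
	if (fork() == 0)
		_exit(0);
	return 0;
}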

With this patch we split all pages on mlock and avoid faulting in or
collapsing new THP pages in VM_LOCKED vmas.
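
One hedged way to observe the new behaviour (again, an illustration and
not part of the commit): mlock() now populates the range with
FOLL_SPLIT set, so huge pages backing a VM_LOCKED vma are split at lock
time.  Assuming a THP-enabled kernel with this patch, something like
the following should show "AnonHugePages: 0 kB" for the locked region
in /proc/self/smaps:

#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

#define HPAGE_SIZE (2UL << 20)	/* assumption: 2MB PMD-sized huge pages */
#define LEN (4 * HPAGE_SIZE)

int main(void)
{
	char *buf;

	if (posix_memalign((void **)&buf, HPAGE_SIZE, LEN))
		return 1;
	madvise(buf, LEN, MADV_HUGEPAGE);
	memset(buf, 1, LEN);	/* may fault in as huge pages */

	mlock(buf, LEN);	/* with this patch: splits the huge pages */

	/*
	 * Crude check: the vma covering buf should show
	 * "AnonHugePages: 0 kB" (and "Locked: 8192 kB") in smaps.
	 */
	system("grep -E 'AnonHugePages|^Locked' /proc/self/smaps");
	return 0;
}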

I tried an alternative approach: do not mark THP pages mlocked and keep
them on the normal LRUs.  That way vmscan could try to split huge pages
under memory pressure and free up the subpages which don't belong to
VM_LOCKED vmas.  But this is a user-visible change: we would screw up
the Mlocked accounting reported in meminfo, so I had to set this
approach aside.

We can bring something better later, but this should be good enough for
now.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 7aef4172
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -927,7 +927,8 @@ long populate_vma_page_range(struct vm_area_struct *vma,
 	gup_flags = FOLL_TOUCH | FOLL_POPULATE | FOLL_MLOCK;
 	if (vma->vm_flags & VM_LOCKONFAULT)
 		gup_flags &= ~FOLL_POPULATE;
+	if (vma->vm_flags & VM_LOCKED)
+		gup_flags |= FOLL_SPLIT;
 	/*
 	 * We want to touch writable mappings with a write fault in order
 	 * to break COW, except for shared mappings because these don't COW
...
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -842,6 +842,8 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
 		return VM_FAULT_FALLBACK;
+	if (vma->vm_flags & VM_LOCKED)
+		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
@@ -2555,7 +2557,8 @@ static bool hugepage_vma_check(struct vm_area_struct *vma)
 	if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
 	    (vma->vm_flags & VM_NOHUGEPAGE))
 		return false;
+	if (vma->vm_flags & VM_LOCKED)
+		return false;
 	if (!vma->anon_vma || vma->vm_ops)
 		return false;
 	if (is_vma_temporary_stack(vma))
...
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2166,7 +2166,8 @@ static int wp_page_copy(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_unmap_unlock(page_table, ptl);
 	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-	if (old_page) {
+	/* THP pages are never mlocked */
+	if (old_page && !PageTransCompound(old_page)) {
 		/*
 		 * Don't let another task, with possibly unlocked vma,
 		 * keep the mlocked page.
...
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -443,39 +443,26 @@ void munlock_vma_pages_range(struct vm_area_struct *vma,
 		page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
 				&page_mask);
 
-		if (page && !IS_ERR(page)) {
-			if (PageTransHuge(page)) {
-				lock_page(page);
-				/*
-				 * Any THP page found by follow_page_mask() may
-				 * have gotten split before reaching
-				 * munlock_vma_page(), so we need to recompute
-				 * the page_mask here.
-				 */
-				page_mask = munlock_vma_page(page);
-				unlock_page(page);
-				put_page(page); /* follow_page_mask() */
-			} else {
-				/*
-				 * Non-huge pages are handled in batches via
-				 * pagevec. The pin from follow_page_mask()
-				 * prevents them from collapsing by THP.
-				 */
-				pagevec_add(&pvec, page);
-				zone = page_zone(page);
-				zoneid = page_zone_id(page);
+		if (page && !IS_ERR(page) && !PageTransCompound(page)) {
+			/*
+			 * Non-huge pages are handled in batches via
+			 * pagevec. The pin from follow_page_mask()
+			 * prevents them from collapsing by THP.
+			 */
+			pagevec_add(&pvec, page);
+			zone = page_zone(page);
+			zoneid = page_zone_id(page);
 
-				/*
-				 * Try to fill the rest of pagevec using fast
-				 * pte walk. This will also update start to
-				 * the next page to process. Then munlock the
-				 * pagevec.
-				 */
-				start = __munlock_pagevec_fill(&pvec, vma,
-						zoneid, start, end);
-				__munlock_pagevec(&pvec, zone);
-				goto next;
-			}
+			/*
+			 * Try to fill the rest of pagevec using fast
+			 * pte walk. This will also update start to
+			 * the next page to process. Then munlock the
+			 * pagevec.
+			 */
+			start = __munlock_pagevec_fill(&pvec, vma,
+					zoneid, start, end);
+			__munlock_pagevec(&pvec, zone);
+			goto next;
 		}
 		/* It's a bug to munlock in the middle of a THP page */
 		VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
...