Commit 1c2f6730 authored by Hugh Dickins, committed by Linus Torvalds

mm: thp: fix MADV_REMOVE deadlock on shmem THP

Sergey reported deadlock between kswapd correctly doing its usual
lock_page(page) followed by down_read(page->mapping->i_mmap_rwsem), and
madvise(MADV_REMOVE) on an madvise(MADV_HUGEPAGE) area doing
down_write(page->mapping->i_mmap_rwsem) followed by lock_page(page).
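
(Not part of the patch: a minimal sketch of the inverted lock order
described above. The function names reclaim_side() and madv_remove_side()
are made up for illustration; the locking calls are the ones named in the
report, simplified down to the two locks involved.)

	/* kswapd / reclaim side: page lock first, then i_mmap_rwsem */
	static void reclaim_side(struct page *page)
	{
		lock_page(page);				/* A */
		down_read(&page->mapping->i_mmap_rwsem);	/* then B */
		/* ... unmap work ... */
		up_read(&page->mapping->i_mmap_rwsem);
		unlock_page(page);
	}

	/* MADV_REMOVE / hole-punch side, before this fix: i_mmap_rwsem
	 * first, then __split_huge_pmd() takes the page lock - ABBA */
	static void madv_remove_side(struct page *page)
	{
		down_write(&page->mapping->i_mmap_rwsem);	/* B */
		lock_page(page);	/* then A: blocks behind reclaim_side() */
		/* ... __split_huge_pmd() work ... */
		unlock_page(page);
		up_write(&page->mapping->i_mmap_rwsem);
	}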

This happened when shmem_fallocate(punch hole)'s unmap_mapping_range()
reaches zap_pmd_range()'s call to __split_huge_pmd().  The same deadlock
could occur when partially truncating a mapped huge tmpfs file, or using
fallocate(FALLOC_FL_PUNCH_HOLE) on it.
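
(Also not part of the patch: a rough userspace sketch of the kind of
sequence that reaches this path - not the reporter's actual reproducer.
The file name and sizes are arbitrary, error checking is omitted, and
whether shmem really backs the mapping with THPs depends on the
transparent_hugepage/shmem_enabled setting.)

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <string.h>
	#include <sys/mman.h>
	#include <unistd.h>

	int main(void)
	{
		size_t len = 4UL << 20;	/* a couple of PMD-sized extents */
		int fd = open("/dev/shm/thp-madv-remove", O_RDWR | O_CREAT, 0600);

		ftruncate(fd, len);
		char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
		madvise(p, len, MADV_HUGEPAGE);
		memset(p, 1, len);	/* fault in, hopefully as huge pages */

		/* Either of these reaches shmem_fallocate(punch hole) ->
		 * unmap_mapping_range() -> zap_pmd_range() -> __split_huge_pmd() */
		madvise(p, len / 2, MADV_REMOVE);
		/* or: fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		 *		  0, len / 2); */
		return 0;
	}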

__split_huge_pmd()'s page lock was added in 5.8, to make sure that any
concurrent use of reuse_swap_page() (holding page lock) could not catch
the anon THP's mapcounts and swapcounts while they were being split.

Fortunately, reuse_swap_page() is never applied to a shmem or file THP
(not even by khugepaged, which checks PageSwapCache before calling), and
anonymous THPs are never created in shmem or file areas: so that
__split_huge_pmd()'s page lock can only be necessary for anonymous THPs,
on which there is no risk of deadlock with i_mmap_rwsem.

Link: https://lkml.kernel.org/r/alpine.LSU.2.11.2101161409470.2022@eggly.anvils
Fixes: c444eb56 ("mm: thp: make the THP mapcount atomic against __split_huge_pmd_locked()")
Signed-off-by: Hugh Dickins <hughd@google.com>
Reported-by: Sergey Senozhatsky <sergey.senozhatsky.work@gmail.com>
Reviewed-by: Andrea Arcangeli <aarcange@redhat.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 55b6f763
@@ -2202,7 +2202,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	spinlock_t *ptl;
 	struct mmu_notifier_range range;
-	bool was_locked = false;
+	bool do_unlock_page = false;
 	pmd_t _pmd;
 
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
@@ -2218,7 +2218,6 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	VM_BUG_ON(freeze && !page);
 	if (page) {
 		VM_WARN_ON_ONCE(!PageLocked(page));
-		was_locked = true;
 		if (page != pmd_page(*pmd))
 			goto out;
 	}
@@ -2227,6 +2226,14 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	if (pmd_trans_huge(*pmd)) {
 		if (!page) {
 			page = pmd_page(*pmd);
+			/*
+			 * An anonymous page must be locked, to ensure that a
+			 * concurrent reuse_swap_page() sees stable mapcount;
+			 * but reuse_swap_page() is not used on shmem or file,
+			 * and page lock must not be taken when zap_pmd_range()
+			 * calls __split_huge_pmd() while i_mmap_lock is held.
+			 */
+			if (PageAnon(page)) {
 			if (unlikely(!trylock_page(page))) {
 				get_page(page);
 				_pmd = *pmd;
@@ -2241,6 +2248,8 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 				}
 				put_page(page);
 			}
+			do_unlock_page = true;
+			}
 		}
 		if (PageMlocked(page))
 			clear_page_mlock(page);
@@ -2249,7 +2258,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	__split_huge_pmd_locked(vma, pmd, range.start, freeze);
 out:
 	spin_unlock(ptl);
-	if (!was_locked && page)
+	if (do_unlock_page)
 		unlock_page(page);
 	/*
 	 * No need to double call mmu_notifier->invalidate_range() callback.