Commit 12c9d70b authored by Matthew Wilcox's avatar Matthew Wilcox Committed by Linus Torvalds

mm: fix memory leak in copy_huge_pmd()

We allocate a pgtable but do not attach it to anything if the PMD is in
a DAX VMA, causing it to leak.

We certainly try to not free pgtables associated with the huge zero page
if the zero page is in a DAX VMA, so I think this is the right solution.
This needs to be properly audited.
Signed-off-by: Matthew Wilcox <matthew.r.wilcox@intel.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent c6400ba7
...@@ -858,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm, ...@@ -858,7 +858,8 @@ static bool set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
return false; return false;
entry = mk_pmd(zero_page, vma->vm_page_prot); entry = mk_pmd(zero_page, vma->vm_page_prot);
entry = pmd_mkhuge(entry); entry = pmd_mkhuge(entry);
pgtable_trans_huge_deposit(mm, pmd, pgtable); if (pgtable)
pgtable_trans_huge_deposit(mm, pmd, pgtable);
set_pmd_at(mm, haddr, pmd, entry); set_pmd_at(mm, haddr, pmd, entry);
atomic_long_inc(&mm->nr_ptes); atomic_long_inc(&mm->nr_ptes);
return true; return true;
...@@ -1036,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -1036,13 +1037,15 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
spinlock_t *dst_ptl, *src_ptl; spinlock_t *dst_ptl, *src_ptl;
struct page *src_page; struct page *src_page;
pmd_t pmd; pmd_t pmd;
pgtable_t pgtable; pgtable_t pgtable = NULL;
int ret; int ret;
ret = -ENOMEM; if (!vma_is_dax(vma)) {
pgtable = pte_alloc_one(dst_mm, addr); ret = -ENOMEM;
if (unlikely(!pgtable)) pgtable = pte_alloc_one(dst_mm, addr);
goto out; if (unlikely(!pgtable))
goto out;
}
dst_ptl = pmd_lock(dst_mm, dst_pmd); dst_ptl = pmd_lock(dst_mm, dst_pmd);
src_ptl = pmd_lockptr(src_mm, src_pmd); src_ptl = pmd_lockptr(src_mm, src_pmd);
...@@ -1073,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, ...@@ -1073,7 +1076,7 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
goto out_unlock; goto out_unlock;
} }
if (pmd_trans_huge(pmd)) { if (!vma_is_dax(vma)) {
/* thp accounting separate from pmd_devmap accounting */ /* thp accounting separate from pmd_devmap accounting */
src_page = pmd_page(pmd); src_page = pmd_page(pmd);
VM_BUG_ON_PAGE(!PageHead(src_page), src_page); VM_BUG_ON_PAGE(!PageHead(src_page), src_page);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment