Commit ad8ee77e authored by Dmitry Safonov, committed by Linus Torvalds

mm/mremap: for MREMAP_DONTUNMAP check security_vm_enough_memory_mm()

Currently memory is accounted post-mremap() with MREMAP_DONTUNMAP, which
may break overcommit policy.  So, check if there's enough memory before
doing actual VMA copy.

Don't unset VM_ACCOUNT on MREMAP_DONTUNMAP.  Semantically, such an mremap()
is actually a memory allocation.  That also simplifies the error path a
little.

Also, since on success this is a memory allocation, don't reset the
hiwater_vm value.

Link: https://lkml.kernel.org/r/20201013013416.390574-3-dima@arista.com
Fixes: commit e346b381 ("mm/mremap: add MREMAP_DONTUNMAP to mremap()")
Signed-off-by: Dmitry Safonov <dima@arista.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Brian Geffon <bgeffon@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Dan Williams <dan.j.williams@intel.com>
Cc: Dave Jiang <dave.jiang@intel.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Ralph Campbell <rcampbell@nvidia.com>
Cc: Russell King <linux@armlinux.org.uk>
Cc: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vishal Verma <vishal.l.verma@intel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 51df7bcb
...@@ -515,11 +515,19 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -515,11 +515,19 @@ static unsigned long move_vma(struct vm_area_struct *vma,
if (err) if (err)
return err; return err;
if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT)) {
if (security_vm_enough_memory_mm(mm, new_len >> PAGE_SHIFT))
return -ENOMEM;
}
new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT); new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff, new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
&need_rmap_locks); &need_rmap_locks);
if (!new_vma) if (!new_vma) {
if (unlikely(flags & MREMAP_DONTUNMAP && vm_flags & VM_ACCOUNT))
vm_unacct_memory(new_len >> PAGE_SHIFT);
return -ENOMEM; return -ENOMEM;
}
moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len, moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
need_rmap_locks); need_rmap_locks);
...@@ -548,7 +556,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -548,7 +556,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
} }
/* Conceal VM_ACCOUNT so old reservation is not undone */ /* Conceal VM_ACCOUNT so old reservation is not undone */
if (vm_flags & VM_ACCOUNT) { if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
vma->vm_flags &= ~VM_ACCOUNT; vma->vm_flags &= ~VM_ACCOUNT;
excess = vma->vm_end - vma->vm_start - old_len; excess = vma->vm_end - vma->vm_start - old_len;
if (old_addr > vma->vm_start && if (old_addr > vma->vm_start &&
...@@ -573,34 +581,16 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -573,34 +581,16 @@ static unsigned long move_vma(struct vm_area_struct *vma,
untrack_pfn_moved(vma); untrack_pfn_moved(vma);
if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) { if (unlikely(!err && (flags & MREMAP_DONTUNMAP))) {
if (vm_flags & VM_ACCOUNT) {
/* Always put back VM_ACCOUNT since we won't unmap */
vma->vm_flags |= VM_ACCOUNT;
vm_acct_memory(new_len >> PAGE_SHIFT);
}
/*
* VMAs can actually be merged back together in copy_vma
* calling merge_vma. This can happen with anonymous vmas
* which have not yet been faulted, so if we were to consider
* this VMA split we'll end up adding VM_ACCOUNT on the
* next VMA, which is completely unrelated if this VMA
* was re-merged.
*/
if (split && new_vma == vma)
split = 0;
/* We always clear VM_LOCKED[ONFAULT] on the old vma */ /* We always clear VM_LOCKED[ONFAULT] on the old vma */
vma->vm_flags &= VM_LOCKED_CLEAR_MASK; vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
/* Because we won't unmap we don't need to touch locked_vm */ /* Because we won't unmap we don't need to touch locked_vm */
goto out; return new_addr;
} }
if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) { if (do_munmap(mm, old_addr, old_len, uf_unmap) < 0) {
/* OOM: unable to split vma, just get accounts right */ /* OOM: unable to split vma, just get accounts right */
if (vm_flags & VM_ACCOUNT) if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP))
vm_acct_memory(new_len >> PAGE_SHIFT); vm_acct_memory(new_len >> PAGE_SHIFT);
excess = 0; excess = 0;
} }
...@@ -609,7 +599,7 @@ static unsigned long move_vma(struct vm_area_struct *vma, ...@@ -609,7 +599,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
mm->locked_vm += new_len >> PAGE_SHIFT; mm->locked_vm += new_len >> PAGE_SHIFT;
*locked = true; *locked = true;
} }
out:
mm->hiwater_vm = hiwater_vm; mm->hiwater_vm = hiwater_vm;
/* Restore VM_ACCOUNT if one or two pieces of vma left */ /* Restore VM_ACCOUNT if one or two pieces of vma left */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment