Commit 74874c57 authored by Lorenzo Stoakes, committed by Andrew Morton

mm/mmap: correct error handling in mmap_region()

Commit f8d112a4 ("mm/mmap: avoid zeroing vma tree in mmap_region()")
changed how error handling is performed in mmap_region().

The error value defaults to -ENOMEM, but then gets reassigned immediately
to the result of vms_gather_munmap_vmas() if we are performing a MAP_FIXED
mapping over existing VMAs (and thus unmapping them).

This overwrites the error value, potentially clearing it.

After this, we invoke may_expand_vm() and possibly vm_area_alloc(), and
check whether they failed. If either did, we jump to the error-handling
logic, but importantly, we do NOT update the error code first.

This means that, if vms_gather_munmap_vmas() succeeds (setting the error
code to zero) but one of these later calls fails, the function returns
zero. Since mmap_region() returns an address, the caller interprets this
not as an error but as a successful mapping at address zero, which is
entirely incorrect.
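
To see the shape of the bug in isolation, here is a minimal,
self-contained sketch (the stub functions, return values, and the
-12/-ENOMEM literal are illustrative stand-ins, not the kernel code):

	#include <stdio.h>

	/* Illustrative stubs standing in for the real calls. */
	static int vms_gather_munmap_vmas(void) { return 0; } /* succeeds */
	static int may_expand_vm(void)          { return 0; } /* check fails */

	static long buggy_mmap_region(void)
	{
		int error = -12;	/* default -ENOMEM, about to be clobbered */

		/* MAP_FIXED over existing VMAs: success overwrites error with 0. */
		error = vms_gather_munmap_vmas();
		if (error)
			goto abort;

		if (!may_expand_vm())
			goto abort;	/* BUG: error is still 0 here */

		return 0x1000;		/* pretend this is the mapped address */
	abort:
		return error;		/* returns 0, indistinguishable from address 0 */
	}

	int main(void)
	{
		/* Prints 0 rather than a negative error code. */
		printf("buggy_mmap_region() = %ld\n", buggy_mmap_region());
		return 0;
	}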

Correct this, and avoid future confusion, by strictly setting the error
code on each and every occasion we jump to the error-handling logic,
immediately prior to the jump.

This way we can see at a glance that the error code is always correct.
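
The arch_validate_flags() check, excerpted from the hunks below, is
representative of the new convention; the assignment moves from ahead of
the check to the jump site itself:

	/* Before: error pre-set, at a distance from the goto. */
	error = -EINVAL;
	if (!arch_validate_flags(vma->vm_flags))
		goto close_and_free_vma;

	/* After: error set immediately before the jump. */
	if (!arch_validate_flags(vma->vm_flags)) {
		error = -EINVAL;
		goto close_and_free_vma;
	}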

Many thanks to Vegard Nossum, who spotted this issue in discussion
around this problem.

Link: https://lkml.kernel.org/r/20241002073932.13482-1-lorenzo.stoakes@oracle.com
Fixes: f8d112a4 ("mm/mmap: avoid zeroing vma tree in mmap_region()")
Signed-off-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Suggested-by: Vegard Nossum <vegard.nossum@oracle.com>
Reviewed-by: Vlastimil Babka <vbabka@suse.cz>
Cc: "Liam R. Howlett" <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 8e929cb5
@@ -1371,7 +1371,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	struct maple_tree mt_detach;
 	unsigned long end = addr + len;
 	bool writable_file_mapping = false;
-	int error = -ENOMEM;
+	int error;
 	VMA_ITERATOR(vmi, mm, addr);
 	VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
 
@@ -1396,8 +1396,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Check against address space limit. */
-	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages))
+	if (!may_expand_vm(mm, vm_flags, pglen - vms.nr_pages)) {
+		error = -ENOMEM;
 		goto abort_munmap;
+	}
 
 	/*
 	 * Private writable mapping: check memory availability
@@ -1405,8 +1407,11 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	if (accountable_mapping(file, vm_flags)) {
 		charged = pglen;
 		charged -= vms.nr_accounted;
-		if (charged && security_vm_enough_memory_mm(mm, charged))
-			goto abort_munmap;
+		if (charged) {
+			error = security_vm_enough_memory_mm(mm, charged);
+			if (error)
+				goto abort_munmap;
+		}
 
 		vms.nr_accounted = 0;
 		vm_flags |= VM_ACCOUNT;
@@ -1422,8 +1427,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * not unmapped, but the maps are removed from the list.
 	 */
 	vma = vm_area_alloc(mm);
-	if (!vma)
+	if (!vma) {
+		error = -ENOMEM;
 		goto unacct_error;
+	}
 
 	vma_iter_config(&vmi, addr, end);
 	vma_set_range(vma, addr, end, pgoff);
@@ -1453,9 +1460,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	 * Expansion is handled above, merging is handled below.
 	 * Drivers should not alter the address of the VMA.
	 */
-	error = -EINVAL;
-	if (WARN_ON((addr != vma->vm_start)))
+	if (WARN_ON((addr != vma->vm_start))) {
+		error = -EINVAL;
 		goto close_and_free_vma;
+	}
 
 	vma_iter_config(&vmi, addr, end);
 	/*
@@ -1500,13 +1508,15 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 	}
 
 	/* Allow architectures to sanity-check the vm_flags */
-	error = -EINVAL;
-	if (!arch_validate_flags(vma->vm_flags))
+	if (!arch_validate_flags(vma->vm_flags)) {
+		error = -EINVAL;
 		goto close_and_free_vma;
+	}
 
-	error = -ENOMEM;
-	if (vma_iter_prealloc(&vmi, vma))
+	if (vma_iter_prealloc(&vmi, vma)) {
+		error = -ENOMEM;
 		goto close_and_free_vma;
+	}
 
 	/* Lock the VMA since it is modified after insertion into VMA tree */
 	vma_start_write(vma);
...