Commit 0378c0a0 authored by Liam R. Howlett, committed by Andrew Morton

mm/mmap: remove preallocation from do_mas_align_munmap()

In preparation of passing the vma state through split, the pre-allocation
that occurs before the split has to be moved to after.  Since the
preallocation would then live right next to the store, just call store
instead of preallocating.  This effectively restores the potential error
path of splitting and not munmap'ing which pre-dates the maple tree.

Link: https://lkml.kernel.org/r/20230120162650.984577-12-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 79e4f2ca
@@ -2329,9 +2329,6 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	mt_init_flags(&mt_detach, MT_FLAGS_LOCK_EXTERN);
 	mt_set_external_lock(&mt_detach, &mm->mmap_lock);
-	if (mas_preallocate(mas, GFP_KERNEL))
-		return -ENOMEM;
-
 	mas->last = end - 1;
 	/*
 	 * If we need to split any vma, do it now to save pain later.
@@ -2422,8 +2419,6 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 			goto userfaultfd_error;
 	}
 
-	/* Point of no return */
-	mas_set_range(mas, start, end - 1);
 #if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
 	/* Make sure no VMAs are about to be lost. */
 	{
@@ -2431,6 +2426,7 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		struct vm_area_struct *vma_mas, *vma_test;
 		int test_count = 0;
 
+		mas_set_range(mas, start, end - 1);
 		rcu_read_lock();
 		vma_test = mas_find(&test, end - 1);
 		mas_for_each(mas, vma_mas, end - 1) {
@@ -2440,10 +2436,13 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 		}
 		rcu_read_unlock();
 		BUG_ON(count != test_count);
-		mas_set_range(mas, start, end - 1);
 	}
 #endif
-	mas_store_prealloc(mas, NULL);
+	/* Point of no return */
+	mas_set_range(mas, start, end - 1);
+	if (mas_store_gfp(mas, NULL, GFP_KERNEL))
+		return -ENOMEM;
+
 	mm->map_count -= count;
 	/*
 	 * Do not downgrade mmap_lock if we are next to VM_GROWSDOWN or
@@ -2475,7 +2474,6 @@ do_mas_align_munmap(struct ma_state *mas, struct vm_area_struct *vma,
 	__mt_destroy(&mt_detach);
 start_split_failed:
 map_count_exceeded:
-	mas_destroy(mas);
 	return error;
 }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment