Commit b2b3b886 authored by Liam R. Howlett, committed by Andrew Morton

mm: don't use __vma_adjust() in __split_vma()

Use the abstracted locking and maple tree operations.  Since __split_vma()
is the only user of the __vma_adjust() function to use the insert
argument, drop that argument.  Remove the NULL passed through from
fs/exec's shift_arg_pages() and mremap() at the same time.

Link: https://lkml.kernel.org/r/20230120162650.984577-44-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 68cefec5
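
To make the API change easy to see before reading the diff: vma_adjust() loses its final insert argument (only __split_vma() ever passed a non-NULL value there), and __vma_adjust() keeps only the expand argument. A minimal before/after sketch of one call site, taken from the shift_arg_pages() hunk below:

	/* before: the trailing NULL is the now-removed 'insert' argument */
	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
		return -ENOMEM;

	/* after: the argument is simply dropped */
	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
		return -ENOMEM;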
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
+	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff))
 		return -ENOMEM;
 
 	/*
@@ -733,7 +733,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	vma_prev(&vmi);
 	/* Shrink the vma to just the new range */
-	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
+	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff);
 }
 
 /*
@@ -2832,13 +2832,12 @@ void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
 /* mmap.c */
 extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
 extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
-	struct vm_area_struct *expand);
+	unsigned long end, pgoff_t pgoff, struct vm_area_struct *expand);
 static inline int vma_adjust(struct vma_iterator *vmi,
 	struct vm_area_struct *vma, unsigned long start, unsigned long end,
-	pgoff_t pgoff, struct vm_area_struct *insert)
+	pgoff_t pgoff)
 {
-	return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vmi, vma, start, end, pgoff, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
@@ -691,7 +691,7 @@ inline int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
  */
 int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	unsigned long start, unsigned long end, pgoff_t pgoff,
-	struct vm_area_struct *insert, struct vm_area_struct *expand)
+	struct vm_area_struct *expand)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *remove2 = NULL;
@@ -704,7 +704,7 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	struct vm_area_struct *exporter = NULL, *importer = NULL;
 	struct vma_prepare vma_prep;
 
-	if (next && !insert) {
+	if (next) {
 		if (end >= next->vm_end) {
 			/*
 			 * vma expands, overlapping all the next, and
@@ -795,39 +795,25 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	VM_WARN_ON(vma_prep.anon_vma && adjust_next && next->anon_vma &&
 		   vma_prep.anon_vma != next->anon_vma);
 
-	vma_prep.insert = insert;
 	vma_prepare(&vma_prep);
 
-	if (start != vma->vm_start) {
-		if (vma->vm_start < start) {
-			if (!insert || (insert->vm_end != start)) {
-				vma_iter_clear(vmi, vma->vm_start, start);
-				vma_iter_set(vmi, start);
-				VM_WARN_ON(insert && insert->vm_start > vma->vm_start);
-			}
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_start = start;
-	}
-	if (end != vma->vm_end) {
-		if (vma->vm_end > end) {
-			if (!insert || (insert->vm_start != end)) {
-				vma_iter_clear(vmi, end, vma->vm_end);
-				vma_iter_set(vmi, vma->vm_end);
-				VM_WARN_ON(insert &&
-					   insert->vm_end < vma->vm_end);
-			}
-		} else {
-			vma_changed = true;
-		}
-		vma->vm_end = end;
-	}
+	if (vma->vm_start < start)
+		vma_iter_clear(vmi, vma->vm_start, start);
+	else if (start != vma->vm_start)
+		vma_changed = true;
+
+	if (vma->vm_end > end)
+		vma_iter_clear(vmi, end, vma->vm_end);
+	else if (end != vma->vm_end)
+		vma_changed = true;
+
+	vma->vm_start = start;
+	vma->vm_end = end;
+	vma->vm_pgoff = pgoff;
 
 	if (vma_changed)
 		vma_iter_store(vmi, vma);
 
-	vma->vm_pgoff = pgoff;
 	if (adjust_next) {
 		next->vm_start += adjust_next;
 		next->vm_pgoff += adjust_next >> PAGE_SHIFT;
@@ -846,9 +832,9 @@ int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma,
  * per-vma resources, so we don't attempt to merge those.
  */
 static inline int is_mergeable_vma(struct vm_area_struct *vma,
-		struct file *file, unsigned long vm_flags,
-		struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
-		struct anon_vma_name *anon_name)
+				struct file *file, unsigned long vm_flags,
+				struct vm_userfaultfd_ctx vm_userfaultfd_ctx,
+				struct anon_vma_name *anon_name)
 {
 	/*
 	 * VM_SOFTDIRTY should not prevent from VMA merging, if we
@@ -1030,20 +1016,19 @@ struct vm_area_struct *vma_merge(struct vma_iterator *vmi, struct mm_struct *mm,
 			is_mergeable_anon_vma(prev->anon_vma,
 				next->anon_vma, NULL)) {	/* cases 1, 6 */
 		err = __vma_adjust(vmi, prev, prev->vm_start,
-					next->vm_end, prev->vm_pgoff, NULL,
-					prev);
+					next->vm_end, prev->vm_pgoff, prev);
 		res = prev;
 	} else if (merge_prev) {			/* cases 2, 5, 7 */
 		err = __vma_adjust(vmi, prev, prev->vm_start,
-					end, prev->vm_pgoff, NULL, prev);
+					end, prev->vm_pgoff, prev);
 		res = prev;
 	} else if (merge_next) {
 		if (prev && addr < prev->vm_end)	/* case 4 */
 			err = __vma_adjust(vmi, prev, prev->vm_start,
-					addr, prev->vm_pgoff, NULL, next);
+					addr, prev->vm_pgoff, next);
 		else					/* cases 3, 8 */
 			err = __vma_adjust(vmi, mid, addr, next->vm_end,
-					next->vm_pgoff - pglen, NULL, next);
+					next->vm_pgoff - pglen, next);
 		res = next;
 	}
 
@@ -2187,11 +2172,15 @@ static void unmap_region(struct mm_struct *mm, struct maple_tree *mt,
 int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		unsigned long addr, int new_below)
 {
+	struct vma_prepare vp;
 	struct vm_area_struct *new;
 	int err;
 	validate_mm_mt(vma->vm_mm);
 
+	WARN_ON(vma->vm_start >= addr);
+	WARN_ON(vma->vm_end <= addr);
+
 	if (vma->vm_ops && vma->vm_ops->may_split) {
 		err = vma->vm_ops->may_split(vma, addr);
 		if (err)
@@ -2202,16 +2191,20 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (!new)
 		return -ENOMEM;
 
-	if (new_below)
+	err = -ENOMEM;
+	if (vma_iter_prealloc(vmi))
+		goto out_free_vma;
+
+	if (new_below) {
 		new->vm_end = addr;
-	else {
+	} else {
 		new->vm_start = addr;
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
 	err = vma_dup_policy(vma, new);
 	if (err)
-		goto out_free_vma;
+		goto out_free_vmi;
 
 	err = anon_vma_clone(new, vma);
 	if (err)
@@ -2223,33 +2216,32 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	if (new->vm_ops && new->vm_ops->open)
 		new->vm_ops->open(new);
 
-	if (new_below)
-		err = vma_adjust(vmi, vma, addr, vma->vm_end,
-			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
-			new);
-	else
-		err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
-			new);
+	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+	init_vma_prep(&vp, vma);
+	vp.insert = new;
+	vma_prepare(&vp);
 
-	/* Success. */
-	if (!err) {
-		if (new_below)
-			vma_next(vmi);
-		return 0;
+	if (new_below) {
+		vma->vm_start = addr;
+		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
+	} else {
+		vma->vm_end = addr;
 	}
 
-	/* Avoid vm accounting in close() operation */
-	new->vm_start = new->vm_end;
-	new->vm_pgoff = 0;
-	/* Clean everything up if vma_adjust failed. */
-	if (new->vm_ops && new->vm_ops->close)
-		new->vm_ops->close(new);
-	if (new->vm_file)
-		fput(new->vm_file);
-	unlink_anon_vmas(new);
- out_free_mpol:
+	/* vma_complete stores the new vma */
+	vma_complete(&vp, vmi, vma->vm_mm);
+
+	/* Success. */
+	if (new_below)
+		vma_next(vmi);
+	validate_mm_mt(vma->vm_mm);
+	return 0;
+
+out_free_mpol:
 	mpol_put(vma_policy(new));
- out_free_vma:
+out_free_vmi:
+	vma_iter_free(vmi);
+out_free_vma:
 	vm_area_free(new);
 	validate_mm_mt(vma->vm_mm);
 	return err;
@@ -1054,7 +1054,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 				vma->vm_file, extension_pgoff, vma_policy(vma),
 				vma->vm_userfaultfd_ctx, anon_vma_name(vma));
 		} else if (vma_adjust(&vmi, vma, vma->vm_start,
-					addr + new_len, vma->vm_pgoff, NULL)) {
+					addr + new_len, vma->vm_pgoff)) {
 			vma = NULL;
 		}
 
 		if (!vma) {
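
Taken together, the reworked __split_vma() now drives the split with the abstracted helpers instead of going through __vma_adjust(). Below is a condensed sketch of the success path, assembled from the hunks above; error handling, the may_split/vm_ops->open callbacks and the policy/anon_vma duplication are omitted, and vm_area_dup() comes from the surrounding function rather than the hunks shown here:

	struct vma_prepare vp;
	struct vm_area_struct *new;

	new = vm_area_dup(vma);		/* the copy becomes one half of the split */
	if (!new)
		return -ENOMEM;
	if (vma_iter_prealloc(vmi))	/* reserve maple tree nodes up front */
		return -ENOMEM;		/* the real code jumps to its cleanup labels */

	if (new_below) {
		new->vm_end = addr;
	} else {
		new->vm_start = addr;
		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
	}

	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
	init_vma_prep(&vp, vma);	/* describe the vma being modified */
	vp.insert = new;		/* ...and the new vma to insert */
	vma_prepare(&vp);		/* take the locks __vma_adjust() used to take */

	if (new_below) {
		vma->vm_start = addr;
		vma->vm_pgoff += (addr - new->vm_start) >> PAGE_SHIFT;
	} else {
		vma->vm_end = addr;
	}

	/* vma_complete() stores both halves in the maple tree and unlocks */
	vma_complete(&vp, vmi, vma->vm_mm);
	if (new_below)
		vma_next(vmi);
	return 0;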