Commit b373037f authored by Liam R. Howlett, committed by Andrew Morton

mm: add vma iterator to vma_adjust() arguments

Change the vma_adjust() function definition to accept the vma iterator and
pass it through to __vma_adjust().

Update fs/exec to use the new vma_adjust() function parameters.

Update mm/mremap to use the new vma_adjust() function parameters.

Revert the __split_vma() calls back from __vma_adjust() to vma_adjust()
and pass through the vma iterator.

Link: https://lkml.kernel.org/r/20230120162650.984577-37-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0fd5a9e2
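
For context, a minimal, hypothetical sketch (not part of the commit) of the new calling convention: the VMA_ITERATOR that the old inline vma_adjust() declared internally is now supplied by the caller, so its position can be reused across successive operations, as the fs/exec.c hunk below does with vma_prev(). The helper example_shrink() is invented for illustration only.

/* Hypothetical caller illustrating the new vma_adjust() signature. */
static int example_shrink(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end)
{
	VMA_ITERATOR(vmi, vma->vm_mm, start);	/* iterator is now owned by the caller */

	/* Old form was: vma_adjust(vma, start, end, vma->vm_pgoff, NULL) */
	return vma_adjust(&vmi, vma, start, end, vma->vm_pgoff, NULL);
}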
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -699,7 +699,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	/*
 	 * cover the whole range: [new_start, old_end)
 	 */
-	if (vma_adjust(vma, new_start, old_end, vma->vm_pgoff, NULL))
+	if (vma_adjust(&vmi, vma, new_start, old_end, vma->vm_pgoff, NULL))
 		return -ENOMEM;
 
 	/*
@@ -731,12 +731,9 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	}
 	tlb_finish_mmu(&tlb);
 
-	/*
-	 * Shrink the vma to just the new range. Always succeeds.
-	 */
-	vma_adjust(vma, new_start, new_end, vma->vm_pgoff, NULL);
-
-	return 0;
+	vma_prev(&vmi);
+	/* Shrink the vma to just the new range */
+	return vma_adjust(&vmi, vma, new_start, new_end, vma->vm_pgoff, NULL);
 }
 
 /*
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2834,12 +2834,11 @@ extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admi
 extern int __vma_adjust(struct vma_iterator *vmi, struct vm_area_struct *vma, unsigned long start,
 	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert,
 	struct vm_area_struct *expand);
-static inline int vma_adjust(struct vm_area_struct *vma, unsigned long start,
-	unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
+static inline int vma_adjust(struct vma_iterator *vmi,
+	struct vm_area_struct *vma, unsigned long start, unsigned long end,
+	pgoff_t pgoff, struct vm_area_struct *insert)
 {
-	VMA_ITERATOR(vmi, vma->vm_mm, start);
-
-	return __vma_adjust(&vmi, vma, start, end, pgoff, insert, NULL);
+	return __vma_adjust(vmi, vma, start, end, pgoff, insert, NULL);
 }
 extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
 	struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2210,12 +2210,12 @@ int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 		new->vm_ops->open(new);
 
 	if (new_below)
-		err = __vma_adjust(vmi, vma, addr, vma->vm_end,
+		err = vma_adjust(vmi, vma, addr, vma->vm_end,
 			vma->vm_pgoff + ((addr - new->vm_start) >> PAGE_SHIFT),
-			new, NULL);
+			new);
 	else
-		err = __vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
-			new, NULL);
+		err = vma_adjust(vmi, vma, vma->vm_start, addr, vma->vm_pgoff,
+			new);
 
 	/* Success. */
 	if (!err) {
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1047,8 +1047,8 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
 				extension_end, vma->vm_flags, vma->anon_vma,
 				vma->vm_file, extension_pgoff, vma_policy(vma),
 				vma->vm_userfaultfd_ctx, anon_vma_name(vma));
-		} else if (vma_adjust(vma, vma->vm_start, addr + new_len,
-			   vma->vm_pgoff, NULL)) {
+		} else if (vma_adjust(&vmi, vma, vma->vm_start,
+			   addr + new_len, vma->vm_pgoff, NULL)) {
 			vma = NULL;
 		}
 		if (!vma) {