Commit 19066e58 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

exec: use VMA iterator instead of linked list

Remove a use of the vm_next list by doing the initial lookup with the VMA
iterator and then using it to find the next entry.

Link: https://lkml.kernel.org/r/20220906194824.2110408-42-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 182ea1d7
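
In outline, the change seeds a VMA iterator at new_start so that one object serves both queries: the initial lookup that used to be find_vma(mm, new_start), and the walk to the following VMA that used to be vma->vm_next. A minimal sketch of the pattern (kernel context assumed, not compilable on its own; identifiers as in the diff below):

	VMA_ITERATOR(vmi, mm, new_start);	/* iterator positioned at new_start */
	struct vm_area_struct *next;

	if (vma != vma_next(&vmi))		/* first call: the VMA at or after new_start */
		return -EFAULT;
	/* ... the stack VMA is expanded and its page tables moved ... */
	next = vma_next(&vmi);			/* second call: the following VMA, or NULL */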
@@ -683,6 +683,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	unsigned long length = old_end - old_start;
 	unsigned long new_start = old_start - shift;
 	unsigned long new_end = old_end - shift;
+	VMA_ITERATOR(vmi, mm, new_start);
+	struct vm_area_struct *next;
 	struct mmu_gather tlb;
 
 	BUG_ON(new_start > new_end);
@@ -691,7 +693,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 	 * ensure there are no vmas between where we want to go
 	 * and where we are
 	 */
-	if (vma != find_vma(mm, new_start))
+	if (vma != vma_next(&vmi))
 		return -EFAULT;
 
 	/*
@@ -710,12 +712,13 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 
 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
+	next = vma_next(&vmi);
 	if (new_end > old_start) {
 		/*
 		 * when the old and new regions overlap clear from new_end.
 		 */
 		free_pgd_range(&tlb, new_end, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+			next ? next->vm_start : USER_PGTABLES_CEILING);
 	} else {
 		/*
 		 * otherwise, clean from old_start; this is done to not touch
@@ -724,7 +727,7 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
 		 * for the others its just a little faster.
 		 */
 		free_pgd_range(&tlb, old_start, old_end, new_end,
-			vma->vm_next ? vma->vm_next->vm_start : USER_PGTABLES_CEILING);
+			next ? next->vm_start : USER_PGTABLES_CEILING);
 	}
 	tlb_finish_mmu(&tlb);
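
A usage aside: vma_next() advances the iterator past the entry it returns, so after the vma != vma_next(&vmi) check the iterator already sits beyond vma, and the later call yields vma's successor. Fetching that successor once into next before the branch lets both free_pgd_range() calls share the same ceiling expression, next ? next->vm_start : USER_PGTABLES_CEILING.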