Commit a3884621 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

x86: remove vma linked list walks

Use the VMA iterator instead.

Link: https://lkml.kernel.org/r/20220906194824.2110408-36-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent e7b6b990
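
For context on the conversion pattern (this sketch is editorial, not part of the patch): the snippet below contrasts the old mm->mmap/vm_next linked-list walk with the maple-tree-backed VMA iterator that the hunks below switch to. It assumes the VMA_ITERATOR()/for_each_vma() helpers from <linux/mm.h> introduced earlier in this series; do_something() is a hypothetical per-VMA callback used only for illustration.

	/* Old style: follow the vm_next linked list starting at mm->mmap. */
	struct vm_area_struct *vma;

	mmap_read_lock(mm);
	for (vma = mm->mmap; vma; vma = vma->vm_next)
		do_something(vma);	/* hypothetical per-VMA work */
	mmap_read_unlock(mm);

	/* New style: walk the VMAs via the maple-tree-backed iterator. */
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, mm, 0);	/* start the walk at address 0 */

	mmap_read_lock(mm);
	for_each_vma(vmi, vma)
		do_something(vma);	/* same hypothetical work */
	mmap_read_unlock(mm);

Either way the mmap lock must be held across the walk, as the hunks below do; the iterator simply replaces the pointer chase with a maple tree lookup.
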
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
--- a/arch/x86/entry/vdso/vma.c
+++ b/arch/x86/entry/vdso/vma.c
@@ -127,17 +127,17 @@ int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 {
 	struct mm_struct *mm = task->mm;
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
 
 	mmap_read_lock(mm);
-
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
 		unsigned long size = vma->vm_end - vma->vm_start;
 
 		if (vma_is_special_mapping(vma, &vvar_mapping))
 			zap_page_range(vma, vma->vm_start, size);
 	}
 	mmap_read_unlock(mm);
 
 	return 0;
 }
 #else
@@ -354,6 +354,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 {
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
 
 	mmap_write_lock(mm);
 	/*
@@ -363,7 +364,7 @@ int map_vdso_once(const struct vdso_image *image, unsigned long addr)
 	 * We could search vma near context.vdso, but it's a slowpath,
 	 * so let's explicitly check all VMAs to be completely sure.
 	 */
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
 		if (vma_is_special_mapping(vma, &vdso_mapping) ||
 		    vma_is_special_mapping(vma, &vvar_mapping)) {
 			mmap_write_unlock(mm);