Commit e7b6b990 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

s390: remove vma linked list walks

Use the VMA iterator instead.
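
Each hunk below applies the same conversion: a walk of the old
mm->mmap / vm_next linked list becomes a walk driven by the
maple-tree-backed VMA iterator. A minimal sketch of that pattern in
kernel C follows; the function name and the per-VMA body are
illustrative only, not part of this patch:

	#include <linux/mm.h>

	/* Hypothetical walker showing the before/after shape. */
	static void example_vma_walk(struct mm_struct *mm)
	{
		struct vm_area_struct *vma;
		VMA_ITERATOR(vmi, mm, 0);	/* start iterating at address 0 */

		mmap_read_lock(mm);		/* iteration requires the mmap lock */
		/* Before: for (vma = mm->mmap; vma; vma = vma->vm_next) */
		for_each_vma(vmi, vma) {
			/* ... per-VMA work ... */
		}
		mmap_read_unlock(mm);
	}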

Link: https://lkml.kernel.org/r/20220906194824.2110408-35-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 405e6691
@@ -69,10 +69,11 @@ static struct page *find_timens_vvar_page(struct vm_area_struct *vma)
 int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
 {
 	struct mm_struct *mm = task->mm;
+	VMA_ITERATOR(vmi, mm, 0);
 	struct vm_area_struct *vma;
 
 	mmap_read_lock(mm);
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
 		unsigned long size = vma->vm_end - vma->vm_start;
 
 		if (!vma_is_special_mapping(vma, &vvar_mapping))
@@ -2515,8 +2515,9 @@ static const struct mm_walk_ops thp_split_walk_ops = {
 static inline void thp_split_mm(struct mm_struct *mm)
 {
 	struct vm_area_struct *vma;
+	VMA_ITERATOR(vmi, mm, 0);
 
-	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
 		vma->vm_flags &= ~VM_HUGEPAGE;
 		vma->vm_flags |= VM_NOHUGEPAGE;
 		walk_page_vma(vma, &thp_split_walk_ops, NULL);
@@ -2584,8 +2585,9 @@ int gmap_mark_unmergeable(void)
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	int ret;
+	VMA_ITERATOR(vmi, mm, 0);
 
-	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+	for_each_vma(vmi, vma) {
 		ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
 				  MADV_UNMERGEABLE, &vma->vm_flags);
 		if (ret)
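
For reference, for_each_vma() at this point in the series is a thin
wrapper around the VMA iterator, roughly (quoted from memory of
include/linux/mm.h, so treat as approximate):

	#define for_each_vma(__vmi, __vma) \
		while (((__vma) = vma_next(&(__vmi))) != NULL)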