Commit 68540502 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm/khugepaged: stop using vma linked list

Use vma iterator & find_vma() instead of vma linked list.

Link: https://lkml.kernel.org/r/20220906194824.2110408-53-Liam.Howlett@oracle.com
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Liam R. Howlett <Liam.Howlett@oracle.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Tested-by: Yu Zhao <yuzhao@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: David Howells <dhowells@redhat.com>
Cc: SeongJae Park <sj@kernel.org>
Cc: Sven Schnelle <svens@linux.ibm.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent c4d1a92d
...@@ -2341,11 +2341,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma, ...@@ -2341,11 +2341,11 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
split_huge_pmd_if_needed(vma, end); split_huge_pmd_if_needed(vma, end);
/* /*
* If we're also updating the vma->vm_next->vm_start, * If we're also updating the next vma vm_start,
* check if we need to split it. * check if we need to split it.
*/ */
if (adjust_next > 0) { if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next; struct vm_area_struct *next = find_vma(vma->vm_mm, vma->vm_end);
unsigned long nstart = next->vm_start; unsigned long nstart = next->vm_start;
nstart += adjust_next; nstart += adjust_next;
split_huge_pmd_if_needed(next, nstart); split_huge_pmd_if_needed(next, nstart);
......
...@@ -2050,6 +2050,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, ...@@ -2050,6 +2050,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
__releases(&khugepaged_mm_lock) __releases(&khugepaged_mm_lock)
__acquires(&khugepaged_mm_lock) __acquires(&khugepaged_mm_lock)
{ {
struct vma_iterator vmi;
struct mm_slot *mm_slot; struct mm_slot *mm_slot;
struct mm_struct *mm; struct mm_struct *mm;
struct vm_area_struct *vma; struct vm_area_struct *vma;
...@@ -2078,11 +2079,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result, ...@@ -2078,11 +2079,13 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
vma = NULL; vma = NULL;
if (unlikely(!mmap_read_trylock(mm))) if (unlikely(!mmap_read_trylock(mm)))
goto breakouterloop_mmap_lock; goto breakouterloop_mmap_lock;
if (likely(!hpage_collapse_test_exit(mm)))
vma = find_vma(mm, khugepaged_scan.address);
progress++; progress++;
for (; vma; vma = vma->vm_next) { if (unlikely(hpage_collapse_test_exit(mm)))
goto breakouterloop;
vma_iter_init(&vmi, mm, khugepaged_scan.address);
for_each_vma(vmi, vma) {
unsigned long hstart, hend; unsigned long hstart, hend;
cond_resched(); cond_resched();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment