Commit 7e027b14 authored by Linus Torvalds

vm: simplify unmap_vmas() calling convention

None of the callers want to pass in 'zap_details', and it doesn't even
make sense for the case of actually unmapping vma's.  So remove the
argument, and clean up the interface.
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 18b15fcd
...@@ -898,8 +898,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address, ...@@ -898,8 +898,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long address,
unsigned long size, struct zap_details *); unsigned long size, struct zap_details *);
void unmap_vmas(struct mmu_gather *tlb, void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *start_vma, unsigned long start_addr, struct vm_area_struct *start_vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted, unsigned long end_addr, unsigned long *nr_accounted);
struct zap_details *);
/** /**
* mm_walk - callbacks for walk_page_range * mm_walk - callbacks for walk_page_range
......
...@@ -1340,7 +1340,6 @@ static void unmap_single_vma(struct mmu_gather *tlb, ...@@ -1340,7 +1340,6 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 * @start_addr: virtual address at which to start unmapping
 * @end_addr: virtual address at which to end unmapping
 * @nr_accounted: Place number of unmapped pages in vm-accountable vma's here
 * @details: details of nonlinear truncation or shared cache invalidation
 *
 * Unmap all pages in the vma list.
 *
...@@ -1355,15 +1354,13 @@ static void unmap_single_vma(struct mmu_gather *tlb, ...@@ -1355,15 +1354,13 @@ static void unmap_single_vma(struct mmu_gather *tlb,
*/ */
void unmap_vmas(struct mmu_gather *tlb, void unmap_vmas(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start_addr, struct vm_area_struct *vma, unsigned long start_addr,
unsigned long end_addr, unsigned long *nr_accounted, unsigned long end_addr, unsigned long *nr_accounted)
struct zap_details *details)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
mmu_notifier_invalidate_range_start(mm, start_addr, end_addr); mmu_notifier_invalidate_range_start(mm, start_addr, end_addr);
for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next) for ( ; vma && vma->vm_start < end_addr; vma = vma->vm_next)
unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, unmap_single_vma(tlb, vma, start_addr, end_addr, nr_accounted, NULL);
details);
mmu_notifier_invalidate_range_end(mm, start_addr, end_addr); mmu_notifier_invalidate_range_end(mm, start_addr, end_addr);
} }
...@@ -1376,19 +1373,22 @@ void unmap_vmas(struct mmu_gather *tlb, ...@@ -1376,19 +1373,22 @@ void unmap_vmas(struct mmu_gather *tlb,
* *
* Caller must protect the VMA list * Caller must protect the VMA list
*/ */
void zap_page_range(struct vm_area_struct *vma, unsigned long address, void zap_page_range(struct vm_area_struct *vma, unsigned long start,
unsigned long size, struct zap_details *details) unsigned long size, struct zap_details *details)
{ {
struct mm_struct *mm = vma->vm_mm; struct mm_struct *mm = vma->vm_mm;
struct mmu_gather tlb; struct mmu_gather tlb;
unsigned long end = address + size; unsigned long end = start + size;
unsigned long nr_accounted = 0; unsigned long nr_accounted = 0;
lru_add_drain(); lru_add_drain();
tlb_gather_mmu(&tlb, mm, 0); tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm); update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, address, end, &nr_accounted, details); mmu_notifier_invalidate_range_start(mm, start, end);
tlb_finish_mmu(&tlb, address, end); for ( ; vma && vma->vm_start < end; vma = vma->vm_next)
unmap_single_vma(&tlb, vma, start, end, &nr_accounted, details);
mmu_notifier_invalidate_range_end(mm, start, end);
tlb_finish_mmu(&tlb, start, end);
} }
/** /**
......
...@@ -1917,7 +1917,7 @@ static void unmap_region(struct mm_struct *mm, ...@@ -1917,7 +1917,7 @@ static void unmap_region(struct mm_struct *mm,
lru_add_drain(); lru_add_drain();
tlb_gather_mmu(&tlb, mm, 0); tlb_gather_mmu(&tlb, mm, 0);
update_hiwater_rss(mm); update_hiwater_rss(mm);
unmap_vmas(&tlb, vma, start, end, &nr_accounted, NULL); unmap_vmas(&tlb, vma, start, end, &nr_accounted);
vm_unacct_memory(nr_accounted); vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS, free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
next ? next->vm_start : 0); next ? next->vm_start : 0);
...@@ -2305,7 +2305,7 @@ void exit_mmap(struct mm_struct *mm) ...@@ -2305,7 +2305,7 @@ void exit_mmap(struct mm_struct *mm)
tlb_gather_mmu(&tlb, mm, 1); tlb_gather_mmu(&tlb, mm, 1);
/* update_hiwater_rss(mm) here? but nobody should be looking */ /* update_hiwater_rss(mm) here? but nobody should be looking */
/* Use -1 here to ensure all VMAs in the mm are unmapped */ /* Use -1 here to ensure all VMAs in the mm are unmapped */
unmap_vmas(&tlb, vma, 0, -1, &nr_accounted, NULL); unmap_vmas(&tlb, vma, 0, -1, &nr_accounted);
vm_unacct_memory(nr_accounted); vm_unacct_memory(nr_accounted);
free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0); free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, 0);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment