Commit 94f59ea5 authored by Liam R. Howlett, committed by Andrew Morton

mm: clean up unmap_region() argument list

With the only caller to unmap_region() being the error path of
mmap_region(), the argument list can be significantly reduced.

Link: https://lkml.kernel.org/r/20240830040101.822209-14-Liam.Howlett@oracle.com
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
Reviewed-by: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Bert Karwatzki <spasswolf@web.de>
Cc: Jeff Xu <jeffxu@chromium.org>
Cc: Jiri Olsa <olsajiri@gmail.com>
Cc: Kees Cook <kees@kernel.org>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Mark Brown <broonie@kernel.org>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: "Paul E. McKenney" <paulmck@kernel.org>
Cc: Paul Moore <paul@paul-moore.com>
Cc: Sidhartha Kumar <sidhartha.kumar@oracle.com>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 9c3ebeda
@@ -1615,8 +1615,7 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
 		vma_iter_set(&vmi, vma->vm_end);
 		/* Undo any partial mapping done by a device driver. */
-		unmap_region(mm, &vmi.mas, vma, prev, next, vma->vm_start,
-			     vma->vm_end, vma->vm_end, true);
+		unmap_region(&vmi.mas, vma, prev, next);
 	}
 	if (writable_file_mapping)
 		mapping_unmap_writable(file->f_mapping);
@@ -155,22 +155,21 @@ void remove_vma(struct vm_area_struct *vma, bool unreachable)
  *
  * Called with the mm semaphore held.
  */
-void unmap_region(struct mm_struct *mm, struct ma_state *mas,
-		struct vm_area_struct *vma, struct vm_area_struct *prev,
-		struct vm_area_struct *next, unsigned long start,
-		unsigned long end, unsigned long tree_end, bool mm_wr_locked)
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct vm_area_struct *next)
 {
+	struct mm_struct *mm = vma->vm_mm;
 	struct mmu_gather tlb;
-	unsigned long mt_start = mas->index;

 	lru_add_drain();
 	tlb_gather_mmu(&tlb, mm);
 	update_hiwater_rss(mm);
-	unmap_vmas(&tlb, mas, vma, start, end, tree_end, mm_wr_locked);
-	mas_set(mas, mt_start);
+	unmap_vmas(&tlb, mas, vma, vma->vm_start, vma->vm_end, vma->vm_end,
+		   /* mm_wr_locked = */ true);
+	mas_set(mas, vma->vm_end);
 	free_pgtables(&tlb, mas, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
 		      next ? next->vm_start : USER_PGTABLES_CEILING,
-		      mm_wr_locked);
+		      /* mm_wr_locked = */ true);
 	tlb_finish_mmu(&tlb);
 }
@@ -149,10 +149,8 @@ int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
 void remove_vma(struct vm_area_struct *vma, bool unreachable);
-void unmap_region(struct mm_struct *mm, struct ma_state *mas,
-		struct vm_area_struct *vma, struct vm_area_struct *prev,
-		struct vm_area_struct *next, unsigned long start,
-		unsigned long end, unsigned long tree_end, bool mm_wr_locked);
+void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
+		struct vm_area_struct *prev, struct vm_area_struct *next);

 /* Required by mmap_region(). */
 bool
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment