Commit e0abfbb6 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: rename vma_pgoff_address back to vma_address

With all callers converted, we can use the nice shorter name.  Take this
opportunity to reorder the arguments to the logical order (larger object
first).

Link: https://lkml.kernel.org/r/20240328225831.1765286-4-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 412ad5fb
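
To illustrate the calling-convention change, here is a minimal before/after sketch of a call site. The variable names are placeholders rather than a quote of any specific caller; the -EFAULT check mirrors the one used by the callers in the diff below.

	unsigned long addr;

	/* Before this commit: page offset and count first, VMA last. */
	addr = vma_pgoff_address(pgoff, 1, vma);

	/* After this commit: the larger object (the VMA) comes first. */
	addr = vma_address(vma, pgoff, 1);

	/* Either call returns -EFAULT if no page in the range is mapped by this VMA. */
	if (addr == -EFAULT)
		return 0;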
@@ -805,17 +805,16 @@ void mlock_drain_remote(int cpu);
 extern pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma);
 
 /**
- * vma_pgoff_address - Find the virtual address a page range is mapped at
+ * vma_address - Find the virtual address a page range is mapped at
+ * @vma: The vma which maps this object.
  * @pgoff: The page offset within its object.
  * @nr_pages: The number of pages to consider.
- * @vma: The vma which maps this object.
  *
  * If any page in this range is mapped by this VMA, return the first address
  * where any of these pages appear. Otherwise, return -EFAULT.
  */
-static inline unsigned long
-vma_pgoff_address(pgoff_t pgoff, unsigned long nr_pages,
-		struct vm_area_struct *vma)
+static inline unsigned long vma_address(struct vm_area_struct *vma,
+		pgoff_t pgoff, unsigned long nr_pages)
 {
 	unsigned long address;
@@ -455,7 +455,7 @@ static void __add_to_kill(struct task_struct *tsk, struct page *p,
 	tk->addr = ksm_addr ? ksm_addr : page_address_in_vma(p, vma);
 	if (is_zone_device_page(p)) {
 		if (fsdax_pgoff != FSDAX_INVALID_PGOFF)
-			tk->addr = vma_pgoff_address(fsdax_pgoff, 1, vma);
+			tk->addr = vma_address(vma, fsdax_pgoff, 1);
 		tk->size_shift = dev_pagemap_mapping_shift(vma, tk->addr);
 	} else
 		tk->size_shift = page_shift(compound_head(p));
@@ -334,7 +334,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 		.flags = PVMW_SYNC,
 	};
 
-	pvmw.address = vma_pgoff_address(pgoff, 1, vma);
+	pvmw.address = vma_address(vma, pgoff, 1);
 	if (pvmw.address == -EFAULT)
 		return 0;
 	if (!page_vma_mapped_walk(&pvmw))
@@ -794,7 +794,7 @@ unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
 	/* The !page__anon_vma above handles KSM folios */
 	pgoff = folio->index + folio_page_idx(folio, page);
-	return vma_pgoff_address(pgoff, 1, vma);
+	return vma_address(vma, pgoff, 1);
 }
 
 /*
@@ -1132,7 +1132,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 	if (invalid_mkclean_vma(vma, NULL))
 		return 0;
 
-	pvmw.address = vma_pgoff_address(pgoff, nr_pages, vma);
+	pvmw.address = vma_address(vma, pgoff, nr_pages);
 	VM_BUG_ON_VMA(pvmw.address == -EFAULT, vma);
 
 	return page_vma_mkclean_one(&pvmw);
@@ -2592,8 +2592,8 @@ static void rmap_walk_anon(struct folio *folio,
 	anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 			pgoff_start, pgoff_end) {
 		struct vm_area_struct *vma = avc->vma;
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();
@@ -2654,8 +2654,8 @@ static void rmap_walk_file(struct folio *folio,
 lookup:
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
-		unsigned long address = vma_pgoff_address(pgoff_start,
-				folio_nr_pages(folio), vma);
+		unsigned long address = vma_address(vma, pgoff_start,
+				folio_nr_pages(folio));
 
 		VM_BUG_ON_VMA(address == -EFAULT, vma);
 		cond_resched();