Commit b84fd283 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: make page_mapped() take a const argument

None of the functions called by page_mapped() modify the page or folio, so
mark them all as const.

Link: https://lkml.kernel.org/r/20240326171045.410737-7-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 2ace5a67
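
An illustrative sketch (not part of this patch) of what the constification buys: a read-only helper that only holds a const struct page * can now call page_mapped() without casting the qualifier away. The helper name dump_page_state() is hypothetical.

/* Hypothetical caller, for illustration only. */
static void dump_page_state(const struct page *page)
{
	/* page_mapped() now accepts a const pointer, so no cast is needed. */
	pr_info("page %p is %smapped\n", page, page_mapped(page) ? "" : "not ");
}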
@@ -1200,7 +1200,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
  * debugging purposes - it does not include PTE-mapped sub-pages; look
  * at folio_mapcount() or page_mapcount() instead.
  */
-static inline int folio_entire_mapcount(struct folio *folio)
+static inline int folio_entire_mapcount(const struct folio *folio)
 {
 	VM_BUG_ON_FOLIO(!folio_test_large(folio), folio);
 	return atomic_read(&folio->_entire_mapcount) + 1;
@@ -1240,7 +1240,7 @@ static inline int page_mapcount(struct page *page)
 	return mapcount;
 }
 
-int folio_total_mapcount(struct folio *folio);
+int folio_total_mapcount(const struct folio *folio);
 
 /**
  * folio_mapcount() - Calculate the number of mappings of this folio.
@@ -1253,14 +1253,14 @@ int folio_total_mapcount(struct folio *folio);
  *
  * Return: The number of times this folio is mapped.
  */
-static inline int folio_mapcount(struct folio *folio)
+static inline int folio_mapcount(const struct folio *folio)
 {
 	if (likely(!folio_test_large(folio)))
 		return atomic_read(&folio->_mapcount) + 1;
 	return folio_total_mapcount(folio);
 }
 
-static inline bool folio_large_is_mapped(struct folio *folio)
+static inline bool folio_large_is_mapped(const struct folio *folio)
 {
 	/*
 	 * Reading _entire_mapcount below could be omitted if hugetlb
@@ -1288,7 +1288,7 @@ static inline bool folio_mapped(struct folio *folio)
  * For compound page it returns true if any sub-page of compound page is mapped,
  * even if this particular sub-page is not itself mapped by any PTE or PMD.
  */
-static inline bool page_mapped(struct page *page)
+static inline bool page_mapped(const struct page *page)
 {
 	if (likely(!PageCompound(page)))
 		return atomic_read(&page->_mapcount) >= 0;
@@ -2070,7 +2070,7 @@ static inline void set_page_links(struct page *page, enum zone_type zone,
  *
  * Return: A positive power of two.
  */
-static inline long folio_nr_pages(struct folio *folio)
+static inline long folio_nr_pages(const struct folio *folio)
 {
 	if (!folio_test_large(folio))
 		return 1;
...
@@ -71,7 +71,7 @@ void page_writeback_init(void);
  * How many individual pages have an elevated _mapcount. Excludes
  * the folio's entire_mapcount.
  */
-static inline int folio_nr_pages_mapped(struct folio *folio)
+static inline int folio_nr_pages_mapped(const struct folio *folio)
 {
 	return atomic_read(&folio->_nr_pages_mapped) & FOLIO_PAGES_MAPPED;
 }
@@ -81,7 +81,8 @@ static inline int folio_nr_pages_mapped(struct folio *folio)
  * folio. We cannot rely on folio->swap as there is no guarantee that it has
  * been initialized. Used for calling arch_swap_restore()
  */
-static inline swp_entry_t folio_swap(swp_entry_t entry, struct folio *folio)
+static inline swp_entry_t folio_swap(swp_entry_t entry,
+		const struct folio *folio)
 {
 	swp_entry_t swap = {
 		.val = ALIGN_DOWN(entry.val, folio_nr_pages(folio)),
@@ -90,7 +91,7 @@ static inline swp_entry_t folio_swap(swp_entry_t entry, struct folio *folio)
 	return swap;
 }
 
-static inline void *folio_raw_mapping(struct folio *folio)
+static inline void *folio_raw_mapping(const struct folio *folio)
 {
 	unsigned long mapping = (unsigned long)folio->mapping;
...
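
A worked illustration of the ALIGN_DOWN() in folio_swap() above (values invented for the example): since ALIGN_DOWN() is used, the folio's swap entries are taken to be contiguous and aligned to the folio size, so for a 4-page folio, where folio_nr_pages() returns 4, an entry with entry.val == 0x1006 aligns down to 0x1004, the swap entry of the folio's first page.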
@@ -1134,7 +1134,7 @@ int pfn_mkclean_range(unsigned long pfn, unsigned long nr_pages, pgoff_t pgoff,
 	return page_vma_mkclean_one(&pvmw);
 }
 
-int folio_total_mapcount(struct folio *folio)
+int folio_total_mapcount(const struct folio *folio)
 {
 	int mapcount = folio_entire_mapcount(folio);
 	int nr_pages;
...