mm/util: Add folio_mapping() and folio_file_mapping()

These are the folio equivalents of page_mapping() and page_file_mapping().
Add an out-of-line page_mapping() wrapper around folio_mapping()
in order to prevent the page_folio() call from bloating every caller
of page_mapping().  Adjust page_file_mapping() and page_mapping_file()
to use folios internally.  Rename __page_file_mapping() to
swapcache_mapping() and change it to take a folio.
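
A caller-side illustration (not part of this patch; the nfs_demo_* names
are made up) of what a page-based user looks like before and after a
conversion to the folio API:

   #include <linux/pagemap.h>

   /* Page-based caller: page_file_mapping() hides a page_folio() call. */
   static struct address_space *nfs_demo_page_mapping(struct page *page)
   {
           return page_file_mapping(page);
   }

   /* Folio-based caller: the page_folio()/compound_head() step is gone,
    * which is the instruction sequence discussed below. */
   static struct address_space *nfs_demo_folio_mapping(struct folio *folio)
   {
           return folio_file_mapping(folio);
   }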

This ends up saving 122 bytes of text overall.  folio_mapping() is
45 bytes shorter than page_mapping() was, but the new page_mapping()
wrapper is 30 bytes.  The major reduction comes from dozens of NFS
functions (which call page_file_mapping()) each shrinking by a few bytes.
Most of these savings appear to result from a slight change in gcc's
register allocation decisions, which allows:

   48 8b 56 08         mov    0x8(%rsi),%rdx
   48 8d 42 ff         lea    -0x1(%rdx),%rax
   83 e2 01            and    $0x1,%edx
   48 0f 44 c6         cmove  %rsi,%rax

to become:

   48 8b 46 08         mov    0x8(%rsi),%rax
   48 8d 78 ff         lea    -0x1(%rax),%rdi
   a8 01               test   $0x1,%al
   48 0f 44 fe         cmove  %rsi,%rdi

for a reduction of a single byte.  Once the NFS client is converted to
use folios, this entire sequence will disappear.

Also add folio_mapping() documentation.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jeff Layton <jlayton@kernel.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: William Kucharski <william.kucharski@oracle.com>
Reviewed-by: David Howells <dhowells@redhat.com>
--- a/Documentation/core-api/mm-api.rst
+++ b/Documentation/core-api/mm-api.rst
@@ -101,3 +101,5 @@ More Memory Management Functions
    :internal:
 .. kernel-doc:: include/linux/page_ref.h
 .. kernel-doc:: include/linux/mmzone.h
+.. kernel-doc:: mm/util.c
+   :functions: folio_mapping
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1755,19 +1755,6 @@ void page_address_init(void);
 
 extern void *page_rmapping(struct page *page);
 extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
-
-extern struct address_space *__page_file_mapping(struct page *);
-
-static inline
-struct address_space *page_file_mapping(struct page *page)
-{
-	if (unlikely(PageSwapCache(page)))
-		return __page_file_mapping(page);
-
-	return page->mapping;
-}
-
 extern pgoff_t __page_file_index(struct page *page);
 
 /*
@@ -1782,7 +1769,6 @@ static inline pgoff_t page_index(struct page *page)
 }
 
 bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
 
 /*
  * Return true only if the page has been allocated with
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -162,14 +162,45 @@ static inline void filemap_nr_thps_dec(struct address_space *mapping)
 
 void release_pages(struct page **pages, int nr);
 
+struct address_space *page_mapping(struct page *);
+struct address_space *folio_mapping(struct folio *);
+struct address_space *swapcache_mapping(struct folio *);
+
+/**
+ * folio_file_mapping - Find the mapping this folio belongs to.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to.  Folios in the swap cache return the mapping of the
+ * swap file or swap device where the data is stored.  This is different
+ * from the mapping returned by folio_mapping().  The only reason to
+ * use it is if, like NFS, you return 0 from ->activate_swapfile.
+ *
+ * Do not call this for folios which aren't in the page cache or swap cache.
+ */
+static inline struct address_space *folio_file_mapping(struct folio *folio)
+{
+	if (unlikely(folio_test_swapcache(folio)))
+		return swapcache_mapping(folio);
+
+	return folio->mapping;
+}
+
+static inline struct address_space *page_file_mapping(struct page *page)
+{
+	return folio_file_mapping(page_folio(page));
+}
+
 /*
  * For file cache pages, return the address_space, otherwise return NULL
  */
 static inline struct address_space *page_mapping_file(struct page *page)
 {
-	if (unlikely(PageSwapCache(page)))
+	struct folio *folio = page_folio(page);
+
+	if (unlikely(folio_test_swapcache(folio)))
 		return NULL;
-	return page_mapping(page);
+	return folio_mapping(folio);
 }
 
 static inline bool page_cache_add_speculative(struct page *page, int count)
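
Not part of the patch, but a sketch of why both lookups exist: for a folio
in the swap cache, folio_mapping() and folio_file_mapping() return two
different address_spaces, which is the swap-over-NFS case the kernel-doc
above refers to.  demo_swapcache_mappings() is a made-up name:

   #include <linux/pagemap.h>

   static void demo_swapcache_mappings(struct folio *folio)
   {
           struct address_space *swap_as, *file_as;

           if (!folio_test_swapcache(folio))
                   return;

           /* The swap address_space the folio is indexed under. */
           swap_as = folio_mapping(folio);
           /* The mapping of the swap file or device holding the data;
            * this is what a filesystem such as NFS wants for its own inode. */
           file_as = folio_file_mapping(folio);

           pr_debug("swap mapping %p, backing mapping %p\n", swap_as, file_as);
   }
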
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -320,6 +320,12 @@ struct vma_swap_readahead {
 #endif
 };
 
+static inline swp_entry_t folio_swap_entry(struct folio *folio)
+{
+	swp_entry_t entry = { .val = page_private(&folio->page) };
+
+	return entry;
+}
+
 /* linux/mm/workingset.c */
 void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
 void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -46,7 +46,7 @@ mmu-$(CONFIG_MMU)	+= process_vm_access.o
 endif
 
 obj-y			:= filemap.o mempool.o oom_kill.o fadvise.o \
-			   maccess.o page-writeback.o \
+			   maccess.o page-writeback.o folio-compat.o \
 			   readahead.o swap.o truncate.o vmscan.o shmem.o \
 			   util.o mmzone.o vmstat.o backing-dev.o \
 			   mm_init.o percpu.o slab_common.o \
--- /dev/null
+++ b/mm/folio-compat.c
+/*
+ * Compatibility functions which bloat the callers too much to make inline.
+ * All of the callers of these functions should be converted to use folios
+ * eventually.
+ */
+
+#include <linux/pagemap.h>
+
+struct address_space *page_mapping(struct page *page)
+{
+	return folio_mapping(page_folio(page));
+}
+EXPORT_SYMBOL(page_mapping);
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3534,13 +3534,13 @@ struct swap_info_struct *page_swap_info(struct page *page)
 }
 
 /*
- * out-of-line __page_file_ methods to avoid include hell.
+ * out-of-line methods to avoid include hell.
  */
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *swapcache_mapping(struct folio *folio)
 {
-	return page_swap_info(page)->swap_file->f_mapping;
+	return page_swap_info(&folio->page)->swap_file->f_mapping;
 }
-EXPORT_SYMBOL_GPL(__page_file_mapping);
+EXPORT_SYMBOL_GPL(swapcache_mapping);
 
 pgoff_t __page_file_index(struct page *page)
 {
--- a/mm/util.c
+++ b/mm/util.c
@@ -705,30 +705,36 @@ struct anon_vma *page_anon_vma(struct page *page)
 	return __page_rmapping(page);
 }
 
-struct address_space *page_mapping(struct page *page)
+/**
+ * folio_mapping - Find the mapping where this folio is stored.
+ * @folio: The folio.
+ *
+ * For folios which are in the page cache, return the mapping that this
+ * page belongs to.  Folios in the swap cache return the swap mapping
+ * this page is stored in (which is different from the mapping for the
+ * swap file or swap device where the data is stored).
+ *
+ * You can call this for folios which aren't in the swap cache or page
+ * cache and it will return NULL.
+ */
+struct address_space *folio_mapping(struct folio *folio)
 {
 	struct address_space *mapping;
 
-	page = compound_head(page);
-
 	/* This happens if someone calls flush_dcache_page on slab page */
-	if (unlikely(PageSlab(page)))
+	if (unlikely(folio_test_slab(folio)))
 		return NULL;
 
-	if (unlikely(PageSwapCache(page))) {
-		swp_entry_t entry;
-
-		entry.val = page_private(page);
-		return swap_address_space(entry);
-	}
+	if (unlikely(folio_test_swapcache(folio)))
+		return swap_address_space(folio_swap_entry(folio));
 
-	mapping = page->mapping;
+	mapping = folio->mapping;
 	if ((unsigned long)mapping & PAGE_MAPPING_ANON)
 		return NULL;
 
 	return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
 }
-EXPORT_SYMBOL(page_mapping);
+EXPORT_SYMBOL(folio_mapping);
 
 /* Slow path of page_mapcount() for compound pages */
 int __page_mapcount(struct page *page)
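
As a final usage sketch (again illustrative, not from the patch), a caller
honouring the NULL-return contract the new kernel-doc describes;
demo_writeback_mapping() is a made-up name:

   #include <linux/pagemap.h>

   /* Return the address_space to write back, or NULL when the folio has
    * no file mapping to write to. */
   static struct address_space *demo_writeback_mapping(struct folio *folio)
   {
           struct address_space *mapping = folio_mapping(folio);

           /* folio_mapping() returns NULL for anonymous and slab folios,
            * and the swap address_space for folios in the swap cache. */
           if (!mapping || folio_test_swapcache(folio))
                   return NULL;

           return mapping;
   }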