Commit a4575c41 authored by Matthew Wilcox (Oracle), committed by Andrew Morton

mm: convert swap_cluster_readahead and swap_vma_readahead to return a folio

shmem_swapin_cluster() immediately converts the page back to a folio, and
swapin_readahead() may as well call folio_file_page() once instead of
having each function call it.
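
For illustration, a minimal userspace sketch of the calling convention this patch moves to. Every type and helper below is a simplified stand-in, not the kernel implementation: the readahead helpers hand back the folio itself (possibly NULL), and the one caller that still needs a precise subpage does the folio_file_page() conversion exactly once, after a NULL check (the deref fix noted in the bracketed note below).

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified stand-ins for the kernel types; illustrative only. */
    struct page { int index; };
    struct folio { struct page pages[4]; };   /* pretend order-2 folio */

    /* Stand-in for folio_file_page(): pick the subpage for an offset. */
    static struct page *folio_file_page(struct folio *folio, size_t offset)
    {
            return &folio->pages[offset % 4];
    }

    /* After this patch the readahead helpers return the folio itself
     * instead of converting to a page before returning. */
    static struct folio *swap_cluster_readahead(size_t offset)
    {
            static struct folio f;
            return &f;                        /* may be NULL on failure */
    }

    /* ...so the single page-based caller converts once, after
     * checking for NULL. */
    static struct page *swapin_readahead(size_t offset)
    {
            struct folio *folio = swap_cluster_readahead(offset);

            if (!folio)
                    return NULL;
            return folio_file_page(folio, offset);
    }

    int main(void)
    {
            printf("subpage at %p\n", (void *)swapin_readahead(5));
            return 0;
    }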

[willy@infradead.org: avoid NULL pointer deref]
  Link: https://lkml.kernel.org/r/ZYI7OcVlM1voKfBl@casper.infradead.org
Link: https://lkml.kernel.org/r/20231213215842.671461-14-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 6e03492e
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1570,15 +1570,13 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = shmem_get_pgoff_policy(info, index, 0, &ilx);
-	page = swap_cluster_readahead(swap, gfp, mpol, ilx);
+	folio = swap_cluster_readahead(swap, gfp, mpol, ilx);
 	mpol_cond_put(mpol);
 
-	if (!page)
-		return NULL;
-	return page_folio(page);
+	return folio;
 }
 
 /*
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -52,8 +52,8 @@ struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_flags,
 		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
 		bool skip_if_exists);
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
 		struct mempolicy *mpol, pgoff_t ilx);
 struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
 			      struct vm_fault *vmf);
 
@@ -80,7 +80,7 @@ static inline void show_swap_cache_info(void)
 {
 }
 
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
+static inline struct folio *swap_cluster_readahead(swp_entry_t entry,
 			gfp_t gfp_mask, struct mempolicy *mpol, pgoff_t ilx)
 {
 	return NULL;
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -620,7 +620,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * @mpol: NUMA memory allocation policy to be applied
  * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read an aligned block of
  * (1 << page_cluster) entries in the swap area. This method is chosen
@@ -631,7 +631,7 @@ static unsigned long swapin_nr_pages(unsigned long offset)
  * are used for every page of the readahead: neighbouring pages on swap
  * are fairly likely to have been swapped out from the same node.
  */
-struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
+struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 		struct mempolicy *mpol, pgoff_t ilx)
 {
 	struct folio *folio;
@@ -683,7 +683,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -787,7 +787,7 @@ static void swap_ra_info(struct vm_fault *vmf,
  * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
  * @vmf: fault information
  *
- * Returns the struct page for entry and addr, after queueing swapin.
+ * Returns the struct folio for entry and addr, after queueing swapin.
  *
  * Primitive swap readahead code. We simply read in a few pages whose
  * virtual addresses are around the fault address in the same vma.
@@ -795,9 +795,8 @@ static void swap_ra_info(struct vm_fault *vmf,
  * Caller must hold read mmap_lock if vmf->vma is not NULL.
  *
  */
-static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
-		struct mempolicy *mpol, pgoff_t targ_ilx,
-		struct vm_fault *vmf)
+static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
+		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
 {
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
@@ -859,7 +858,7 @@ static struct page *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
 	if (unlikely(page_allocated))
 		swap_read_folio(folio, false, NULL);
 	zswap_folio_swapin(folio);
-	return folio_file_page(folio, swp_offset(entry));
+	return folio;
 }
 
 /**
@@ -879,14 +878,17 @@ struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
 {
 	struct mempolicy *mpol;
 	pgoff_t ilx;
-	struct page *page;
+	struct folio *folio;
 
 	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
-	page = swap_use_vma_readahead() ?
+	folio = swap_use_vma_readahead() ?
 		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
 		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
 	mpol_cond_put(mpol);
-	return page;
+
+	if (!folio)
+		return NULL;
+	return folio_file_page(folio, swp_offset(entry));
 }
 
 #ifdef CONFIG_SYSFS
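
As a side note on the readahead window described in the swap_cluster_readahead() comment above ("an aligned block of (1 << page_cluster) entries"): a minimal sketch of that aligned-window arithmetic, with illustrative values only (the kernel derives the window size via swapin_nr_pages() and masks the offset in the same way):

    #include <stdio.h>

    /* Illustrative only: an aligned block of (1 << page_cluster) swap
     * entries around the faulting offset, as the comment describes. */
    int main(void)
    {
            unsigned long page_cluster = 3;     /* assume 8-entry window */
            unsigned long offset = 1234;        /* faulting swap offset */
            unsigned long mask = (1UL << page_cluster) - 1;
            unsigned long start = offset & ~mask;   /* aligned block start */
            unsigned long end = offset | mask;      /* aligned block end */

            printf("read entries %lu..%lu around offset %lu\n",
                   start, end, offset);
            return 0;
    }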