Commit 79941493 authored by Linus Torvalds

Merge tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache

Pull folio fixes from Matthew Wilcox:
 "In the course of preparing the folio changes for iomap for next merge
  window, we discovered some problems that would be nice to address now:

   - Renaming multi-page folios to large folios.

     mapping_multi_page_folio_support() is just a little too long, so we
     settled on mapping_large_folio_support(). That meant renaming, e.g.,
     folio_test_multi() to folio_test_large().

     Rename AS_THP_SUPPORT to match

   - I hadn't included folio wrappers for zero_user_segments(), etc.
     Also, multi-page^W^W large folio support is now independent of
     CONFIG_TRANSPARENT_HUGEPAGE, so machines with HIGHMEM always need
     to fall back to the out-of-line zero_user_segments().

     Remove FS_THP_SUPPORT to match

   - The build bots finally got round to telling me that I missed a
     couple of architectures when adding flush_dcache_folio(). Christoph
     suggested that we just add linux/cacheflush.h and not rely on
     asm-generic/cacheflush.h"

* tag 'folio-5.16b' of git://git.infradead.org/users/willy/pagecache:
  mm: Add functions to zero portions of a folio
  fs: Rename AS_THP_SUPPORT and mapping_thp_support
  fs: Remove FS_THP_SUPPORT
  mm: Remove folio_test_single
  mm: Rename folio_test_multi to folio_test_large
  Add linux/cacheflush.h
parents 5f53fa50 c0357139
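
As a quick orientation before the diff: under the new naming, a filesystem advertises large-folio support per mapping, and code tests an individual folio with folio_test_large(). A minimal sketch (editor's illustration, not part of the series) using only identifiers introduced below; the helper name is hypothetical:

	/* Hypothetical helper: true if this mapping may cache large folios
	 * and this particular folio is one.  Uses only names from this
	 * series: mapping_large_folio_support() and folio_test_large(). */
	static inline bool folio_is_large_in(struct address_space *mapping,
					     struct folio *folio)
	{
		return mapping_large_folio_support(mapping) &&
		       folio_test_large(folio);
	}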
--- a/arch/arc/include/asm/cacheflush.h
+++ b/arch/arc/include/asm/cacheflush.h
@@ -36,7 +36,6 @@ void __flush_dcache_page(phys_addr_t paddr, unsigned long vaddr);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 void dma_cache_wback_inv(phys_addr_t start, unsigned long sz);
 void dma_cache_inv(phys_addr_t start, unsigned long sz);
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -290,7 +290,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr
  */
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 extern void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *folio);
 #define ARCH_IMPLEMENTS_FLUSH_KERNEL_VMAP_RANGE 1
 static inline void flush_kernel_vmap_range(void *addr, int size)
--- a/arch/m68k/include/asm/cacheflush_mm.h
+++ b/arch/m68k/include/asm/cacheflush_mm.h
@@ -250,7 +250,6 @@ static inline void __flush_page_to_ram(void *vaddr)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)	__flush_page_to_ram(page_address(page))
-void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 #define flush_icache_page(vma, page)	__flush_page_to_ram(page_address(page))
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -61,8 +61,6 @@ static inline void flush_dcache_page(struct page *page)
 		SetPageDcacheDirty(page);
 }
-void flush_dcache_folio(struct folio *folio);
-
 #define flush_dcache_mmap_lock(mapping)		do { } while (0)
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
--- a/arch/nios2/include/asm/cacheflush.h
+++ b/arch/nios2/include/asm/cacheflush.h
@@ -27,7 +27,6 @@ void flush_cache_vunmap(unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 			unsigned long vaddr, void *dst, void *src, int len);
 void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
--- a/arch/openrisc/include/asm/cacheflush.h
+++ b/arch/openrisc/include/asm/cacheflush.h
@@ -29,7 +29,6 @@ extern void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
 			     unsigned long pfn);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -50,7 +50,6 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 #define flush_dcache_mmap_lock(mapping)		xa_lock_irq(&mapping->i_pages)
 #define flush_dcache_mmap_unlock(mapping)	xa_unlock_irq(&mapping->i_pages)
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -43,7 +43,6 @@ extern void flush_cache_range(struct vm_area_struct *vma,
 			      unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 void flush_dcache_page(struct page *page);
-void flush_dcache_folio(struct folio *folio);
 extern void flush_icache_range(unsigned long start, unsigned long end);
 #define flush_icache_user_range flush_icache_range
 extern void flush_icache_page(struct vm_area_struct *vma,
--- a/arch/xtensa/include/asm/cacheflush.h
+++ b/arch/xtensa/include/asm/cacheflush.h
@@ -121,7 +121,6 @@ void flush_cache_page(struct vm_area_struct*,
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	1
 void flush_dcache_page(struct page *);
-void flush_dcache_folio(struct folio *);
 void local_flush_cache_range(struct vm_area_struct *vma,
 		unsigned long start, unsigned long end);
@@ -138,9 +137,7 @@ void local_flush_cache_page(struct vm_area_struct *vma,
 #define flush_cache_vunmap(start,end)		do { } while (0)
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE	0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
 #define flush_dcache_page(page)			do { } while (0)
-static inline void flush_dcache_folio(struct folio *folio) { }
 #define flush_icache_range local_flush_icache_range
 #define flush_cache_page(vma, addr, pfn)	do { } while (0)
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -180,8 +180,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
 	mapping->a_ops = &empty_aops;
 	mapping->host = inode;
 	mapping->flags = 0;
-	if (sb->s_type->fs_flags & FS_THP_SUPPORT)
-		__set_bit(AS_THP_SUPPORT, &mapping->flags);
 	mapping->wb_err = 0;
 	atomic_set(&mapping->i_mmap_writable, 0);
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -50,13 +50,7 @@ static inline void flush_dcache_page(struct page *page)
 {
 }
-static inline void flush_dcache_folio(struct folio *folio) { }
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-#endif
-
-#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
-void flush_dcache_folio(struct folio *folio);
 #endif
 
 #ifndef flush_dcache_mmap_lock
--- /dev/null
+++ b/include/linux/cacheflush.h
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CACHEFLUSH_H
+#define _LINUX_CACHEFLUSH_H
+#include <asm/cacheflush.h>
+#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO
+void flush_dcache_folio(struct folio *folio);
+#endif
+#else
+static inline void flush_dcache_folio(struct folio *folio)
+{
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO 0
+#endif /* ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE */
+#endif /* _LINUX_CACHEFLUSH_H */
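
The new header gives every configuration a flush_dcache_folio() definition: an architecture that implements flush_dcache_page() gets the out-of-line declaration unless it signals its own implementation by defining ARCH_IMPLEMENTS_FLUSH_DCACHE_FOLIO, while cache-coherent architectures get an inline no-op. A caller-side sketch (editor's illustration; the function name is made up):

	#include <linux/cacheflush.h>	/* rather than asm/cacheflush.h */

	/* After writing to a folio through the kernel mapping, push the
	 * data out for aliasing/incoherent data caches.  On architectures
	 * where ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE is 0, this compiles to
	 * nothing. */
	static void sync_folio_after_write(struct folio *folio)
	{
		flush_dcache_folio(folio);
	}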
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -2518,7 +2518,6 @@ struct file_system_type {
 #define FS_USERNS_MOUNT		8	/* Can be mounted by userns root */
 #define FS_DISALLOW_NOTIFY_PERM	16	/* Disable fanotify permission events */
 #define FS_ALLOW_IDMAP		32	/* FS has been updated to handle vfs idmappings. */
-#define FS_THP_SUPPORT		8192	/* Remove once all fs converted */
 #define FS_RENAME_DOES_D_MOVE	32768	/* FS will handle d_move() during rename() internally. */
 	int (*init_fs_context)(struct fs_context *);
 	const struct fs_parameter_spec *parameters;
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -5,12 +5,11 @@
 #include <linux/fs.h>
 #include <linux/kernel.h>
 #include <linux/bug.h>
+#include <linux/cacheflush.h>
 #include <linux/mm.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
-#include <asm/cacheflush.h>
 #include "highmem-internal.h"
 
 /**
@@ -231,10 +230,10 @@ static inline void tag_clear_highpage(struct page *page)
  * If we pass in a base or tail page, we can zero up to PAGE_SIZE.
  * If we pass in a head page, we can zero up to the size of the compound page.
  */
-#if defined(CONFIG_HIGHMEM) && defined(CONFIG_TRANSPARENT_HUGEPAGE)
+#ifdef CONFIG_HIGHMEM
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2);
-#else /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#else
 static inline void zero_user_segments(struct page *page,
 		unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2)
@@ -254,7 +253,7 @@ static inline void zero_user_segments(struct page *page,
 	for (i = 0; i < compound_nr(page); i++)
 		flush_dcache_page(page + i);
 }
-#endif /* !HIGHMEM || !TRANSPARENT_HUGEPAGE */
+#endif
 
 static inline void zero_user_segment(struct page *page,
 		unsigned start, unsigned end)
@@ -364,4 +363,42 @@ static inline void memzero_page(struct page *page, size_t offset, size_t len)
 	kunmap_local(addr);
 }
 
+/**
+ * folio_zero_segments() - Zero two byte ranges in a folio.
+ * @folio: The folio to write to.
+ * @start1: The first byte to zero.
+ * @xend1: One more than the last byte in the first range.
+ * @start2: The first byte to zero in the second range.
+ * @xend2: One more than the last byte in the second range.
+ */
+static inline void folio_zero_segments(struct folio *folio,
+		size_t start1, size_t xend1, size_t start2, size_t xend2)
+{
+	zero_user_segments(&folio->page, start1, xend1, start2, xend2);
+}
+
+/**
+ * folio_zero_segment() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @xend: One more than the last byte to zero.
+ */
+static inline void folio_zero_segment(struct folio *folio,
+		size_t start, size_t xend)
+{
+	zero_user_segments(&folio->page, start, xend, 0, 0);
+}
+
+/**
+ * folio_zero_range() - Zero a byte range in a folio.
+ * @folio: The folio to write to.
+ * @start: The first byte to zero.
+ * @length: The number of bytes to zero.
+ */
+static inline void folio_zero_range(struct folio *folio,
+		size_t start, size_t length)
+{
+	zero_user_segments(&folio->page, start, start + length, 0, 0);
+}
+
 #endif /* _LINUX_HIGHMEM_H */
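
For example, a truncate-style path could use the new wrappers to clear the part of a folio beyond the new end of file. A sketch (editor's illustration; the helper name is hypothetical, and folio_size() from the core folio API is assumed to be available):

	/* Zero from the new EOF offset to the end of the folio. */
	static void zero_folio_tail(struct folio *folio, size_t eof_offset)
	{
		folio_zero_segment(folio, eof_offset, folio_size(folio));
	}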
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -686,13 +686,13 @@ static inline bool test_set_page_writeback(struct page *page)
 __PAGEFLAG(Head, head, PF_ANY) CLEARPAGEFLAG(Head, head, PF_ANY)
 
-/* Whether there are one or multiple pages in a folio */
-static inline bool folio_test_single(struct folio *folio)
-{
-	return !folio_test_head(folio);
-}
-
-static inline bool folio_test_multi(struct folio *folio)
+/**
+ * folio_test_large() - Does this folio contain more than one page?
+ * @folio: The folio to test.
+ *
+ * Return: True if the folio is larger than one page.
+ */
+static inline bool folio_test_large(struct folio *folio)
 {
 	return folio_test_head(folio);
 }
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -84,7 +84,7 @@ enum mapping_flags {
 	AS_EXITING = 4, 	/* final truncate in progress */
 	/* writeback related tags are not used */
 	AS_NO_WRITEBACK_TAGS = 5,
-	AS_THP_SUPPORT = 6,	/* THPs supported */
+	AS_LARGE_FOLIO_SUPPORT = 6,
 };
 
@@ -176,9 +176,25 @@ static inline void mapping_set_gfp_mask(struct address_space *m, gfp_t mask)
 	m->gfp_mask = mask;
 }
 
-static inline bool mapping_thp_support(struct address_space *mapping)
+/**
+ * mapping_set_large_folios() - Indicate the file supports large folios.
+ * @mapping: The file.
+ *
+ * The filesystem should call this function in its inode constructor to
+ * indicate that the VFS can use large folios to cache the contents of
+ * the file.
+ *
+ * Context: This should not be called while the inode is active as it
+ * is non-atomic.
+ */
+static inline void mapping_set_large_folios(struct address_space *mapping)
+{
+	__set_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
+}
+
+static inline bool mapping_large_folio_support(struct address_space *mapping)
 {
-	return test_bit(AS_THP_SUPPORT, &mapping->flags);
+	return test_bit(AS_LARGE_FOLIO_SUPPORT, &mapping->flags);
 }
 
 static inline int filemap_nr_thps(struct address_space *mapping)
@@ -193,7 +209,7 @@ static inline int filemap_nr_thps(struct address_space *mapping)
 static inline void filemap_nr_thps_inc(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-	if (!mapping_thp_support(mapping))
+	if (!mapping_large_folio_support(mapping))
 		atomic_inc(&mapping->nr_thps);
 #else
 	WARN_ON_ONCE(1);
@@ -203,7 +219,7 @@ static inline void filemap_nr_thps_inc(struct address_space *mapping)
 static inline void filemap_nr_thps_dec(struct address_space *mapping)
 {
 #ifdef CONFIG_READ_ONLY_THP_FOR_FS
-	if (!mapping_thp_support(mapping))
+	if (!mapping_large_folio_support(mapping))
 		atomic_dec(&mapping->nr_thps);
 #else
 	WARN_ON_ONCE(1);
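
Per the kernel-doc added above, a filesystem that can handle large folios opts in from its inode constructor, exactly as the shmem hunk further down does. A sketch for a hypothetical filesystem (editor's illustration; "myfs" is made up):

	/* In a hypothetical filesystem's inode setup path: flag large-folio
	 * support before the inode goes live, since the __set_bit() inside
	 * mapping_set_large_folios() is non-atomic. */
	static void myfs_setup_mapping(struct inode *inode)
	{
		mapping_set_large_folios(inode->i_mapping);
	}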
--- a/mm/highmem.c
+++ b/mm/highmem.c
@@ -359,7 +359,6 @@ void kunmap_high(struct page *page)
 }
 EXPORT_SYMBOL(kunmap_high);
 
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 		unsigned start2, unsigned end2)
 {
@@ -416,7 +415,6 @@ void zero_user_segments(struct page *page, unsigned start1, unsigned end1,
 	BUG_ON((start1 | start2 | end1 | end2) != 0);
 }
 EXPORT_SYMBOL(zero_user_segments);
-#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif /* CONFIG_HIGHMEM */
 
 #ifdef CONFIG_KMAP_LOCAL
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -5558,7 +5558,7 @@ static int mem_cgroup_move_account(struct page *page,
 	VM_BUG_ON(from == to);
 	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);
-	VM_BUG_ON(compound && !folio_test_multi(folio));
+	VM_BUG_ON(compound && !folio_test_large(folio));
 
 	/*
 	 * Prevent mem_cgroup_migrate() from looking at
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2303,6 +2303,7 @@ static struct inode *shmem_get_inode(struct super_block *sb, const struct inode
 		INIT_LIST_HEAD(&info->swaplist);
 		simple_xattrs_init(&info->xattrs);
 		cache_no_acl(inode);
+		mapping_set_large_folios(inode->i_mapping);
 		switch (mode & S_IFMT) {
 		default:
@@ -3870,7 +3871,7 @@ static struct file_system_type shmem_fs_type = {
 	.parameters	= shmem_fs_parameters,
 #endif
 	.kill_sb	= kill_litter_super,
-	.fs_flags	= FS_USERNS_MOUNT | FS_THP_SUPPORT,
+	.fs_flags	= FS_USERNS_MOUNT,
 };
 
 int __init shmem_init(void)
--- a/mm/util.c
+++ b/mm/util.c
@@ -670,7 +670,7 @@ bool folio_mapped(struct folio *folio)
 {
 	long i, nr;
 
-	if (folio_test_single(folio))
+	if (!folio_test_large(folio))
 		return atomic_read(&folio->_mapcount) >= 0;
 	if (atomic_read(folio_mapcount_ptr(folio)) >= 0)
 		return true;