filemap: Add fgf_t typedef

Similarly to gfp_t, define fgf_t as its own type to prevent various
misuses and confusion.  Leave the flags as FGP_* for now to reduce the
size of this patch; they will be converted to FGF_* later.  Move the
documentation to the definition of the type instead of burying it in the
__filemap_get_folio() documentation.
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 7a8eb01b
...@@ -876,9 +876,9 @@ static int prepare_uptodate_page(struct inode *inode, ...@@ -876,9 +876,9 @@ static int prepare_uptodate_page(struct inode *inode,
return 0; return 0;
} }
static unsigned int get_prepare_fgp_flags(bool nowait) static fgf_t get_prepare_fgp_flags(bool nowait)
{ {
unsigned int fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT; fgf_t fgp_flags = FGP_LOCK | FGP_ACCESSED | FGP_CREAT;
if (nowait) if (nowait)
fgp_flags |= FGP_NOWAIT; fgp_flags |= FGP_NOWAIT;
...@@ -910,7 +910,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages, ...@@ -910,7 +910,7 @@ static noinline int prepare_pages(struct inode *inode, struct page **pages,
int i; int i;
unsigned long index = pos >> PAGE_SHIFT; unsigned long index = pos >> PAGE_SHIFT;
gfp_t mask = get_prepare_gfp_flags(inode, nowait); gfp_t mask = get_prepare_gfp_flags(inode, nowait);
unsigned int fgp_flags = get_prepare_fgp_flags(nowait); fgf_t fgp_flags = get_prepare_fgp_flags(nowait);
int err = 0; int err = 0;
int faili; int faili;
......
...@@ -1045,7 +1045,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc, ...@@ -1045,7 +1045,7 @@ static int prepare_compress_overwrite(struct compress_ctx *cc,
struct address_space *mapping = cc->inode->i_mapping; struct address_space *mapping = cc->inode->i_mapping;
struct page *page; struct page *page;
sector_t last_block_in_bio; sector_t last_block_in_bio;
unsigned fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT; fgf_t fgp_flag = FGP_LOCK | FGP_WRITE | FGP_CREAT;
pgoff_t start_idx = start_idx_of_cluster(cc); pgoff_t start_idx = start_idx_of_cluster(cc);
int i, ret; int i, ret;
......
...@@ -2736,7 +2736,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping, ...@@ -2736,7 +2736,7 @@ static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
static inline struct page *f2fs_pagecache_get_page( static inline struct page *f2fs_pagecache_get_page(
struct address_space *mapping, pgoff_t index, struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask) fgf_t fgp_flags, gfp_t gfp_mask)
{ {
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET))
return NULL; return NULL;
......
...@@ -467,7 +467,7 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate); ...@@ -467,7 +467,7 @@ EXPORT_SYMBOL_GPL(iomap_is_partially_uptodate);
*/ */
struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos) struct folio *iomap_get_folio(struct iomap_iter *iter, loff_t pos)
{ {
unsigned fgp = FGP_WRITEBEGIN | FGP_NOFS; fgf_t fgp = FGP_WRITEBEGIN | FGP_NOFS;
if (iter->flags & IOMAP_NOWAIT) if (iter->flags & IOMAP_NOWAIT)
fgp |= FGP_NOWAIT; fgp |= FGP_NOWAIT;
......
...@@ -501,22 +501,48 @@ pgoff_t page_cache_next_miss(struct address_space *mapping, ...@@ -501,22 +501,48 @@ pgoff_t page_cache_next_miss(struct address_space *mapping,
pgoff_t page_cache_prev_miss(struct address_space *mapping, pgoff_t page_cache_prev_miss(struct address_space *mapping,
pgoff_t index, unsigned long max_scan); pgoff_t index, unsigned long max_scan);
#define FGP_ACCESSED 0x00000001 /**
#define FGP_LOCK 0x00000002 * typedef fgf_t - Flags for getting folios from the page cache.
#define FGP_CREAT 0x00000004 *
#define FGP_WRITE 0x00000008 * Most users of the page cache will not need to use these flags;
#define FGP_NOFS 0x00000010 * there are convenience functions such as filemap_get_folio() and
#define FGP_NOWAIT 0x00000020 * filemap_lock_folio(). For users which need more control over exactly
#define FGP_FOR_MMAP 0x00000040 * what is done with the folios, these flags to __filemap_get_folio()
#define FGP_STABLE 0x00000080 * are available.
*
* * %FGP_ACCESSED - The folio will be marked accessed.
* * %FGP_LOCK - The folio is returned locked.
* * %FGP_CREAT - If no folio is present then a new folio is allocated,
* added to the page cache and the VM's LRU list. The folio is
* returned locked.
* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
* folio is already in cache. If the folio was allocated, unlock it
* before returning so the caller can do the same dance.
* * %FGP_WRITE - The folio will be written to by the caller.
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't block on the folio lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
* * %FGP_WRITEBEGIN - The flags to use in a filesystem write_begin()
* implementation.
*/
typedef unsigned int __bitwise fgf_t;
#define FGP_ACCESSED ((__force fgf_t)0x00000001)
#define FGP_LOCK ((__force fgf_t)0x00000002)
#define FGP_CREAT ((__force fgf_t)0x00000004)
#define FGP_WRITE ((__force fgf_t)0x00000008)
#define FGP_NOFS ((__force fgf_t)0x00000010)
#define FGP_NOWAIT ((__force fgf_t)0x00000020)
#define FGP_FOR_MMAP ((__force fgf_t)0x00000040)
#define FGP_STABLE ((__force fgf_t)0x00000080)
#define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE) #define FGP_WRITEBEGIN (FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE)
void *filemap_get_entry(struct address_space *mapping, pgoff_t index); void *filemap_get_entry(struct address_space *mapping, pgoff_t index);
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp); fgf_t fgp_flags, gfp_t gfp);
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp); fgf_t fgp_flags, gfp_t gfp);
/** /**
* filemap_get_folio - Find and get a folio. * filemap_get_folio - Find and get a folio.
...@@ -590,7 +616,7 @@ static inline struct page *find_get_page(struct address_space *mapping, ...@@ -590,7 +616,7 @@ static inline struct page *find_get_page(struct address_space *mapping,
} }
static inline struct page *find_get_page_flags(struct address_space *mapping, static inline struct page *find_get_page_flags(struct address_space *mapping,
pgoff_t offset, int fgp_flags) pgoff_t offset, fgf_t fgp_flags)
{ {
return pagecache_get_page(mapping, offset, fgp_flags, 0); return pagecache_get_page(mapping, offset, fgp_flags, 0);
} }
......
...@@ -1855,30 +1855,15 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index) ...@@ -1855,30 +1855,15 @@ void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
* *
* Looks up the page cache entry at @mapping & @index. * Looks up the page cache entry at @mapping & @index.
* *
* @fgp_flags can be zero or more of these flags:
*
* * %FGP_ACCESSED - The folio will be marked accessed.
* * %FGP_LOCK - The folio is returned locked.
* * %FGP_CREAT - If no page is present then a new page is allocated using
* @gfp and added to the page cache and the VM's LRU list.
* The page is returned locked and with an increased refcount.
* * %FGP_FOR_MMAP - The caller wants to do its own locking dance if the
* page is already in cache. If the page was allocated, unlock it before
* returning so the caller can do the same dance.
* * %FGP_WRITE - The page will be written to by the caller.
* * %FGP_NOFS - __GFP_FS will get cleared in gfp.
* * %FGP_NOWAIT - Don't get blocked by page lock.
* * %FGP_STABLE - Wait for the folio to be stable (finished writeback)
*
* If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even * If %FGP_LOCK or %FGP_CREAT are specified then the function may sleep even
* if the %GFP flags specified for %FGP_CREAT are atomic. * if the %GFP flags specified for %FGP_CREAT are atomic.
* *
* If there is a page cache page, it is returned with an increased refcount. * If this function returns a folio, it is returned with an increased refcount.
* *
* Return: The found folio or an ERR_PTR() otherwise. * Return: The found folio or an ERR_PTR() otherwise.
*/ */
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp) fgf_t fgp_flags, gfp_t gfp)
{ {
struct folio *folio; struct folio *folio;
......
...@@ -92,7 +92,7 @@ EXPORT_SYMBOL(add_to_page_cache_lru); ...@@ -92,7 +92,7 @@ EXPORT_SYMBOL(add_to_page_cache_lru);
noinline noinline
struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp) fgf_t fgp_flags, gfp_t gfp)
{ {
struct folio *folio; struct folio *folio;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment