Commit c1ec4d7c authored by David Howells

netfs: Provide invalidate_folio and release_folio calls

Provide default invalidate_folio and release_folio calls.  These will need
to interact with invalidation correctly at some point.  They will be needed
if netfslib is to make use of folio->private for its own purposes.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: linux-cachefs@redhat.com
cc: linux-fsdevel@vger.kernel.org
cc: linux-mm@kvack.org
parent a34847d4
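
With the two helpers exported, a filesystem that uses netfslib can drop its own release_folio/invalidate_folio implementations and point its address_space_operations at the defaults, as the 9p, afs and ceph conversions below do. A minimal sketch of that wiring follows; the "myfs" name and the read-side entries are illustrative assumptions, not part of this commit:

/*
 * Sketch only: "myfs" is a hypothetical netfs-backed filesystem.  The
 * read-side helpers are listed for completeness; this commit only adds
 * the netfs_release_folio() and netfs_invalidate_folio() defaults.
 */
static const struct address_space_operations myfs_aops = {
	.read_folio		= netfs_read_folio,
	.readahead		= netfs_readahead,
	.dirty_folio		= netfs_dirty_folio,
	.release_folio		= netfs_release_folio,
	.invalidate_folio	= netfs_invalidate_folio,
};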
@@ -88,35 +88,6 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.issue_read = v9fs_issue_read,
 };
 
-/**
- * v9fs_release_folio - release the private state associated with a folio
- * @folio: The folio to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns true if the page can be released, false otherwise.
- */
-static bool v9fs_release_folio(struct folio *folio, gfp_t gfp)
-{
-	if (folio_test_private(folio))
-		return false;
-#ifdef CONFIG_9P_FSCACHE
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-	fscache_note_page_release(v9fs_inode_cookie(V9FS_I(folio_inode(folio))));
-#endif
-	return true;
-}
-
-static void v9fs_invalidate_folio(struct folio *folio, size_t offset,
-				  size_t length)
-{
-	folio_wait_fscache(folio);
-}
-
 #ifdef CONFIG_9P_FSCACHE
 static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
 				     bool was_async)
@@ -324,8 +295,8 @@ const struct address_space_operations v9fs_addr_operations = {
 	.writepage = v9fs_vfs_writepage,
 	.write_begin = v9fs_write_begin,
 	.write_end = v9fs_write_end,
-	.release_folio = v9fs_release_folio,
-	.invalidate_folio = v9fs_invalidate_folio,
+	.release_folio = netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.launder_folio = v9fs_launder_folio,
 	.direct_IO = v9fs_direct_IO,
 };
@@ -20,9 +20,6 @@
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_symlink_read_folio(struct file *file, struct folio *folio);
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-				 size_t length);
-static bool afs_release_folio(struct folio *folio, gfp_t gfp_flags);
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static ssize_t afs_file_splice_read(struct file *in, loff_t *ppos,
@@ -57,8 +54,8 @@ const struct address_space_operations afs_file_aops = {
 	.readahead = netfs_readahead,
 	.dirty_folio = netfs_dirty_folio,
 	.launder_folio = afs_launder_folio,
-	.release_folio = afs_release_folio,
-	.invalidate_folio = afs_invalidate_folio,
+	.release_folio = netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.write_begin = afs_write_begin,
 	.write_end = afs_write_end,
 	.writepages = afs_writepages,
@@ -67,8 +64,8 @@ const struct address_space_operations afs_file_aops = {
 const struct address_space_operations afs_symlink_aops = {
 	.read_folio = afs_symlink_read_folio,
-	.release_folio = afs_release_folio,
-	.invalidate_folio = afs_invalidate_folio,
+	.release_folio = netfs_release_folio,
+	.invalidate_folio = netfs_invalidate_folio,
 	.migrate_folio = filemap_migrate_folio,
 };
@@ -386,48 +383,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read = afs_issue_read,
 };
 
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- *   the entire page)
- */
-static void afs_invalidate_folio(struct folio *folio, size_t offset,
-				 size_t length)
-{
-	_enter("{%lu},%zu,%zu", folio->index, offset, length);
-
-	folio_wait_fscache(folio);
-	_leave("");
-}
-
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static bool afs_release_folio(struct folio *folio, gfp_t gfp)
-{
-	struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
-	_enter("{{%llx:%llu}[%lu],%lx},%x",
-	       vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
-	       gfp);
-
-	/* deny if folio is being written to the cache and the caller hasn't
-	 * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-
-	fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
-	/* Indicate that the folio can be released */
-	_leave(" = T");
-	return true;
-}
-
 static void afs_add_open_mmap(struct afs_vnode *vnode)
 {
 	if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
@@ -159,27 +159,7 @@ static void ceph_invalidate_folio(struct folio *folio, size_t offset,
 		ceph_put_snap_context(snapc);
 	}
 
-	folio_wait_fscache(folio);
-}
-
-static bool ceph_release_folio(struct folio *folio, gfp_t gfp)
-{
-	struct inode *inode = folio->mapping->host;
-	struct ceph_client *cl = ceph_inode_to_client(inode);
-
-	doutc(cl, "%llx.%llx idx %lu (%sdirty)\n", ceph_vinop(inode),
-	      folio->index, folio_test_dirty(folio) ? "" : "not ");
-
-	if (folio_test_private(folio))
-		return false;
-
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-	ceph_fscache_note_page_release(inode);
-	return true;
+	netfs_invalidate_folio(folio, offset, length);
 }
 
 static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
@@ -1585,7 +1565,7 @@ const struct address_space_operations ceph_aops = {
 	.write_end = ceph_write_end,
 	.dirty_folio = ceph_dirty_folio,
 	.invalidate_folio = ceph_invalidate_folio,
-	.release_folio = ceph_release_folio,
+	.release_folio = netfs_release_folio,
 	.direct_IO = noop_direct_IO,
 };
@@ -56,12 +56,6 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
 	return fscache_cookie_enabled(ceph_fscache_cookie(ceph_inode(inode)));
 }
 
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-	struct ceph_inode_info *ci = ceph_inode(inode);
-
-	fscache_note_page_release(ceph_fscache_cookie(ci));
-}
-
 #else /* CONFIG_CEPH_FSCACHE */
 static inline int ceph_fscache_register_fs(struct ceph_fs_client* fsc,
 					   struct fs_context *fc)
@@ -118,10 +112,6 @@ static inline bool ceph_is_cache_enabled(struct inode *inode)
 {
 	return false;
 }
 
-static inline void ceph_fscache_note_page_release(struct inode *inode)
-{
-}
-
 #endif /* CONFIG_CEPH_FSCACHE */
 #endif
@@ -84,3 +84,45 @@ void netfs_clear_inode_writeback(struct inode *inode, const void *aux)
 	}
 }
 EXPORT_SYMBOL(netfs_clear_inode_writeback);
+
+/**
+ * netfs_invalidate_folio - Invalidate or partially invalidate a folio
+ * @folio: Folio proposed for release
+ * @offset: Offset of the invalidated region
+ * @length: Length of the invalidated region
+ *
+ * Invalidate part or all of a folio for a network filesystem.  The folio will
+ * be removed afterwards if the invalidated region covers the entire folio.
+ */
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
+{
+	_enter("{%lx},%zx,%zx", folio_index(folio), offset, length);
+
+	folio_wait_fscache(folio);
+}
+EXPORT_SYMBOL(netfs_invalidate_folio);
+
+/**
+ * netfs_release_folio - Try to release a folio
+ * @folio: Folio proposed for release
+ * @gfp: Flags qualifying the release
+ *
+ * Request release of a folio and clean up its private state if it's not busy.
+ * Returns true if the folio can now be released, false if not
+ */
+bool netfs_release_folio(struct folio *folio, gfp_t gfp)
+{
+	struct netfs_inode *ctx = netfs_inode(folio_inode(folio));
+
+	if (folio_test_private(folio))
+		return false;
+	if (folio_test_fscache(folio)) {
+		if (current_is_kswapd() || !(gfp & __GFP_FS))
+			return false;
+		folio_wait_fscache(folio);
+	}
+
+	fscache_note_page_release(netfs_i_cookie(ctx));
+	return true;
+}
+EXPORT_SYMBOL(netfs_release_folio);
@@ -298,6 +298,8 @@ int netfs_write_begin(struct netfs_inode *, struct file *,
 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
+void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
+bool netfs_release_folio(struct folio *folio, gfp_t gfp);
 void netfs_subreq_terminated(struct netfs_io_subrequest *, ssize_t, bool);
 void netfs_get_subrequest(struct netfs_io_subrequest *subreq,