Commit b4ff7b17 authored by David Howells's avatar David Howells

netfs: Remove ->launder_folio() support

Remove support for ->launder_folio() from netfslib and expect filesystems
to use filemap_invalidate_inode() instead.  netfs_launder_folio() can then
be got rid of.

Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: Steve French <sfrench@samba.org>
cc: Matthew Wilcox <willy@infradead.org>
cc: linux-mm@kvack.org
cc: linux-fsdevel@vger.kernel.org
cc: netfs@lists.linux.dev
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: ceph-devel@vger.kernel.org
cc: linux-cifs@vger.kernel.org
cc: devel@lists.orangefs.org
parent d73065e6
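
As a rough illustration of the replacement pattern the commit message points to, a netfs-based filesystem could flush and drop its pagecache during inode eviction with filemap_invalidate_inode() rather than relying on ->launder_folio(). The sketch below is not taken from this commit: example_evict_inode() and the NULL auxiliary value passed to netfs_clear_inode_writeback() are hypothetical, and filemap_invalidate_inode() is assumed to have the (inode, flush, start, end) signature introduced alongside this series.

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/netfs.h>

static void example_evict_inode(struct inode *inode)
{
	/* Write back and then discard all remaining pagecache in one call,
	 * instead of having invalidation launder dirty folios one at a time
	 * through ->launder_folio().  (Sketch only; the real call site and
	 * error handling are filesystem-specific.)
	 */
	filemap_invalidate_inode(inode, true, 0, LLONG_MAX);

	truncate_inode_pages_final(&inode->i_data);
	netfs_clear_inode_writeback(inode, NULL);
	clear_inode(inode);
}

With that in place, the filesystem can also drop its .launder_folio = netfs_launder_folio entry from its address_space_operations.
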
@@ -1200,77 +1200,3 @@ int netfs_writepages(struct address_space *mapping,
	return ret;
}
EXPORT_SYMBOL(netfs_writepages);

/*
 * Deal with the disposition of a laundered folio.
 */
static void netfs_cleanup_launder_folio(struct netfs_io_request *wreq)
{
	if (wreq->error) {
		pr_notice("R=%08x Laundering error %d\n", wreq->debug_id, wreq->error);
		mapping_set_error(wreq->mapping, wreq->error);
	}
}

/**
 * netfs_launder_folio - Clean up a dirty folio that's being invalidated
 * @folio: The folio to clean
 *
 * This is called to write back a folio that's being invalidated when an inode
 * is getting torn down.  Ideally, writepages would be used instead.
 */
int netfs_launder_folio(struct folio *folio)
{
	struct netfs_io_request *wreq;
	struct address_space *mapping = folio->mapping;
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	struct bio_vec bvec;
	unsigned long long i_size = i_size_read(mapping->host);
	unsigned long long start = folio_pos(folio);
	size_t offset = 0, len;
	int ret = 0;

	if (finfo) {
		offset = finfo->dirty_offset;
		start += offset;
		len = finfo->dirty_len;
	} else {
		len = folio_size(folio);
	}
	len = min_t(unsigned long long, len, i_size - start);

	wreq = netfs_alloc_request(mapping, NULL, start, len, NETFS_LAUNDER_WRITE);
	if (IS_ERR(wreq)) {
		ret = PTR_ERR(wreq);
		goto out;
	}

	if (!folio_clear_dirty_for_io(folio))
		goto out_put;

	trace_netfs_folio(folio, netfs_folio_trace_launder);

	_debug("launder %llx-%llx", start, start + len - 1);

	/* Speculatively write to the cache.  We have to fix this up later if
	 * the store fails.
	 */
	wreq->cleanup = netfs_cleanup_launder_folio;

	bvec_set_folio(&bvec, folio, len, offset);
	iov_iter_bvec(&wreq->iter, ITER_SOURCE, &bvec, 1, len);
	if (group != NETFS_FOLIO_COPY_TO_CACHE)
		__set_bit(NETFS_RREQ_UPLOAD_TO_SERVER, &wreq->flags);
	ret = netfs_begin_write(wreq, true, netfs_write_trace_launder);

out_put:
	folio_detach_private(folio);
	netfs_put_group(group);
	kfree(finfo);
	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
out:
	_leave(" = %d", ret);
	return ret;
}
EXPORT_SYMBOL(netfs_launder_folio);
@@ -34,7 +34,6 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
	[NETFS_COPY_TO_CACHE] = "CC",
	[NETFS_WRITEBACK] = "WB",
	[NETFS_WRITETHROUGH] = "WT",
	[NETFS_LAUNDER_WRITE] = "LW",
	[NETFS_UNBUFFERED_WRITE] = "UW",
	[NETFS_DIO_READ] = "DR",
	[NETFS_DIO_WRITE] = "DW",
@@ -172,7 +172,6 @@ enum netfs_io_origin {
	NETFS_COPY_TO_CACHE,		/* This write is to copy a read to the cache */
	NETFS_WRITEBACK,		/* This write was triggered by writepages */
	NETFS_WRITETHROUGH,		/* This write was made by netfs_perform_write() */
	NETFS_LAUNDER_WRITE,		/* This is triggered by ->launder_folio() */
	NETFS_UNBUFFERED_WRITE,		/* This is an unbuffered write */
	NETFS_DIO_READ,			/* This is a direct I/O read */
	NETFS_DIO_WRITE,		/* This is a direct I/O write */
@@ -352,7 +351,6 @@ int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length);
bool netfs_release_folio(struct folio *folio, gfp_t gfp);
int netfs_launder_folio(struct folio *folio);
/* VMA operations API. */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group);
@@ -26,7 +26,6 @@
#define netfs_write_traces \
	EM(netfs_write_trace_copy_to_cache, "COPY2CACH") \
	EM(netfs_write_trace_dio_write, "DIO-WRITE") \
	EM(netfs_write_trace_launder, "LAUNDER ") \
	EM(netfs_write_trace_unbuffered_write, "UNB-WRITE") \
	EM(netfs_write_trace_writeback, "WRITEBACK") \
	E_(netfs_write_trace_writethrough, "WRITETHRU")
@@ -38,7 +37,6 @@
	EM(NETFS_COPY_TO_CACHE, "CC") \
	EM(NETFS_WRITEBACK, "WB") \
	EM(NETFS_WRITETHROUGH, "WT") \
	EM(NETFS_LAUNDER_WRITE, "LW") \
	EM(NETFS_UNBUFFERED_WRITE, "UW") \
	EM(NETFS_DIO_READ, "DR") \
	E_(NETFS_DIO_WRITE, "DW")
@@ -135,7 +133,6 @@
	EM(netfs_folio_trace_end_copy, "end-copy") \
	EM(netfs_folio_trace_filled_gaps, "filled-gaps") \
	EM(netfs_folio_trace_kill, "kill") \
	EM(netfs_folio_trace_launder, "launder") \
	EM(netfs_folio_trace_mkwrite, "mkwrite") \
	EM(netfs_folio_trace_mkwrite_plus, "mkwrite+") \
	EM(netfs_folio_trace_read_gaps, "read-gaps") \