Commit 2df86547 authored by David Howells

netfs: Cut over to using new writeback code

Cut over to using the new writeback code.  The old code is #ifdef'd out or
otherwise removed from compilation to avoid conflicts and will be removed
in a future patch.
Signed-off-by: David Howells <dhowells@redhat.com>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
cc: Eric Van Hensbergen <ericvh@kernel.org>
cc: Latchesar Ionkov <lucho@ionkov.net>
cc: Dominique Martinet <asmadeus@codewreck.org>
cc: Christian Schoenebeck <linux_oss@crudebyte.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: v9fs@lists.linux.dev
cc: linux-afs@lists.infradead.org
cc: netfs@lists.linux.dev
cc: linux-fsdevel@vger.kernel.org
parent 64e64e6c
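
The cut-over pattern applied throughout the diff below can be sketched as follows. This is a minimal illustration with hypothetical example_* names, not code from the patch; it assumes the netfs_request_ops layout introduced by this series (linux/netfs.h). The superseded write path is fenced with #if 0 so it drops out of compilation without clashing with the new writeback symbols, and its entry point is deleted from the ops table while the new handlers remain:

	#include <linux/netfs.h>

	#if 0 // TODO: Remove -- old write path, compiled out until a later patch deletes it
	static void example_create_write_requests(struct netfs_io_request *wreq,
						  loff_t start, size_t len)
	{
		/* Superseded request-creation logic lived here. */
	}
	#endif

	/* New write path, driven by the new writeback engine. */
	static void example_issue_write(struct netfs_io_subrequest *subreq)
	{
		netfs_write_subrequest_terminated(subreq, subreq->len, false);
	}

	static const struct netfs_request_ops example_req_ops = {
		.issue_write	= example_issue_write,
		/* .create_write_requests is gone from the ops table. */
	};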
@@ -60,6 +60,7 @@ static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
 	netfs_write_subrequest_terminated(subreq, len ?: err, false);
 }
 
+#if 0 // TODO: Remove
 static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct p9_fid *fid = subreq->rreq->netfs_priv;
@@ -91,6 +92,7 @@ static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t sta
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /**
  * v9fs_issue_read - Issue a read from 9P
@@ -121,18 +123,15 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
 	struct p9_fid *fid;
 	bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
-			rreq->origin == NETFS_WRITEBACK ||
 			rreq->origin == NETFS_WRITETHROUGH ||
 			rreq->origin == NETFS_UNBUFFERED_WRITE ||
 			rreq->origin == NETFS_DIO_WRITE);
 
-#if 0 // TODO: Cut over
 	if (rreq->origin == NETFS_WRITEBACK)
 		return 0; /* We don't get the write handle until we find we
 			   * have actually dirty data and not just
 			   * copy-to-cache data.
 			   */
-#endif
 
 	if (file) {
 		fid = file->private_data;
@@ -179,7 +178,6 @@ const struct netfs_request_ops v9fs_req_ops = {
 	.issue_read		= v9fs_issue_read,
 	.begin_writeback	= v9fs_begin_writeback,
 	.issue_write		= v9fs_issue_write,
-	.create_write_requests	= v9fs_create_write_requests,
 };
 
 const struct address_space_operations v9fs_addr_operations = {
...
@@ -353,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
 	if (file)
 		rreq->netfs_priv = key_get(afs_file_key(file));
 	rreq->rsize = 256 * 1024;
-	rreq->wsize = 256 * 1024;
+	rreq->wsize = 256 * 1024 * 1024;
 	return 0;
 }
 
@@ -399,7 +399,6 @@ const struct netfs_request_ops afs_req_ops = {
 	.issue_read		= afs_issue_read,
 	.update_i_size		= afs_update_i_size,
 	.invalidate_cache	= afs_netfs_invalidate_cache,
-	.create_write_requests	= afs_create_write_requests,
 	.begin_writeback	= afs_begin_writeback,
 	.prepare_write		= afs_prepare_write,
 	.issue_write		= afs_issue_write,
...
@@ -1605,7 +1605,6 @@ extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
 
 /*
  * xattr.c
...
@@ -156,6 +156,7 @@ static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t
 	return afs_put_operation(op);
 }
 
+#if 0 // TODO: Remove
 static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
 {
 	struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
@@ -193,6 +194,7 @@ void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size
 	if (subreq)
 		netfs_queue_write_request(subreq);
 }
+#endif
 
 /*
  * Writeback calls this when it finds a folio that needs uploading.  This isn't
...
@@ -11,7 +11,6 @@ netfs-y := \
 	main.o \
 	misc.o \
 	objects.o \
-	output.o \
 	write_collect.o \
 	write_issue.o
...
@@ -26,8 +26,6 @@ enum netfs_how_to_modify {
 	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
 };
 
-static void netfs_cleanup_buffered_write(struct netfs_io_request *wreq);
-
 static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
 {
 	void *priv = folio_get_private(folio);
@@ -180,7 +178,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 	};
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_folio *finfo;
-	struct folio *folio;
+	struct folio *folio, *writethrough = NULL;
 	enum netfs_how_to_modify howto;
 	enum netfs_folio_trace trace;
 	unsigned int bdp_flags = (iocb->ki_flags & IOCB_SYNC) ? 0: BDP_ASYNC;
@@ -209,7 +207,6 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		}
 		if (!is_sync_kiocb(iocb))
 			wreq->iocb = iocb;
-		wreq->cleanup = netfs_cleanup_buffered_write;
 		netfs_stat(&netfs_n_wh_writethrough);
 	} else {
 		netfs_stat(&netfs_n_wh_buffered_write);
@@ -253,6 +250,16 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		offset = pos & (flen - 1);
 		part = min_t(size_t, flen - offset, part);
 
+		/* Wait for writeback to complete. The writeback engine owns
+		 * the info in folio->private and may change it until it
+		 * removes the WB mark.
+		 */
+		if (folio_get_private(folio) &&
+		    folio_wait_writeback_killable(folio)) {
+			ret = written ? -EINTR : -ERESTARTSYS;
+			goto error_folio_unlock;
+		}
+
 		if (signal_pending(current)) {
 			ret = written ? -EINTR : -ERESTARTSYS;
 			goto error_folio_unlock;
@@ -327,6 +334,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 			maybe_trouble = true;
 			iov_iter_revert(iter, copied);
 			copied = 0;
+			folio_unlock(folio);
 			goto retry;
 		}
 		netfs_set_group(folio, netfs_group);
@@ -382,23 +390,14 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 		if (likely(!wreq)) {
 			folio_mark_dirty(folio);
+			folio_unlock(folio);
 		} else {
-			if (folio_test_dirty(folio))
-				/* Sigh. mmap. */
-				folio_clear_dirty_for_io(folio);
-			/* We make multiple writes to the folio... */
-			if (!folio_test_writeback(folio)) {
-				folio_start_writeback(folio);
-				if (wreq->iter.count == 0)
-					trace_netfs_folio(folio, netfs_folio_trace_wthru);
-				else
-					trace_netfs_folio(folio, netfs_folio_trace_wthru_plus);
-			}
-			netfs_advance_writethrough(wreq, copied,
-						   offset + copied == flen);
+			netfs_advance_writethrough(wreq, &wbc, folio, copied,
+						   offset + copied == flen,
+						   &writethrough);
+			/* Folio unlocked */
 		}
 	retry:
-		folio_unlock(folio);
 		folio_put(folio);
 		folio = NULL;
@@ -407,7 +406,7 @@ ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
 out:
 	if (unlikely(wreq)) {
-		ret2 = netfs_end_writethrough(wreq, iocb);
+		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
 		wbc_detach_inode(&wbc);
 		if (ret2 == -EIOCBQUEUED)
 			return ret2;
@@ -529,11 +528,13 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 	sb_start_pagefault(inode->i_sb);
 
-	if (folio_wait_writeback_killable(folio))
+	if (folio_lock_killable(folio) < 0)
 		goto out;
 
-	if (folio_lock_killable(folio) < 0)
+	if (folio_wait_writeback_killable(folio)) {
+		ret = VM_FAULT_LOCKED;
 		goto out;
+	}
 
 	/* Can we see a streaming write here? */
 	if (WARN_ON(!folio_test_uptodate(folio))) {
@@ -573,6 +574,7 @@ vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_gr
 }
 EXPORT_SYMBOL(netfs_page_mkwrite);
 
+#if 0 // TODO: Remove
 /*
  * Kill all the pages in the given range
  */
@@ -1199,3 +1201,4 @@ int netfs_writepages(struct address_space *mapping,
 	return ret;
 }
 EXPORT_SYMBOL(netfs_writepages);
+#endif
@@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	unsigned long long start = iocb->ki_pos;
 	unsigned long long end = start + iov_iter_count(iter);
 	ssize_t ret, n;
+	size_t len = iov_iter_count(iter);
 	bool async = !is_sync_kiocb(iocb);
 
 	_enter("");
@@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	_debug("uw %llx-%llx", start, end);
 
-	wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
-				   start, end - start,
-				   iocb->ki_flags & IOCB_DIRECT ?
-				   NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+	wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+				      iocb->ki_flags & IOCB_DIRECT ?
+				      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
 	if (IS_ERR(wreq))
 		return PTR_ERR(wreq);
 
+	wreq->io_streams[0].avail = true;
+	trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+				 netfs_write_trace_dio_write :
+				 netfs_write_trace_unbuffered_write));
+
 	{
 		/* If this is an async op and we're not using a bounce buffer,
 		 * we have to save the source buffer as the iterator is only
@@ -63,7 +68,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	 * request.
 	 */
 	if (async || user_backed_iter(iter)) {
-		n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+		n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
 		if (n < 0) {
 			ret = n;
 			goto out;
@@ -71,7 +76,6 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
 		wreq->direct_bv_count = n;
 		wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
-		wreq->len = iov_iter_count(&wreq->iter);
 	} else {
 		wreq->iter = *iter;
 	}
@@ -79,6 +83,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		wreq->io_iter = wreq->iter;
 	}
 
+	__set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+
 	/* Copy the data into the bounce buffer and encrypt it. */
 	// TODO
@@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 	if (async)
 		wreq->iocb = iocb;
 	wreq->cleanup = netfs_cleanup_dio_write;
-	ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
-				iocb->ki_flags & IOCB_DIRECT ?
-				netfs_write_trace_dio_write :
-				netfs_write_trace_unbuffered_write);
+	ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
 	if (ret < 0) {
 		_debug("begin = %zd", ret);
 		goto out;
@@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 		trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
 		wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
 			    TASK_UNINTERRUPTIBLE);
-
+		smp_rmb(); /* Read error/transferred after RIP flag */
 		ret = wreq->error;
-		_debug("waited = %zd", ret);
 		if (ret == 0) {
 			ret = wreq->transferred;
 			iocb->ki_pos += ret;
...
@@ -92,15 +92,6 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
 	trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
 }
 
-/*
- * output.c
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
-		      enum netfs_write_trace what);
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
-
 /*
  * stats.c
  */
@@ -172,12 +163,12 @@ void netfs_reissue_write(struct netfs_io_stream *stream,
 int netfs_advance_write(struct netfs_io_request *wreq,
 			struct netfs_io_stream *stream,
 			loff_t start, size_t len, bool to_eof);
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache);
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache);
 int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
 
 /*
...
@@ -709,7 +709,7 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
 }
 
 /**
- * new_netfs_write_subrequest_terminated - Note the termination of a write operation.
+ * netfs_write_subrequest_terminated - Note the termination of a write operation.
  * @_op: The I/O request that has terminated.
  * @transferred_or_error: The amount of data transferred or an error code.
  * @was_async: The termination was asynchronous
@@ -731,8 +731,8 @@ void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async)
  * Note that %_op is a void* so that the function can be passed to
  * kiocb::term_func without the need for a casting wrapper.
  */
-void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-					   bool was_async)
+void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
+				       bool was_async)
 {
 	struct netfs_io_subrequest *subreq = _op;
 	struct netfs_io_request *wreq = subreq->rreq;
@@ -800,4 +800,4 @@ void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_err
 	netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
 }
-EXPORT_SYMBOL(new_netfs_write_subrequest_terminated);
+EXPORT_SYMBOL(netfs_write_subrequest_terminated);
@@ -494,8 +494,8 @@ static int netfs_write_folio(struct netfs_io_request *wreq,
 /*
  * Write some of the pending data back to the server
  */
-int new_netfs_writepages(struct address_space *mapping,
-			 struct writeback_control *wbc)
+int netfs_writepages(struct address_space *mapping,
+		     struct writeback_control *wbc)
 {
 	struct netfs_inode *ictx = netfs_inode(mapping->host);
 	struct netfs_io_request *wreq = NULL;
@@ -556,12 +556,12 @@ int new_netfs_writepages(struct address_space *mapping,
 	_leave(" = %d", error);
 	return error;
 }
-EXPORT_SYMBOL(new_netfs_writepages);
+EXPORT_SYMBOL(netfs_writepages);
 
 /*
  * Begin a write operation for writing through the pagecache.
  */
-struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t len)
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len)
 {
 	struct netfs_io_request *wreq = NULL;
 	struct netfs_inode *ictx = netfs_inode(file_inode(iocb->ki_filp));
@@ -586,9 +586,9 @@ struct netfs_io_request *new_netfs_begin_writethrough(struct kiocb *iocb, size_t
  * to the request.  If we've added more than wsize then we need to create a new
  * subrequest.
  */
-int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-				   struct folio *folio, size_t copied, bool to_page_end,
-				   struct folio **writethrough_cache)
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			       struct folio *folio, size_t copied, bool to_page_end,
+			       struct folio **writethrough_cache)
 {
 	_enter("R=%x ic=%zu ws=%u cp=%zu tp=%u",
 	       wreq->debug_id, wreq->iter.count, wreq->wsize, copied, to_page_end);
@@ -618,8 +618,8 @@ int new_netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeba
 /*
  * End a write operation used when writing through the pagecache.
  */
-int new_netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
-			       struct folio *writethrough_cache)
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+			   struct folio *writethrough_cache)
 {
 	struct netfs_inode *ictx = netfs_inode(wreq->inode);
 	int ret;
...
@@ -303,8 +303,6 @@ struct netfs_request_ops {
 	void (*update_i_size)(struct inode *inode, loff_t i_size);
 
 	/* Write request handling */
-	void (*create_write_requests)(struct netfs_io_request *wreq,
-				      loff_t start, size_t len);
 	void (*begin_writeback)(struct netfs_io_request *wreq);
 	void (*prepare_write)(struct netfs_io_subrequest *subreq);
 	void (*issue_write)(struct netfs_io_subrequest *subreq);
@@ -409,8 +407,6 @@ int netfs_write_begin(struct netfs_inode *, struct file *,
 		      struct folio **, void **fsdata);
 int netfs_writepages(struct address_space *mapping,
 		     struct writeback_control *wbc);
-int new_netfs_writepages(struct address_space *mapping,
-			 struct writeback_control *wbc);
 bool netfs_dirty_folio(struct address_space *mapping, struct folio *folio);
 int netfs_unpin_writeback(struct inode *inode, struct writeback_control *wbc);
 void netfs_clear_inode_writeback(struct inode *inode, const void *aux);
@@ -431,14 +427,9 @@ ssize_t netfs_extract_user_iter(struct iov_iter *orig, size_t orig_len,
 			iov_iter_extraction_t extraction_flags);
 size_t netfs_limit_iter(const struct iov_iter *iter, size_t start_offset,
 			size_t max_size, size_t max_segs);
-struct netfs_io_subrequest *netfs_create_write_request(
-	struct netfs_io_request *wreq, enum netfs_io_source dest,
-	loff_t start, size_t len, work_func_t worker);
 void netfs_prepare_write_failed(struct netfs_io_subrequest *subreq);
 void netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
 				       bool was_async);
-void new_netfs_write_subrequest_terminated(void *_op, ssize_t transferred_or_error,
-					   bool was_async);
 void netfs_queue_write_request(struct netfs_io_subrequest *subreq);
 
 int netfs_start_io_read(struct inode *inode);
...