Commit ef31ea6c authored by Linus Torvalds

Merge tag 'vfs-6.10.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs

Pull netfs updates from Christian Brauner:
 "This reworks the netfslib writeback implementation so that pages read
  from the server are written to the cache through ->writepages(),
  thereby allowing the fscache page flag to be retired.

  The reworking also:

   - builds on top of the new writeback_iter() infrastructure

   - makes it possible to use vectored write RPCs as discontiguous
     streams of pages can be accommodated

   - makes it easier to do simultaneous content crypto and stream
     division

   - provides support for retrying writes and re-dividing a stream

   - replaces the ->launder_folio() op, so that ->writepages() is used
     instead

   - uses mempools to allocate the netfs_io_request and
     netfs_io_subrequest structs to avoid allocation failure in the
     writeback path

  Some code that uses the fscache page flag is retained for
  compatibility with nfs and ceph. That code is switched to using the
  synonymous private_2 page flag instead and marked with deprecation
  comments.

  The merge commit contains additional details on the new algorithm;
  I've left them out here as they would probably be excessive detail.

  On top of the netfslib infrastructure this contains the work to
  convert cifs over to netfslib"
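
As a concrete illustration, the shape of the per-filesystem glue after this
rework can be seen in the 9p and afs hunks below: the old
->create_write_requests() op is replaced by a ->begin_writeback() hook that
obtains the write handle and enables the upload stream, plus an
->issue_write() hook that transmits one subrequest. A minimal sketch (the
myfs_* names are hypothetical; the netfs_request_ops fields are the ones the
conversions in this diff actually use):

    /* Sketch only: hypothetical filesystem glue for the reworked netfslib
     * writeback path; myfs_send_write_rpc() is a placeholder for the
     * filesystem's write RPC.
     */
    static void myfs_begin_writeback(struct netfs_io_request *wreq)
    {
            wreq->wsize = 256 * 1024;               /* max server write size */
            wreq->io_streams[0].avail = true;       /* enable the upload stream */
    }

    static void myfs_issue_write(struct netfs_io_subrequest *subreq)
    {
            ssize_t len = myfs_send_write_rpc(subreq);      /* placeholder */

            netfs_write_subrequest_terminated(subreq, len, false);
    }

    static const struct netfs_request_ops myfs_req_ops = {
            .begin_writeback        = myfs_begin_writeback,
            .issue_write            = myfs_issue_write,
    };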

* tag 'vfs-6.10.netfs' of git://git.kernel.org/pub/scm/linux/kernel/git/vfs/vfs: (38 commits)
  cifs: Enable large folio support
  cifs: Remove some code that's no longer used, part 3
  cifs: Remove some code that's no longer used, part 2
  cifs: Remove some code that's no longer used, part 1
  cifs: Cut over to using netfslib
  cifs: Implement netfslib hooks
  cifs: Make add_credits_and_wake_if() clear deducted credits
  cifs: Add mempools for cifs_io_request and cifs_io_subrequest structs
  cifs: Set zero_point in the copy_file_range() and remap_file_range()
  cifs: Move cifs_loose_read_iter() and cifs_file_write_iter() to file.c
  cifs: Replace the writedata replay bool with a netfs sreq flag
  cifs: Make wait_mtu_credits take size_t args
  cifs: Use more fields from netfs_io_subrequest
  cifs: Replace cifs_writedata with a wrapper around netfs_io_subrequest
  cifs: Replace cifs_readdata with a wrapper around netfs_io_subrequest
  cifs: Use alternative invalidation to using launder_folio
  netfs, afs: Use writeback retry to deal with alternate keys
  netfs: Miscellaneous tidy ups
  netfs: Remove the old writeback code
  netfs: Cut over to using new writeback code
  ...
parents 103fb219 e2bc9f6c
@@ -26,36 +26,38 @@
 #include "cache.h"
 #include "fid.h"
 
-static void v9fs_upload_to_server(struct netfs_io_subrequest *subreq)
-{
-        struct p9_fid *fid = subreq->rreq->netfs_priv;
-        int err, len;
-
-        trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-        len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
-        netfs_write_subrequest_terminated(subreq, len ?: err, false);
-}
-
-static void v9fs_upload_to_server_worker(struct work_struct *work)
-{
-        struct netfs_io_subrequest *subreq =
-                container_of(work, struct netfs_io_subrequest, work);
-
-        v9fs_upload_to_server(subreq);
-}
-
-/*
- * Set up write requests for a writeback slice. We need to add a write request
- * for each write we want to make.
- */
-static void v9fs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
-{
-        struct netfs_io_subrequest *subreq;
-
-        subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
-                                            start, len, v9fs_upload_to_server_worker);
-        if (subreq)
-                netfs_queue_write_request(subreq);
+/*
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+static void v9fs_begin_writeback(struct netfs_io_request *wreq)
+{
+        struct p9_fid *fid;
+
+        fid = v9fs_fid_find_inode(wreq->inode, true, INVALID_UID, true);
+        if (!fid) {
+                WARN_ONCE(1, "folio expected an open fid inode->i_ino=%lx\n",
+                          wreq->inode->i_ino);
+                return;
+        }
+
+        wreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+        if (fid->iounit)
+                wreq->wsize = min(wreq->wsize, fid->iounit);
+        wreq->netfs_priv = fid;
+        wreq->io_streams[0].avail = true;
+}
+
+/*
+ * Issue a subrequest to write to the server.
+ */
+static void v9fs_issue_write(struct netfs_io_subrequest *subreq)
+{
+        struct p9_fid *fid = subreq->rreq->netfs_priv;
+        int err, len;
+
+        len = p9_client_write(fid, subreq->start, &subreq->io_iter, &err);
+        netfs_write_subrequest_terminated(subreq, len ?: err, false);
 }
 
 /**
@@ -87,12 +89,16 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
         struct p9_fid *fid;
         bool writing = (rreq->origin == NETFS_READ_FOR_WRITE ||
-                        rreq->origin == NETFS_WRITEBACK ||
                         rreq->origin == NETFS_WRITETHROUGH ||
-                        rreq->origin == NETFS_LAUNDER_WRITE ||
                         rreq->origin == NETFS_UNBUFFERED_WRITE ||
                         rreq->origin == NETFS_DIO_WRITE);
 
+        if (rreq->origin == NETFS_WRITEBACK)
+                return 0; /* We don't get the write handle until we find we
+                           * have actually dirty data and not just
+                           * copy-to-cache data.
+                           */
+
         if (file) {
                 fid = file->private_data;
                 if (!fid)
@@ -104,6 +110,10 @@ static int v9fs_init_request(struct netfs_io_request *rreq, struct file *file)
                         goto no_fid;
         }
 
+        rreq->wsize = fid->clnt->msize - P9_IOHDRSZ;
+        if (fid->iounit)
+                rreq->wsize = min(rreq->wsize, fid->iounit);
+
         /* we might need to read from a fid that was opened write-only
          * for read-modify-write of page cache, use the writeback fid
          * for that */
@@ -132,7 +142,8 @@ const struct netfs_request_ops v9fs_req_ops = {
         .init_request           = v9fs_init_request,
         .free_request           = v9fs_free_request,
         .issue_read             = v9fs_issue_read,
-        .create_write_requests  = v9fs_create_write_requests,
+        .begin_writeback        = v9fs_begin_writeback,
+        .issue_write            = v9fs_issue_write,
 };
 
 const struct address_space_operations v9fs_addr_operations = {
@@ -141,7 +152,6 @@ const struct address_space_operations v9fs_addr_operations = {
         .dirty_folio            = netfs_dirty_folio,
         .release_folio          = netfs_release_folio,
         .invalidate_folio       = netfs_invalidate_folio,
-        .launder_folio          = netfs_launder_folio,
         .direct_IO              = noop_direct_IO,
         .writepages             = netfs_writepages,
 };

@@ -54,7 +54,6 @@ const struct address_space_operations afs_file_aops = {
         .read_folio             = netfs_read_folio,
         .readahead              = netfs_readahead,
         .dirty_folio            = netfs_dirty_folio,
-        .launder_folio          = netfs_launder_folio,
         .release_folio          = netfs_release_folio,
         .invalidate_folio       = netfs_invalidate_folio,
         .migrate_folio          = filemap_migrate_folio,
@@ -354,7 +353,7 @@ static int afs_init_request(struct netfs_io_request *rreq, struct file *file)
         if (file)
                 rreq->netfs_priv = key_get(afs_file_key(file));
         rreq->rsize = 256 * 1024;
-        rreq->wsize = 256 * 1024;
+        rreq->wsize = 256 * 1024 * 1024;
         return 0;
 }
 
@@ -369,6 +368,7 @@ static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
 static void afs_free_request(struct netfs_io_request *rreq)
 {
         key_put(rreq->netfs_priv);
+        afs_put_wb_key(rreq->netfs_priv2);
 }
 
 static void afs_update_i_size(struct inode *inode, loff_t new_i_size)
@@ -400,7 +400,9 @@ const struct netfs_request_ops afs_req_ops = {
         .issue_read             = afs_issue_read,
         .update_i_size          = afs_update_i_size,
         .invalidate_cache       = afs_netfs_invalidate_cache,
-        .create_write_requests  = afs_create_write_requests,
+        .begin_writeback        = afs_begin_writeback,
+        .prepare_write          = afs_prepare_write,
+        .issue_write            = afs_issue_write,
 };
 
 static void afs_add_open_mmap(struct afs_vnode *vnode)
......
@@ -916,7 +916,6 @@ struct afs_operation {
                         loff_t pos;
                         loff_t size;
                         loff_t i_size;
-                        bool laundering;        /* Laundering page, PG_writeback not set */
                 } store;
                 struct {
                         struct iattr *attr;
@@ -1599,11 +1598,14 @@ extern int afs_check_volume_status(struct afs_volume *, struct afs_operation *);
 /*
  * write.c
  */
+void afs_prepare_write(struct netfs_io_subrequest *subreq);
+void afs_issue_write(struct netfs_io_subrequest *subreq);
+void afs_begin_writeback(struct netfs_io_request *wreq);
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream);
 extern int afs_writepages(struct address_space *, struct writeback_control *);
 extern int afs_fsync(struct file *, loff_t, loff_t, int);
 extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
 extern void afs_prune_wb_keys(struct afs_vnode *);
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len);
 
 /*
  * xattr.c
......
@@ -365,9 +365,9 @@ static void afs_zap_data(struct afs_vnode *vnode)
          * written back in a regular file and completely discard the pages in a
          * directory or symlink */
         if (S_ISREG(vnode->netfs.inode.i_mode))
-                invalidate_remote_inode(&vnode->netfs.inode);
+                filemap_invalidate_inode(&vnode->netfs.inode, true, 0, LLONG_MAX);
         else
-                invalidate_inode_pages2(vnode->netfs.inode.i_mapping);
+                filemap_invalidate_inode(&vnode->netfs.inode, false, 0, LLONG_MAX);
 }
 
 /*
......
@@ -29,43 +29,39 @@ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsign
 /*
  * Find a key to use for the writeback. We cached the keys used to author the
- * writes on the vnode. *_wbk will contain the last writeback key used or NULL
- * and we need to start from there if it's set.
+ * writes on the vnode. wreq->netfs_priv2 will contain the last writeback key
+ * record used or NULL and we need to start from there if it's set.
+ * wreq->netfs_priv will be set to the key itself or NULL.
  */
-static int afs_get_writeback_key(struct afs_vnode *vnode,
-                                 struct afs_wb_key **_wbk)
+static void afs_get_writeback_key(struct netfs_io_request *wreq)
 {
-        struct afs_wb_key *wbk = NULL;
-        struct list_head *p;
-        int ret = -ENOKEY, ret2;
+        struct afs_wb_key *wbk, *old = wreq->netfs_priv2;
+        struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+
+        key_put(wreq->netfs_priv);
+        wreq->netfs_priv = NULL;
+        wreq->netfs_priv2 = NULL;
 
         spin_lock(&vnode->wb_lock);
-        if (*_wbk)
-                p = (*_wbk)->vnode_link.next;
+        if (old)
+                wbk = list_next_entry(old, vnode_link);
         else
-                p = vnode->wb_keys.next;
+                wbk = list_first_entry(&vnode->wb_keys, struct afs_wb_key, vnode_link);
 
-        while (p != &vnode->wb_keys) {
-                wbk = list_entry(p, struct afs_wb_key, vnode_link);
+        list_for_each_entry_from(wbk, &vnode->wb_keys, vnode_link) {
                 _debug("wbk %u", key_serial(wbk->key));
-                ret2 = key_validate(wbk->key);
-                if (ret2 == 0) {
+                if (key_validate(wbk->key) == 0) {
                         refcount_inc(&wbk->usage);
+                        wreq->netfs_priv = key_get(wbk->key);
+                        wreq->netfs_priv2 = wbk;
                         _debug("USE WB KEY %u", key_serial(wbk->key));
                         break;
                 }
-
-                wbk = NULL;
-                if (ret == -ENOKEY)
-                        ret = ret2;
-                p = p->next;
         }
 
         spin_unlock(&vnode->wb_lock);
-        if (*_wbk)
-                afs_put_wb_key(*_wbk);
-        *_wbk = wbk;
-        return 0;
+
+        afs_put_wb_key(old);
 }
 
 static void afs_store_data_success(struct afs_operation *op)
@@ -75,8 +71,7 @@ static void afs_store_data_success(struct afs_operation *op)
         op->ctime = op->file[0].scb.status.mtime_client;
         afs_vnode_commit_status(op, &op->file[0]);
         if (!afs_op_error(op)) {
-                if (!op->store.laundering)
-                        afs_pages_written_back(vnode, op->store.pos, op->store.size);
+                afs_pages_written_back(vnode, op->store.pos, op->store.size);
                 afs_stat_v(vnode, n_stores);
                 atomic_long_add(op->store.size, &afs_v2net(vnode)->n_store_bytes);
         }
@@ -89,113 +84,125 @@ static const struct afs_operation_ops afs_store_data_operation = {
 };
 
 /*
- * write to a file
+ * Prepare a subrequest to write to the server. This sets the max_len
+ * parameter.
+ */
+void afs_prepare_write(struct netfs_io_subrequest *subreq)
+{
+        //if (test_bit(NETFS_SREQ_RETRYING, &subreq->flags))
+        //        subreq->max_len = 512 * 1024;
+        //else
+        subreq->max_len = 256 * 1024 * 1024;
+}
+
+/*
+ * Issue a subrequest to write to the server.
  */
-static int afs_store_data(struct afs_vnode *vnode, struct iov_iter *iter, loff_t pos,
-                          bool laundering)
+static void afs_issue_write_worker(struct work_struct *work)
 {
+        struct netfs_io_subrequest *subreq = container_of(work, struct netfs_io_subrequest, work);
+        struct netfs_io_request *wreq = subreq->rreq;
         struct afs_operation *op;
-        struct afs_wb_key *wbk = NULL;
-        loff_t size = iov_iter_count(iter);
+        struct afs_vnode *vnode = AFS_FS_I(wreq->inode);
+        unsigned long long pos = subreq->start + subreq->transferred;
+        size_t len = subreq->len - subreq->transferred;
         int ret = -ENOKEY;
 
-        _enter("%s{%llx:%llu.%u},%llx,%llx",
+        _enter("R=%x[%x],%s{%llx:%llu.%u},%llx,%zx",
+               wreq->debug_id, subreq->debug_index,
                vnode->volume->name,
                vnode->fid.vid,
                vnode->fid.vnode,
                vnode->fid.unique,
-               size, pos);
+               pos, len);
 
-        ret = afs_get_writeback_key(vnode, &wbk);
-        if (ret) {
-                _leave(" = %d [no keys]", ret);
-                return ret;
-        }
+#if 0 // Error injection
+        if (subreq->debug_index == 3)
+                return netfs_write_subrequest_terminated(subreq, -ENOANO, false);
 
-        op = afs_alloc_operation(wbk->key, vnode->volume);
-        if (IS_ERR(op)) {
-                afs_put_wb_key(wbk);
-                return -ENOMEM;
+        if (!test_bit(NETFS_SREQ_RETRYING, &subreq->flags)) {
+                set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
+                return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
         }
+#endif
+
+        op = afs_alloc_operation(wreq->netfs_priv, vnode->volume);
+        if (IS_ERR(op))
+                return netfs_write_subrequest_terminated(subreq, -EAGAIN, false);
 
         afs_op_set_vnode(op, 0, vnode);
         op->file[0].dv_delta = 1;
         op->file[0].modification = true;
         op->store.pos = pos;
-        op->store.size = size;
-        op->store.laundering = laundering;
+        op->store.size = len;
         op->flags |= AFS_OPERATION_UNINTR;
         op->ops = &afs_store_data_operation;
 
-try_next_key:
         afs_begin_vnode_operation(op);
 
-        op->store.write_iter = iter;
-        op->store.i_size = max(pos + size, vnode->netfs.remote_i_size);
+        op->store.write_iter = &subreq->io_iter;
+        op->store.i_size = umax(pos + len, vnode->netfs.remote_i_size);
         op->mtime = inode_get_mtime(&vnode->netfs.inode);
 
         afs_wait_for_operation(op);
+        ret = afs_put_operation(op);
 
-        switch (afs_op_error(op)) {
+        switch (ret) {
         case -EACCES:
         case -EPERM:
         case -ENOKEY:
         case -EKEYEXPIRED:
         case -EKEYREJECTED:
         case -EKEYREVOKED:
-                _debug("next");
-
-                ret = afs_get_writeback_key(vnode, &wbk);
-                if (ret == 0) {
-                        key_put(op->key);
-                        op->key = key_get(wbk->key);
-                        goto try_next_key;
-                }
+                /* If there are more keys we can try, use the retry algorithm
+                 * to rotate the keys.
+                 */
+                if (wreq->netfs_priv2)
+                        set_bit(NETFS_SREQ_NEED_RETRY, &subreq->flags);
                 break;
         }
 
-        afs_put_wb_key(wbk);
-        _leave(" = %d", afs_op_error(op));
-        return afs_put_operation(op);
+        netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len, false);
 }
 
-static void afs_upload_to_server(struct netfs_io_subrequest *subreq)
+void afs_issue_write(struct netfs_io_subrequest *subreq)
 {
-        struct afs_vnode *vnode = AFS_FS_I(subreq->rreq->inode);
-        ssize_t ret;
-
-        _enter("%x[%x],%zx",
-               subreq->rreq->debug_id, subreq->debug_index, subreq->io_iter.count);
-
-        trace_netfs_sreq(subreq, netfs_sreq_trace_submit);
-        ret = afs_store_data(vnode, &subreq->io_iter, subreq->start,
-                             subreq->rreq->origin == NETFS_LAUNDER_WRITE);
-        netfs_write_subrequest_terminated(subreq, ret < 0 ? ret : subreq->len,
-                                          false);
+        subreq->work.func = afs_issue_write_worker;
+        if (!queue_work(system_unbound_wq, &subreq->work))
+                WARN_ON_ONCE(1);
 }
 
-static void afs_upload_to_server_worker(struct work_struct *work)
+/*
+ * Writeback calls this when it finds a folio that needs uploading. This isn't
+ * called if writeback only has copy-to-cache to deal with.
+ */
+void afs_begin_writeback(struct netfs_io_request *wreq)
 {
-        struct netfs_io_subrequest *subreq =
-                container_of(work, struct netfs_io_subrequest, work);
-
-        afs_upload_to_server(subreq);
+        afs_get_writeback_key(wreq);
+        wreq->io_streams[0].avail = true;
 }
 
 /*
- * Set up write requests for a writeback slice. We need to add a write request
- * for each write we want to make.
+ * Prepare to retry the writes in request. Use this to try rotating the
+ * available writeback keys.
  */
-void afs_create_write_requests(struct netfs_io_request *wreq, loff_t start, size_t len)
+void afs_retry_request(struct netfs_io_request *wreq, struct netfs_io_stream *stream)
 {
-        struct netfs_io_subrequest *subreq;
-
-        _enter("%x,%llx-%llx", wreq->debug_id, start, start + len);
-
-        subreq = netfs_create_write_request(wreq, NETFS_UPLOAD_TO_SERVER,
-                                            start, len, afs_upload_to_server_worker);
-        if (subreq)
-                netfs_queue_write_request(subreq);
+        struct netfs_io_subrequest *subreq =
+                list_first_entry(&stream->subrequests,
                                 struct netfs_io_subrequest, rreq_link);
+
+        switch (subreq->error) {
+        case -EACCES:
+        case -EPERM:
+        case -ENOKEY:
+        case -EKEYEXPIRED:
+        case -EKEYREJECTED:
+        case -EKEYREVOKED:
+                afs_get_writeback_key(wreq);
+                if (!wreq->netfs_priv)
+                        stream->failed = true;
+                break;
+        }
 }
 
 /*
......
@@ -9,6 +9,7 @@
 #include <linux/slab.h>
 #include <linux/file.h>
 #include <linux/uio.h>
+#include <linux/bio.h>
 #include <linux/falloc.h>
 #include <linux/sched/mm.h>
 #include <trace/events/fscache.h>
@@ -493,7 +494,7 @@ cachefiles_do_prepare_read(struct netfs_cache_resources *cres,
  * boundary as appropriate.
  */
 static enum netfs_io_source cachefiles_prepare_read(struct netfs_io_subrequest *subreq,
-                                                    loff_t i_size)
+                                                    unsigned long long i_size)
 {
         return cachefiles_do_prepare_read(&subreq->rreq->cache_resources,
                                           subreq->start, &subreq->len, i_size,
@@ -622,6 +623,77 @@ static int cachefiles_prepare_write(struct netfs_cache_resources *cres,
         return ret;
 }
 
+static void cachefiles_prepare_write_subreq(struct netfs_io_subrequest *subreq)
+{
+        struct netfs_io_request *wreq = subreq->rreq;
+        struct netfs_cache_resources *cres = &wreq->cache_resources;
+
+        _enter("W=%x[%x] %llx", wreq->debug_id, subreq->debug_index, subreq->start);
+
+        subreq->max_len = ULONG_MAX;
+        subreq->max_nr_segs = BIO_MAX_VECS;
+
+        if (!cachefiles_cres_file(cres)) {
+                if (!fscache_wait_for_operation(cres, FSCACHE_WANT_WRITE))
+                        return netfs_prepare_write_failed(subreq);
+                if (!cachefiles_cres_file(cres))
+                        return netfs_prepare_write_failed(subreq);
+        }
+}
+
+static void cachefiles_issue_write(struct netfs_io_subrequest *subreq)
+{
+        struct netfs_io_request *wreq = subreq->rreq;
+        struct netfs_cache_resources *cres = &wreq->cache_resources;
+        struct cachefiles_object *object = cachefiles_cres_object(cres);
+        struct cachefiles_cache *cache = object->volume->cache;
+        const struct cred *saved_cred;
+        size_t off, pre, post, len = subreq->len;
+        loff_t start = subreq->start;
+        int ret;
+
+        _enter("W=%x[%x] %llx-%llx",
+               wreq->debug_id, subreq->debug_index, start, start + len - 1);
+
+        /* We need to start on the cache granularity boundary */
+        off = start & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+        if (off) {
+                pre = CACHEFILES_DIO_BLOCK_SIZE - off;
+                if (pre >= len) {
+                        netfs_write_subrequest_terminated(subreq, len, false);
+                        return;
+                }
+                subreq->transferred += pre;
+                start += pre;
+                len -= pre;
+                iov_iter_advance(&subreq->io_iter, pre);
+        }
+
+        /* We also need to end on the cache granularity boundary */
+        post = len & (CACHEFILES_DIO_BLOCK_SIZE - 1);
+        if (post) {
+                len -= post;
+                if (len == 0) {
+                        netfs_write_subrequest_terminated(subreq, post, false);
+                        return;
+                }
+                iov_iter_truncate(&subreq->io_iter, len);
+        }
+
+        cachefiles_begin_secure(cache, &saved_cred);
+        ret = __cachefiles_prepare_write(object, cachefiles_cres_file(cres),
                                         &start, &len, len, true);
+        cachefiles_end_secure(cache, saved_cred);
+        if (ret < 0) {
+                netfs_write_subrequest_terminated(subreq, ret, false);
+                return;
+        }
+
+        cachefiles_write(&subreq->rreq->cache_resources,
+                         subreq->start, &subreq->io_iter,
+                         netfs_write_subrequest_terminated, subreq);
+}
+
 /*
  * Clean up an operation.
  */
@@ -638,8 +710,10 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = {
         .end_operation          = cachefiles_end_operation,
         .read                   = cachefiles_read,
         .write                  = cachefiles_write,
+        .issue_write            = cachefiles_issue_write,
         .prepare_read           = cachefiles_prepare_read,
         .prepare_write          = cachefiles_prepare_write,
+        .prepare_write_subreq   = cachefiles_prepare_write_subreq,
         .prepare_ondemand_read  = cachefiles_prepare_ondemand_read,
         .query_occupancy        = cachefiles_query_occupancy,
 };
......
@@ -193,7 +193,7 @@ static void ceph_netfs_expand_readahead(struct netfs_io_request *rreq)
          * block, but do not exceed the file size, unless the original
          * request already exceeds it.
          */
-        new_end = min(round_up(end, lo->stripe_unit), rreq->i_size);
+        new_end = umin(round_up(end, lo->stripe_unit), rreq->i_size);
         if (new_end > end && new_end <= rreq->start + max_len)
                 rreq->len = new_end - rreq->start;
 
@@ -498,11 +498,6 @@ const struct netfs_request_ops ceph_netfs_ops = {
 };
 
 #ifdef CONFIG_CEPH_FSCACHE
-static void ceph_set_page_fscache(struct page *page)
-{
-        set_page_fscache(page);
-}
-
 static void ceph_fscache_write_terminated(void *priv, ssize_t error, bool was_async)
 {
         struct inode *inode = priv;
@@ -517,13 +512,9 @@ static void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, b
         struct fscache_cookie *cookie = ceph_fscache_cookie(ci);
 
         fscache_write_to_cache(cookie, inode->i_mapping, off, len, i_size_read(inode),
-                               ceph_fscache_write_terminated, inode, caching);
+                               ceph_fscache_write_terminated, inode, true, caching);
 }
 #else
-static inline void ceph_set_page_fscache(struct page *page)
-{
-}
-
 static inline void ceph_fscache_write_to_cache(struct inode *inode, u64 off, u64 len, bool caching)
 {
 }
@@ -715,8 +706,6 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
         len = wlen;
 
         set_page_writeback(page);
-        if (caching)
-                ceph_set_page_fscache(page);
         ceph_fscache_write_to_cache(inode, page_off, len, caching);
 
         if (IS_ENCRYPTED(inode)) {
@@ -800,8 +789,6 @@ static int ceph_writepage(struct page *page, struct writeback_control *wbc)
                 return AOP_WRITEPAGE_ACTIVATE;
         }
 
-        wait_on_page_fscache(page);
-
         err = writepage_nounlock(page, wbc);
         if (err == -ERESTARTSYS) {
                 /* direct memory reclaimer was killed by SIGKILL. return 0
@@ -1075,7 +1062,7 @@ static int ceph_writepages_start(struct address_space *mapping,
                                 unlock_page(page);
                                 break;
                         }
-                        if (PageWriteback(page) || PageFsCache(page)) {
+                        if (PageWriteback(page)) {
                                 if (wbc->sync_mode == WB_SYNC_NONE) {
                                         doutc(cl, "%p under writeback\n", page);
                                         unlock_page(page);
@@ -1083,7 +1070,6 @@ static int ceph_writepages_start(struct address_space *mapping,
                                 }
                                 doutc(cl, "waiting on writeback %p\n", page);
                                 wait_on_page_writeback(page);
-                                wait_on_page_fscache(page);
                         }
 
                         if (!clear_page_dirty_for_io(page)) {
@@ -1268,8 +1254,6 @@ static int ceph_writepages_start(struct address_space *mapping,
                         }
 
                         set_page_writeback(page);
-                        if (caching)
-                                ceph_set_page_fscache(page);
                         len += thp_size(page);
                 }
                 ceph_fscache_write_to_cache(inode, offset, len, caching);
@@ -1513,7 +1497,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
         if (r < 0)
                 return r;
 
-        folio_wait_fscache(folio);
+        folio_wait_private_2(folio); /* [DEPRECATED] */
         WARN_ON_ONCE(!folio_test_locked(folio));
         *pagep = &folio->page;
         return 0;
......
@@ -577,6 +577,8 @@ struct inode *ceph_alloc_inode(struct super_block *sb)
 
         /* Set parameters for the netfs library */
         netfs_inode_init(&ci->netfs, &ceph_netfs_ops, false);
+        /* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+        __set_bit(NETFS_ICTX_USE_PGPRIV2, &ci->netfs.flags);
 
         spin_lock_init(&ci->i_ceph_lock);
......
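
The ceph hunks above show the compatibility path mentioned in the pull
message: rather than PG_fscache, a folio being written to the cache is now
marked with the synonymous PG_private_2 flag, and the filesystem opts in via
NETFS_ICTX_USE_PGPRIV2. A rough sketch of that (deprecated) lifecycle, using
only helpers that appear elsewhere in this diff; the myfs_* functions are
placeholders:

    /* Sketch only: the deprecated PG_private_2 marking retained for
     * nfs/ceph compatibility.
     */
    static void myfs_copy_folio_to_cache(struct folio *folio)
    {
            folio_start_private_2(folio);           /* was folio_start_fscache() */
            myfs_write_folio_to_cache(folio);       /* placeholder async write */
    }

    static void myfs_cache_write_done(struct folio *folio)
    {
            folio_end_private_2(folio);             /* clear flag, wake waiters */
    }

Anyone about to modify such a folio must first wait with
folio_wait_private_2(folio), as ceph_write_begin() now does.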
@@ -11,7 +11,8 @@ netfs-y := \
         main.o \
         misc.o \
         objects.o \
-        output.o
+        write_collect.o \
+        write_issue.o
 
 netfs-$(CONFIG_NETFS_STATS) += stats.o
......
@@ -10,8 +10,11 @@
 #include "internal.h"
 
 /*
- * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * Unlock the folios in a read operation. We need to set PG_writeback on any
  * folios we're going to write back before we unlock them.
+ *
+ * Note that if the deprecated NETFS_RREQ_USE_PGPRIV2 is set then we use
+ * PG_private_2 and do a direct write to the cache from here instead.
  */
 void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 {
@@ -48,14 +51,14 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
         xas_for_each(&xas, folio, last_page) {
                 loff_t pg_end;
                 bool pg_failed = false;
-                bool folio_started;
+                bool wback_to_cache = false;
+                bool folio_started = false;
 
                 if (xas_retry(&xas, folio))
                         continue;
 
                 pg_end = folio_pos(folio) + folio_size(folio) - 1;
 
-                folio_started = false;
                 for (;;) {
                         loff_t sreq_end;
 
@@ -63,10 +66,16 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                                 pg_failed = true;
                                 break;
                         }
-                        if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
-                                trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
-                                folio_start_fscache(folio);
-                                folio_started = true;
+                        if (test_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags)) {
+                                if (!folio_started && test_bit(NETFS_SREQ_COPY_TO_CACHE,
+                                                               &subreq->flags)) {
+                                        trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+                                        folio_start_private_2(folio);
+                                        folio_started = true;
+                                }
+                        } else {
+                                wback_to_cache |=
+                                        test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags);
                         }
                         pg_failed |= subreq_failed;
                         sreq_end = subreq->start + subreq->len - 1;
@@ -98,6 +107,11 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
                                 kfree(finfo);
                         }
                         folio_mark_uptodate(folio);
+                        if (wback_to_cache && !WARN_ON_ONCE(folio_get_private(folio) != NULL)) {
+                                trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
+                                folio_attach_private(folio, NETFS_FOLIO_COPY_TO_CACHE);
+                                filemap_dirty_folio(folio->mapping, folio);
+                        }
                 }
 
                 if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
@@ -116,7 +130,9 @@ void netfs_rreq_unlock_folios(struct netfs_io_request *rreq)
 }
 
 static void netfs_cache_expand_readahead(struct netfs_io_request *rreq,
-                                         loff_t *_start, size_t *_len, loff_t i_size)
+                                         unsigned long long *_start,
+                                         unsigned long long *_len,
+                                         unsigned long long i_size)
 {
         struct netfs_cache_resources *cres = &rreq->cache_resources;
 
@@ -266,7 +282,7 @@ int netfs_read_folio(struct file *file, struct folio *folio)
         if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS)
                 goto discard;
 
-        netfs_stat(&netfs_n_rh_readpage);
+        netfs_stat(&netfs_n_rh_read_folio);
         trace_netfs_read(rreq, rreq->start, rreq->len, netfs_read_trace_readpage);
 
         /* Set up the output buffer */
@@ -450,7 +466,7 @@ int netfs_write_begin(struct netfs_inode *ctx,
         if (!netfs_is_cache_enabled(ctx) &&
             netfs_skip_folio_read(folio, pos, len, false)) {
                 netfs_stat(&netfs_n_rh_write_zskip);
-                goto have_folio_no_wait;
+                goto have_folio;
         }
 
         rreq = netfs_alloc_request(mapping, file,
@@ -491,10 +507,6 @@ int netfs_write_begin(struct netfs_inode *ctx,
         netfs_put_request(rreq, false, netfs_rreq_trace_put_return);
 
 have_folio:
-        ret = folio_wait_fscache_killable(folio);
-        if (ret < 0)
-                goto error;
-have_folio_no_wait:
         *_folio = folio;
         _leave(" = 0");
         return 0;
......
@@ -34,6 +34,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
         unsigned long long start = iocb->ki_pos;
         unsigned long long end = start + iov_iter_count(iter);
         ssize_t ret, n;
+        size_t len = iov_iter_count(iter);
         bool async = !is_sync_kiocb(iocb);
 
         _enter("");
@@ -46,13 +47,17 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 
         _debug("uw %llx-%llx", start, end);
 
-        wreq = netfs_alloc_request(iocb->ki_filp->f_mapping, iocb->ki_filp,
-                                   start, end - start,
-                                   iocb->ki_flags & IOCB_DIRECT ?
-                                   NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
+        wreq = netfs_create_write_req(iocb->ki_filp->f_mapping, iocb->ki_filp, start,
+                                      iocb->ki_flags & IOCB_DIRECT ?
+                                      NETFS_DIO_WRITE : NETFS_UNBUFFERED_WRITE);
         if (IS_ERR(wreq))
                 return PTR_ERR(wreq);
 
+        wreq->io_streams[0].avail = true;
+        trace_netfs_write(wreq, (iocb->ki_flags & IOCB_DIRECT ?
+                                 netfs_write_trace_dio_write :
+                                 netfs_write_trace_unbuffered_write));
+
         {
                 /* If this is an async op and we're not using a bounce buffer,
                  * we have to save the source buffer as the iterator is only
@@ -63,7 +68,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
                  * request.
                  */
                 if (async || user_backed_iter(iter)) {
-                        n = netfs_extract_user_iter(iter, wreq->len, &wreq->iter, 0);
+                        n = netfs_extract_user_iter(iter, len, &wreq->iter, 0);
                         if (n < 0) {
                                 ret = n;
                                 goto out;
@@ -71,7 +76,6 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
                         wreq->direct_bv = (struct bio_vec *)wreq->iter.bvec;
                         wreq->direct_bv_count = n;
                         wreq->direct_bv_unpin = iov_iter_extract_will_pin(iter);
-                        wreq->len = iov_iter_count(&wreq->iter);
                 } else {
                         wreq->iter = *iter;
                 }
@@ -79,6 +83,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
                 wreq->io_iter = wreq->iter;
         }
 
+        __set_bit(NETFS_RREQ_USE_IO_ITER, &wreq->flags);
+
         /* Copy the data into the bounce buffer and encrypt it. */
         // TODO
 
@@ -87,10 +93,7 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
         if (async)
                 wreq->iocb = iocb;
         wreq->cleanup = netfs_cleanup_dio_write;
-        ret = netfs_begin_write(wreq, is_sync_kiocb(iocb),
-                                iocb->ki_flags & IOCB_DIRECT ?
-                                netfs_write_trace_dio_write :
-                                netfs_write_trace_unbuffered_write);
+        ret = netfs_unbuffered_write(wreq, is_sync_kiocb(iocb), iov_iter_count(&wreq->io_iter));
         if (ret < 0) {
                 _debug("begin = %zd", ret);
                 goto out;
@@ -100,9 +103,8 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
                 trace_netfs_rreq(wreq, netfs_rreq_trace_wait_ip);
                 wait_on_bit(&wreq->flags, NETFS_RREQ_IN_PROGRESS,
                             TASK_UNINTERRUPTIBLE);
+                smp_rmb(); /* Read error/transferred after RIP flag */
                 ret = wreq->error;
-                _debug("waited = %zd", ret);
                 if (ret == 0) {
                         ret = wreq->transferred;
                         iocb->ki_pos += ret;
@@ -132,18 +134,20 @@ static ssize_t netfs_unbuffered_write_iter_locked(struct kiocb *iocb, struct iov
 ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
         struct file *file = iocb->ki_filp;
-        struct inode *inode = file->f_mapping->host;
+        struct address_space *mapping = file->f_mapping;
+        struct inode *inode = mapping->host;
         struct netfs_inode *ictx = netfs_inode(inode);
-        unsigned long long end;
         ssize_t ret;
+        loff_t pos = iocb->ki_pos;
+        unsigned long long end = pos + iov_iter_count(from) - 1;
 
-        _enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));
+        _enter("%llx,%zx,%llx", pos, iov_iter_count(from), i_size_read(inode));
 
         if (!iov_iter_count(from))
                 return 0;
 
         trace_netfs_write_iter(iocb, from);
-        netfs_stat(&netfs_n_rh_dio_write);
+        netfs_stat(&netfs_n_wh_dio_write);
 
         ret = netfs_start_io_direct(inode);
         if (ret < 0)
@@ -157,7 +161,25 @@ ssize_t netfs_unbuffered_write_iter(struct kiocb *iocb, struct iov_iter *from)
         ret = file_update_time(file);
         if (ret < 0)
                 goto out;
-        ret = kiocb_invalidate_pages(iocb, iov_iter_count(from));
+        if (iocb->ki_flags & IOCB_NOWAIT) {
+                /* We could block if there are any pages in the range. */
+                ret = -EAGAIN;
+                if (filemap_range_has_page(mapping, pos, end))
+                        if (filemap_invalidate_inode(inode, true, pos, end))
+                                goto out;
+        } else {
+                ret = filemap_write_and_wait_range(mapping, pos, end);
+                if (ret < 0)
+                        goto out;
+        }
+
+        /*
+         * After a write we want buffered reads to be sure to go to disk to get
+         * the new data. We invalidate clean cached page from the region we're
+         * about to write. We do this *before* the write so that we can return
+         * without clobbering -EIOCBQUEUED from ->direct_IO().
+         */
+        ret = filemap_invalidate_inode(inode, true, pos, end);
         if (ret < 0)
                 goto out;
-        end = iocb->ki_pos + iov_iter_count(from);
......
@@ -166,6 +166,7 @@ struct fscache_write_request {
         loff_t start;
         size_t len;
         bool set_bits;
+        bool using_pgpriv2;
         netfs_io_terminated_t term_func;
         void *term_func_priv;
 };
@@ -182,7 +183,7 @@ void __fscache_clear_page_bits(struct address_space *mapping,
 
                 rcu_read_lock();
                 xas_for_each(&xas, page, last) {
-                        end_page_fscache(page);
+                        folio_end_private_2(page_folio(page));
                 }
                 rcu_read_unlock();
 }
@@ -197,8 +198,9 @@ static void fscache_wreq_done(void *priv, ssize_t transferred_or_error,
 {
         struct fscache_write_request *wreq = priv;
 
-        fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
-                                wreq->set_bits);
+        if (wreq->using_pgpriv2)
+                fscache_clear_page_bits(wreq->mapping, wreq->start, wreq->len,
+                                        wreq->set_bits);
 
         if (wreq->term_func)
                 wreq->term_func(wreq->term_func_priv, transferred_or_error,
@@ -212,7 +214,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
                               loff_t start, size_t len, loff_t i_size,
                               netfs_io_terminated_t term_func,
                               void *term_func_priv,
-                              bool cond)
+                              bool using_pgpriv2, bool cond)
 {
         struct fscache_write_request *wreq;
         struct netfs_cache_resources *cres;
@@ -230,6 +232,7 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
         wreq->mapping = mapping;
         wreq->start = start;
         wreq->len = len;
+        wreq->using_pgpriv2 = using_pgpriv2;
         wreq->set_bits = cond;
         wreq->term_func = term_func;
         wreq->term_func_priv = term_func_priv;
@@ -257,7 +260,8 @@ void __fscache_write_to_cache(struct fscache_cookie *cookie,
 abandon_free:
         kfree(wreq);
 abandon:
-        fscache_clear_page_bits(mapping, start, len, cond);
+        if (using_pgpriv2)
+                fscache_clear_page_bits(mapping, start, len, cond);
         if (term_func)
                 term_func(term_func_priv, ret, false);
 }
......
@@ -37,6 +37,8 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync);
 extern unsigned int netfs_debug;
 extern struct list_head netfs_io_requests;
 extern spinlock_t netfs_proc_lock;
+extern mempool_t netfs_request_pool;
+extern mempool_t netfs_subrequest_pool;
 
 #ifdef CONFIG_PROC_FS
 static inline void netfs_proc_add_rreq(struct netfs_io_request *rreq)
@@ -90,23 +92,13 @@ static inline void netfs_see_request(struct netfs_io_request *rreq,
         trace_netfs_rreq_ref(rreq->debug_id, refcount_read(&rreq->ref), what);
 }
 
-/*
- * output.c
- */
-int netfs_begin_write(struct netfs_io_request *wreq, bool may_wait,
-                      enum netfs_write_trace what);
-struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
-int netfs_advance_writethrough(struct netfs_io_request *wreq, size_t copied, bool to_page_end);
-int netfs_end_writethrough(struct netfs_io_request *wreq, struct kiocb *iocb);
-
 /*
  * stats.c
  */
 #ifdef CONFIG_NETFS_STATS
 extern atomic_t netfs_n_rh_dio_read;
-extern atomic_t netfs_n_rh_dio_write;
 extern atomic_t netfs_n_rh_readahead;
-extern atomic_t netfs_n_rh_readpage;
+extern atomic_t netfs_n_rh_read_folio;
 extern atomic_t netfs_n_rh_rreq;
 extern atomic_t netfs_n_rh_sreq;
 extern atomic_t netfs_n_rh_download;
@@ -123,6 +115,10 @@ extern atomic_t netfs_n_rh_write_begin;
 extern atomic_t netfs_n_rh_write_done;
 extern atomic_t netfs_n_rh_write_failed;
 extern atomic_t netfs_n_rh_write_zskip;
+extern atomic_t netfs_n_wh_buffered_write;
+extern atomic_t netfs_n_wh_writethrough;
+extern atomic_t netfs_n_wh_dio_write;
+extern atomic_t netfs_n_wh_writepages;
 extern atomic_t netfs_n_wh_wstream_conflict;
 extern atomic_t netfs_n_wh_upload;
 extern atomic_t netfs_n_wh_upload_done;
@@ -148,6 +144,33 @@ static inline void netfs_stat_d(atomic_t *stat)
 #define netfs_stat_d(x) do {} while(0)
 #endif
 
+/*
+ * write_collect.c
+ */
+int netfs_folio_written_back(struct folio *folio);
+void netfs_write_collection_worker(struct work_struct *work);
+void netfs_wake_write_collector(struct netfs_io_request *wreq, bool was_async);
+
+/*
+ * write_issue.c
+ */
+struct netfs_io_request *netfs_create_write_req(struct address_space *mapping,
+                                                struct file *file,
+                                                loff_t start,
+                                                enum netfs_io_origin origin);
+void netfs_reissue_write(struct netfs_io_stream *stream,
+                         struct netfs_io_subrequest *subreq);
+int netfs_advance_write(struct netfs_io_request *wreq,
+                        struct netfs_io_stream *stream,
+                        loff_t start, size_t len, bool to_eof);
+struct netfs_io_request *netfs_begin_writethrough(struct kiocb *iocb, size_t len);
+int netfs_advance_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+                               struct folio *folio, size_t copied, bool to_page_end,
+                               struct folio **writethrough_cache);
+int netfs_end_writethrough(struct netfs_io_request *wreq, struct writeback_control *wbc,
+                           struct folio *writethrough_cache);
+int netfs_unbuffered_write(struct netfs_io_request *wreq, bool may_wait, size_t len);
+
 /*
  * Miscellaneous functions.
  */
@@ -168,7 +191,7 @@ static inline bool netfs_is_cache_enabled(struct netfs_inode *ctx)
  */
 static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
 {
-        if (netfs_group)
+        if (netfs_group && netfs_group != NETFS_FOLIO_COPY_TO_CACHE)
                 refcount_inc(&netfs_group->ref);
         return netfs_group;
 }
@@ -178,7 +201,9 @@ static inline struct netfs_group *netfs_get_group(struct netfs_group *netfs_group)
  */
 static inline void netfs_put_group(struct netfs_group *netfs_group)
 {
-        if (netfs_group && refcount_dec_and_test(&netfs_group->ref))
+        if (netfs_group &&
+            netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+            refcount_dec_and_test(&netfs_group->ref))
                 netfs_group->free(netfs_group);
 }
 
@@ -187,7 +212,9 @@ static inline void netfs_put_group(struct netfs_group *netfs_group)
 static inline void netfs_put_group_many(struct netfs_group *netfs_group, int nr)
 {
-        if (netfs_group && refcount_sub_and_test(nr, &netfs_group->ref))
+        if (netfs_group &&
+            netfs_group != NETFS_FOLIO_COPY_TO_CACHE &&
+            refcount_sub_and_test(nr, &netfs_group->ref))
                 netfs_group->free(netfs_group);
 }
......
...@@ -98,145 +98,6 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async) ...@@ -98,145 +98,6 @@ static void netfs_rreq_completed(struct netfs_io_request *rreq, bool was_async)
netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete); netfs_put_request(rreq, was_async, netfs_rreq_trace_put_complete);
} }
/*
* Deal with the completion of writing the data to the cache. We have to clear
* the PG_fscache bits on the folios involved and release the caller's ref.
*
* May be called in softirq mode and we inherit a ref from the caller.
*/
static void netfs_rreq_unmark_after_write(struct netfs_io_request *rreq,
bool was_async)
{
struct netfs_io_subrequest *subreq;
struct folio *folio;
pgoff_t unlocked = 0;
bool have_unlocked = false;
rcu_read_lock();
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
if (xas_retry(&xas, folio))
continue;
/* We might have multiple writes from the same huge
* folio, but we mustn't unlock a folio more than once.
*/
if (have_unlocked && folio->index <= unlocked)
continue;
unlocked = folio_next_index(folio) - 1;
trace_netfs_folio(folio, netfs_folio_trace_end_copy);
folio_end_fscache(folio);
have_unlocked = true;
}
}
rcu_read_unlock();
netfs_rreq_completed(rreq, was_async);
}
static void netfs_rreq_copy_terminated(void *priv, ssize_t transferred_or_error,
bool was_async)
{
struct netfs_io_subrequest *subreq = priv;
struct netfs_io_request *rreq = subreq->rreq;
if (IS_ERR_VALUE(transferred_or_error)) {
netfs_stat(&netfs_n_rh_write_failed);
trace_netfs_failure(rreq, subreq, transferred_or_error,
netfs_fail_copy_to_cache);
} else {
netfs_stat(&netfs_n_rh_write_done);
}
trace_netfs_sreq(subreq, netfs_sreq_trace_write_term);
/* If we decrement nr_copy_ops to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_copy_ops))
netfs_rreq_unmark_after_write(rreq, was_async);
netfs_put_subrequest(subreq, was_async, netfs_sreq_trace_put_terminated);
}
/*
* Perform any outstanding writes to the cache. We inherit a ref from the
* caller.
*/
static void netfs_rreq_do_write_to_cache(struct netfs_io_request *rreq)
{
struct netfs_cache_resources *cres = &rreq->cache_resources;
struct netfs_io_subrequest *subreq, *next, *p;
struct iov_iter iter;
int ret;
trace_netfs_rreq(rreq, netfs_rreq_trace_copy);
/* We don't want terminating writes trying to wake us up whilst we're
* still going through the list.
*/
atomic_inc(&rreq->nr_copy_ops);
list_for_each_entry_safe(subreq, p, &rreq->subrequests, rreq_link) {
if (!test_bit(NETFS_SREQ_COPY_TO_CACHE, &subreq->flags)) {
list_del_init(&subreq->rreq_link);
netfs_put_subrequest(subreq, false,
netfs_sreq_trace_put_no_copy);
}
}
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
/* Amalgamate adjacent writes */
while (!list_is_last(&subreq->rreq_link, &rreq->subrequests)) {
next = list_next_entry(subreq, rreq_link);
if (next->start != subreq->start + subreq->len)
break;
subreq->len += next->len;
list_del_init(&next->rreq_link);
netfs_put_subrequest(next, false,
netfs_sreq_trace_put_merged);
}
ret = cres->ops->prepare_write(cres, &subreq->start, &subreq->len,
subreq->len, rreq->i_size, true);
if (ret < 0) {
trace_netfs_failure(rreq, subreq, ret, netfs_fail_prepare_write);
trace_netfs_sreq(subreq, netfs_sreq_trace_write_skip);
continue;
}
iov_iter_xarray(&iter, ITER_SOURCE, &rreq->mapping->i_pages,
subreq->start, subreq->len);
atomic_inc(&rreq->nr_copy_ops);
netfs_stat(&netfs_n_rh_write);
netfs_get_subrequest(subreq, netfs_sreq_trace_get_copy_to_cache);
trace_netfs_sreq(subreq, netfs_sreq_trace_write);
cres->ops->write(cres, subreq->start, &iter,
netfs_rreq_copy_terminated, subreq);
}
/* If we decrement nr_copy_ops to 0, the usage ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_copy_ops))
netfs_rreq_unmark_after_write(rreq, false);
}
static void netfs_rreq_write_to_cache_work(struct work_struct *work)
{
struct netfs_io_request *rreq =
container_of(work, struct netfs_io_request, work);
netfs_rreq_do_write_to_cache(rreq);
}
static void netfs_rreq_write_to_cache(struct netfs_io_request *rreq)
{
rreq->work.func = netfs_rreq_write_to_cache_work;
if (!queue_work(system_unbound_wq, &rreq->work))
BUG();
}
/* /*
* Handle a short read. * Handle a short read.
*/ */
@@ -352,8 +213,13 @@ static void netfs_rreq_assess_dio(struct netfs_io_request *rreq)
 	unsigned int i;
 	size_t transferred = 0;
 
-	for (i = 0; i < rreq->direct_bv_count; i++)
+	for (i = 0; i < rreq->direct_bv_count; i++) {
 		flush_dcache_page(rreq->direct_bv[i].bv_page);
+		// TODO: cifs marks pages in the destination buffer
+		// dirty under some circumstances after a read.  Do we
+		// need to do that too?
+		set_page_dirty(rreq->direct_bv[i].bv_page);
+	}
 
 	list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
 		if (subreq->error || subreq->transferred == 0)
@@ -409,9 +275,6 @@ static void netfs_rreq_assess(struct netfs_io_request *rreq, bool was_async)
 	clear_bit_unlock(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
 	wake_up_bit(&rreq->flags, NETFS_RREQ_IN_PROGRESS);
 
-	if (test_bit(NETFS_RREQ_COPY_TO_CACHE, &rreq->flags))
-		return netfs_rreq_write_to_cache(rreq);
-
 	netfs_rreq_completed(rreq, was_async);
 }
@@ -618,7 +481,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
 set:
 	if (subreq->len > rreq->len)
-		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %zx\n",
+		pr_warn("R=%08x[%u] SREQ>RREQ %zx > %llx\n",
 			rreq->debug_id, subreq->debug_index,
 			subreq->len, rreq->len);
@@ -643,8 +506,7 @@ netfs_rreq_prepare_read(struct netfs_io_request *rreq,
  * Slice off a piece of a read request and submit an I/O request for it.
  */
 static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
-				    struct iov_iter *io_iter,
-				    unsigned int *_debug_index)
+				    struct iov_iter *io_iter)
 {
 	struct netfs_io_subrequest *subreq;
 	enum netfs_io_source source;
@@ -653,11 +515,10 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 	if (!subreq)
 		return false;
 
-	subreq->debug_index = (*_debug_index)++;
 	subreq->start = rreq->start + rreq->submitted;
 	subreq->len = io_iter->count;
 
-	_debug("slice %llx,%zx,%zx", subreq->start, subreq->len, rreq->submitted);
+	_debug("slice %llx,%zx,%llx", subreq->start, subreq->len, rreq->submitted);
 	list_add_tail(&subreq->rreq_link, &rreq->subrequests);
 
 	/* Call out to the cache to find out what it can do with the remaining
@@ -707,7 +568,6 @@ static bool netfs_rreq_submit_slice(struct netfs_io_request *rreq,
 int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 {
 	struct iov_iter io_iter;
-	unsigned int debug_index = 0;
 	int ret;
 
 	_enter("R=%x %llx-%llx",
@@ -733,12 +593,12 @@ int netfs_begin_read(struct netfs_io_request *rreq, bool sync)
 	atomic_set(&rreq->nr_outstanding, 1);
 	io_iter = rreq->io_iter;
 	do {
-		_debug("submit %llx + %zx >= %llx",
+		_debug("submit %llx + %llx >= %llx",
 		       rreq->start, rreq->submitted, rreq->i_size);
 		if (rreq->origin == NETFS_DIO_READ &&
 		    rreq->start + rreq->submitted >= rreq->i_size)
 			break;
-		if (!netfs_rreq_submit_slice(rreq, &io_iter, &debug_index))
+		if (!netfs_rreq_submit_slice(rreq, &io_iter))
 			break;
 		if (test_bit(NETFS_RREQ_BLOCKED, &rreq->flags) &&
 		    test_bit(NETFS_RREQ_NONBLOCK, &rreq->flags))
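netfs_begin_read() drives submission: it slices subrequests off the front of the unsubmitted region until the whole request (or, for DIO, everything below i_size) is covered. A stripped-down model of that loop (standalone C, toy types; the real code threads an iov_iter, refcounts and cache decisions through each slice):

	#include <stdio.h>
	#include <stdbool.h>

	struct request {
		unsigned long long start;	/* file offset of the request */
		unsigned long long len;		/* total bytes wanted */
		unsigned long long submitted;	/* bytes already sliced off */
	};

	/* Stand-in for netfs_rreq_submit_slice(): take up to max_slice
	 * bytes off the front of the unsubmitted region. */
	static bool submit_slice(struct request *rreq,
				 unsigned long long max_slice)
	{
		unsigned long long remaining = rreq->len - rreq->submitted;
		unsigned long long slice =
			remaining < max_slice ? remaining : max_slice;

		if (!slice)
			return false;
		printf("subreq at %llu, %llu bytes\n",
		       rreq->start + rreq->submitted, slice);
		rreq->submitted += slice;
		return true;
	}

	int main(void)
	{
		struct request rreq = { .start = 0, .len = 1 << 20 };

		/* The loop shape used by netfs_begin_read(). */
		do {
			if (!submit_slice(&rreq, 256 * 1024))
				break;
		} while (rreq.submitted < rreq.len);
		return 0;
	}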
......
@@ -7,6 +7,7 @@
 #include <linux/module.h>
 #include <linux/export.h>
+#include <linux/mempool.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
 #include "internal.h"
@@ -23,6 +24,11 @@ unsigned netfs_debug;
 module_param_named(debug, netfs_debug, uint, S_IWUSR | S_IRUGO);
 MODULE_PARM_DESC(netfs_debug, "Netfs support debugging mask");
 
+static struct kmem_cache *netfs_request_slab;
+static struct kmem_cache *netfs_subrequest_slab;
+mempool_t netfs_request_pool;
+mempool_t netfs_subrequest_pool;
+
 #ifdef CONFIG_PROC_FS
 LIST_HEAD(netfs_io_requests);
 DEFINE_SPINLOCK(netfs_proc_lock);
@@ -31,9 +37,9 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
 	[NETFS_READAHEAD]		= "RA",
 	[NETFS_READPAGE]		= "RP",
 	[NETFS_READ_FOR_WRITE]		= "RW",
+	[NETFS_COPY_TO_CACHE]		= "CC",
 	[NETFS_WRITEBACK]		= "WB",
 	[NETFS_WRITETHROUGH]		= "WT",
-	[NETFS_LAUNDER_WRITE]		= "LW",
 	[NETFS_UNBUFFERED_WRITE]	= "UW",
 	[NETFS_DIO_READ]		= "DR",
 	[NETFS_DIO_WRITE]		= "DW",
@@ -56,7 +62,7 @@ static int netfs_requests_seq_show(struct seq_file *m, void *v)
 	rreq = list_entry(v, struct netfs_io_request, proc_link);
 	seq_printf(m,
-		   "%08x %s %3d %2lx %4d %3d @%04llx %zx/%zx",
+		   "%08x %s %3d %2lx %4d %3d @%04llx %llx/%llx",
 		   rreq->debug_id,
 		   netfs_origins[rreq->origin],
 		   refcount_read(&rreq->ref),
@@ -98,25 +104,54 @@ static int __init netfs_init(void)
 {
 	int ret = -ENOMEM;
 
+	netfs_request_slab = kmem_cache_create("netfs_request",
+					       sizeof(struct netfs_io_request), 0,
+					       SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+					       NULL);
+	if (!netfs_request_slab)
+		goto error_req;
+
+	if (mempool_init_slab_pool(&netfs_request_pool, 100, netfs_request_slab) < 0)
+		goto error_reqpool;
+
+	netfs_subrequest_slab = kmem_cache_create("netfs_subrequest",
+						  sizeof(struct netfs_io_subrequest), 0,
+						  SLAB_HWCACHE_ALIGN | SLAB_ACCOUNT,
+						  NULL);
+	if (!netfs_subrequest_slab)
+		goto error_subreq;
+
+	if (mempool_init_slab_pool(&netfs_subrequest_pool, 100, netfs_subrequest_slab) < 0)
+		goto error_subreqpool;
+
 	if (!proc_mkdir("fs/netfs", NULL))
-		goto error;
+		goto error_proc;
 	if (!proc_create_seq("fs/netfs/requests", S_IFREG | 0444, NULL,
 			     &netfs_requests_seq_ops))
-		goto error_proc;
+		goto error_procfile;
 #ifdef CONFIG_FSCACHE_STATS
 	if (!proc_create_single("fs/netfs/stats", S_IFREG | 0444, NULL,
 				netfs_stats_show))
-		goto error_proc;
+		goto error_procfile;
 #endif
 
 	ret = fscache_init();
 	if (ret < 0)
-		goto error_proc;
+		goto error_fscache;
 	return 0;
 
-error_proc:
+error_fscache:
+error_procfile:
 	remove_proc_entry("fs/netfs", NULL);
-error:
+error_proc:
+	mempool_exit(&netfs_subrequest_pool);
+error_subreqpool:
	kmem_cache_destroy(netfs_subrequest_slab);
+error_subreq:
+	mempool_exit(&netfs_request_pool);
+error_reqpool:
+	kmem_cache_destroy(netfs_request_slab);
+error_req:
 	return ret;
 }
 fs_initcall(netfs_init);
@@ -125,5 +160,9 @@ static void __exit netfs_exit(void)
 {
 	fscache_exit();
 	remove_proc_entry("fs/netfs", NULL);
+	mempool_exit(&netfs_subrequest_pool);
+	kmem_cache_destroy(netfs_subrequest_slab);
+	mempool_exit(&netfs_request_pool);
+	kmem_cache_destroy(netfs_request_slab);
 }
 module_exit(netfs_exit);
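Two things to note in the hunk above: failure unwinds in exact reverse order of construction via the label ladder, and each object pool is a mempool layered over a slab so that allocations in the writeback path can dip into a pre-filled reserve rather than fail. The same slab-plus-mempool pattern reduced to a minimal module (hypothetical struct widget; illustrative only):

	#include <linux/module.h>
	#include <linux/slab.h>
	#include <linux/mempool.h>

	struct widget { int payload; };	/* hypothetical object type */

	static struct kmem_cache *widget_slab;
	static mempool_t widget_pool;

	static int __init widget_init(void)
	{
		widget_slab = kmem_cache_create("widget", sizeof(struct widget),
						0, SLAB_HWCACHE_ALIGN, NULL);
		if (!widget_slab)
			return -ENOMEM;

		/* Keep at least 100 objects in reserve for critical paths. */
		if (mempool_init_slab_pool(&widget_pool, 100, widget_slab) < 0) {
			kmem_cache_destroy(widget_slab);
			return -ENOMEM;
		}
		return 0;
	}

	static void __exit widget_exit(void)
	{
		mempool_exit(&widget_pool);	/* reverse order of setup */
		kmem_cache_destroy(widget_slab);
	}

	module_init(widget_init);
	module_exit(widget_exit);
	MODULE_LICENSE("GPL");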
@@ -177,13 +177,11 @@ EXPORT_SYMBOL(netfs_clear_inode_writeback);
  */
 void netfs_invalidate_folio(struct folio *folio, size_t offset, size_t length)
 {
-	struct netfs_folio *finfo = NULL;
+	struct netfs_folio *finfo;
 	size_t flen = folio_size(folio);
 
 	_enter("{%lx},%zx,%zx", folio->index, offset, length);
 
-	folio_wait_fscache(folio);
-
 	if (!folio_test_private(folio))
 		return;
@@ -248,12 +246,6 @@ bool netfs_release_folio(struct folio *folio, gfp_t gfp)
 	if (folio_test_private(folio))
 		return false;
 
-	if (folio_test_fscache(folio)) {
-		if (current_is_kswapd() || !(gfp & __GFP_FS))
-			return false;
-		folio_wait_fscache(folio);
-	}
-
 	fscache_note_page_release(netfs_i_cookie(ctx));
 	return true;
 }
......
@@ -6,6 +6,8 @@
  */
 
 #include <linux/slab.h>
+#include <linux/mempool.h>
+#include <linux/delay.h>
 #include "internal.h"
 
 /*
@@ -20,17 +22,22 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	struct inode *inode = file ? file_inode(file) : mapping->host;
 	struct netfs_inode *ctx = netfs_inode(inode);
 	struct netfs_io_request *rreq;
+	mempool_t *mempool = ctx->ops->request_pool ?: &netfs_request_pool;
+	struct kmem_cache *cache = mempool->pool_data;
 	bool is_unbuffered = (origin == NETFS_UNBUFFERED_WRITE ||
 			      origin == NETFS_DIO_READ ||
 			      origin == NETFS_DIO_WRITE);
 	bool cached = !is_unbuffered && netfs_is_cache_enabled(ctx);
 	int ret;
 
-	rreq = kzalloc(ctx->ops->io_request_size ?: sizeof(struct netfs_io_request),
-		       GFP_KERNEL);
-	if (!rreq)
-		return ERR_PTR(-ENOMEM);
+	for (;;) {
+		rreq = mempool_alloc(mempool, GFP_KERNEL);
+		if (rreq)
+			break;
+		msleep(10);
+	}
 
+	memset(rreq, 0, kmem_cache_size(cache));
 	rreq->start	= start;
 	rreq->len	= len;
 	rreq->upper_len	= len;
@@ -40,19 +47,27 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 	rreq->inode	= inode;
 	rreq->i_size	= i_size_read(inode);
 	rreq->debug_id	= atomic_inc_return(&debug_ids);
+	rreq->wsize	= INT_MAX;
+	spin_lock_init(&rreq->lock);
+	INIT_LIST_HEAD(&rreq->io_streams[0].subrequests);
+	INIT_LIST_HEAD(&rreq->io_streams[1].subrequests);
 	INIT_LIST_HEAD(&rreq->subrequests);
 	INIT_WORK(&rreq->work, NULL);
 	refcount_set(&rreq->ref, 1);
 
 	__set_bit(NETFS_RREQ_IN_PROGRESS, &rreq->flags);
-	if (cached)
+	if (cached) {
 		__set_bit(NETFS_RREQ_WRITE_TO_CACHE, &rreq->flags);
+		if (test_bit(NETFS_ICTX_USE_PGPRIV2, &ctx->flags))
+			/* Filesystem uses deprecated PG_private_2 marking. */
+			__set_bit(NETFS_RREQ_USE_PGPRIV2, &rreq->flags);
+	}
 	if (file && file->f_flags & O_NONBLOCK)
 		__set_bit(NETFS_RREQ_NONBLOCK, &rreq->flags);
 	if (rreq->netfs_ops->init_request) {
 		ret = rreq->netfs_ops->init_request(rreq, file);
 		if (ret < 0) {
-			kfree(rreq);
+			mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
 			return ERR_PTR(ret);
 		}
 	}
@@ -74,6 +89,8 @@ void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace
 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
 {
 	struct netfs_io_subrequest *subreq;
+	struct netfs_io_stream *stream;
+	int s;
 
 	while (!list_empty(&rreq->subrequests)) {
 		subreq = list_first_entry(&rreq->subrequests,
@@ -82,6 +99,25 @@ void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async)
 		netfs_put_subrequest(subreq, was_async,
 				     netfs_sreq_trace_put_clear);
 	}
+
+	for (s = 0; s < ARRAY_SIZE(rreq->io_streams); s++) {
+		stream = &rreq->io_streams[s];
+		while (!list_empty(&stream->subrequests)) {
+			subreq = list_first_entry(&stream->subrequests,
+						  struct netfs_io_subrequest, rreq_link);
+			list_del(&subreq->rreq_link);
+			netfs_put_subrequest(subreq, was_async,
+					     netfs_sreq_trace_put_clear);
+		}
+	}
+}
+
+static void netfs_free_request_rcu(struct rcu_head *rcu)
+{
+	struct netfs_io_request *rreq = container_of(rcu, struct netfs_io_request, rcu);
+
+	mempool_free(rreq, rreq->netfs_ops->request_pool ?: &netfs_request_pool);
+	netfs_stat_d(&netfs_n_rh_rreq);
 }
 
 static void netfs_free_request(struct work_struct *work)
@@ -106,8 +142,7 @@ static void netfs_free_request(struct work_struct *work)
 		}
 		kvfree(rreq->direct_bv);
 	}
-	kfree_rcu(rreq, rcu);
-	netfs_stat_d(&netfs_n_rh_rreq);
+	call_rcu(&rreq->rcu, netfs_free_request_rcu);
 }
 
 void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
@@ -139,19 +174,25 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq)
 {
 	struct netfs_io_subrequest *subreq;
+	mempool_t *mempool = rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool;
+	struct kmem_cache *cache = mempool->pool_data;
 
-	subreq = kzalloc(rreq->netfs_ops->io_subrequest_size ?:
-			 sizeof(struct netfs_io_subrequest),
-			 GFP_KERNEL);
-	if (subreq) {
-		INIT_WORK(&subreq->work, NULL);
-		INIT_LIST_HEAD(&subreq->rreq_link);
-		refcount_set(&subreq->ref, 2);
-		subreq->rreq = rreq;
-		netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
-		netfs_stat(&netfs_n_rh_sreq);
+	for (;;) {
+		subreq = mempool_alloc(rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool,
				       GFP_KERNEL);
+		if (subreq)
+			break;
+		msleep(10);
 	}
 
+	memset(subreq, 0, kmem_cache_size(cache));
+	INIT_WORK(&subreq->work, NULL);
+	INIT_LIST_HEAD(&subreq->rreq_link);
+	refcount_set(&subreq->ref, 2);
+	subreq->rreq = rreq;
+	subreq->debug_index = atomic_inc_return(&rreq->subreq_counter);
+	netfs_get_request(rreq, netfs_rreq_trace_get_subreq);
+	netfs_stat(&netfs_n_rh_sreq);
 	return subreq;
 }
@@ -173,7 +214,7 @@ static void netfs_free_subrequest(struct netfs_io_subrequest *subreq,
 	trace_netfs_sreq(subreq, netfs_sreq_trace_free);
 	if (rreq->netfs_ops->free_subrequest)
 		rreq->netfs_ops->free_subrequest(subreq);
-	kfree(subreq);
+	mempool_free(subreq, rreq->netfs_ops->subrequest_pool ?: &netfs_subrequest_pool);
 	netfs_stat_d(&netfs_n_rh_sreq);
 	netfs_put_request(rreq, was_async, netfs_rreq_trace_put_subreq);
 }
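Freeing now goes through call_rcu() rather than kfree_rcu(): a callback is needed because the object returns to a mempool instead of the slab allocator, and the grace period keeps lockless walkers of the /proc request list safe. The pattern in isolation (hypothetical struct widget; kfree() standing in for the mempool_free() that netfs does):

	#include <linux/slab.h>
	#include <linux/rcupdate.h>

	struct widget {
		int payload;
		struct rcu_head rcu;	/* embedded so call_rcu() can find us */
	};

	static void widget_free_rcu(struct rcu_head *rcu)
	{
		struct widget *w = container_of(rcu, struct widget, rcu);

		kfree(w);	/* netfs does mempool_free() here instead */
	}

	static void widget_release(struct widget *w)
	{
		/* Readers traversing under rcu_read_lock() stay safe: the
		 * memory is not reused until a grace period has elapsed. */
		call_rcu(&w->rcu, widget_free_rcu);
	}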
......
@@ -10,9 +10,8 @@
 #include "internal.h"
 
 atomic_t netfs_n_rh_dio_read;
-atomic_t netfs_n_rh_dio_write;
 atomic_t netfs_n_rh_readahead;
-atomic_t netfs_n_rh_readpage;
+atomic_t netfs_n_rh_read_folio;
 atomic_t netfs_n_rh_rreq;
 atomic_t netfs_n_rh_sreq;
 atomic_t netfs_n_rh_download;
@@ -29,6 +28,10 @@ atomic_t netfs_n_rh_write_begin;
 atomic_t netfs_n_rh_write_done;
 atomic_t netfs_n_rh_write_failed;
 atomic_t netfs_n_rh_write_zskip;
+atomic_t netfs_n_wh_buffered_write;
+atomic_t netfs_n_wh_writethrough;
+atomic_t netfs_n_wh_dio_write;
+atomic_t netfs_n_wh_writepages;
 atomic_t netfs_n_wh_wstream_conflict;
 atomic_t netfs_n_wh_upload;
 atomic_t netfs_n_wh_upload_done;
@@ -39,13 +42,17 @@ atomic_t netfs_n_wh_write_failed;
 
 int netfs_stats_show(struct seq_file *m, void *v)
 {
-	seq_printf(m, "Netfs : DR=%u DW=%u RA=%u RP=%u WB=%u WBZ=%u\n",
+	seq_printf(m, "Netfs : DR=%u RA=%u RF=%u WB=%u WBZ=%u\n",
 		   atomic_read(&netfs_n_rh_dio_read),
-		   atomic_read(&netfs_n_rh_dio_write),
 		   atomic_read(&netfs_n_rh_readahead),
-		   atomic_read(&netfs_n_rh_readpage),
+		   atomic_read(&netfs_n_rh_read_folio),
 		   atomic_read(&netfs_n_rh_write_begin),
 		   atomic_read(&netfs_n_rh_write_zskip));
+	seq_printf(m, "Netfs : BW=%u WT=%u DW=%u WP=%u\n",
+		   atomic_read(&netfs_n_wh_buffered_write),
+		   atomic_read(&netfs_n_wh_writethrough),
+		   atomic_read(&netfs_n_wh_dio_write),
+		   atomic_read(&netfs_n_wh_writepages));
 	seq_printf(m, "Netfs : ZR=%u sh=%u sk=%u\n",
 		   atomic_read(&netfs_n_rh_zero),
 		   atomic_read(&netfs_n_rh_short_read),
......
@@ -433,7 +433,7 @@ static void nfs_invalidate_folio(struct folio *folio, size_t offset,
 		return;
 	/* Cancel any unstarted writes on this page */
 	nfs_wb_folio_cancel(inode, folio);
-	folio_wait_fscache(folio);
+	folio_wait_private_2(folio); /* [DEPRECATED] */
 	trace_nfs_invalidate_folio(inode, folio);
 }
@@ -500,7 +500,7 @@ static int nfs_launder_folio(struct folio *folio)
 	dfprintk(PAGECACHE, "NFS: launder_folio(%ld, %llu)\n",
 		 inode->i_ino, folio_pos(folio));
 
-	folio_wait_fscache(folio);
+	folio_wait_private_2(folio); /* [DEPRECATED] */
 	ret = nfs_wb_folio(inode, folio);
 	trace_nfs_launder_folio_done(inode, folio, ret);
 	return ret;
@@ -593,8 +593,8 @@ static vm_fault_t nfs_vm_page_mkwrite(struct vm_fault *vmf)
 	sb_start_pagefault(inode->i_sb);
 
 	/* make sure the cache has finished storing the page */
-	if (folio_test_fscache(folio) &&
-	    folio_wait_fscache_killable(folio) < 0) {
+	if (folio_test_private_2(folio) && /* [DEPRECATED] */
+	    folio_wait_private_2_killable(folio) < 0) {
 		ret = VM_FAULT_RETRY;
 		goto out;
 	}
......
@@ -81,6 +81,8 @@ static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
 static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
 {
 	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops, false);
+	/* [DEPRECATED] Use PG_private_2 to mark folio being written to the cache. */
+	__set_bit(NETFS_ICTX_USE_PGPRIV2, &nfsi->netfs.flags);
 }
 extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
 extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
@@ -101,10 +103,10 @@ extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 {
-	if (folio_test_fscache(folio)) {
+	if (folio_test_private_2(folio)) { /* [DEPRECATED] */
 		if (current_is_kswapd() || !(gfp & __GFP_FS))
 			return false;
-		folio_wait_fscache(folio);
+		folio_wait_private_2(folio);
 	}
 	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
 	return true;
......
@@ -2120,10 +2120,10 @@ int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
 	if (folio_test_private(src))
 		return -EBUSY;
 
-	if (folio_test_fscache(src)) {
+	if (folio_test_private_2(src)) { /* [DEPRECATED] */
 		if (mode == MIGRATE_ASYNC)
 			return -EBUSY;
-		folio_wait_fscache(src);
+		folio_wait_private_2(src);
 	}
 
 	return migrate_folio(mapping, dst, src, mode);
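The fscache folio flag was an alias of PG_private_2; since nfs (and ceph) still depend on the old marking, they switch to the private_2 helpers directly, with every call site tagged [DEPRECATED]. The release-folio gate those helpers implement, in isolation (a sketch of the same shape, assuming an fscache/netfs-backed inode):

	#include <linux/pagemap.h>
	#include <linux/swap.h>

	/* Refuse to release a folio while the cache is still writing it
	 * out (PG_private_2 set), unless the caller may safely sleep. */
	static bool example_release_folio(struct folio *folio, gfp_t gfp)
	{
		if (folio_test_private_2(folio)) {	/* [DEPRECATED] flag */
			/* kswapd and non-__GFP_FS callers must not block */
			if (current_is_kswapd() || !(gfp & __GFP_FS))
				return false;
			folio_wait_private_2(folio);
		}
		return true;
	}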
......
@@ -2,6 +2,7 @@
 config CIFS
 	tristate "SMB3 and CIFS support (advanced network filesystem)"
 	depends on INET
+	select NETFS_SUPPORT
 	select NLS
 	select NLS_UCS2_UTILS
 	select CRYPTO
......
@@ -371,9 +371,13 @@ static struct kmem_cache *cifs_inode_cachep;
 static struct kmem_cache *cifs_req_cachep;
 static struct kmem_cache *cifs_mid_cachep;
 static struct kmem_cache *cifs_sm_req_cachep;
+static struct kmem_cache *cifs_io_request_cachep;
+static struct kmem_cache *cifs_io_subrequest_cachep;
 mempool_t *cifs_sm_req_poolp;
 mempool_t *cifs_req_poolp;
 mempool_t *cifs_mid_poolp;
+mempool_t cifs_io_request_pool;
+mempool_t cifs_io_subrequest_pool;
 
 static struct inode *
 cifs_alloc_inode(struct super_block *sb)
@@ -986,61 +990,6 @@ cifs_smb3_do_mount(struct file_system_type *fs_type,
 	return root;
 }
 
-static ssize_t
-cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
-{
-	ssize_t rc;
-	struct inode *inode = file_inode(iocb->ki_filp);
-
-	if (iocb->ki_flags & IOCB_DIRECT)
-		return cifs_user_readv(iocb, iter);
-
-	rc = cifs_revalidate_mapping(inode);
-	if (rc)
-		return rc;
-
-	return generic_file_read_iter(iocb, iter);
-}
-
-static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
-{
-	struct inode *inode = file_inode(iocb->ki_filp);
-	struct cifsInodeInfo *cinode = CIFS_I(inode);
-	ssize_t written;
-	int rc;
-
-	if (iocb->ki_filp->f_flags & O_DIRECT) {
-		written = cifs_user_writev(iocb, from);
-		if (written > 0 && CIFS_CACHE_READ(cinode)) {
-			cifs_zap_mapping(inode);
-			cifs_dbg(FYI,
-				 "Set no oplock for inode=%p after a write operation\n",
-				 inode);
-			cinode->oplock = 0;
-		}
-		return written;
-	}
-
-	written = cifs_get_writer(cinode);
-	if (written)
-		return written;
-
-	written = generic_file_write_iter(iocb, from);
-
-	if (CIFS_CACHE_WRITE(CIFS_I(inode)))
-		goto out;
-
-	rc = filemap_fdatawrite(inode->i_mapping);
-	if (rc)
-		cifs_dbg(FYI, "cifs_file_write_iter: %d rc on %p inode\n",
-			 rc, inode);
-
-out:
-	cifs_put_writer(cinode);
-	return written;
-}
-
 static loff_t cifs_llseek(struct file *file, loff_t offset, int whence)
 {
 	struct cifsFileInfo *cfile = file->private_data;
@@ -1342,6 +1291,8 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
 		rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
 		if (rc)
 			goto unlock;
+		if (fend > target_cifsi->netfs.zero_point)
+			target_cifsi->netfs.zero_point = fend + 1;
 
 		/* Discard all the folios that overlap the destination region. */
 		cifs_dbg(FYI, "about to discard pages %llx-%llx\n", fstart, fend);
@@ -1360,6 +1311,8 @@ static loff_t cifs_remap_file_range(struct file *src_file, loff_t off,
 			fscache_resize_cookie(cifs_inode_cookie(target_inode),
 					      new_size);
 		}
+		if (rc == 0 && new_size > target_cifsi->netfs.zero_point)
+			target_cifsi->netfs.zero_point = new_size;
 	}
 
 	/* force revalidate of size and timestamps of target file now
@@ -1451,6 +1404,8 @@ ssize_t cifs_file_copychunk_range(unsigned int xid,
 	rc = cifs_flush_folio(target_inode, destend, &fstart, &fend, false);
 	if (rc)
 		goto unlock;
+	if (fend > target_cifsi->netfs.zero_point)
+		target_cifsi->netfs.zero_point = fend + 1;
 
 	/* Discard all the folios that overlap the destination region. */
 	truncate_inode_pages_range(&target_inode->i_data, fstart, fend);
@@ -1567,8 +1522,8 @@ const struct file_operations cifs_file_strict_ops = {
 };
 
 const struct file_operations cifs_file_direct_ops = {
-	.read_iter = cifs_direct_readv,
-	.write_iter = cifs_direct_writev,
+	.read_iter = netfs_unbuffered_read_iter,
+	.write_iter = netfs_file_write_iter,
 	.open = cifs_open,
 	.release = cifs_close,
 	.lock = cifs_lock,
@@ -1623,8 +1578,8 @@ const struct file_operations cifs_file_strict_nobrl_ops = {
 };
 
 const struct file_operations cifs_file_direct_nobrl_ops = {
-	.read_iter = cifs_direct_readv,
-	.write_iter = cifs_direct_writev,
+	.read_iter = netfs_unbuffered_read_iter,
+	.write_iter = netfs_file_write_iter,
 	.open = cifs_open,
 	.release = cifs_close,
 	.fsync = cifs_fsync,
@@ -1799,6 +1754,48 @@ static void destroy_mids(void)
 	kmem_cache_destroy(cifs_mid_cachep);
 }
 
+static int cifs_init_netfs(void)
+{
+	cifs_io_request_cachep =
+		kmem_cache_create("cifs_io_request",
+				  sizeof(struct cifs_io_request), 0,
+				  SLAB_HWCACHE_ALIGN, NULL);
+	if (!cifs_io_request_cachep)
+		goto nomem_req;
+
+	if (mempool_init_slab_pool(&cifs_io_request_pool, 100, cifs_io_request_cachep) < 0)
+		goto nomem_reqpool;
+
+	cifs_io_subrequest_cachep =
+		kmem_cache_create("cifs_io_subrequest",
+				  sizeof(struct cifs_io_subrequest), 0,
+				  SLAB_HWCACHE_ALIGN, NULL);
+	if (!cifs_io_subrequest_cachep)
+		goto nomem_subreq;
+
+	if (mempool_init_slab_pool(&cifs_io_subrequest_pool, 100, cifs_io_subrequest_cachep) < 0)
+		goto nomem_subreqpool;
+
+	return 0;
+
+nomem_subreqpool:
+	kmem_cache_destroy(cifs_io_subrequest_cachep);
+nomem_subreq:
+	mempool_destroy(&cifs_io_request_pool);
+nomem_reqpool:
+	kmem_cache_destroy(cifs_io_request_cachep);
+nomem_req:
+	return -ENOMEM;
+}
+
+static void cifs_destroy_netfs(void)
+{
+	mempool_destroy(&cifs_io_subrequest_pool);
+	kmem_cache_destroy(cifs_io_subrequest_cachep);
+	mempool_destroy(&cifs_io_request_pool);
+	kmem_cache_destroy(cifs_io_request_cachep);
+}
+
 static int __init
 init_cifs(void)
 {
@@ -1903,10 +1900,14 @@ init_cifs(void)
 	if (rc)
 		goto out_destroy_deferredclose_wq;
 
-	rc = init_mids();
+	rc = cifs_init_netfs();
 	if (rc)
 		goto out_destroy_inodecache;
 
+	rc = init_mids();
+	if (rc)
+		goto out_destroy_netfs;
+
 	rc = cifs_init_request_bufs();
 	if (rc)
 		goto out_destroy_mids;
@@ -1961,6 +1962,8 @@ init_cifs(void)
 	cifs_destroy_request_bufs();
 out_destroy_mids:
 	destroy_mids();
+out_destroy_netfs:
+	cifs_destroy_netfs();
 out_destroy_inodecache:
 	cifs_destroy_inodecache();
 out_destroy_deferredclose_wq:
@@ -1999,6 +2002,7 @@ exit_cifs(void)
 #endif
 	cifs_destroy_request_bufs();
 	destroy_mids();
+	cifs_destroy_netfs();
 	cifs_destroy_inodecache();
 	destroy_workqueue(deferredclose_wq);
 	destroy_workqueue(cifsoplockd_wq);
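For -o directio mounts the old cifs_direct_readv/cifs_direct_writev entry points disappear entirely; the file_operations now point straight at the generic netfs iterators, which pick buffered, write-through or unbuffered handling per the open flags. A sketch of how a netfslib-backed filesystem wires this up (hypothetical example_file_direct_ops; unrelated ops elided):

	#include <linux/fs.h>
	#include <linux/netfs.h>

	/* Direct-I/O flavoured file_operations for a netfslib-backed fs:
	 * reads bypass the pagecache via netfs_unbuffered_read_iter() and
	 * writes go through the generic netfs_file_write_iter(). */
	static const struct file_operations example_file_direct_ops = {
		.llseek		= generic_file_llseek,
		.read_iter	= netfs_unbuffered_read_iter,
		.write_iter	= netfs_file_write_iter,
		.mmap		= generic_file_mmap,
	};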
......
@@ -69,7 +69,6 @@ extern int cifs_revalidate_file_attr(struct file *filp);
 extern int cifs_revalidate_dentry_attr(struct dentry *);
 extern int cifs_revalidate_file(struct file *filp);
 extern int cifs_revalidate_dentry(struct dentry *);
-extern int cifs_invalidate_mapping(struct inode *inode);
 extern int cifs_revalidate_mapping(struct inode *inode);
 extern int cifs_zap_mapping(struct inode *inode);
 extern int cifs_getattr(struct mnt_idmap *, const struct path *,
@@ -85,6 +84,7 @@ extern const struct inode_operations cifs_namespace_inode_operations;
 
 /* Functions related to files and directories */
+extern const struct netfs_request_ops cifs_req_ops;
 extern const struct file_operations cifs_file_ops;
 extern const struct file_operations cifs_file_direct_ops; /* if directio mnt */
 extern const struct file_operations cifs_file_strict_ops; /* if strictio mnt */
@@ -94,12 +94,10 @@ extern const struct file_operations cifs_file_strict_nobrl_ops;
 extern int cifs_open(struct inode *inode, struct file *file);
 extern int cifs_close(struct inode *inode, struct file *file);
 extern int cifs_closedir(struct inode *inode, struct file *file);
-extern ssize_t cifs_user_readv(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t cifs_direct_readv(struct kiocb *iocb, struct iov_iter *to);
 extern ssize_t cifs_strict_readv(struct kiocb *iocb, struct iov_iter *to);
-extern ssize_t cifs_user_writev(struct kiocb *iocb, struct iov_iter *from);
-extern ssize_t cifs_direct_writev(struct kiocb *iocb, struct iov_iter *from);
 extern ssize_t cifs_strict_writev(struct kiocb *iocb, struct iov_iter *from);
+ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
+ssize_t cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 extern int cifs_flock(struct file *pfile, int cmd, struct file_lock *plock);
 extern int cifs_lock(struct file *, int, struct file_lock *);
 extern int cifs_fsync(struct file *, loff_t, loff_t, int);
@@ -110,9 +108,6 @@ extern int cifs_file_strict_mmap(struct file *file, struct vm_area_struct *vma);
 extern const struct file_operations cifs_dir_ops;
 extern int cifs_dir_open(struct inode *inode, struct file *file);
 extern int cifs_readdir(struct file *file, struct dir_context *ctx);
-extern void cifs_pages_written_back(struct inode *inode, loff_t start, unsigned int len);
-extern void cifs_pages_write_failed(struct inode *inode, loff_t start, unsigned int len);
-extern void cifs_pages_write_redirty(struct inode *inode, loff_t start, unsigned int len);
 
 /* Functions related to dir entries */
 extern const struct dentry_operations cifs_dentry_ops;
......
@@ -268,8 +268,7 @@ struct dfs_info3_param;
 struct cifs_fattr;
 struct smb3_fs_context;
 struct cifs_fid;
-struct cifs_readdata;
-struct cifs_writedata;
+struct cifs_io_subrequest;
 struct cifs_io_parms;
 struct cifs_search_info;
 struct cifsInodeInfo;
@@ -450,10 +449,9 @@ struct smb_version_operations {
 	/* send a flush request to the server */
 	int (*flush)(const unsigned int, struct cifs_tcon *, struct cifs_fid *);
 	/* async read from the server */
-	int (*async_readv)(struct cifs_readdata *);
+	int (*async_readv)(struct cifs_io_subrequest *);
 	/* async write to the server */
-	int (*async_writev)(struct cifs_writedata *,
-			    void (*release)(struct kref *));
+	void (*async_writev)(struct cifs_io_subrequest *);
 	/* sync read from the server */
 	int (*sync_read)(const unsigned int, struct cifs_fid *,
 			 struct cifs_io_parms *, unsigned int *, char **,
@@ -548,8 +546,8 @@ struct smb_version_operations {
 	/* writepages retry size */
 	unsigned int (*wp_retry_size)(struct inode *);
 	/* get mtu credits */
-	int (*wait_mtu_credits)(struct TCP_Server_Info *, unsigned int,
-				unsigned int *, struct cifs_credits *);
+	int (*wait_mtu_credits)(struct TCP_Server_Info *, size_t,
+				size_t *, struct cifs_credits *);
 	/* adjust previously taken mtu credits to request size */
 	int (*adjust_credits)(struct TCP_Server_Info *server,
 			      struct cifs_credits *credits,
@@ -883,11 +881,12 @@ add_credits(struct TCP_Server_Info *server, const struct cifs_credits *credits,
 
 static inline void
 add_credits_and_wake_if(struct TCP_Server_Info *server,
-			const struct cifs_credits *credits, const int optype)
+			struct cifs_credits *credits, const int optype)
 {
 	if (credits->value) {
 		server->ops->add_credits(server, credits, optype);
 		wake_up(&server->request_q);
+		credits->value = 0;
 	}
 }
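Per the "Make add_credits_and_wake_if() clear deducted credits" patch, the helper now consumes credits->value when handing credits back to the server, so overlapping error paths cannot return the same credits twice. The consume-on-release shape in miniature (standalone C, toy types):

	#include <stdio.h>

	struct credits { unsigned int value; };

	/* Return credits to the pool and clear the local copy so that a
	 * second call on an already-consumed struct is a harmless no-op. */
	static void add_credits_and_wake_if(unsigned int *server_pool,
					    struct credits *credits)
	{
		if (credits->value) {
			*server_pool += credits->value;
			/* wake_up(&server->request_q) would go here */
			credits->value = 0;	/* prevents a double return */
		}
	}

	int main(void)
	{
		unsigned int pool = 0;
		struct credits c = { .value = 16 };

		add_credits_and_wake_if(&pool, &c);
		add_credits_and_wake_if(&pool, &c);	/* no-op this time */
		printf("pool=%u credits=%u\n", pool, c.value); /* 16 and 0 */
		return 0;
	}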
@@ -1492,50 +1491,30 @@ struct cifs_aio_ctx {
 	bool			direct_io;
 };
 
-/* asynchronous read support */
-struct cifs_readdata {
-	struct kref			refcount;
-	struct list_head		list;
-	struct completion		done;
+struct cifs_io_request {
+	struct netfs_io_request		rreq;
 	struct cifsFileInfo		*cfile;
-	struct address_space		*mapping;
-	struct cifs_aio_ctx		*ctx;
-	__u64				offset;
-	ssize_t				got_bytes;
-	unsigned int			bytes;
-	pid_t				pid;
-	int				result;
-	struct work_struct		work;
-	struct iov_iter			iter;
-	struct kvec			iov[2];
-	struct TCP_Server_Info		*server;
-#ifdef CONFIG_CIFS_SMB_DIRECT
-	struct smbd_mr			*mr;
-#endif
-	struct cifs_credits		credits;
 };
 
-/* asynchronous write support */
-struct cifs_writedata {
-	struct kref			refcount;
-	struct list_head		list;
-	struct completion		done;
-	enum writeback_sync_modes	sync_mode;
-	struct work_struct		work;
-	struct cifsFileInfo		*cfile;
-	struct cifs_aio_ctx		*ctx;
-	struct iov_iter			iter;
-	struct bio_vec			*bv;
-	__u64				offset;
+/* asynchronous read support */
+struct cifs_io_subrequest {
+	union {
+		struct netfs_io_subrequest subreq;
+		struct netfs_io_request *rreq;
+		struct cifs_io_request *req;
+	};
+	ssize_t				got_bytes;
 	pid_t				pid;
-	unsigned int			bytes;
+	unsigned int			xid;
 	int				result;
+	bool				have_xid;
+	bool				replay;
+	struct kvec			iov[2];
 	struct TCP_Server_Info		*server;
 #ifdef CONFIG_CIFS_SMB_DIRECT
 	struct smbd_mr			*mr;
 #endif
 	struct cifs_credits		credits;
-	bool				replay;
 };
 
 /*
@@ -2115,6 +2094,8 @@ extern __u32 cifs_lock_secret;
 extern mempool_t *cifs_sm_req_poolp;
 extern mempool_t *cifs_req_poolp;
 extern mempool_t *cifs_mid_poolp;
+extern mempool_t cifs_io_request_pool;
+extern mempool_t cifs_io_subrequest_pool;
 
 /* Operations for different SMB versions */
 #define SMB1_VERSION_STRING "1.0"
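The anonymous union in cifs_io_subrequest works because struct netfs_io_subrequest begins with its rreq pointer and struct cifs_io_request begins with the embedded struct netfs_io_request: the same bytes are valid under all three views, so cifs can read subreq->rreq or subreq->req without casts. A standalone model of that first-member aliasing (toy types; the kernel leans on exactly this layout guarantee):

	#include <stdio.h>

	struct core_request { int id; };

	struct core_subrequest {
		struct core_request *owner;	/* must stay the first member */
		int index;
	};

	struct fs_request {
		struct core_request core;	/* must stay the first member */
		int fs_private;
	};

	struct fs_subrequest {
		union {
			struct core_subrequest sub;	/* embedded object */
			struct core_request *owner;	/* aliases sub.owner */
			struct fs_request *req;		/* same pointer, fs view */
		};
		int fs_data;
	};

	int main(void)
	{
		struct fs_request freq = { .core = { .id = 1 }, .fs_private = 42 };
		struct fs_subrequest fsub = { .sub = { .owner = &freq.core } };

		/* All three views reach the same request object. */
		printf("%d %d %d\n", fsub.sub.owner->id, fsub.owner->id,
		       fsub.req->core.id);
		printf("fs_private=%d\n", fsub.req->fs_private);
		return 0;
	}

The payoff of this design choice is that generic netfs code and cifs code can pass the very same object around, each dereferencing it through the type it understands, with no container_of() boilerplate at the hot call sites.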
......
@@ -121,7 +121,7 @@ extern struct mid_q_entry *cifs_setup_async_request(struct TCP_Server_Info *,
 extern int cifs_check_receive(struct mid_q_entry *mid,
 			      struct TCP_Server_Info *server, bool log_error);
 extern int cifs_wait_mtu_credits(struct TCP_Server_Info *server,
-				 unsigned int size, unsigned int *num,
+				 size_t size, size_t *num,
 				 struct cifs_credits *credits);
 extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
 			struct kvec *, int /* nvec to send */,
@@ -148,6 +148,8 @@ extern bool is_size_safe_to_change(struct cifsInodeInfo *cifsInode, __u64 eof,
 				   bool from_readdir);
 extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
 			    unsigned int bytes_written);
+void cifs_write_subrequest_terminated(struct cifs_io_subrequest *wdata, ssize_t result,
+				      bool was_async);
 extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
 extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
 				  int flags,
@@ -599,15 +601,11 @@ void __cifs_put_smb_ses(struct cifs_ses *ses);
 extern struct cifs_ses *
 cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx);
 
-void cifs_readdata_release(struct kref *refcount);
-int cifs_async_readv(struct cifs_readdata *rdata);
+int cifs_async_readv(struct cifs_io_subrequest *rdata);
 int cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid);
 
-int cifs_async_writev(struct cifs_writedata *wdata,
-		      void (*release)(struct kref *kref));
+void cifs_async_writev(struct cifs_io_subrequest *wdata);
 void cifs_writev_complete(struct work_struct *work);
-struct cifs_writedata *cifs_writedata_alloc(work_func_t complete);
-void cifs_writedata_release(struct kref *refcount);
 int cifs_query_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
 			  struct cifs_sb_info *cifs_sb,
 			  const unsigned char *path, char *pbuf,
......
@@ -74,41 +74,6 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags
 			   i_size_read(inode), flags);
 }
 
-extern int __cifs_fscache_query_occupancy(struct inode *inode,
-					  pgoff_t first, unsigned int nr_pages,
-					  pgoff_t *_data_first,
-					  unsigned int *_data_nr_pages);
-
-static inline int cifs_fscache_query_occupancy(struct inode *inode,
-					       pgoff_t first, unsigned int nr_pages,
-					       pgoff_t *_data_first,
-					       unsigned int *_data_nr_pages)
-{
-	if (!cifs_inode_cookie(inode))
-		return -ENOBUFS;
-	return __cifs_fscache_query_occupancy(inode, first, nr_pages,
-					      _data_first, _data_nr_pages);
-}
-
-extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage);
-extern void __cifs_readahead_to_fscache(struct inode *pinode, loff_t pos, size_t len);
-
-static inline int cifs_readpage_from_fscache(struct inode *inode,
-					     struct page *page)
-{
-	if (cifs_inode_cookie(inode))
-		return __cifs_readpage_from_fscache(inode, page);
-	return -ENOBUFS;
-}
-
-static inline void cifs_readahead_to_fscache(struct inode *inode,
-					     loff_t pos, size_t len)
-{
-	if (cifs_inode_cookie(inode))
-		__cifs_readahead_to_fscache(inode, pos, len);
-}
-
 static inline bool cifs_fscache_enabled(struct inode *inode)
 {
 	return fscache_cookie_enabled(cifs_inode_cookie(inode));
@@ -131,25 +96,6 @@ static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { re
 static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {}
 static inline bool cifs_fscache_enabled(struct inode *inode) { return false; }
 
-static inline int cifs_fscache_query_occupancy(struct inode *inode,
-					       pgoff_t first, unsigned int nr_pages,
-					       pgoff_t *_data_first,
-					       unsigned int *_data_nr_pages)
-{
-	*_data_first = ULONG_MAX;
-	*_data_nr_pages = 0;
-	return -ENOBUFS;
-}
-
-static inline int
-cifs_readpage_from_fscache(struct inode *inode, struct page *page)
-{
-	return -ENOBUFS;
-}
-
-static inline
-void cifs_readahead_to_fscache(struct inode *inode, loff_t pos, size_t len) {}
-
 #endif /* CONFIG_CIFS_FSCACHE */
 #endif /* _CIFS_FSCACHE_H */
@@ -210,11 +210,10 @@ extern int SMB2_query_acl(const unsigned int xid, struct cifs_tcon *tcon,
 extern int SMB2_get_srv_num(const unsigned int xid, struct cifs_tcon *tcon,
 			    u64 persistent_fid, u64 volatile_fid,
 			    __le64 *uniqueid);
-extern int smb2_async_readv(struct cifs_readdata *rdata);
+extern int smb2_async_readv(struct cifs_io_subrequest *rdata);
 extern int SMB2_read(const unsigned int xid, struct cifs_io_parms *io_parms,
 		     unsigned int *nbytes, char **buf, int *buf_type);
-extern int smb2_async_writev(struct cifs_writedata *wdata,
-			     void (*release)(struct kref *kref));
+extern void smb2_async_writev(struct cifs_io_subrequest *wdata);
 extern int SMB2_write(const unsigned int xid, struct cifs_io_parms *io_parms,
 		      unsigned int *nbytes, struct kvec *iov, int n_vec);
 extern int SMB2_echo(struct TCP_Server_Info *server);
......
@@ -40,6 +40,8 @@ int filemap_fdatawait_keep_errors(struct address_space *mapping);
 int filemap_fdatawait_range(struct address_space *, loff_t lstart, loff_t lend);
 int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
 		loff_t start_byte, loff_t end_byte);
+int filemap_invalidate_inode(struct inode *inode, bool flush,
+			     loff_t start, loff_t end);
 
 static inline int filemap_fdatawait(struct address_space *mapping)
 {
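filemap_invalidate_inode() is the helper behind "cifs: Use alternative invalidation to using launder_folio": it can write a byte range back (flush=true) and then drop the cached pages in one call. A hedged usage sketch, assuming the caller wants the whole file zapped:

	#include <linux/fs.h>
	#include <linux/pagemap.h>

	/* Write back and drop every cached page of an inode; with
	 * flush=false the range would be invalidated without writeback. */
	static int example_zap_inode(struct inode *inode)
	{
		return filemap_invalidate_inode(inode, true, 0, LLONG_MAX);
	}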
......
@@ -207,6 +207,8 @@ int p9_client_read(struct p9_fid *fid, u64 offset, struct iov_iter *to, int *err
 int p9_client_read_once(struct p9_fid *fid, u64 offset, struct iov_iter *to,
 			int *err);
 int p9_client_write(struct p9_fid *fid, u64 offset, struct iov_iter *from, int *err);
+struct netfs_io_subrequest;
+void p9_client_write_subreq(struct netfs_io_subrequest *subreq);
 int p9_client_readdir(struct p9_fid *fid, char *data, u32 count, u64 offset);
 int p9dirent_read(struct p9_client *clnt, char *buf, int len,
 		  struct p9_dirent *dirent);
......