Commit 0127f25b authored by Linus Torvalds

Merge tag 'nfs-for-6.4-1' of git://git.linux-nfs.org/projects/anna/linux-nfs

Pull NFS client updates from Anna Schumaker:
 "New Features:

   - Convert the readdir path to use folios

   - Convert the NFS fscache code to use netfs

  Bugfixes and Cleanups:

   - Always send a RECLAIM_COMPLETE after establishing a lease

   - Simplify sysctl registrations and other cleanups

   - Handle out-of-order write replies on NFS v3

   - Have sunrpc call_bind_status use standard hard/soft task semantics

   - Other minor cleanups"

* tag 'nfs-for-6.4-1' of git://git.linux-nfs.org/projects/anna/linux-nfs:
  NFSv4.2: Rework scratch handling for READ_PLUS
  NFS: Cleanup unused rpc_clnt variable
  NFS: set varaiable nfs_netfs_debug_id storage-class-specifier to static
  SUNRPC: remove the maximum number of retries in call_bind_status
  NFS: Convert readdir page array functions to use a folio
  NFS: Convert the readdir array-of-pages into an array-of-folios
  NFSv3: handle out-of-order write replies.
  NFS: Remove fscache specific trace points and NFS_INO_FSCACHE bit
  NFS: Remove all NFSIOS_FSCACHE counters due to conversion to netfs API
  NFS: Convert buffered read paths to use netfs when fscache is enabled
  NFS: Configure support for netfs when NFS fscache is configured
  NFS: Rename readpage_async_filler to nfs_read_add_folio
  sunrpc: simplify one-level sysctl registration for debug_table
  sunrpc: move sunrpc_table and proc routines above
  sunrpc: simplify one-level sysctl registration for xs_tunables_table
  sunrpc: simplify one-level sysctl registration for xr_tunables_table
  nfs: simplify two-level sysctl registration for nfs_cb_sysctls
  nfs: simplify two-level sysctl registration for nfs4_cb_sysctls
  lockd: simplify two-level sysctl registration for nlm_sysctls
  NFSv4.1: Always send a RECLAIM_COMPLETE after establishing lease
parents 1e098dec fbd2a05f
@@ -170,6 +170,7 @@ config ROOT_NFS
 config NFS_FSCACHE
 	bool "Provide NFS client caching support"
 	depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
+	select NETFS_SUPPORT
 	help
 	  Say Y here if you want NFS data to be cached locally on disc through
 	  the general filesystem cache manager
......
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
 
 #include "internal.h"
 #include "iostat.h"
@@ -163,13 +166,14 @@ void nfs_fscache_init_inode(struct inode *inode)
 	struct nfs_server *nfss = NFS_SERVER(inode);
 	struct nfs_inode *nfsi = NFS_I(inode);
 
-	nfsi->fscache = NULL;
+	netfs_inode(inode)->cache = NULL;
 	if (!(nfss->fscache && S_ISREG(inode->i_mode)))
 		return;
 
 	nfs_fscache_update_auxdata(&auxdata, inode);
-	nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
+	netfs_inode(inode)->cache = fscache_acquire_cookie(
+					       nfss->fscache,
 					       0,
 					       nfsi->fh.data, /* index_key */
 					       nfsi->fh.size,
@@ -183,11 +187,8 @@ void nfs_fscache_init_inode(struct inode *inode)
  */
 void nfs_fscache_clear_inode(struct inode *inode)
 {
-	struct nfs_inode *nfsi = NFS_I(inode);
-	struct fscache_cookie *cookie = nfs_i_fscache(inode);
-
-	fscache_relinquish_cookie(cookie, false);
-	nfsi->fscache = NULL;
+	fscache_relinquish_cookie(netfs_i_cookie(netfs_inode(inode)), false);
+	netfs_inode(inode)->cache = NULL;
 }
 
 /*
@@ -212,7 +213,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
 void nfs_fscache_open_file(struct inode *inode, struct file *filp)
 {
 	struct nfs_fscache_inode_auxdata auxdata;
-	struct fscache_cookie *cookie = nfs_i_fscache(inode);
+	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
 	bool open_for_write = inode_is_open_for_write(inode);
 
 	if (!fscache_cookie_valid(cookie))
@@ -230,115 +231,160 @@ EXPORT_SYMBOL_GPL(nfs_fscache_open_file);
 void nfs_fscache_release_file(struct inode *inode, struct file *filp)
 {
 	struct nfs_fscache_inode_auxdata auxdata;
-	struct fscache_cookie *cookie = nfs_i_fscache(inode);
+	struct fscache_cookie *cookie = netfs_i_cookie(netfs_inode(inode));
 	loff_t i_size = i_size_read(inode);
 
 	nfs_fscache_update_auxdata(&auxdata, inode);
 	fscache_unuse_cookie(cookie, &auxdata, &i_size);
 }
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
-{
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = nfs_i_fscache(inode);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_read_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
-			   NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
-}
-
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
-				       bool no_space_allocated_yet)
-{
-	struct netfs_cache_resources cres;
-	struct fscache_cookie *cookie = nfs_i_fscache(inode);
-	struct iov_iter iter;
-	struct bio_vec bvec;
-	loff_t start = page_offset(page);
-	size_t len = PAGE_SIZE;
-	int ret;
-
-	memset(&cres, 0, sizeof(cres));
-	bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-	iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-
-	ret = fscache_begin_write_operation(&cres, cookie);
-	if (ret < 0)
-		return ret;
-
-	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
-				      no_space_allocated_yet);
-	if (ret == 0)
-		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
-	fscache_end_operation(&cres);
-	return ret;
-}
-
-/*
- * Retrieve a page from fscache
- */
-int __nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
-	int ret;
-
-	trace_nfs_fscache_read_page(inode, page);
-	if (PageChecked(page)) {
-		ClearPageChecked(page);
-		ret = 1;
-		goto out;
-	}
-
-	ret = fscache_fallback_read_page(inode, page);
-	if (ret < 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
-		SetPageChecked(page);
-		goto out;
-	}
-
-	/* Read completed synchronously */
-	nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
-	SetPageUptodate(page);
-	ret = 0;
-out:
-	trace_nfs_fscache_read_page_exit(inode, page, ret);
-	return ret;
-}
-
-/*
- * Store a newly fetched page in fscache. We can be certain there's no page
- * stored in the cache as yet otherwise we would've read it from there.
- */
-void __nfs_fscache_write_page(struct inode *inode, struct page *page)
-{
-	int ret;
-
-	trace_nfs_fscache_write_page(inode, page);
-
-	ret = fscache_fallback_write_page(inode, page, true);
-
-	if (ret != 0) {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
-	} else {
-		nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
-	}
-	trace_nfs_fscache_write_page_exit(inode, page, ret);
-}
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
+{
+	if (!netfs_inode(folio_inode(folio))->cache)
+		return -ENOBUFS;
+
+	return netfs_read_folio(file, folio);
+}
+
+int nfs_netfs_readahead(struct readahead_control *ractl)
+{
+	struct inode *inode = ractl->mapping->host;
+
+	if (!netfs_inode(inode)->cache)
+		return -ENOBUFS;
+
+	netfs_readahead(ractl);
+	return 0;
+}
+
+static atomic_t nfs_netfs_debug_id;
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
+{
+	rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+	rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
+
+	return 0;
+}
+
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+{
+	put_nfs_open_context(rreq->netfs_priv);
+}
+
+static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
+{
+	return fscache_begin_read_operation(&rreq->cache_resources,
+					    netfs_i_cookie(netfs_inode(rreq->inode)));
+}
+
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
+{
+	struct nfs_netfs_io_data *netfs;
+
+	netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+	if (!netfs)
+		return NULL;
+	netfs->sreq = sreq;
+	refcount_set(&netfs->refcount, 1);
+	return netfs;
+}
+
+static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
+{
+	size_t	rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
+
+	sreq->len = min(sreq->len, rsize);
+	return true;
+}
+
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+{
+	struct nfs_netfs_io_data	*netfs;
+	struct nfs_pageio_descriptor	pgio;
+	struct inode *inode = sreq->rreq->inode;
+	struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+	struct page *page;
+	int err;
+	pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+	pgoff_t last = ((sreq->start + sreq->len -
+			 sreq->transferred - 1) >> PAGE_SHIFT);
+	XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+	nfs_pageio_init_read(&pgio, inode, false,
+			     &nfs_async_read_completion_ops);
+
+	netfs = nfs_netfs_alloc(sreq);
+	if (!netfs)
+		return netfs_subreq_terminated(sreq, -ENOMEM, false);
+
+	pgio.pg_netfs = netfs; /* used in completion */
+
+	xas_lock(&xas);
+	xas_for_each(&xas, page, last) {
+		/* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+		xas_pause(&xas);
+		xas_unlock(&xas);
+		err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+		if (err < 0) {
+			netfs->error = err;
+			goto out;
+		}
+		xas_lock(&xas);
+	}
+	xas_unlock(&xas);
+out:
+	nfs_pageio_complete_read(&pgio);
+	nfs_netfs_put(netfs);
+}
+
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data        *netfs = hdr->netfs;
+
+	if (!netfs)
+		return;
+
+	nfs_netfs_get(netfs);
+}
+
+int nfs_netfs_folio_unlock(struct folio *folio)
+{
+	struct inode *inode = folio_file_mapping(folio)->host;
+
+	/*
+	 * If fscache is enabled, netfs will unlock pages.
+	 */
+	if (netfs_inode(inode)->cache)
+		return 0;
+
+	return 1;
+}
+
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
+{
+	struct nfs_netfs_io_data        *netfs = hdr->netfs;
+	struct netfs_io_subrequest      *sreq;
+
+	if (!netfs)
+		return;
+
+	sreq = netfs->sreq;
+	if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+		__set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+	if (hdr->error)
+		netfs->error = hdr->error;
+	else
+		atomic64_add(hdr->res.count, &netfs->transferred);
+
+	nfs_netfs_put(netfs);
+	hdr->netfs = NULL;
+}
+
+const struct netfs_request_ops nfs_netfs_ops = {
+	.init_request		= nfs_netfs_init_request,
+	.free_request		= nfs_netfs_free_request,
+	.begin_cache_operation	= nfs_netfs_begin_cache_operation,
+	.issue_read		= nfs_netfs_issue_read,
+	.clamp_length		= nfs_netfs_clamp_length
+};
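
Taken together, the new functions above slot into the netfs read path. A rough sketch of the resulting call flow (an editor's summary of the code above, not verbatim kernel code):

/*
 * nfs_netfs_read_folio() / nfs_netfs_readahead()     NFS entry points
 *   -> netfs_read_folio() / netfs_readahead()        netfs core
 *        -> nfs_netfs_init_request()                 attach the nfs_open_context
 *        -> nfs_netfs_begin_cache_operation()        bind the fscache cookie
 *        -> for each netfs_io_subrequest:
 *             nfs_netfs_clamp_length()               cap the subrequest at rsize
 *             nfs_netfs_issue_read()                 queue NFS READ RPCs via pageio
 *               ... per-RPC completions ...
 *             netfs_subreq_terminated()              called once, from nfs_netfs_put()
 */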
@@ -34,6 +34,58 @@ struct nfs_fscache_inode_auxdata {
 	u64	change_attr;
 };
 
+struct nfs_netfs_io_data {
+	/*
+	 * NFS may split a netfs_io_subrequest into multiple RPCs, each
+	 * with their own read completion.  In netfs, we can only call
+	 * netfs_subreq_terminated() once for each subrequest.  Use the
+	 * refcount here to double as a marker of the last RPC completion,
+	 * and only call netfs via netfs_subreq_terminated() once.
+	 */
+	refcount_t refcount;
+	struct netfs_io_subrequest *sreq;
+
+	/*
+	 * Final disposition of the netfs_io_subrequest, sent in
+	 * netfs_subreq_terminated()
+	 */
+	atomic64_t transferred;
+	int error;
+};
+
+static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
+{
+	refcount_inc(&netfs->refcount);
+}
+
+static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
+{
+	ssize_t final_len;
+
+	/* Only the last RPC completion should call netfs_subreq_terminated() */
+	if (!refcount_dec_and_test(&netfs->refcount))
+		return;
+
+	/*
+	 * The NFS pageio interface may read a complete page, even when netfs
+	 * only asked for a partial page.  Specifically, this may be seen when
+	 * one thread is truncating a file while another one is reading the last
+	 * page of the file.
+	 * Correct the final length here to be no larger than the netfs subrequest
+	 * length, and thus avoid netfs's "Subreq overread" warning message.
+	 */
+	final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
+	netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
+	kfree(netfs);
+}
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi)
+{
+	netfs_inode_init(&nfsi->netfs, &nfs_netfs_ops);
+}
+extern void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr);
+extern void nfs_netfs_read_completion(struct nfs_pgio_header *hdr);
+extern int nfs_netfs_folio_unlock(struct folio *folio);
+
 /*
  * fscache.c
  */
@@ -44,9 +96,8 @@ extern void nfs_fscache_init_inode(struct inode *);
 extern void nfs_fscache_clear_inode(struct inode *);
 extern void nfs_fscache_open_file(struct inode *, struct file *);
 extern void nfs_fscache_release_file(struct inode *, struct file *);
-
-extern int __nfs_fscache_read_page(struct inode *, struct page *);
-extern void __nfs_fscache_write_page(struct inode *, struct page *);
+extern int nfs_netfs_readahead(struct readahead_control *ractl);
+extern int nfs_netfs_read_folio(struct file *file, struct folio *folio);
 
 static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 {
@@ -54,34 +105,11 @@ static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
 		if (current_is_kswapd() || !(gfp & __GFP_FS))
 			return false;
 		folio_wait_fscache(folio);
-		fscache_note_page_release(nfs_i_fscache(folio->mapping->host));
-		nfs_inc_fscache_stats(folio->mapping->host,
-				      NFSIOS_FSCACHE_PAGES_UNCACHED);
 	}
+	fscache_note_page_release(netfs_i_cookie(netfs_inode(folio->mapping->host)));
 	return true;
 }
 
-/*
- * Retrieve a page from an inode data storage object.
- */
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
-{
-	if (nfs_i_fscache(inode))
-		return __nfs_fscache_read_page(inode, page);
-	return -ENOBUFS;
-}
-
-/*
- * Store a page newly fetched from the server in an inode data storage object
- * in the cache.
- */
-static inline void nfs_fscache_write_page(struct inode *inode,
-					  struct page *page)
-{
-	if (nfs_i_fscache(inode))
-		__nfs_fscache_write_page(inode, page);
-}
-
 static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
 					      struct inode *inode)
 {
@@ -101,13 +129,10 @@ static inline void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *
 static inline void nfs_fscache_invalidate(struct inode *inode, int flags)
 {
 	struct nfs_fscache_inode_auxdata auxdata;
-	struct nfs_inode *nfsi = NFS_I(inode);
+	struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
 
-	if (nfsi->fscache) {
-		nfs_fscache_update_auxdata(&auxdata, inode);
-		fscache_invalidate(nfsi->fscache, &auxdata,
-				i_size_read(inode), flags);
-	}
+	nfs_fscache_update_auxdata(&auxdata, inode);
+	fscache_invalidate(cookie, &auxdata, i_size_read(inode), flags);
 }
 
 /*
@@ -120,7 +145,28 @@ static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 	return "no ";
 }
 
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+					     struct nfs_pageio_descriptor *desc)
+{
+	hdr->netfs = desc->pg_netfs;
+}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+						   struct nfs_pgio_header *hdr)
+{
+	desc->pg_netfs = hdr->netfs;
+}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc)
+{
+	desc->pg_netfs = NULL;
+}
+
 #else /* CONFIG_NFS_FSCACHE */
+static inline void nfs_netfs_inode_init(struct nfs_inode *nfsi) {}
+static inline void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_read_completion(struct nfs_pgio_header *hdr) {}
+static inline int nfs_netfs_folio_unlock(struct folio *folio)
+{
+	return 1;
+}
 static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
 
 static inline void nfs_fscache_init_inode(struct inode *inode) {}
@@ -128,22 +174,29 @@ static inline void nfs_fscache_clear_inode(struct inode *inode) {}
 static inline void nfs_fscache_open_file(struct inode *inode,
 					 struct file *filp) {}
 static inline void nfs_fscache_release_file(struct inode *inode, struct file *file) {}
-
-static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
+static inline int nfs_netfs_readahead(struct readahead_control *ractl)
 {
-	return true; /* may release folio */
+	return -ENOBUFS;
 }
-static inline int nfs_fscache_read_page(struct inode *inode, struct page *page)
+static inline int nfs_netfs_read_folio(struct file *file, struct folio *folio)
 {
 	return -ENOBUFS;
 }
-static inline void nfs_fscache_write_page(struct inode *inode, struct page *page) {}
+
+static inline bool nfs_fscache_release_folio(struct folio *folio, gfp_t gfp)
+{
+	return true; /* may release folio */
+}
+
 static inline void nfs_fscache_invalidate(struct inode *inode, int flags) {}
 
 static inline const char *nfs_server_fscache_state(struct nfs_server *server)
 {
 	return "no ";
 }
+static inline void nfs_netfs_set_pgio_header(struct nfs_pgio_header *hdr,
+					     struct nfs_pageio_descriptor *desc) {}
+static inline void nfs_netfs_set_pageio_descriptor(struct nfs_pageio_descriptor *desc,
+						   struct nfs_pgio_header *hdr) {}
+static inline void nfs_netfs_reset_pageio_descriptor(struct nfs_pageio_descriptor *desc) {}
 
 #endif /* CONFIG_NFS_FSCACHE */
 #endif /* _NFS_FSCACHE_H */
@@ -208,11 +208,12 @@ void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
 
 	nfsi->cache_validity |= flags;
 
-	if (inode->i_mapping->nrpages == 0)
-		nfsi->cache_validity &= ~(NFS_INO_INVALID_DATA |
-					  NFS_INO_DATA_INVAL_DEFER);
-	else if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-		nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
+	if (inode->i_mapping->nrpages == 0) {
+		nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+		nfs_ooo_clear(nfsi);
+	} else if (nfsi->cache_validity & NFS_INO_INVALID_DATA) {
+		nfs_ooo_clear(nfsi);
+	}
 	trace_nfs_set_cache_invalid(inode, 0);
 }
 EXPORT_SYMBOL_GPL(nfs_set_cache_invalid);
@@ -677,9 +678,10 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
 	trace_nfs_size_truncate(inode, offset);
 	i_size_write(inode, offset);
 	/* Optimisation */
-	if (offset == 0)
-		NFS_I(inode)->cache_validity &= ~(NFS_INO_INVALID_DATA |
-				NFS_INO_DATA_INVAL_DEFER);
+	if (offset == 0) {
+		NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
+		nfs_ooo_clear(NFS_I(inode));
+	}
 	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
 
 	spin_unlock(&inode->i_lock);
@@ -1107,7 +1109,7 @@ void nfs_inode_attach_open_context(struct nfs_open_context *ctx)
 	spin_lock(&inode->i_lock);
 	if (list_empty(&nfsi->open_files) &&
-	    (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER))
+	    nfs_ooo_test(nfsi))
 		nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA |
 				      NFS_INO_REVAL_FORCED);
 	list_add_tail_rcu(&ctx->list, &nfsi->open_files);
@@ -1351,8 +1353,8 @@ int nfs_clear_invalid_mapping(struct address_space *mapping)
 	set_bit(NFS_INO_INVALIDATING, bitlock);
 	smp_wmb();
-	nfsi->cache_validity &=
-		~(NFS_INO_INVALID_DATA | NFS_INO_DATA_INVAL_DEFER);
+	nfsi->cache_validity &= ~NFS_INO_INVALID_DATA;
+	nfs_ooo_clear(nfsi);
 	spin_unlock(&inode->i_lock);
 	trace_nfs_invalidate_mapping_enter(inode);
 	ret = nfs_invalidate_mapping(inode, mapping);
@@ -1814,6 +1816,66 @@ static int nfs_inode_finish_partial_attr_update(const struct nfs_fattr *fattr,
 	return 0;
 }
 
+static void nfs_ooo_merge(struct nfs_inode *nfsi,
+			  u64 start, u64 end)
+{
+	int i, cnt;
+
+	if (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER)
+		/* No point merging anything */
+		return;
+
+	if (!nfsi->ooo) {
+		nfsi->ooo = kmalloc(sizeof(*nfsi->ooo), GFP_ATOMIC);
+		if (!nfsi->ooo) {
+			nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+			return;
+		}
+		nfsi->ooo->cnt = 0;
+	}
+
+	/* add this range, merging if possible */
+	cnt = nfsi->ooo->cnt;
+	for (i = 0; i < cnt; i++) {
+		if (end == nfsi->ooo->gap[i].start)
+			end = nfsi->ooo->gap[i].end;
+		else if (start == nfsi->ooo->gap[i].end)
+			start = nfsi->ooo->gap[i].start;
+		else
+			continue;
+		/* Remove 'i' from table and loop to insert the new range */
+		cnt -= 1;
+		nfsi->ooo->gap[i] = nfsi->ooo->gap[cnt];
+		i = -1;
+	}
+
+	if (start != end) {
+		if (cnt >= ARRAY_SIZE(nfsi->ooo->gap)) {
+			nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+			kfree(nfsi->ooo);
+			nfsi->ooo = NULL;
+			return;
+		}
+		nfsi->ooo->gap[cnt].start = start;
+		nfsi->ooo->gap[cnt].end = end;
+		cnt += 1;
+	}
+	nfsi->ooo->cnt = cnt;
+}
+
+static void nfs_ooo_record(struct nfs_inode *nfsi,
+			   struct nfs_fattr *fattr)
+{
+	/* This reply was out-of-order, so record in the
+	 * pre/post change id, possibly cancelling
+	 * gaps created when iversion was jumped forward.
+	 */
+	if ((fattr->valid & NFS_ATTR_FATTR_CHANGE) &&
+	    (fattr->valid & NFS_ATTR_FATTR_PRECHANGE))
+		nfs_ooo_merge(nfsi,
+			      fattr->change_attr,
+			      fattr->pre_change_attr);
+}
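
To make the merge rules concrete: suppose a reply for the change 6->9 overtakes the reply for 4->6. A minimal userspace model of the logic above (a sketch; the kernel version also handles allocation failure and table overflow via NFS_INO_DATA_INVAL_DEFER):

#include <stdio.h>

#define MAX_GAPS 16

static struct { unsigned long long start, end; } gap[MAX_GAPS];
static int cnt;

static void ooo_merge(unsigned long long start, unsigned long long end)
{
	int i;

	for (i = 0; i < cnt; i++) {
		if (end == gap[i].start)
			end = gap[i].end;
		else if (start == gap[i].end)
			start = gap[i].start;
		else
			continue;
		gap[i] = gap[--cnt];	/* remove entry i and rescan */
		i = -1;
	}
	if (start != end && cnt < MAX_GAPS) {
		gap[cnt].start = start;
		gap[cnt].end = end;
		cnt++;
	}
}

int main(void)
{
	/* iversion is 4; the reply with pre=6/post=9 arrives first */
	ooo_merge(9, 6);	/* nfs_ooo_record(): seen section, reversed */
	ooo_merge(4, 9);	/* nfs_ooo_merge(): iversion jumped 4 -> 9 */
	printf("gaps: %d (%llu..%llu)\n", cnt, gap[0].start, gap[0].end);
	/* the delayed reply with pre=4/post=6 finally arrives */
	ooo_merge(6, 4);	/* cancels the recorded gap */
	printf("gaps: %d\n", cnt);	/* 0: nothing was missed */
	return 0;
}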
 static int nfs_refresh_inode_locked(struct inode *inode,
 				    struct nfs_fattr *fattr)
 {
@@ -1824,8 +1886,12 @@ static int nfs_refresh_inode_locked(struct inode *inode,
 
 	if (attr_cmp > 0 || nfs_inode_finish_partial_attr_update(fattr, inode))
 		ret = nfs_update_inode(inode, fattr);
-	else if (attr_cmp == 0)
-		ret = nfs_check_inode_attributes(inode, fattr);
+	else {
+		nfs_ooo_record(NFS_I(inode), fattr);
+
+		if (attr_cmp == 0)
+			ret = nfs_check_inode_attributes(inode, fattr);
+	}
 
 	trace_nfs_refresh_inode_exit(inode, ret);
 	return ret;
@@ -1916,6 +1982,8 @@ int nfs_post_op_update_inode_force_wcc_locked(struct inode *inode, struct nfs_fa
 	if (attr_cmp < 0)
 		return 0;
 	if ((fattr->valid & NFS_ATTR_FATTR) == 0 || !attr_cmp) {
+		/* Record the pre/post change info before clearing PRECHANGE */
+		nfs_ooo_record(NFS_I(inode), fattr);
 		fattr->valid &= ~(NFS_ATTR_FATTR_PRECHANGE
 				| NFS_ATTR_FATTR_PRESIZE
 				| NFS_ATTR_FATTR_PREMTIME
@@ -2070,6 +2138,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 
 	/* More cache consistency checks */
 	if (fattr->valid & NFS_ATTR_FATTR_CHANGE) {
+		if (!have_writers && nfsi->ooo && nfsi->ooo->cnt == 1 &&
+		    nfsi->ooo->gap[0].end == inode_peek_iversion_raw(inode)) {
+			/* There is one remaining gap that hasn't been
+			 * merged into iversion - do that now.
+			 */
+			inode_set_iversion_raw(inode, nfsi->ooo->gap[0].start);
+			kfree(nfsi->ooo);
+			nfsi->ooo = NULL;
+		}
 		if (!inode_eq_iversion_raw(inode, fattr->change_attr)) {
 			/* Could it be a race with writeback? */
 			if (!(have_writers || have_delegation)) {
@@ -2091,8 +2168,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
 				dprintk("NFS: change_attr change on server for file %s/%ld\n",
 						inode->i_sb->s_id,
 						inode->i_ino);
-			} else if (!have_delegation)
-				nfsi->cache_validity |= NFS_INO_DATA_INVAL_DEFER;
+			} else if (!have_delegation) {
+				nfs_ooo_record(nfsi, fattr);
+				nfs_ooo_merge(nfsi, inode_peek_iversion_raw(inode),
+					      fattr->change_attr);
+			}
 			inode_set_iversion_raw(inode, fattr->change_attr);
 		}
 	} else {
@@ -2246,18 +2326,22 @@ struct inode *nfs_alloc_inode(struct super_block *sb)
 		return NULL;
 	nfsi->flags = 0UL;
 	nfsi->cache_validity = 0UL;
+	nfsi->ooo = NULL;
 #if IS_ENABLED(CONFIG_NFS_V4)
 	nfsi->nfs4_acl = NULL;
 #endif /* CONFIG_NFS_V4 */
 #ifdef CONFIG_NFS_V4_2
 	nfsi->xattr_cache = NULL;
 #endif
+	nfs_netfs_inode_init(nfsi);
+
 	return &nfsi->vfs_inode;
 }
 EXPORT_SYMBOL_GPL(nfs_alloc_inode);
 
 void nfs_free_inode(struct inode *inode)
 {
+	kfree(NFS_I(inode)->ooo);
 	kmem_cache_free(nfs_inode_cachep, NFS_I(inode));
 }
 EXPORT_SYMBOL_GPL(nfs_free_inode);
......
@@ -452,6 +452,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
 extern int nfs_client_for_each_server(struct nfs_client *clp,
 				      int (*fn)(struct nfs_server *, void *),
 				      void *data);
+#ifdef CONFIG_NFS_FSCACHE
+extern const struct netfs_request_ops nfs_netfs_ops;
+#endif
+
 /* io.c */
 extern void nfs_start_io_read(struct inode *inode);
 extern void nfs_end_io_read(struct inode *inode);
@@ -481,9 +485,14 @@ extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh, bool
 struct nfs_pgio_completion_ops;
 
 /* read.c */
+extern const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
 			struct inode *inode, bool force_mds,
 			const struct nfs_pgio_completion_ops *compl_ops);
+extern int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+			      struct nfs_open_context *ctx,
+			      struct folio *folio);
+extern void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio);
 extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
 extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
......
@@ -17,9 +17,6 @@
 struct nfs_iostats {
 	unsigned long long	bytes[__NFSIOS_BYTESMAX];
-#ifdef CONFIG_NFS_FSCACHE
-	unsigned long long	fscache[__NFSIOS_FSCACHEMAX];
-#endif
 	unsigned long		events[__NFSIOS_COUNTSMAX];
 } ____cacheline_aligned;
@@ -49,20 +46,6 @@ static inline void nfs_add_stats(const struct inode *inode,
 	nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
 }
 
-#ifdef CONFIG_NFS_FSCACHE
-static inline void nfs_add_fscache_stats(struct inode *inode,
-					 enum nfs_stat_fscachecounters stat,
-					 long addend)
-{
-	this_cpu_add(NFS_SERVER(inode)->io_stats->fscache[stat], addend);
-}
-static inline void nfs_inc_fscache_stats(struct inode *inode,
-					 enum nfs_stat_fscachecounters stat)
-{
-	this_cpu_inc(NFS_SERVER(inode)->io_stats->fscache[stat]);
-}
-#endif
-
 static inline struct nfs_iostats __percpu *nfs_alloc_iostats(void)
 {
 	return alloc_percpu(struct nfs_iostats);
......
@@ -1122,7 +1122,6 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
 	uint32_t segments;
 	struct read_plus_segment *segs;
 	int status, i;
-	char scratch_buf[16];
 	__be32 *p;
 
 	status = decode_op_hdr(xdr, OP_READ_PLUS);
@@ -1143,7 +1142,6 @@ static int decode_read_plus(struct xdr_stream *xdr, struct nfs_pgio_res *res)
 	if (!segs)
 		return -ENOMEM;
 
-	xdr_set_scratch_buffer(xdr, &scratch_buf, sizeof(scratch_buf));
 	status = -EIO;
 	for (i = 0; i < segments; i++) {
 		status = decode_read_plus_segment(xdr, &segs[i]);
@@ -1348,6 +1346,8 @@ static int nfs4_xdr_dec_read_plus(struct rpc_rqst *rqstp,
 	struct compound_hdr hdr;
 	int status;
 
+	xdr_set_scratch_buffer(xdr, res->scratch, sizeof(res->scratch));
+
 	status = decode_compound_hdr(xdr, &hdr);
 	if (status)
 		goto out;
......
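The scratch rework above appears to be about lifetime: xdr_set_scratch_buffer() makes the stream remember the buffer, so a buffer on decode_read_plus()'s stack is only valid while that frame is live. Moving it into nfs_pgio_res ties it to the RPC instead; a condensed view of the resulting pattern (names taken from this diff, the rationale is an editor's inference):

/* Scratch buffer lifetime after this rework (condensed):
 *
 *   nfs42_read_plus_support():   hdr->res.scratch = kmalloc(32, GFP_KERNEL);
 *   nfs4_xdr_dec_read_plus():    xdr_set_scratch_buffer(xdr, res->scratch, ...);
 *                                ... decode_read_plus() may copy fragmented
 *                                    XDR items through the scratch buffer ...
 *   nfs4_read_done():            kfree(hdr->res.scratch);
 *
 * i.e. the buffer now lives for the whole decode of the compound reply
 * rather than for a single function call.
 */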
@@ -5439,6 +5439,8 @@ static bool nfs4_read_plus_not_supported(struct rpc_task *task,
 
 static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 {
+	if (hdr->res.scratch)
+		kfree(hdr->res.scratch);
 	if (!nfs4_sequence_done(task, &hdr->res.seq_res))
 		return -EAGAIN;
 	if (nfs4_read_stateid_changed(task, &hdr->args))
@@ -5452,17 +5454,22 @@ static int nfs4_read_done(struct rpc_task *task, struct nfs_pgio_header *hdr)
 }
 
 #if defined CONFIG_NFS_V4_2 && defined CONFIG_NFS_V4_2_READ_PLUS
-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
 				    struct rpc_message *msg)
 {
 	/* Note: We don't use READ_PLUS with pNFS yet */
-	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp)
+	if (nfs_server_capable(hdr->inode, NFS_CAP_READ_PLUS) && !hdr->ds_clp) {
 		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ_PLUS];
+		hdr->res.scratch = kmalloc(32, GFP_KERNEL);
+		return hdr->res.scratch != NULL;
+	}
+	return false;
 }
 #else
-static void nfs42_read_plus_support(struct nfs_pgio_header *hdr,
+static bool nfs42_read_plus_support(struct nfs_pgio_header *hdr,
 				    struct rpc_message *msg)
 {
+	return false;
 }
 #endif /* CONFIG_NFS_V4_2 */
@@ -5472,8 +5479,8 @@ static void nfs4_proc_read_setup(struct nfs_pgio_header *hdr,
 	hdr->timestamp   = jiffies;
 	if (!hdr->pgio_done_cb)
 		hdr->pgio_done_cb = nfs4_read_done_cb;
-	msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
-	nfs42_read_plus_support(hdr, msg);
+	if (!nfs42_read_plus_support(hdr, msg))
+		msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_READ];
 	nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
 }
......
@@ -67,6 +67,8 @@
 
 #define OPENOWNER_POOL_SIZE	8
 
+static void nfs4_state_start_reclaim_reboot(struct nfs_client *clp);
+
 const nfs4_stateid zero_stateid = {
 	{ .data = { 0 } },
 	.type = NFS4_SPECIAL_STATEID_TYPE,
@@ -330,6 +332,8 @@ int nfs41_init_clientid(struct nfs_client *clp, const struct cred *cred)
 	status = nfs4_proc_create_session(clp, cred);
 	if (status != 0)
 		goto out;
+	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_CONFIRMED_R))
+		nfs4_state_start_reclaim_reboot(clp);
 	nfs41_finish_session_reset(clp);
 	nfs_mark_client_ready(clp, NFS_CS_READY);
 out:
@@ -1205,10 +1209,6 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
 {
 	struct task_struct *task;
 	char buf[INET6_ADDRSTRLEN + sizeof("-manager") + 1];
-	struct rpc_clnt *cl = clp->cl_rpcclient;
-
-	while (cl != cl->cl_parent)
-		cl = cl->cl_parent;
 
 	set_bit(NFS4CLNT_RUN_MANAGER, &clp->cl_state);
 	if (test_and_set_bit(NFS4CLNT_MANAGER_AVAILABLE, &clp->cl_state) != 0) {
......
@@ -39,7 +39,6 @@
 			{ BIT(NFS_INO_STALE), "STALE" }, \
 			{ BIT(NFS_INO_ACL_LRU_SET), "ACL_LRU_SET" }, \
 			{ BIT(NFS_INO_INVALIDATING), "INVALIDATING" }, \
-			{ BIT(NFS_INO_FSCACHE), "FSCACHE" }, \
 			{ BIT(NFS_INO_LAYOUTCOMMIT), "NEED_LAYOUTCOMMIT" }, \
 			{ BIT(NFS_INO_LAYOUTCOMMITTING), "LAYOUTCOMMIT" }, \
 			{ BIT(NFS_INO_LAYOUTSTATS), "LAYOUTSTATS" }, \
@@ -1243,96 +1242,6 @@ TRACE_EVENT(nfs_readpage_short,
 	)
 );
 
-DECLARE_EVENT_CLASS(nfs_fscache_page_event,
-		TP_PROTO(
-			const struct inode *inode,
-			struct page *page
-		),
-		TP_ARGS(inode, page),
-		TP_STRUCT__entry(
-			__field(dev_t, dev)
-			__field(u32, fhandle)
-			__field(u64, fileid)
-			__field(loff_t, offset)
-		),
-		TP_fast_assign(
-			const struct nfs_inode *nfsi = NFS_I(inode);
-			const struct nfs_fh *fh = &nfsi->fh;
-
-			__entry->offset = page_index(page) << PAGE_SHIFT;
-			__entry->dev = inode->i_sb->s_dev;
-			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(fh);
-		),
-		TP_printk(
-			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld",
-			MAJOR(__entry->dev), MINOR(__entry->dev),
-			(unsigned long long)__entry->fileid,
-			__entry->fhandle,
-			(long long)__entry->offset
-		)
-);
-
-DECLARE_EVENT_CLASS(nfs_fscache_page_event_done,
-		TP_PROTO(
-			const struct inode *inode,
-			struct page *page,
-			int error
-		),
-		TP_ARGS(inode, page, error),
-		TP_STRUCT__entry(
-			__field(int, error)
-			__field(dev_t, dev)
-			__field(u32, fhandle)
-			__field(u64, fileid)
-			__field(loff_t, offset)
-		),
-		TP_fast_assign(
-			const struct nfs_inode *nfsi = NFS_I(inode);
-			const struct nfs_fh *fh = &nfsi->fh;
-
-			__entry->offset = page_index(page) << PAGE_SHIFT;
-			__entry->dev = inode->i_sb->s_dev;
-			__entry->fileid = nfsi->fileid;
-			__entry->fhandle = nfs_fhandle_hash(fh);
-			__entry->error = error;
-		),
-		TP_printk(
-			"fileid=%02x:%02x:%llu fhandle=0x%08x "
-			"offset=%lld error=%d",
-			MAJOR(__entry->dev), MINOR(__entry->dev),
-			(unsigned long long)__entry->fileid,
-			__entry->fhandle,
-			(long long)__entry->offset, __entry->error
-		)
-);
-
-#define DEFINE_NFS_FSCACHE_PAGE_EVENT(name) \
-	DEFINE_EVENT(nfs_fscache_page_event, name, \
-			TP_PROTO( \
-				const struct inode *inode, \
-				struct page *page \
-			), \
-			TP_ARGS(inode, page))
-#define DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(name) \
-	DEFINE_EVENT(nfs_fscache_page_event_done, name, \
-			TP_PROTO( \
-				const struct inode *inode, \
-				struct page *page, \
-				int error \
-			), \
-			TP_ARGS(inode, page, error))
-DEFINE_NFS_FSCACHE_PAGE_EVENT(nfs_fscache_read_page);
-DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(nfs_fscache_read_page_exit);
-DEFINE_NFS_FSCACHE_PAGE_EVENT(nfs_fscache_write_page);
-DEFINE_NFS_FSCACHE_PAGE_EVENT_DONE(nfs_fscache_write_page_exit);
-
 TRACE_EVENT(nfs_pgio_error,
 	TP_PROTO(
......
@@ -26,6 +26,7 @@
 #include "internal.h"
 #include "pnfs.h"
 #include "nfstrace.h"
+#include "fscache.h"
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
@@ -105,6 +106,7 @@ void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
 	hdr->good_bytes = mirror->pg_count;
 	hdr->io_completion = desc->pg_io_completion;
 	hdr->dreq = desc->pg_dreq;
+	nfs_netfs_set_pgio_header(hdr, desc);
 	hdr->release = release;
 	hdr->completion_ops = desc->pg_completion_ops;
 	if (hdr->completion_ops->init_hdr)
@@ -941,6 +943,7 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
 	desc->pg_lseg = NULL;
 	desc->pg_io_completion = NULL;
 	desc->pg_dreq = NULL;
+	nfs_netfs_reset_pageio_descriptor(desc);
 	desc->pg_bsize = bsize;
 	desc->pg_mirror_count = 1;
@@ -1477,6 +1480,7 @@ int nfs_pageio_resend(struct nfs_pageio_descriptor *desc,
 
 	desc->pg_io_completion = hdr->io_completion;
 	desc->pg_dreq = hdr->dreq;
+	nfs_netfs_set_pageio_descriptor(desc, hdr);
 	list_splice_init(&hdr->pages, &pages);
 	while (!list_empty(&pages)) {
 		struct nfs_page *req = nfs_list_entry(pages.next);
......
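The three one-line hunks above are plumbing: they carry the netfs pointer between the pageio descriptor and each generated pgio header, including across a resend. In summary (field names from this diff):

/*
 * pg_netfs / hdr->netfs plumbing (summary of the hunks above):
 *
 *   nfs_netfs_issue_read():   pgio.pg_netfs = netfs            set by the issuer
 *   nfs_pgheader_init():      hdr->netfs = desc->pg_netfs      descriptor -> header
 *   nfs_pageio_init():        desc->pg_netfs = NULL            fresh descriptor
 *   nfs_pageio_resend():      desc->pg_netfs = hdr->netfs      header -> descriptor on retry
 *
 * so every nfs_pgio_header issued for a netfs subrequest can find its
 * nfs_netfs_io_data again in nfs_netfs_read_completion().
 */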
@@ -31,7 +31,7 @@
 
 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
 
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
 static const struct nfs_rw_ops nfs_rw_read_ops;
 
 static struct kmem_cache *nfs_rdata_cachep;
@@ -74,7 +74,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
 }
 EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
 
-static void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
+void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
 {
 	struct nfs_pgio_mirror *pgm;
 	unsigned long npages;
@@ -110,28 +110,17 @@ EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
 
 static void nfs_readpage_release(struct nfs_page *req, int error)
 {
-	struct inode *inode = d_inode(nfs_req_openctx(req)->dentry);
 	struct folio *folio = nfs_page_to_folio(req);
 
-	dprintk("NFS: read done (%s/%llu %d@%lld)\n", inode->i_sb->s_id,
-		(unsigned long long)NFS_FILEID(inode), req->wb_bytes,
-		(long long)req_offset(req));
-
 	if (nfs_error_is_fatal_on_server(error) && error != -ETIMEDOUT)
 		folio_set_error(folio);
-	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE)) {
-		if (folio_test_uptodate(folio))
-			nfs_fscache_write_page(inode, &folio->page);
-		folio_unlock(folio);
-	}
+	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
+		if (nfs_netfs_folio_unlock(folio))
+			folio_unlock(folio);
+
 	nfs_release_request(req);
 }
 
-struct nfs_readdesc {
-	struct nfs_pageio_descriptor pgio;
-	struct nfs_open_context *ctx;
-};
-
 static void nfs_page_group_set_uptodate(struct nfs_page *req)
 {
 	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
@@ -153,7 +142,8 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
 
 		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
 			/* note: regions of the page not covered by a
-			 * request are zeroed in readpage_async_filler */
+			 * request are zeroed in nfs_read_add_folio
+			 */
 			if (bytes > hdr->good_bytes) {
 				/* nothing in this request was good, so zero
 				 * the full extent of the request */
@@ -181,6 +171,8 @@ static void nfs_read_completion(struct nfs_pgio_header *hdr)
 		nfs_list_remove_request(req);
 		nfs_readpage_release(req, error);
 	}
+	nfs_netfs_read_completion(hdr);
+
 out:
 	hdr->release(hdr);
 }
@@ -191,6 +183,7 @@ static void nfs_initiate_read(struct nfs_pgio_header *hdr,
 			      struct rpc_task_setup *task_setup_data, int how)
 {
 	rpc_ops->read_setup(hdr, msg);
+	nfs_netfs_initiate_read(hdr);
 	trace_nfs_initiate_read(hdr);
 }
 
@@ -206,7 +199,7 @@ nfs_async_read_error(struct list_head *head, int error)
 	}
 }
 
-static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
+const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
 	.error_cleanup = nfs_async_read_error,
 	.completion = nfs_read_completion,
 };
@@ -281,7 +274,9 @@ static void nfs_readpage_result(struct rpc_task *task,
 		nfs_readpage_retry(task, hdr);
 }
 
-static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
+int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
+		       struct nfs_open_context *ctx,
+		       struct folio *folio)
 {
 	struct inode *inode = folio_file_mapping(folio)->host;
 	struct nfs_server *server = NFS_SERVER(inode);
@@ -297,29 +292,21 @@ static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
 
 	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);
 
-	if (!IS_SYNC(inode)) {
-		error = nfs_fscache_read_page(inode, &folio->page);
-		if (error == 0)
-			goto out_unlock;
+	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
+	if (IS_ERR(new)) {
+		error = PTR_ERR(new);
+		goto out;
 	}
 
-	new = nfs_page_create_from_folio(desc->ctx, folio, 0, aligned_len);
-	if (IS_ERR(new))
-		goto out_error;
-
 	if (len < fsize)
 		folio_zero_segment(folio, len, fsize);
-	if (!nfs_pageio_add_request(&desc->pgio, new)) {
+	if (!nfs_pageio_add_request(pgio, new)) {
 		nfs_list_remove_request(new);
-		error = desc->pgio.pg_error;
+		error = pgio->pg_error;
 		nfs_readpage_release(new, error);
 		goto out;
 	}
 	return 0;
-out_error:
-	error = PTR_ERR(new);
-out_unlock:
-	folio_unlock(folio);
 out:
 	return error;
 }
@@ -332,8 +319,9 @@ static int readpage_async_filler(struct nfs_readdesc *desc, struct folio *folio)
  */
 int nfs_read_folio(struct file *file, struct folio *folio)
 {
-	struct nfs_readdesc desc;
 	struct inode *inode = file_inode(file);
+	struct nfs_pageio_descriptor pgio;
+	struct nfs_open_context *ctx;
 	int ret;
 
 	trace_nfs_aop_readpage(inode, folio);
@@ -357,38 +345,43 @@ int nfs_read_folio(struct file *file, struct folio *folio)
 	if (NFS_STALE(inode))
 		goto out_unlock;
 
-	desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+	ret = nfs_netfs_read_folio(file, folio);
+	if (!ret)
+		goto out;
 
-	xchg(&desc.ctx->error, 0);
-	nfs_pageio_init_read(&desc.pgio, inode, false,
+	ctx = get_nfs_open_context(nfs_file_open_context(file));
+
+	xchg(&ctx->error, 0);
+	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
 
-	ret = readpage_async_filler(&desc, folio);
+	ret = nfs_read_add_folio(&pgio, ctx, folio);
 	if (ret)
-		goto out;
+		goto out_put;
 
-	nfs_pageio_complete_read(&desc.pgio);
-	ret = desc.pgio.pg_error < 0 ? desc.pgio.pg_error : 0;
+	nfs_pageio_complete_read(&pgio);
+	ret = pgio.pg_error < 0 ? pgio.pg_error : 0;
 	if (!ret) {
 		ret = folio_wait_locked_killable(folio);
 		if (!folio_test_uptodate(folio) && !ret)
-			ret = xchg(&desc.ctx->error, 0);
+			ret = xchg(&ctx->error, 0);
 	}
+out_put:
+	put_nfs_open_context(ctx);
 out:
-	put_nfs_open_context(desc.ctx);
 	trace_nfs_aop_readpage_done(inode, folio, ret);
 	return ret;
 out_unlock:
 	folio_unlock(folio);
-	trace_nfs_aop_readpage_done(inode, folio, ret);
-	return ret;
+	goto out;
 }
 
 void nfs_readahead(struct readahead_control *ractl)
 {
+	struct nfs_pageio_descriptor pgio;
+	struct nfs_open_context *ctx;
 	unsigned int nr_pages = readahead_count(ractl);
 	struct file *file = ractl->file;
-	struct nfs_readdesc desc;
 	struct inode *inode = ractl->mapping->host;
 	struct folio *folio;
 	int ret;
@@ -401,26 +394,30 @@ void nfs_readahead(struct readahead_control *ractl)
 	if (NFS_STALE(inode))
 		goto out;
 
+	ret = nfs_netfs_readahead(ractl);
+	if (!ret)
+		goto out;
+
 	if (file == NULL) {
 		ret = -EBADF;
-		desc.ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
-		if (desc.ctx == NULL)
+		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
+		if (ctx == NULL)
 			goto out;
 	} else
-		desc.ctx = get_nfs_open_context(nfs_file_open_context(file));
+		ctx = get_nfs_open_context(nfs_file_open_context(file));
 
-	nfs_pageio_init_read(&desc.pgio, inode, false,
+	nfs_pageio_init_read(&pgio, inode, false,
 			     &nfs_async_read_completion_ops);
 
 	while ((folio = readahead_folio(ractl)) != NULL) {
-		ret = readpage_async_filler(&desc, folio);
+		ret = nfs_read_add_folio(&pgio, ctx, folio);
 		if (ret)
 			break;
 	}
 
-	nfs_pageio_complete_read(&desc.pgio);
+	nfs_pageio_complete_read(&pgio);
 
-	put_nfs_open_context(desc.ctx);
+	put_nfs_open_context(ctx);
 out:
 	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
 }
......
@@ -692,10 +692,6 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
 			totals.events[i] += stats->events[i];
 		for (i = 0; i < __NFSIOS_BYTESMAX; i++)
 			totals.bytes[i] += stats->bytes[i];
-#ifdef CONFIG_NFS_FSCACHE
-		for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
-			totals.fscache[i] += stats->fscache[i];
-#endif
 
 		preempt_enable();
 	}
@@ -706,13 +702,6 @@ int nfs_show_stats(struct seq_file *m, struct dentry *root)
 	seq_puts(m, "\n\tbytes:\t");
 	for (i = 0; i < __NFSIOS_BYTESMAX; i++)
 		seq_printf(m, "%Lu ", totals.bytes[i]);
-#ifdef CONFIG_NFS_FSCACHE
-	if (nfss->options & NFS_OPTION_FSCACHE) {
-		seq_puts(m, "\n\tfsc:\t");
-		for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
-			seq_printf(m, "%Lu ", totals.fscache[i]);
-	}
-#endif
 	seq_putc(m, '\n');
 
 	rpc_clnt_show_stats(m, nfss->client);
......
...@@ -31,6 +31,10 @@ ...@@ -31,6 +31,10 @@
#include <linux/sunrpc/auth.h> #include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h> #include <linux/sunrpc/clnt.h>
#ifdef CONFIG_NFS_FSCACHE
#include <linux/netfs.h>
#endif
#include <linux/nfs.h> #include <linux/nfs.h>
#include <linux/nfs2.h> #include <linux/nfs2.h>
#include <linux/nfs3.h> #include <linux/nfs3.h>
...@@ -191,6 +195,39 @@ struct nfs_inode { ...@@ -191,6 +195,39 @@ struct nfs_inode {
/* Open contexts for shared mmap writes */ /* Open contexts for shared mmap writes */
struct list_head open_files; struct list_head open_files;
/* Keep track of out-of-order replies.
* The ooo array contains start/end pairs of
* numbers from the changeid sequence when
* the inode's iversion has been updated.
* It also contains end/start pair (i.e. reverse order)
* of sections of the changeid sequence that have
* been seen in replies from the server.
* Normally these should match and when both
* A:B and B:A are found in ooo, they are both removed.
* And if a reply with A:B causes an iversion update
* of A:B, then neither are added.
* When a reply has pre_change that doesn't match
* iversion, then the changeid pair and any consequent
* change in iversion ARE added. Later replies
* might fill in the gaps, or possibly a gap is caused
* by a change from another client.
* When a file or directory is opened, if the ooo table
* is not empty, then we assume the gaps were due to
* another client and we invalidate the cached data.
*
* We can only track a limited number of concurrent gaps.
* Currently that limit is 16.
* We allocate the table on demand. If there is insufficient
* memory, then we probably cannot cache the file anyway
* so there is no loss.
*/
struct {
int cnt;
struct {
u64 start, end;
} gap[16];
} *ooo;
#if IS_ENABLED(CONFIG_NFS_V4) #if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl; struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */ /* NFSv4 state */
...@@ -204,14 +241,15 @@ struct nfs_inode { ...@@ -204,14 +241,15 @@ struct nfs_inode {
/* how many bytes have been written/read and how many bytes queued up */ /* how many bytes have been written/read and how many bytes queued up */
__u64 write_io; __u64 write_io;
__u64 read_io; __u64 read_io;
#ifdef CONFIG_NFS_FSCACHE
struct fscache_cookie *fscache;
#endif
struct inode vfs_inode;
#ifdef CONFIG_NFS_V4_2 #ifdef CONFIG_NFS_V4_2
struct nfs4_xattr_cache *xattr_cache; struct nfs4_xattr_cache *xattr_cache;
#endif #endif
union {
struct inode vfs_inode;
#ifdef CONFIG_NFS_FSCACHE
struct netfs_inode netfs; /* netfs context and VFS inode */
#endif
};
}; };
struct nfs4_copy_state { struct nfs4_copy_state {
...@@ -276,7 +314,6 @@ struct nfs4_copy_state { ...@@ -276,7 +314,6 @@ struct nfs4_copy_state {
#define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */ #define NFS_INO_ACL_LRU_SET (2) /* Inode is on the LRU list */
#define NFS_INO_INVALIDATING (3) /* inode is being invalidated */ #define NFS_INO_INVALIDATING (3) /* inode is being invalidated */
#define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */ #define NFS_INO_PRESERVE_UNLINKED (4) /* preserve file if removed while open */
#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ #define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ #define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
#define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */ #define NFS_INO_LAYOUTSTATS (11) /* layoutstats inflight */
...@@ -329,15 +366,6 @@ static inline int NFS_STALE(const struct inode *inode) ...@@ -329,15 +366,6 @@ static inline int NFS_STALE(const struct inode *inode)
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags); return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
} }
static inline struct fscache_cookie *nfs_i_fscache(struct inode *inode)
{
#ifdef CONFIG_NFS_FSCACHE
return NFS_I(inode)->fscache;
#else
return NULL;
#endif
}
static inline __u64 NFS_FILEID(const struct inode *inode) static inline __u64 NFS_FILEID(const struct inode *inode)
{ {
return NFS_I(inode)->fileid; return NFS_I(inode)->fileid;
...@@ -617,6 +645,20 @@ nfs_fileid_to_ino_t(u64 fileid) ...@@ -617,6 +645,20 @@ nfs_fileid_to_ino_t(u64 fileid)
return ino; return ino;
} }
static inline void nfs_ooo_clear(struct nfs_inode *nfsi)
{
nfsi->cache_validity &= ~NFS_INO_DATA_INVAL_DEFER;
kfree(nfsi->ooo);
nfsi->ooo = NULL;
}
static inline bool nfs_ooo_test(struct nfs_inode *nfsi)
{
return (nfsi->cache_validity & NFS_INO_DATA_INVAL_DEFER) ||
(nfsi->ooo && nfsi->ooo->cnt > 0);
}
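A hypothetical open-time caller, following the policy in the struct nfs_inode comment above; the real call sites differ, and nfs_set_cache_invalid() is assumed here as the invalidation primitive:

static void nfs_open_ooo_check_sketch(struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_ooo_test(nfsi)) {
		/* unmatched gaps remain: assume another client wrote
		 * to the file and drop any cached data
		 */
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
		nfs_ooo_clear(nfsi);
	}
}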
#define NFS_JUKEBOX_RETRY_TIME (5 * HZ) #define NFS_JUKEBOX_RETRY_TIME (5 * HZ)
/* We need to block new opens while a file is being unlinked. /* We need to block new opens while a file is being unlinked.
......
...@@ -119,16 +119,4 @@ enum nfs_stat_eventcounters { ...@@ -119,16 +119,4 @@ enum nfs_stat_eventcounters {
__NFSIOS_COUNTSMAX, __NFSIOS_COUNTSMAX,
}; };
/*
* NFS local caching servicing counters
*/
enum nfs_stat_fscachecounters {
NFSIOS_FSCACHE_PAGES_READ_OK,
NFSIOS_FSCACHE_PAGES_READ_FAIL,
NFSIOS_FSCACHE_PAGES_WRITTEN_OK,
NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL,
NFSIOS_FSCACHE_PAGES_UNCACHED,
__NFSIOS_FSCACHEMAX,
};
#endif /* _LINUX_NFS_IOSTAT */ #endif /* _LINUX_NFS_IOSTAT */
...@@ -105,6 +105,9 @@ struct nfs_pageio_descriptor { ...@@ -105,6 +105,9 @@ struct nfs_pageio_descriptor {
struct pnfs_layout_segment *pg_lseg; struct pnfs_layout_segment *pg_lseg;
struct nfs_io_completion *pg_io_completion; struct nfs_io_completion *pg_io_completion;
struct nfs_direct_req *pg_dreq; struct nfs_direct_req *pg_dreq;
#ifdef CONFIG_NFS_FSCACHE
void *pg_netfs;
#endif
unsigned int pg_bsize; /* default bsize for mirrors */ unsigned int pg_bsize; /* default bsize for mirrors */
u32 pg_mirror_count; u32 pg_mirror_count;
......
...@@ -670,6 +670,7 @@ struct nfs_pgio_res { ...@@ -670,6 +670,7 @@ struct nfs_pgio_res {
struct { struct {
unsigned int replen; /* used by read */ unsigned int replen; /* used by read */
int eof; /* used by read */ int eof; /* used by read */
void * scratch; /* used by read */
}; };
struct { struct {
struct nfs_writeverf * verf; /* used by write */ struct nfs_writeverf * verf; /* used by write */
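The new scratch field gives the READ_PLUS decoder a buffer for XDR items that straddle page boundaries. A sketch of its use, assuming the generic xdr_set_scratch_buffer() helper and an illustrative 16-byte size (the real decode path is in the NFS v4.2 XDR code):

static int decode_read_plus_sketch(struct xdr_stream *xdr,
				   struct nfs_pgio_res *res)
{
	if (res->scratch)
		xdr_set_scratch_buffer(xdr, res->scratch, 16);
	/* ... xdr_inline_decode() can now copy boundary-spanning
	 * items into the scratch buffer instead of failing ...
	 */
	return 0;
}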
...@@ -1619,6 +1620,9 @@ struct nfs_pgio_header { ...@@ -1619,6 +1620,9 @@ struct nfs_pgio_header {
const struct nfs_rw_ops *rw_ops; const struct nfs_rw_ops *rw_ops;
struct nfs_io_completion *io_completion; struct nfs_io_completion *io_completion;
struct nfs_direct_req *dreq; struct nfs_direct_req *dreq;
#ifdef CONFIG_NFS_FSCACHE
void *netfs;
#endif
int pnfs_error; int pnfs_error;
int error; /* merge with pnfs_error */ int error; /* merge with pnfs_error */
......
...@@ -90,8 +90,7 @@ struct rpc_task { ...@@ -90,8 +90,7 @@ struct rpc_task {
#endif #endif
unsigned char tk_priority : 2,/* Task priority */ unsigned char tk_priority : 2,/* Task priority */
tk_garb_retry : 2, tk_garb_retry : 2,
tk_cred_retry : 2, tk_cred_retry : 2;
tk_rebind_retry : 2;
}; };
typedef void (*rpc_action)(struct rpc_task *); typedef void (*rpc_action)(struct rpc_task *);
......
...@@ -2050,9 +2050,6 @@ call_bind_status(struct rpc_task *task) ...@@ -2050,9 +2050,6 @@ call_bind_status(struct rpc_task *task)
status = -EOPNOTSUPP; status = -EOPNOTSUPP;
break; break;
} }
if (task->tk_rebind_retry == 0)
break;
task->tk_rebind_retry--;
rpc_delay(task, 3*HZ); rpc_delay(task, 3*HZ);
goto retry_timeout; goto retry_timeout;
case -ENOBUFS: case -ENOBUFS:
......
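With the fixed tk_rebind_retry budget removed, bind failures back off three seconds and retry indefinitely; only the task's own hard/soft timeout policy ends the loop. Roughly (a sketch with hypothetical helpers, not the kernel's state machine):

/* Sketch only: try_bind() and rpc_task_timed_out() are hypothetical. */
static void bind_retry_sketch(struct rpc_task *task)
{
	for (;;) {
		if (try_bind(task) == 0)
			return;			/* bound: continue the call */
		if (rpc_task_timed_out(task))
			return;			/* soft semantics give up here */
		rpc_delay(task, 3 * HZ);	/* same 3 second backoff */
	}
}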
...@@ -817,7 +817,6 @@ rpc_init_task_statistics(struct rpc_task *task) ...@@ -817,7 +817,6 @@ rpc_init_task_statistics(struct rpc_task *task)
/* Initialize retry counters */ /* Initialize retry counters */
task->tk_garb_retry = 2; task->tk_garb_retry = 2;
task->tk_cred_retry = 2; task->tk_cred_retry = 2;
task->tk_rebind_retry = 2;
/* starting timestamp */ /* starting timestamp */
task->tk_start = ktime_get(); task->tk_start = ktime_get();
......
...@@ -40,25 +40,6 @@ EXPORT_SYMBOL_GPL(nlm_debug); ...@@ -40,25 +40,6 @@ EXPORT_SYMBOL_GPL(nlm_debug);
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
static struct ctl_table_header *sunrpc_table_header;
static struct ctl_table sunrpc_table[];
void
rpc_register_sysctl(void)
{
if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table);
}
void
rpc_unregister_sysctl(void)
{
if (sunrpc_table_header) {
unregister_sysctl_table(sunrpc_table_header);
sunrpc_table_header = NULL;
}
}
static int proc_do_xprt(struct ctl_table *table, int write, static int proc_do_xprt(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos) void *buffer, size_t *lenp, loff_t *ppos)
{ {
...@@ -142,6 +123,7 @@ proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp, ...@@ -142,6 +123,7 @@ proc_dodebug(struct ctl_table *table, int write, void *buffer, size_t *lenp,
return 0; return 0;
} }
static struct ctl_table_header *sunrpc_table_header;
static struct ctl_table debug_table[] = { static struct ctl_table debug_table[] = {
{ {
...@@ -181,13 +163,19 @@ static struct ctl_table debug_table[] = { ...@@ -181,13 +163,19 @@ static struct ctl_table debug_table[] = {
{ } { }
}; };
static struct ctl_table sunrpc_table[] = {
{
.procname = "sunrpc",
.mode = 0555,
.child = debug_table
},
{ }
};
void
rpc_register_sysctl(void)
{
if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl("sunrpc", debug_table);
}
void
rpc_unregister_sysctl(void)
{
if (sunrpc_table_header) {
unregister_sysctl_table(sunrpc_table_header);
sunrpc_table_header = NULL;
}
}
#endif #endif
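For comparison, the same one-level pattern as any sysctl user might write it (a sketch; the knob name and variable are illustrative, not from the patch):

static int example_knob;
static struct ctl_table_header *example_header;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_knob,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static int __init example_init(void)
{
	/* creates /proc/sys/sunrpc/example_knob directly; no separate
	 * directory table with a .child pointer is required
	 */
	example_header = register_sysctl("sunrpc", example_table);
	return example_header ? 0 : -ENOMEM;
}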
...@@ -140,15 +140,6 @@ static struct ctl_table xr_tunables_table[] = { ...@@ -140,15 +140,6 @@ static struct ctl_table xr_tunables_table[] = {
{ }, { },
}; };
static struct ctl_table sunrpc_table[] = {
{
.procname = "sunrpc",
.mode = 0555,
.child = xr_tunables_table
},
{ },
};
#endif #endif
static const struct rpc_xprt_ops xprt_rdma_procs; static const struct rpc_xprt_ops xprt_rdma_procs;
...@@ -799,7 +790,7 @@ int xprt_rdma_init(void) ...@@ -799,7 +790,7 @@ int xprt_rdma_init(void)
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG) #if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
if (!sunrpc_table_header) if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table); sunrpc_table_header = register_sysctl("sunrpc", xr_tunables_table);
#endif #endif
return 0; return 0;
} }
...@@ -78,7 +78,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO; ...@@ -78,7 +78,7 @@ static unsigned int xs_tcp_fin_timeout __read_mostly = XS_TCP_LINGER_TO;
/* /*
* We can register our own files under /proc/sys/sunrpc by * We can register our own files under /proc/sys/sunrpc by
* calling register_sysctl_table() again. The files in that * calling register_sysctl() again. The files in that
* directory become the union of all files registered there. * directory become the union of all files registered there.
* *
* We simply need to make sure that we don't collide with * We simply need to make sure that we don't collide with
...@@ -158,15 +158,6 @@ static struct ctl_table xs_tunables_table[] = { ...@@ -158,15 +158,6 @@ static struct ctl_table xs_tunables_table[] = {
{ }, { },
}; };
static struct ctl_table sunrpc_table[] = {
{
.procname = "sunrpc",
.mode = 0555,
.child = xs_tunables_table
},
{ },
};
/* /*
* Wait duration for a reply from the RPC portmapper. * Wait duration for a reply from the RPC portmapper.
*/ */
...@@ -3178,7 +3169,7 @@ static struct xprt_class xs_bc_tcp_transport = { ...@@ -3178,7 +3169,7 @@ static struct xprt_class xs_bc_tcp_transport = {
int init_socket_xprt(void) int init_socket_xprt(void)
{ {
if (!sunrpc_table_header) if (!sunrpc_table_header)
sunrpc_table_header = register_sysctl_table(sunrpc_table); sunrpc_table_header = register_sysctl("sunrpc", xs_tunables_table);
xprt_register_transport(&xs_local_transport); xprt_register_transport(&xs_local_transport);
xprt_register_transport(&xs_udp_transport); xprt_register_transport(&xs_udp_transport);
......