Commit 99f1a587 authored by Linus Torvalds

Merge tag 'nfsd-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux

Pull nfsd updates from Chuck Lever:

 - Update NFSv2 and NFSv3 XDR decoding functions

 - Further improve support for re-exporting NFS mounts

 - Convert NFSD stats to per-CPU counters

 - Add batch Receive posting to the server's RPC/RDMA transport

* tag 'nfsd-5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/cel/linux: (65 commits)
  nfsd: skip some unnecessary stats in the v4 case
  nfs: use change attribute for NFS re-exports
  NFSv4_2: SSC helper should use its own config.
  nfsd: cstate->session->se_client -> cstate->clp
  nfsd: simplify nfsd4_check_open_reclaim
  nfsd: remove unused set_client argument
  nfsd: find_cpntf_state cleanup
  nfsd: refactor set_client
  nfsd: rename lookup_clientid->set_client
  nfsd: simplify nfsd_renew
  nfsd: simplify process_lock
  nfsd4: simplify process_lookup1
  SUNRPC: Correct a comment
  svcrdma: DMA-sync the receive buffer in svc_rdma_recvfrom()
  svcrdma: Reduce Receive doorbell rate
  svcrdma: Deprecate stat variables that are no longer used
  svcrdma: Restore read and write stats
  svcrdma: Convert rdma_stat_sq_starve to a per-CPU counter
  svcrdma: Convert rdma_stat_recv to a per-CPU counter
  svcrdma: Refactor svc_rdma_init() and svc_rdma_clean_up()
  ...
parents 681e2abe 428a23d2
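The "Convert NFSD stats to per-CPU counters" work in the diffs below repeatedly applies one pattern: a plain integer counter that was bumped on the request hot path (sometimes with only a per-bucket lock, sometimes with none) becomes a struct percpu_counter that is incremented locklessly and only summed when a stats file is read. A minimal sketch of that pattern using the generic <linux/percpu_counter.h> API follows; the demo_* names here are illustrative only, while the array-based nfsd_percpu_counters_init/reset/destroy wrappers called in the hunks below are part of this series itself.

#include <linux/percpu_counter.h>

static struct percpu_counter demo_hits;	/* illustrative counter, not part of the series */

static int demo_stats_init(void)
{
	/* allocates the per-CPU storage; may sleep (GFP_KERNEL) */
	return percpu_counter_init(&demo_hits, 0, GFP_KERNEL);
}

static void demo_fast_path(void)
{
	/* cheap per-CPU increment on the request hot path */
	percpu_counter_inc(&demo_hits);
}

static s64 demo_report(void)
{
	/* fold the per-CPU deltas only when a stats file is read */
	return percpu_counter_sum_positive(&demo_hits);
}

static void demo_stats_exit(void)
{
	percpu_counter_destroy(&demo_hits);
}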
......@@ -333,6 +333,10 @@ config NFS_COMMON
depends on NFSD || NFS_FS || LOCKD
default y
config NFS_V4_2_SSC_HELPER
tristate
default y if NFS_V4=y || NFS_FS=y
source "net/sunrpc/Kconfig"
source "fs/ceph/Kconfig"
source "fs/cifs/Kconfig"
......
......@@ -512,6 +512,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "NULL",
},
[NLMPROC_TEST] = {
.pc_func = nlm4svc_proc_test,
......@@ -520,6 +521,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
.pc_name = "TEST",
},
[NLMPROC_LOCK] = {
.pc_func = nlm4svc_proc_lock,
......@@ -528,6 +530,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "LOCK",
},
[NLMPROC_CANCEL] = {
.pc_func = nlm4svc_proc_cancel,
......@@ -536,6 +539,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "CANCEL",
},
[NLMPROC_UNLOCK] = {
.pc_func = nlm4svc_proc_unlock,
......@@ -544,6 +548,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "UNLOCK",
},
[NLMPROC_GRANTED] = {
.pc_func = nlm4svc_proc_granted,
......@@ -552,6 +557,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "GRANTED",
},
[NLMPROC_TEST_MSG] = {
.pc_func = nlm4svc_proc_test_msg,
......@@ -560,6 +566,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_MSG",
},
[NLMPROC_LOCK_MSG] = {
.pc_func = nlm4svc_proc_lock_msg,
......@@ -568,6 +575,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_MSG",
},
[NLMPROC_CANCEL_MSG] = {
.pc_func = nlm4svc_proc_cancel_msg,
......@@ -576,6 +584,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_MSG",
},
[NLMPROC_UNLOCK_MSG] = {
.pc_func = nlm4svc_proc_unlock_msg,
......@@ -584,6 +593,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_MSG",
},
[NLMPROC_GRANTED_MSG] = {
.pc_func = nlm4svc_proc_granted_msg,
......@@ -592,6 +602,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_MSG",
},
[NLMPROC_TEST_RES] = {
.pc_func = nlm4svc_proc_null,
......@@ -600,6 +611,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_RES",
},
[NLMPROC_LOCK_RES] = {
.pc_func = nlm4svc_proc_null,
......@@ -608,6 +620,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_RES",
},
[NLMPROC_CANCEL_RES] = {
.pc_func = nlm4svc_proc_null,
......@@ -616,6 +629,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_RES",
},
[NLMPROC_UNLOCK_RES] = {
.pc_func = nlm4svc_proc_null,
......@@ -624,6 +638,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_RES",
},
[NLMPROC_GRANTED_RES] = {
.pc_func = nlm4svc_proc_granted_res,
......@@ -632,6 +647,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_RES",
},
[NLMPROC_NSM_NOTIFY] = {
.pc_func = nlm4svc_proc_sm_notify,
......@@ -640,6 +656,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "SM_NOTIFY",
},
[17] = {
.pc_func = nlm4svc_proc_unused,
......@@ -648,6 +665,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
},
[18] = {
.pc_func = nlm4svc_proc_unused,
......@@ -656,6 +674,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
},
[19] = {
.pc_func = nlm4svc_proc_unused,
......@@ -664,6 +683,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "UNUSED",
},
[NLMPROC_SHARE] = {
.pc_func = nlm4svc_proc_share,
......@@ -672,6 +692,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "SHARE",
},
[NLMPROC_UNSHARE] = {
.pc_func = nlm4svc_proc_unshare,
......@@ -680,6 +701,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "UNSHARE",
},
[NLMPROC_NM_LOCK] = {
.pc_func = nlm4svc_proc_nm_lock,
......@@ -688,6 +710,7 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "NM_LOCK",
},
[NLMPROC_FREE_ALL] = {
.pc_func = nlm4svc_proc_free_all,
......@@ -696,5 +719,6 @@ const struct svc_procedure nlmsvc_procedures4[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "FREE_ALL",
},
};
......@@ -554,6 +554,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "NULL",
},
[NLMPROC_TEST] = {
.pc_func = nlmsvc_proc_test,
......@@ -562,6 +563,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+2+No+Rg,
.pc_name = "TEST",
},
[NLMPROC_LOCK] = {
.pc_func = nlmsvc_proc_lock,
......@@ -570,6 +572,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "LOCK",
},
[NLMPROC_CANCEL] = {
.pc_func = nlmsvc_proc_cancel,
......@@ -578,6 +581,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "CANCEL",
},
[NLMPROC_UNLOCK] = {
.pc_func = nlmsvc_proc_unlock,
......@@ -586,6 +590,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "UNLOCK",
},
[NLMPROC_GRANTED] = {
.pc_func = nlmsvc_proc_granted,
......@@ -594,6 +599,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "GRANTED",
},
[NLMPROC_TEST_MSG] = {
.pc_func = nlmsvc_proc_test_msg,
......@@ -602,6 +608,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_MSG",
},
[NLMPROC_LOCK_MSG] = {
.pc_func = nlmsvc_proc_lock_msg,
......@@ -610,6 +617,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_MSG",
},
[NLMPROC_CANCEL_MSG] = {
.pc_func = nlmsvc_proc_cancel_msg,
......@@ -618,6 +626,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_MSG",
},
[NLMPROC_UNLOCK_MSG] = {
.pc_func = nlmsvc_proc_unlock_msg,
......@@ -626,6 +635,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_MSG",
},
[NLMPROC_GRANTED_MSG] = {
.pc_func = nlmsvc_proc_granted_msg,
......@@ -634,6 +644,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_MSG",
},
[NLMPROC_TEST_RES] = {
.pc_func = nlmsvc_proc_null,
......@@ -642,6 +653,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "TEST_RES",
},
[NLMPROC_LOCK_RES] = {
.pc_func = nlmsvc_proc_null,
......@@ -650,6 +662,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "LOCK_RES",
},
[NLMPROC_CANCEL_RES] = {
.pc_func = nlmsvc_proc_null,
......@@ -658,6 +671,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "CANCEL_RES",
},
[NLMPROC_UNLOCK_RES] = {
.pc_func = nlmsvc_proc_null,
......@@ -666,6 +680,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNLOCK_RES",
},
[NLMPROC_GRANTED_RES] = {
.pc_func = nlmsvc_proc_granted_res,
......@@ -674,6 +689,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_res),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "GRANTED_RES",
},
[NLMPROC_NSM_NOTIFY] = {
.pc_func = nlmsvc_proc_sm_notify,
......@@ -682,6 +698,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_reboot),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "SM_NOTIFY",
},
[17] = {
.pc_func = nlmsvc_proc_unused,
......@@ -690,6 +707,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
},
[18] = {
.pc_func = nlmsvc_proc_unused,
......@@ -698,6 +716,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
},
[19] = {
.pc_func = nlmsvc_proc_unused,
......@@ -706,6 +725,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_void),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = St,
.pc_name = "UNUSED",
},
[NLMPROC_SHARE] = {
.pc_func = nlmsvc_proc_share,
......@@ -714,6 +734,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "SHARE",
},
[NLMPROC_UNSHARE] = {
.pc_func = nlmsvc_proc_unshare,
......@@ -722,6 +743,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St+1,
.pc_name = "UNSHARE",
},
[NLMPROC_NM_LOCK] = {
.pc_func = nlmsvc_proc_nm_lock,
......@@ -730,6 +752,7 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_res),
.pc_xdrressize = Ck+St,
.pc_name = "NM_LOCK",
},
[NLMPROC_FREE_ALL] = {
.pc_func = nlmsvc_proc_free_all,
......@@ -738,5 +761,6 @@ const struct svc_procedure nlmsvc_procedures[24] = {
.pc_argsize = sizeof(struct nlm_args),
.pc_ressize = sizeof(struct nlm_void),
.pc_xdrressize = 0,
.pc_name = "FREE_ALL",
},
};
......@@ -1060,6 +1060,7 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
.pc_decode = nfs4_decode_void,
.pc_encode = nfs4_encode_void,
.pc_xdrressize = 1,
.pc_name = "NULL",
},
[CB_COMPOUND] = {
.pc_func = nfs4_callback_compound,
......@@ -1067,6 +1068,7 @@ static const struct svc_procedure nfs4_callback_procedures1[] = {
.pc_argsize = 256,
.pc_ressize = 256,
.pc_xdrressize = NFS4_CALLBACK_BUFSIZE,
.pc_name = "COMPOUND",
}
};
......
......@@ -167,10 +167,28 @@ nfs_get_parent(struct dentry *dentry)
return parent;
}
static u64 nfs_fetch_iversion(struct inode *inode)
{
struct nfs_server *server = NFS_SERVER(inode);
/* Is this the right call?: */
nfs_revalidate_inode(server, inode);
/*
* Also, note we're ignoring any returned error. That seems to be
* the practice for cache consistency information elsewhere in
* the server, but I'm not sure why.
*/
if (server->nfs_client->rpc_ops->version >= 4)
return inode_peek_iversion_raw(inode);
else
return time_to_chattr(&inode->i_ctime);
}
const struct export_operations nfs_export_ops = {
.encode_fh = nfs_encode_fh,
.fh_to_dentry = nfs_fh_to_dentry,
.get_parent = nfs_get_parent,
.fetch_iversion = nfs_fetch_iversion,
.flags = EXPORT_OP_NOWCC|EXPORT_OP_NOSUBTREECHK|
EXPORT_OP_CLOSE_BEFORE_UNLINK|EXPORT_OP_REMOTE_FS|
EXPORT_OP_NOATOMIC_ATTR,
......
......@@ -420,7 +420,9 @@ static const struct nfs4_ssc_client_ops nfs4_ssc_clnt_ops_tbl = {
*/
void nfs42_ssc_register_ops(void)
{
#ifdef CONFIG_NFSD_V4
nfs42_ssc_register(&nfs4_ssc_clnt_ops_tbl);
#endif
}
/**
......@@ -431,7 +433,9 @@ void nfs42_ssc_register_ops(void)
*/
void nfs42_ssc_unregister_ops(void)
{
#ifdef CONFIG_NFSD_V4
nfs42_ssc_unregister(&nfs4_ssc_clnt_ops_tbl);
#endif
}
#endif /* CONFIG_NFS_V4_2 */
......
......@@ -86,9 +86,11 @@ const struct super_operations nfs_sops = {
};
EXPORT_SYMBOL_GPL(nfs_sops);
#ifdef CONFIG_NFS_V4_2
static const struct nfs_ssc_client_ops nfs_ssc_clnt_ops_tbl = {
.sco_sb_deactive = nfs_sb_deactive,
};
#endif
#if IS_ENABLED(CONFIG_NFS_V4)
static int __init register_nfs4_fs(void)
......@@ -111,15 +113,21 @@ static void unregister_nfs4_fs(void)
}
#endif
#ifdef CONFIG_NFS_V4_2
static void nfs_ssc_register_ops(void)
{
#ifdef CONFIG_NFSD_V4
nfs_ssc_register(&nfs_ssc_clnt_ops_tbl);
#endif
}
static void nfs_ssc_unregister_ops(void)
{
#ifdef CONFIG_NFSD_V4
nfs_ssc_unregister(&nfs_ssc_clnt_ops_tbl);
#endif
}
#endif /* CONFIG_NFS_V4_2 */
static struct shrinker acl_shrinker = {
.count_objects = nfs_access_cache_count,
......@@ -148,7 +156,9 @@ int __init register_nfs_fs(void)
ret = register_shrinker(&acl_shrinker);
if (ret < 0)
goto error_3;
#ifdef CONFIG_NFS_V4_2
nfs_ssc_register_ops();
#endif
return 0;
error_3:
nfs_unregister_sysctl();
......@@ -168,7 +178,9 @@ void __exit unregister_nfs_fs(void)
unregister_shrinker(&acl_shrinker);
nfs_unregister_sysctl();
unregister_nfs4_fs();
#ifdef CONFIG_NFS_V4_2
nfs_ssc_unregister_ops();
#endif
unregister_filesystem(&nfs_fs_type);
}
......
......@@ -7,4 +7,4 @@ obj-$(CONFIG_NFS_ACL_SUPPORT) += nfs_acl.o
nfs_acl-objs := nfsacl.o
obj-$(CONFIG_GRACE_PERIOD) += grace.o
obj-$(CONFIG_GRACE_PERIOD) += nfs_ssc.o
obj-$(CONFIG_NFS_V4_2_SSC_HELPER) += nfs_ssc.o
// SPDX-License-Identifier: GPL-2.0-only
/*
* fs/nfs_common/nfs_ssc_comm.c
*
* Helper for knfsd's SSC to access ops in NFS client modules
*
* Author: Dai Ngo <dai.ngo@oracle.com>
......
......@@ -295,3 +295,55 @@ int nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
nfsacl_desc.desc.array_len;
}
EXPORT_SYMBOL_GPL(nfsacl_decode);
/**
* nfs_stream_decode_acl - Decode an NFSv3 ACL
*
* @xdr: an xdr_stream positioned at an encoded ACL
* @aclcnt: OUT: count of ACEs in decoded posix_acl
* @pacl: OUT: a dynamically-allocated buffer containing the decoded posix_acl
*
* Return values:
* %false: The encoded ACL is not valid
* %true: @pacl contains a decoded ACL, and @xdr is advanced
*
* On a successful return, caller must release *pacl using posix_acl_release().
*/
bool nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
struct posix_acl **pacl)
{
const size_t elem_size = XDR_UNIT * 3;
struct nfsacl_decode_desc nfsacl_desc = {
.desc = {
.elem_size = elem_size,
.xcode = pacl ? xdr_nfsace_decode : NULL,
},
};
unsigned int base;
u32 entries;
if (xdr_stream_decode_u32(xdr, &entries) < 0)
return false;
if (entries > NFS_ACL_MAX_ENTRIES)
return false;
base = xdr_stream_pos(xdr);
if (!xdr_inline_decode(xdr, XDR_UNIT + elem_size * entries))
return false;
nfsacl_desc.desc.array_maxlen = entries;
if (xdr_decode_array2(xdr->buf, base, &nfsacl_desc.desc))
return false;
if (pacl) {
if (entries != nfsacl_desc.desc.array_len ||
posix_acl_from_nfsacl(nfsacl_desc.acl) != 0) {
posix_acl_release(nfsacl_desc.acl);
return false;
}
*pacl = nfsacl_desc.acl;
}
if (aclcnt)
*aclcnt = entries;
return true;
}
EXPORT_SYMBOL_GPL(nfs_stream_decode_acl);
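A hedged usage sketch for the new helper, emphasizing the posix_acl_release() requirement stated in the kernel-doc above. demo_consume_acl() and its pr_debug message are illustrative only; the real callers are the converted nfsd ACL argument decoders later in this diff.

/* Illustrative caller: decode one NFSv3 ACL from an xdr_stream that is
 * positioned at an encoded ACL, report the ACE count, and drop the
 * reference as the kernel-doc requires. */
static bool demo_consume_acl(struct xdr_stream *xdr)
{
	struct posix_acl *acl = NULL;
	unsigned int count;

	if (!nfs_stream_decode_acl(xdr, &count, &acl))
		return false;

	pr_debug("decoded %u ACEs\n", count);
	posix_acl_release(acl);		/* caller owns the reference on success */
	return true;
}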
......@@ -76,6 +76,7 @@ config NFSD_V4
select CRYPTO_MD5
select CRYPTO_SHA256
select GRACE_PERIOD
select NFS_V4_2_SSC_HELPER if NFS_V4_2
help
This option enables support in your system's NFS server for
version 4 of the NFS protocol (RFC 3530).
......
......@@ -331,12 +331,29 @@ static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
fsloc->locations = NULL;
}
static int export_stats_init(struct export_stats *stats)
{
stats->start_time = ktime_get_seconds();
return nfsd_percpu_counters_init(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void export_stats_reset(struct export_stats *stats)
{
nfsd_percpu_counters_reset(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void export_stats_destroy(struct export_stats *stats)
{
nfsd_percpu_counters_destroy(stats->counter, EXP_STATS_COUNTERS_NUM);
}
static void svc_export_put(struct kref *ref)
{
struct svc_export *exp = container_of(ref, struct svc_export, h.ref);
path_put(&exp->ex_path);
auth_domain_put(exp->ex_client);
nfsd4_fslocs_free(&exp->ex_fslocs);
export_stats_destroy(&exp->ex_stats);
kfree(exp->ex_uuid);
kfree_rcu(exp, ex_rcu);
}
......@@ -692,22 +709,47 @@ static void exp_flags(struct seq_file *m, int flag, int fsid,
kuid_t anonu, kgid_t anong, struct nfsd4_fs_locations *fslocs);
static void show_secinfo(struct seq_file *m, struct svc_export *exp);
static int is_export_stats_file(struct seq_file *m)
{
/*
* The export_stats file uses the same ops as the exports file.
* We use the file's name to determine the reported info per export.
* There is no rename in nfsdfs, so d_name.name is stable.
*/
return !strcmp(m->file->f_path.dentry->d_name.name, "export_stats");
}
static int svc_export_show(struct seq_file *m,
struct cache_detail *cd,
struct cache_head *h)
{
struct svc_export *exp ;
struct svc_export *exp;
bool export_stats = is_export_stats_file(m);
if (h ==NULL) {
seq_puts(m, "#path domain(flags)\n");
if (h == NULL) {
if (export_stats)
seq_puts(m, "#path domain start-time\n#\tstats\n");
else
seq_puts(m, "#path domain(flags)\n");
return 0;
}
exp = container_of(h, struct svc_export, h);
seq_path(m, &exp->ex_path, " \t\n\\");
seq_putc(m, '\t');
seq_escape(m, exp->ex_client->name, " \t\n\\");
if (export_stats) {
seq_printf(m, "\t%lld\n", exp->ex_stats.start_time);
seq_printf(m, "\tfh_stale: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_FH_STALE]));
seq_printf(m, "\tio_read: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_READ]));
seq_printf(m, "\tio_write: %lld\n",
percpu_counter_sum_positive(&exp->ex_stats.counter[EXP_STATS_IO_WRITE]));
seq_putc(m, '\n');
return 0;
}
seq_putc(m, '(');
if (test_bit(CACHE_VALID, &h->flags) &&
if (test_bit(CACHE_VALID, &h->flags) &&
!test_bit(CACHE_NEGATIVE, &h->flags)) {
exp_flags(m, exp->ex_flags, exp->ex_fsid,
exp->ex_anon_uid, exp->ex_anon_gid, &exp->ex_fslocs);
......@@ -748,6 +790,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
new->ex_layout_types = 0;
new->ex_uuid = NULL;
new->cd = item->cd;
export_stats_reset(&new->ex_stats);
}
static void export_update(struct cache_head *cnew, struct cache_head *citem)
......@@ -780,10 +823,15 @@ static void export_update(struct cache_head *cnew, struct cache_head *citem)
static struct cache_head *svc_export_alloc(void)
{
struct svc_export *i = kmalloc(sizeof(*i), GFP_KERNEL);
if (i)
return &i->h;
else
if (!i)
return NULL;
if (export_stats_init(&i->ex_stats)) {
kfree(i);
return NULL;
}
return &i->h;
}
static const struct cache_detail svc_export_cache_template = {
......@@ -1245,10 +1293,14 @@ static int e_show(struct seq_file *m, void *p)
struct cache_head *cp = p;
struct svc_export *exp = container_of(cp, struct svc_export, h);
struct cache_detail *cd = m->private;
bool export_stats = is_export_stats_file(m);
if (p == SEQ_START_TOKEN) {
seq_puts(m, "# Version 1.1\n");
seq_puts(m, "# Path Client(Flags) # IPs\n");
if (export_stats)
seq_puts(m, "# Path Client Start-time\n#\tStats\n");
else
seq_puts(m, "# Path Client(Flags) # IPs\n");
return 0;
}
......
......@@ -6,6 +6,7 @@
#define NFSD_EXPORT_H
#include <linux/sunrpc/cache.h>
#include <linux/percpu_counter.h>
#include <uapi/linux/nfsd/export.h>
#include <linux/nfs4.h>
......@@ -46,6 +47,19 @@ struct exp_flavor_info {
u32 flags;
};
/* Per-export stats */
enum {
EXP_STATS_FH_STALE,
EXP_STATS_IO_READ,
EXP_STATS_IO_WRITE,
EXP_STATS_COUNTERS_NUM
};
struct export_stats {
time64_t start_time;
struct percpu_counter counter[EXP_STATS_COUNTERS_NUM];
};
struct svc_export {
struct cache_head h;
struct auth_domain * ex_client;
......@@ -62,6 +76,7 @@ struct svc_export {
struct nfsd4_deviceid_map *ex_devid_map;
struct cache_detail *cd;
struct rcu_head ex_rcu;
struct export_stats ex_stats;
};
/* an "export key" (expkey) maps a filehandlefragement to an
......
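The EXP_STATS_* counters added to struct svc_export above are bumped from the filehandle and I/O paths through small helpers kept in fs/nfsd/stats.h, which this excerpt does not show. An assumed sketch of one such helper, matching the nfsd_stats_fh_stale_inc(exp) call that appears in the fh_verify() hunk further down; the demo_ name is illustrative.

/* Assumed shape of a per-export stat helper; the authoritative version
 * lives in fs/nfsd/stats.h. @exp may be NULL when fh_verify() fails
 * before an export has been resolved. */
static inline void demo_stats_fh_stale_inc(struct svc_export *exp)
{
	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
	if (exp)
		percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
}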
......@@ -10,6 +10,7 @@
#include <net/net_namespace.h>
#include <net/netns/generic.h>
#include <linux/percpu_counter.h>
/* Hash tables for nfs4_clientid state */
#define CLIENT_HASH_BITS 4
......@@ -21,6 +22,14 @@
struct cld_net;
struct nfsd4_client_tracking_ops;
enum {
/* cache misses due only to checksum comparison failures */
NFSD_NET_PAYLOAD_MISSES,
/* amount of memory (in bytes) currently consumed by the DRC */
NFSD_NET_DRC_MEM_USAGE,
NFSD_NET_COUNTERS_NUM
};
/*
* Represents a nfsd "container". With respect to nfsv4 state tracking, the
* fields of interest are the *_id_hashtbls and the *_name_tree. These track
......@@ -149,20 +158,16 @@ struct nfsd_net {
/*
* Stats and other tracking of on the duplicate reply cache.
* These fields and the "rc" fields in nfsdstats are modified
* with only the per-bucket cache lock, which isn't really safe
* and should be fixed if we want the statistics to be
* completely accurate.
* The longest_chain* fields are modified with only the per-bucket
* cache lock, which isn't really safe and should be fixed if we want
* these statistics to be completely accurate.
*/
/* total number of entries */
atomic_t num_drc_entries;
/* cache misses due only to checksum comparison failures */
unsigned int payload_misses;
/* amount of memory (in bytes) currently consumed by the DRC */
unsigned int drc_mem_usage;
/* Per-netns stats counters */
struct percpu_counter counter[NFSD_NET_COUNTERS_NUM];
/* longest hash chain seen */
unsigned int longest_chain;
......
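Likewise, payload_misses and drc_mem_usage become entries in the per-netns counter[] array declared above, and the duplicate reply cache code below updates them through helpers that this excerpt does not include. An assumed sketch, mirroring the nfsd_stats_payload_misses_inc() and nfsd_stats_drc_mem_usage_add()/_sub() calls in fs/nfsd/nfscache.c; the demo_ names are illustrative.

/* Assumed shape of the per-netns DRC stat helpers; the real ones are
 * expected in fs/nfsd/stats.h. */
static inline void demo_stats_payload_misses_inc(struct nfsd_net *nn)
{
	percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
}

static inline void demo_stats_drc_mem_usage_add(struct nfsd_net *nn,
						unsigned int amount)
{
	percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}

static inline void demo_stats_drc_mem_usage_sub(struct nfsd_net *nn,
						unsigned int amount)
{
	percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}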
......@@ -188,63 +188,49 @@ static __be32 nfsacld_proc_access(struct svc_rqst *rqstp)
static int nfsaclsvc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_getaclargs *argp = rqstp->rq_argp;
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
if (!svcxdr_decode_fhandle(xdr, &argp->fh))
return 0;
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return 0;
argp->mask = ntohl(*p); p++;
return xdr_argsize_check(rqstp, p);
return 1;
}
static int nfsaclsvc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
unsigned int base;
int n;
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
if (!svcxdr_decode_fhandle(xdr, &argp->fh))
return 0;
argp->mask = ntohl(*p++);
if (argp->mask & ~NFS_ACL_MASK ||
!xdr_argsize_check(rqstp, p))
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return 0;
base = (char *)p - (char *)head->iov_base;
n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
(argp->mask & NFS_ACL) ?
&argp->acl_access : NULL);
if (n > 0)
n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
(argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL);
return (n > 0);
}
static int nfsaclsvc_decode_fhandleargs(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd_fhandle *argp = rqstp->rq_argp;
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
if (argp->mask & ~NFS_ACL_MASK)
return 0;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
&argp->acl_access : NULL))
return 0;
return xdr_argsize_check(rqstp, p);
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL))
return 0;
return 1;
}
static int nfsaclsvc_decode_accessargs(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_accessargs *argp = rqstp->rq_argp;
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_accessargs *args = rqstp->rq_argp;
p = nfs2svc_decode_fh(p, &argp->fh);
if (!p)
if (!svcxdr_decode_fhandle(xdr, &args->fh))
return 0;
if (xdr_stream_decode_u32(xdr, &args->access) < 0)
return 0;
argp->access = ntohl(*p++);
return xdr_argsize_check(rqstp, p);
return 1;
}
/*
......@@ -371,6 +357,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
.pc_name = "NULL",
},
[ACLPROC2_GETACL] = {
.pc_func = nfsacld_proc_getacl,
......@@ -381,6 +368,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
.pc_name = "GETACL",
},
[ACLPROC2_SETACL] = {
.pc_func = nfsacld_proc_setacl,
......@@ -391,16 +379,18 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "SETACL",
},
[ACLPROC2_GETATTR] = {
.pc_func = nfsacld_proc_getattr,
.pc_decode = nfsaclsvc_decode_fhandleargs,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfsaclsvc_encode_attrstatres,
.pc_release = nfsaclsvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "GETATTR",
},
[ACLPROC2_ACCESS] = {
.pc_func = nfsacld_proc_access,
......@@ -411,6 +401,7 @@ static const struct svc_procedure nfsd_acl_procedures2[5] = {
.pc_ressize = sizeof(struct nfsd3_accessres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1,
.pc_name = "SETATTR",
},
};
......
......@@ -124,43 +124,39 @@ static __be32 nfsd3_proc_setacl(struct svc_rqst *rqstp)
/*
* XDR decode functions
*/
static int nfs3svc_decode_getaclargs(struct svc_rqst *rqstp, __be32 *p)
{
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_getaclargs *args = rqstp->rq_argp;
p = nfs3svc_decode_fh(p, &args->fh);
if (!p)
if (!svcxdr_decode_nfs_fh3(xdr, &args->fh))
return 0;
if (xdr_stream_decode_u32(xdr, &args->mask) < 0)
return 0;
args->mask = ntohl(*p); p++;
return xdr_argsize_check(rqstp, p);
return 1;
}
static int nfs3svc_decode_setaclargs(struct svc_rqst *rqstp, __be32 *p)
{
struct nfsd3_setaclargs *args = rqstp->rq_argp;
struct kvec *head = rqstp->rq_arg.head;
unsigned int base;
int n;
struct xdr_stream *xdr = &rqstp->rq_arg_stream;
struct nfsd3_setaclargs *argp = rqstp->rq_argp;
p = nfs3svc_decode_fh(p, &args->fh);
if (!p)
if (!svcxdr_decode_nfs_fh3(xdr, &argp->fh))
return 0;
args->mask = ntohl(*p++);
if (args->mask & ~NFS_ACL_MASK ||
!xdr_argsize_check(rqstp, p))
if (xdr_stream_decode_u32(xdr, &argp->mask) < 0)
return 0;
if (argp->mask & ~NFS_ACL_MASK)
return 0;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_ACL) ?
&argp->acl_access : NULL))
return 0;
if (!nfs_stream_decode_acl(xdr, NULL, (argp->mask & NFS_DFACL) ?
&argp->acl_default : NULL))
return 0;
base = (char *)p - (char *)head->iov_base;
n = nfsacl_decode(&rqstp->rq_arg, base, NULL,
(args->mask & NFS_ACL) ?
&args->acl_access : NULL);
if (n > 0)
n = nfsacl_decode(&rqstp->rq_arg, base + n, NULL,
(args->mask & NFS_DFACL) ?
&args->acl_default : NULL);
return (n > 0);
return 1;
}
/*
......@@ -251,6 +247,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST,
.pc_name = "NULL",
},
[ACLPROC3_GETACL] = {
.pc_func = nfsd3_proc_getacl,
......@@ -261,6 +258,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd3_getaclres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+2*(1+ACL),
.pc_name = "GETACL",
},
[ACLPROC3_SETACL] = {
.pc_func = nfsd3_proc_setacl,
......@@ -271,6 +269,7 @@ static const struct svc_procedure nfsd_acl_procedures3[3] = {
.pc_ressize = sizeof(struct nfsd3_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+pAT,
.pc_name = "SETACL",
},
};
......
......@@ -378,8 +378,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
* Before RECLAIM_COMPLETE done, server should deny new lock
*/
if (nfsd4_has_session(cstate) &&
!test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
&cstate->session->se_client->cl_flags) &&
!test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags) &&
open->op_claim_type != NFS4_OPEN_CLAIM_PREVIOUS)
return nfserr_grace;
......@@ -428,8 +427,7 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
goto out;
break;
case NFS4_OPEN_CLAIM_PREVIOUS:
status = nfs4_check_open_reclaim(&open->op_clientid,
cstate, nn);
status = nfs4_check_open_reclaim(cstate->clp);
if (status)
goto out;
open->op_openowner->oo_flags |= NFS4_OO_CONFIRMED;
......@@ -1888,7 +1886,7 @@ nfsd4_getdeviceinfo(struct svc_rqst *rqstp,
nfserr = nfs_ok;
if (gdp->gd_maxcount != 0) {
nfserr = ops->proc_getdeviceinfo(exp->ex_path.mnt->mnt_sb,
rqstp, cstate->session->se_client, gdp);
rqstp, cstate->clp, gdp);
}
gdp->gd_notify_types &= ops->notify_types;
......@@ -2174,7 +2172,7 @@ nfsd4_proc_null(struct svc_rqst *rqstp)
static inline void nfsd4_increment_op_stats(u32 opnum)
{
if (opnum >= FIRST_NFS4_OP && opnum <= LAST_NFS4_OP)
nfsdstats.nfs4_opcount[opnum]++;
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
}
static const struct nfsd4_operation nfsd4_ops[];
......@@ -3305,6 +3303,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 1,
.pc_name = "NULL",
},
[NFSPROC4_COMPOUND] = {
.pc_func = nfsd4_proc_compound,
......@@ -3315,6 +3314,7 @@ static const struct svc_procedure nfsd_procedures4[2] = {
.pc_release = nfsd4_release_compoundargs,
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = NFSD_BUFSIZE/4,
.pc_name = "COMPOUND",
},
};
......
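The nfsd4_increment_op_stats() hunk above indexes the global per-CPU counter array by NFSv4 operation number via NFSD_STATS_NFS4_OP(). The enum behind that macro lives in fs/nfsd/stats.h and is not part of this excerpt; a hedged sketch of the assumed layout follows, with DEMO_ names standing in for the real identifiers.

/* Assumed layout of the global nfsd stats counter indices; one slot
 * per NFSv4 operation is reserved after the fixed counters. */
enum {
	DEMO_STATS_RC_HITS,
	DEMO_STATS_RC_MISSES,
	DEMO_STATS_RC_NOCACHE,
	DEMO_STATS_FH_STALE,
	DEMO_STATS_IO_READ,
	DEMO_STATS_IO_WRITE,
	DEMO_STATS_FIRST_NFS4_OP,
	DEMO_STATS_LAST_NFS4_OP = DEMO_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
	DEMO_STATS_COUNTERS_NUM
};
#define DEMO_STATS_NFS4_OP(op)	(DEMO_STATS_FIRST_NFS4_OP + (op))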
......@@ -3891,6 +3891,7 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
struct nfsd4_compound_state *cstate, union nfsd4_op_u *u)
{
struct nfsd4_reclaim_complete *rc = &u->reclaim_complete;
struct nfs4_client *clp = cstate->clp;
__be32 status = 0;
if (rc->rca_one_fs) {
......@@ -3904,12 +3905,11 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
}
status = nfserr_complete_already;
if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE,
&cstate->session->se_client->cl_flags))
if (test_and_set_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
goto out;
status = nfserr_stale_clientid;
if (is_client_expired(cstate->session->se_client))
if (is_client_expired(clp))
/*
* The following error isn't really legal.
* But we only get here if the client just explicitly
......@@ -3920,8 +3920,8 @@ nfsd4_reclaim_complete(struct svc_rqst *rqstp,
goto out;
status = nfs_ok;
nfsd4_client_record_create(cstate->session->se_client);
inc_reclaim_complete(cstate->session->se_client);
nfsd4_client_record_create(clp);
inc_reclaim_complete(clp);
out:
return status;
}
......@@ -4633,40 +4633,37 @@ static __be32 nfsd4_check_seqid(struct nfsd4_compound_state *cstate, struct nfs4
return nfserr_bad_seqid;
}
static __be32 lookup_clientid(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn,
bool sessions)
static struct nfs4_client *lookup_clientid(clientid_t *clid, bool sessions,
struct nfsd_net *nn)
{
struct nfs4_client *found;
spin_lock(&nn->client_lock);
found = find_confirmed_client(clid, sessions, nn);
if (found)
atomic_inc(&found->cl_rpc_users);
spin_unlock(&nn->client_lock);
return found;
}
static __be32 set_client(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn)
{
if (cstate->clp) {
found = cstate->clp;
if (!same_clid(&found->cl_clientid, clid))
if (!same_clid(&cstate->clp->cl_clientid, clid))
return nfserr_stale_clientid;
return nfs_ok;
}
if (STALE_CLIENTID(clid, nn))
return nfserr_stale_clientid;
/*
* For v4.1+ we get the client in the SEQUENCE op. If we don't have one
* cached already then we know this is for is for v4.0 and "sessions"
* will be false.
* We're in the 4.0 case (otherwise the SEQUENCE op would have
* set cstate->clp), so session = false:
*/
WARN_ON_ONCE(cstate->session);
spin_lock(&nn->client_lock);
found = find_confirmed_client(clid, sessions, nn);
if (!found) {
spin_unlock(&nn->client_lock);
cstate->clp = lookup_clientid(clid, false, nn);
if (!cstate->clp)
return nfserr_expired;
}
atomic_inc(&found->cl_rpc_users);
spin_unlock(&nn->client_lock);
/* Cache the nfs4_client in cstate! */
cstate->clp = found;
return nfs_ok;
}
......@@ -4680,8 +4677,6 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
struct nfs4_openowner *oo = NULL;
__be32 status;
if (STALE_CLIENTID(&open->op_clientid, nn))
return nfserr_stale_clientid;
/*
* In case we need it later, after we've already created the
* file and don't want to risk a further failure:
......@@ -4690,7 +4685,7 @@ nfsd4_process_open1(struct nfsd4_compound_state *cstate,
if (open->op_file == NULL)
return nfserr_jukebox;
status = lookup_clientid(clientid, cstate, nn, false);
status = set_client(clientid, cstate, nn);
if (status)
return status;
clp = cstate->clp;
......@@ -5300,17 +5295,14 @@ nfsd4_renew(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
trace_nfsd_clid_renew(clid);
status = lookup_clientid(clid, cstate, nn, false);
status = set_client(clid, cstate, nn);
if (status)
goto out;
return status;
clp = cstate->clp;
status = nfserr_cb_path_down;
if (!list_empty(&clp->cl_delegations)
&& clp->cl_cb_state != NFSD4_CB_UP)
goto out;
status = nfs_ok;
out:
return status;
return nfserr_cb_path_down;
return nfs_ok;
}
void
......@@ -5686,8 +5678,7 @@ nfsd4_lookup_stateid(struct nfsd4_compound_state *cstate,
if (ZERO_STATEID(stateid) || ONE_STATEID(stateid) ||
CLOSE_STATEID(stateid))
return nfserr_bad_stateid;
status = lookup_clientid(&stateid->si_opaque.so_clid, cstate, nn,
false);
status = set_client(&stateid->si_opaque.so_clid, cstate, nn);
if (status == nfserr_stale_clientid) {
if (cstate->session)
return nfserr_bad_stateid;
......@@ -5818,21 +5809,27 @@ static __be32 find_cpntf_state(struct nfsd_net *nn, stateid_t *st,
{
__be32 status;
struct nfs4_cpntf_state *cps = NULL;
struct nfsd4_compound_state cstate;
struct nfs4_client *found;
status = manage_cpntf_state(nn, st, NULL, &cps);
if (status)
return status;
cps->cpntf_time = ktime_get_boottime_seconds();
memset(&cstate, 0, sizeof(cstate));
status = lookup_clientid(&cps->cp_p_clid, &cstate, nn, true);
if (status)
status = nfserr_expired;
found = lookup_clientid(&cps->cp_p_clid, true, nn);
if (!found)
goto out;
status = nfsd4_lookup_stateid(&cstate, &cps->cp_p_stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID,
stid, nn);
put_client_renew(cstate.clp);
*stid = find_stateid_by_type(found, &cps->cp_p_stateid,
NFS4_DELEG_STID|NFS4_OPEN_STID|NFS4_LOCK_STID);
if (*stid)
status = nfs_ok;
else
status = nfserr_bad_stateid;
put_client_renew(found);
out:
nfs4_put_cpntf_state(nn, cps);
return status;
......@@ -5921,7 +5918,7 @@ nfsd4_test_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
{
struct nfsd4_test_stateid *test_stateid = &u->test_stateid;
struct nfsd4_test_stateid_id *stateid;
struct nfs4_client *cl = cstate->session->se_client;
struct nfs4_client *cl = cstate->clp;
list_for_each_entry(stateid, &test_stateid->ts_stateid_list, ts_id_list)
stateid->ts_id_status =
......@@ -5967,7 +5964,7 @@ nfsd4_free_stateid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
stateid_t *stateid = &free_stateid->fr_stateid;
struct nfs4_stid *s;
struct nfs4_delegation *dp;
struct nfs4_client *cl = cstate->session->se_client;
struct nfs4_client *cl = cstate->clp;
__be32 ret = nfserr_bad_stateid;
spin_lock(&cl->cl_lock);
......@@ -6696,13 +6693,9 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (nfsd4_has_session(cstate))
/* See rfc 5661 18.10.3: given clientid is ignored: */
memcpy(&lock->lk_new_clientid,
&cstate->session->se_client->cl_clientid,
&cstate->clp->cl_clientid,
sizeof(clientid_t));
status = nfserr_stale_clientid;
if (STALE_CLIENTID(&lock->lk_new_clientid, nn))
goto out;
/* validate and update open stateid and open seqid */
status = nfs4_preprocess_confirmed_seqid_op(cstate,
lock->lk_new_open_seqid,
......@@ -6909,8 +6902,7 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
return nfserr_inval;
if (!nfsd4_has_session(cstate)) {
status = lookup_clientid(&lockt->lt_clientid, cstate, nn,
false);
status = set_client(&lockt->lt_clientid, cstate, nn);
if (status)
goto out;
}
......@@ -7094,7 +7086,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
dprintk("nfsd4_release_lockowner clientid: (%08x/%08x):\n",
clid->cl_boot, clid->cl_id);
status = lookup_clientid(clid, cstate, nn, false);
status = set_client(clid, cstate, nn);
if (status)
return status;
......@@ -7230,25 +7222,13 @@ nfsd4_find_reclaim_client(struct xdr_netobj name, struct nfsd_net *nn)
return NULL;
}
/*
* Called from OPEN. Look for clientid in reclaim list.
*/
__be32
nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate,
struct nfsd_net *nn)
nfs4_check_open_reclaim(struct nfs4_client *clp)
{
__be32 status;
/* find clientid in conf_id_hashtbl */
status = lookup_clientid(clid, cstate, nn, false);
if (status)
return nfserr_reclaim_bad;
if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &cstate->clp->cl_flags))
if (test_bit(NFSD4_CLIENT_RECLAIM_COMPLETE, &clp->cl_flags))
return nfserr_no_grace;
if (nfsd4_client_record_check(cstate->clp))
if (nfsd4_client_record_check(clp))
return nfserr_reclaim_bad;
return nfs_ok;
......
......@@ -121,14 +121,14 @@ nfsd_reply_cache_free_locked(struct nfsd_drc_bucket *b, struct svc_cacherep *rp,
struct nfsd_net *nn)
{
if (rp->c_type == RC_REPLBUFF && rp->c_replvec.iov_base) {
nn->drc_mem_usage -= rp->c_replvec.iov_len;
nfsd_stats_drc_mem_usage_sub(nn, rp->c_replvec.iov_len);
kfree(rp->c_replvec.iov_base);
}
if (rp->c_state != RC_UNUSED) {
rb_erase(&rp->c_node, &b->rb_head);
list_del(&rp->c_lru);
atomic_dec(&nn->num_drc_entries);
nn->drc_mem_usage -= sizeof(*rp);
nfsd_stats_drc_mem_usage_sub(nn, sizeof(*rp));
}
kmem_cache_free(drc_slab, rp);
}
......@@ -154,6 +154,16 @@ void nfsd_drc_slab_free(void)
kmem_cache_destroy(drc_slab);
}
static int nfsd_reply_cache_stats_init(struct nfsd_net *nn)
{
return nfsd_percpu_counters_init(nn->counter, NFSD_NET_COUNTERS_NUM);
}
static void nfsd_reply_cache_stats_destroy(struct nfsd_net *nn)
{
nfsd_percpu_counters_destroy(nn->counter, NFSD_NET_COUNTERS_NUM);
}
int nfsd_reply_cache_init(struct nfsd_net *nn)
{
unsigned int hashsize;
......@@ -165,12 +175,16 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
hashsize = nfsd_hashsize(nn->max_drc_entries);
nn->maskbits = ilog2(hashsize);
status = nfsd_reply_cache_stats_init(nn);
if (status)
goto out_nomem;
nn->nfsd_reply_cache_shrinker.scan_objects = nfsd_reply_cache_scan;
nn->nfsd_reply_cache_shrinker.count_objects = nfsd_reply_cache_count;
nn->nfsd_reply_cache_shrinker.seeks = 1;
status = register_shrinker(&nn->nfsd_reply_cache_shrinker);
if (status)
goto out_nomem;
goto out_stats_destroy;
nn->drc_hashtbl = kvzalloc(array_size(hashsize,
sizeof(*nn->drc_hashtbl)), GFP_KERNEL);
......@@ -186,6 +200,8 @@ int nfsd_reply_cache_init(struct nfsd_net *nn)
return 0;
out_shrinker:
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
out_stats_destroy:
nfsd_reply_cache_stats_destroy(nn);
out_nomem:
printk(KERN_ERR "nfsd: failed to allocate reply cache\n");
return -ENOMEM;
......@@ -196,6 +212,7 @@ void nfsd_reply_cache_shutdown(struct nfsd_net *nn)
struct svc_cacherep *rp;
unsigned int i;
nfsd_reply_cache_stats_destroy(nn);
unregister_shrinker(&nn->nfsd_reply_cache_shrinker);
for (i = 0; i < nn->drc_hashsize; i++) {
......@@ -324,7 +341,7 @@ nfsd_cache_key_cmp(const struct svc_cacherep *key,
{
if (key->c_key.k_xid == rp->c_key.k_xid &&
key->c_key.k_csum != rp->c_key.k_csum) {
++nn->payload_misses;
nfsd_stats_payload_misses_inc(nn);
trace_nfsd_drc_mismatch(nn, key, rp);
}
......@@ -407,7 +424,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
rqstp->rq_cacherep = NULL;
if (type == RC_NOCACHE) {
nfsdstats.rcnocache++;
nfsd_stats_rc_nocache_inc();
goto out;
}
......@@ -429,12 +446,12 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
goto found_entry;
}
nfsdstats.rcmisses++;
nfsd_stats_rc_misses_inc();
rqstp->rq_cacherep = rp;
rp->c_state = RC_INPROG;
atomic_inc(&nn->num_drc_entries);
nn->drc_mem_usage += sizeof(*rp);
nfsd_stats_drc_mem_usage_add(nn, sizeof(*rp));
/* go ahead and prune the cache */
prune_bucket(b, nn);
......@@ -446,7 +463,7 @@ int nfsd_cache_lookup(struct svc_rqst *rqstp)
found_entry:
/* We found a matching entry which is either in progress or done. */
nfsdstats.rchits++;
nfsd_stats_rc_hits_inc();
rtn = RC_DROPIT;
/* Request being processed */
......@@ -548,7 +565,7 @@ void nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
return;
}
spin_lock(&b->cache_lock);
nn->drc_mem_usage += bufsize;
nfsd_stats_drc_mem_usage_add(nn, bufsize);
lru_put_end(b, rp);
rp->c_secure = test_bit(RQ_SECURE, &rqstp->rq_flags);
rp->c_type = cachetype;
......@@ -588,13 +605,18 @@ static int nfsd_reply_cache_stats_show(struct seq_file *m, void *v)
seq_printf(m, "max entries: %u\n", nn->max_drc_entries);
seq_printf(m, "num entries: %u\n",
atomic_read(&nn->num_drc_entries));
atomic_read(&nn->num_drc_entries));
seq_printf(m, "hash buckets: %u\n", 1 << nn->maskbits);
seq_printf(m, "mem usage: %u\n", nn->drc_mem_usage);
seq_printf(m, "cache hits: %u\n", nfsdstats.rchits);
seq_printf(m, "cache misses: %u\n", nfsdstats.rcmisses);
seq_printf(m, "not cached: %u\n", nfsdstats.rcnocache);
seq_printf(m, "payload misses: %u\n", nn->payload_misses);
seq_printf(m, "mem usage: %lld\n",
percpu_counter_sum_positive(&nn->counter[NFSD_NET_DRC_MEM_USAGE]));
seq_printf(m, "cache hits: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]));
seq_printf(m, "cache misses: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]));
seq_printf(m, "not cached: %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]));
seq_printf(m, "payload misses: %lld\n",
percpu_counter_sum_positive(&nn->counter[NFSD_NET_PAYLOAD_MISSES]));
seq_printf(m, "longest chain len: %u\n", nn->longest_chain);
seq_printf(m, "cachesize at longest: %u\n", nn->longest_chain_cachesize);
return 0;
......
......@@ -32,6 +32,7 @@
enum {
NFSD_Root = 1,
NFSD_List,
NFSD_Export_Stats,
NFSD_Export_features,
NFSD_Fh,
NFSD_FO_UnlockIP,
......@@ -1348,6 +1349,8 @@ static int nfsd_fill_super(struct super_block *sb, struct fs_context *fc)
static const struct tree_descr nfsd_files[] = {
[NFSD_List] = {"exports", &exports_nfsd_operations, S_IRUGO},
/* Per-export io stats use same ops as exports file */
[NFSD_Export_Stats] = {"export_stats", &exports_nfsd_operations, S_IRUGO},
[NFSD_Export_features] = {"export_features",
&export_features_operations, S_IRUGO},
[NFSD_FO_UnlockIP] = {"unlock_ip",
......@@ -1534,7 +1537,9 @@ static int __init init_nfsd(void)
retval = nfsd4_init_pnfs();
if (retval)
goto out_free_slabs;
nfsd_stat_init(); /* Statistics */
retval = nfsd_stat_init(); /* Statistics */
if (retval)
goto out_free_pnfs;
retval = nfsd_drc_slab_create();
if (retval)
goto out_free_stat;
......@@ -1554,6 +1559,7 @@ static int __init init_nfsd(void)
nfsd_drc_slab_free();
out_free_stat:
nfsd_stat_shutdown();
out_free_pnfs:
nfsd4_exit_pnfs();
out_free_slabs:
nfsd4_free_slabs();
......
......@@ -24,8 +24,8 @@
#include <uapi/linux/nfsd/debug.h>
#include "netns.h"
#include "stats.h"
#include "export.h"
#include "stats.h"
#undef ifdebug
#ifdef CONFIG_SUNRPC_DEBUG
......
......@@ -349,7 +349,7 @@ static __be32 nfsd_set_fh_dentry(struct svc_rqst *rqstp, struct svc_fh *fhp)
__be32
fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
{
struct svc_export *exp;
struct svc_export *exp = NULL;
struct dentry *dentry;
__be32 error;
......@@ -422,7 +422,7 @@ fh_verify(struct svc_rqst *rqstp, struct svc_fh *fhp, umode_t type, int access)
}
out:
if (error == nfserr_stale)
nfsdstats.fh_stale++;
nfsd_stats_fh_stale_inc(exp);
return error;
}
......
......@@ -12,6 +12,7 @@
#include <linux/sunrpc/svc.h>
#include <uapi/linux/nfsd/nfsfh.h>
#include <linux/iversion.h>
#include <linux/exportfs.h>
static inline __u32 ino_t_to_u32(ino_t ino)
{
......@@ -264,7 +265,9 @@ fh_clear_wcc(struct svc_fh *fhp)
static inline u64 nfsd4_change_attribute(struct kstat *stat,
struct inode *inode)
{
if (IS_I_VERSION(inode)) {
if (inode->i_sb->s_export_op->fetch_iversion)
return inode->i_sb->s_export_op->fetch_iversion(inode);
else if (IS_I_VERSION(inode)) {
u64 chattr;
chattr = stat->ctime.tv_sec;
......
......@@ -149,14 +149,15 @@ nfsd_proc_lookup(struct svc_rqst *rqstp)
static __be32
nfsd_proc_readlink(struct svc_rqst *rqstp)
{
struct nfsd_readlinkargs *argp = rqstp->rq_argp;
struct nfsd_fhandle *argp = rqstp->rq_argp;
struct nfsd_readlinkres *resp = rqstp->rq_resp;
char *buffer = page_address(*(rqstp->rq_next_page++));
dprintk("nfsd: READLINK %s\n", SVCFH_fmt(&argp->fh));
/* Read the symlink. */
resp->len = NFS_MAXPATHLEN;
resp->status = nfsd_readlink(rqstp, &argp->fh, argp->buffer, &resp->len);
resp->status = nfsd_readlink(rqstp, &argp->fh, buffer, &resp->len);
fh_put(&argp->fh);
return rpc_success;
......@@ -171,32 +172,36 @@ nfsd_proc_read(struct svc_rqst *rqstp)
{
struct nfsd_readargs *argp = rqstp->rq_argp;
struct nfsd_readres *resp = rqstp->rq_resp;
unsigned int len;
u32 eof;
int v;
dprintk("nfsd: READ %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->offset);
argp->count = min_t(u32, argp->count, NFSSVC_MAXBLKSIZE_V2);
v = 0;
len = argp->count;
while (len > 0) {
struct page *page = *(rqstp->rq_next_page++);
rqstp->rq_vec[v].iov_base = page_address(page);
rqstp->rq_vec[v].iov_len = min_t(unsigned int, len, PAGE_SIZE);
len -= rqstp->rq_vec[v].iov_len;
v++;
}
/* Obtain buffer pointer for payload. 19 is 1 word for
* status, 17 words for fattr, and 1 word for the byte count.
*/
if (NFSSVC_MAXBLKSIZE_V2 < argp->count) {
char buf[RPC_MAX_ADDRBUFLEN];
printk(KERN_NOTICE
"oversized read request from %s (%d bytes)\n",
svc_print_addr(rqstp, buf, sizeof(buf)),
argp->count);
argp->count = NFSSVC_MAXBLKSIZE_V2;
}
svc_reserve_auth(rqstp, (19<<2) + argp->count + 4);
resp->count = argp->count;
resp->status = nfsd_read(rqstp, fh_copy(&resp->fh, &argp->fh),
argp->offset,
rqstp->rq_vec, argp->vlen,
&resp->count,
&eof);
fh_copy(&resp->fh, &argp->fh);
resp->status = nfsd_read(rqstp, &resp->fh, argp->offset,
rqstp->rq_vec, v, &resp->count, &eof);
if (resp->status == nfs_ok)
resp->status = fh_getattr(&resp->fh, &resp->stat);
else if (resp->status == nfserr_jukebox)
......@@ -548,6 +553,20 @@ nfsd_proc_rmdir(struct svc_rqst *rqstp)
return rpc_success;
}
static void nfsd_init_dirlist_pages(struct svc_rqst *rqstp,
struct nfsd_readdirres *resp,
int count)
{
count = min_t(u32, count, PAGE_SIZE);
/* Convert byte count to number of words (i.e. >> 2),
* and reserve room for the NULL ptr & eof flag (-2 words) */
resp->buflen = (count >> 2) - 2;
resp->buffer = page_address(*rqstp->rq_next_page);
rqstp->rq_next_page++;
}
/*
* Read a portion of a directory.
*/
......@@ -556,31 +575,24 @@ nfsd_proc_readdir(struct svc_rqst *rqstp)
{
struct nfsd_readdirargs *argp = rqstp->rq_argp;
struct nfsd_readdirres *resp = rqstp->rq_resp;
int count;
loff_t offset;
__be32 *buffer;
dprintk("nfsd: READDIR %s %d bytes at %d\n",
SVCFH_fmt(&argp->fh),
argp->count, argp->cookie);
/* Shrink to the client read size */
count = (argp->count >> 2) - 2;
/* Make sure we've room for the NULL ptr & eof flag */
count -= 2;
if (count < 0)
count = 0;
nfsd_init_dirlist_pages(rqstp, resp, argp->count);
buffer = resp->buffer;
resp->buffer = argp->buffer;
resp->offset = NULL;
resp->buflen = count;
resp->common.err = nfs_ok;
/* Read directory and encode entries on the fly */
offset = argp->cookie;
resp->status = nfsd_readdir(rqstp, &argp->fh, &offset,
&resp->common, nfssvc_encode_entry);
resp->count = resp->buffer - argp->buffer;
resp->count = resp->buffer - buffer;
if (resp->offset)
*resp->offset = htonl(offset);
......@@ -623,16 +635,18 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "NULL",
},
[NFSPROC_GETATTR] = {
.pc_func = nfsd_proc_getattr,
.pc_decode = nfssvc_decode_fhandle,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_attrstat,
.pc_release = nfssvc_release_attrstat,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT,
.pc_name = "GETATTR",
},
[NFSPROC_SETATTR] = {
.pc_func = nfsd_proc_setattr,
......@@ -643,6 +657,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
.pc_name = "SETATTR",
},
[NFSPROC_ROOT] = {
.pc_func = nfsd_proc_root,
......@@ -652,6 +667,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "ROOT",
},
[NFSPROC_LOOKUP] = {
.pc_func = nfsd_proc_lookup,
......@@ -662,15 +678,17 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+FH+AT,
.pc_name = "LOOKUP",
},
[NFSPROC_READLINK] = {
.pc_func = nfsd_proc_readlink,
.pc_decode = nfssvc_decode_readlinkargs,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_readlinkres,
.pc_argsize = sizeof(struct nfsd_readlinkargs),
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_readlinkres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+1+NFS_MAXPATHLEN/4,
.pc_name = "READLINK",
},
[NFSPROC_READ] = {
.pc_func = nfsd_proc_read,
......@@ -681,6 +699,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_readres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+AT+1+NFSSVC_MAXBLKSIZE_V2/4,
.pc_name = "READ",
},
[NFSPROC_WRITECACHE] = {
.pc_func = nfsd_proc_writecache,
......@@ -690,6 +709,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_voidres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = 0,
.pc_name = "WRITECACHE",
},
[NFSPROC_WRITE] = {
.pc_func = nfsd_proc_write,
......@@ -700,6 +720,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_attrstat),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+AT,
.pc_name = "WRITE",
},
[NFSPROC_CREATE] = {
.pc_func = nfsd_proc_create,
......@@ -710,6 +731,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
.pc_name = "CREATE",
},
[NFSPROC_REMOVE] = {
.pc_func = nfsd_proc_remove,
......@@ -719,6 +741,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "REMOVE",
},
[NFSPROC_RENAME] = {
.pc_func = nfsd_proc_rename,
......@@ -728,6 +751,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "RENAME",
},
[NFSPROC_LINK] = {
.pc_func = nfsd_proc_link,
......@@ -737,6 +761,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "LINK",
},
[NFSPROC_SYMLINK] = {
.pc_func = nfsd_proc_symlink,
......@@ -746,6 +771,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "SYMLINK",
},
[NFSPROC_MKDIR] = {
.pc_func = nfsd_proc_mkdir,
......@@ -756,6 +782,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_diropres),
.pc_cachetype = RC_REPLBUFF,
.pc_xdrressize = ST+FH+AT,
.pc_name = "MKDIR",
},
[NFSPROC_RMDIR] = {
.pc_func = nfsd_proc_rmdir,
......@@ -765,6 +792,7 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_ressize = sizeof(struct nfsd_stat),
.pc_cachetype = RC_REPLSTAT,
.pc_xdrressize = ST,
.pc_name = "RMDIR",
},
[NFSPROC_READDIR] = {
.pc_func = nfsd_proc_readdir,
......@@ -773,15 +801,17 @@ static const struct svc_procedure nfsd_procedures2[18] = {
.pc_argsize = sizeof(struct nfsd_readdirargs),
.pc_ressize = sizeof(struct nfsd_readdirres),
.pc_cachetype = RC_NOCACHE,
.pc_name = "READDIR",
},
[NFSPROC_STATFS] = {
.pc_func = nfsd_proc_statfs,
.pc_decode = nfssvc_decode_fhandle,
.pc_decode = nfssvc_decode_fhandleargs,
.pc_encode = nfssvc_encode_statfsres,
.pc_argsize = sizeof(struct nfsd_fhandle),
.pc_ressize = sizeof(struct nfsd_statfsres),
.pc_cachetype = RC_NOCACHE,
.pc_xdrressize = ST+5,
.pc_name = "STATFS",
},
};
......
......@@ -955,37 +955,6 @@ nfsd(void *vrqstp)
return 0;
}
/*
* A write procedure can have a large argument, and a read procedure can
* have a large reply, but no NFSv2 or NFSv3 procedure has argument and
* reply that can both be larger than a page. The xdr code has taken
* advantage of this assumption to be a sloppy about bounds checking in
* some cases. Pending a rewrite of the NFSv2/v3 xdr code to fix that
* problem, we enforce these assumptions here:
*/
static bool nfs_request_too_big(struct svc_rqst *rqstp,
const struct svc_procedure *proc)
{
/*
* The ACL code has more careful bounds-checking and is not
* susceptible to this problem:
*/
if (rqstp->rq_prog != NFS_PROGRAM)
return false;
/*
* Ditto NFSv4 (which can in theory have argument and reply both
* more than a page):
*/
if (rqstp->rq_vers >= 4)
return false;
/* The reply will be small, we're OK: */
if (proc->pc_xdrressize > 0 &&
proc->pc_xdrressize < XDR_QUADLEN(PAGE_SIZE))
return false;
return rqstp->rq_arg.len > PAGE_SIZE;
}
/**
* nfsd_dispatch - Process an NFS or NFSACL Request
* @rqstp: incoming request
......@@ -1004,9 +973,6 @@ int nfsd_dispatch(struct svc_rqst *rqstp, __be32 *statp)
struct kvec *resv = &rqstp->rq_res.head[0];
__be32 *p;
if (nfs_request_too_big(rqstp, proc))
goto out_decode_err;
/*
* Give the xdr decoder a chance to change this if it wants
* (necessary in the NFSv4.0 compound case)
......
......@@ -649,8 +649,7 @@ void nfs4_remove_reclaim_record(struct nfs4_client_reclaim *, struct nfsd_net *)
extern void nfs4_release_reclaim(struct nfsd_net *);
extern struct nfs4_client_reclaim *nfsd4_find_reclaim_client(struct xdr_netobj name,
struct nfsd_net *nn);
extern __be32 nfs4_check_open_reclaim(clientid_t *clid,
struct nfsd4_compound_state *cstate, struct nfsd_net *nn);
extern __be32 nfs4_check_open_reclaim(struct nfs4_client *);
extern void nfsd4_probe_callback(struct nfs4_client *clp);
extern void nfsd4_probe_callback_sync(struct nfs4_client *clp);
extern void nfsd4_change_callback(struct nfs4_client *clp, struct nfs4_cb_conn *);
......
......@@ -7,16 +7,14 @@
* Format:
* rc <hits> <misses> <nocache>
* Statistics for the reply cache
* fh <stale> <total-lookups> <anonlookups> <dir-not-in-dcache> <nondir-not-in-dcache>
* fh <stale> <deprecated filehandle cache stats>
* statistics for filehandle lookup
* io <bytes-read> <bytes-written>
* statistics for IO throughput
* th <threads> <fullcnt> <10%-20%> <20%-30%> ... <90%-100%> <100%>
* time (seconds) when nfsd thread usage above thresholds
* and number of times that all threads were in use
* ra cache-size <10% <20% <30% ... <100% not-found
* number of times that read-ahead entry was found that deep in
* the cache.
* th <threads> <deprecated thread usage histogram stats>
* number of threads
* ra <deprecated ra-cache stats>
*
* plus generic RPC stats (see net/sunrpc/stats.c)
*
* Copyright (C) 1995, 1996, 1997 Olaf Kirch <okir@monad.swb.de>
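As a hedged illustration of the new layout, a snapshot of /proc/net/rpc/nfsd emitted by the code below might look like this (all values are invented; the deprecated fields are now reported as literal zeros, and the output continues with the generic RPC stats and, under CONFIG_NFSD_V4, a proc4ops line):
rc 10432 871 2203
fh 4 0 0 0 0
io 52428800 10485760
th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000
ra 0 0 0 0 0 0 0 0 0 0 0 0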
......@@ -38,31 +36,24 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
{
int i;
seq_printf(seq, "rc %u %u %u\nfh %u %u %u %u %u\nio %u %u\n",
nfsdstats.rchits,
nfsdstats.rcmisses,
nfsdstats.rcnocache,
nfsdstats.fh_stale,
nfsdstats.fh_lookup,
nfsdstats.fh_anon,
nfsdstats.fh_nocache_dir,
nfsdstats.fh_nocache_nondir,
nfsdstats.io_read,
nfsdstats.io_write);
seq_printf(seq, "rc %lld %lld %lld\nfh %lld 0 0 0 0\nio %lld %lld\n",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_HITS]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_MISSES]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_FH_STALE]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_READ]),
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_IO_WRITE]));
/* thread usage: */
seq_printf(seq, "th %u %u", nfsdstats.th_cnt, nfsdstats.th_fullcnt);
for (i=0; i<10; i++) {
unsigned int jifs = nfsdstats.th_usage[i];
unsigned int sec = jifs / HZ, msec = (jifs % HZ)*1000/HZ;
seq_printf(seq, " %u.%03u", sec, msec);
}
seq_printf(seq, "th %u 0", nfsdstats.th_cnt);
/* deprecated thread usage histogram stats */
for (i = 0; i < 10; i++)
seq_puts(seq, " 0.000");
/* deprecated ra-cache stats */
seq_puts(seq, "\nra 0 0 0 0 0 0 0 0 0 0 0 0\n");
/* newline and ra-cache */
seq_printf(seq, "\nra %u", nfsdstats.ra_size);
for (i=0; i<11; i++)
seq_printf(seq, " %u", nfsdstats.ra_depth[i]);
seq_putc(seq, '\n');
/* show my rpc info */
svc_seq_show(seq, &nfsd_svcstats);
......@@ -70,8 +61,10 @@ static int nfsd_proc_show(struct seq_file *seq, void *v)
/* Show count for individual nfsv4 operations */
/* Writing operation numbers 0 1 2 also for maintaining uniformity */
seq_printf(seq,"proc4ops %u", LAST_NFS4_OP + 1);
for (i = 0; i <= LAST_NFS4_OP; i++)
seq_printf(seq, " %u", nfsdstats.nfs4_opcount[i]);
for (i = 0; i <= LAST_NFS4_OP; i++) {
seq_printf(seq, " %lld",
percpu_counter_sum_positive(&nfsdstats.counter[NFSD_STATS_NFS4_OP(i)]));
}
seq_putc(seq, '\n');
#endif
......@@ -91,14 +84,63 @@ static const struct proc_ops nfsd_proc_ops = {
.proc_release = single_release,
};
void
nfsd_stat_init(void)
int nfsd_percpu_counters_init(struct percpu_counter counters[], int num)
{
int i, err = 0;
for (i = 0; !err && i < num; i++)
err = percpu_counter_init(&counters[i], 0, GFP_KERNEL);
if (!err)
return 0;
for (; i > 0; i--)
percpu_counter_destroy(&counters[i-1]);
return err;
}
void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_set(&counters[i], 0);
}
void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num)
{
int i;
for (i = 0; i < num; i++)
percpu_counter_destroy(&counters[i]);
}
static int nfsd_stat_counters_init(void)
{
return nfsd_percpu_counters_init(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
}
static void nfsd_stat_counters_destroy(void)
{
nfsd_percpu_counters_destroy(nfsdstats.counter, NFSD_STATS_COUNTERS_NUM);
}
int nfsd_stat_init(void)
{
int err;
err = nfsd_stat_counters_init();
if (err)
return err;
svc_proc_register(&init_net, &nfsd_svcstats, &nfsd_proc_ops);
return 0;
}
void
nfsd_stat_shutdown(void)
void nfsd_stat_shutdown(void)
{
nfsd_stat_counters_destroy();
svc_proc_unregister(&init_net, "nfsd");
}
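The three nfsd_percpu_counters_* helpers above are deliberately generic over any percpu_counter array. As a minimal sketch (the demo_stats names are hypothetical, not part of this patch), another nfsd component could reuse them like so:
enum { DEMO_STATS_FOO, DEMO_STATS_BAR, DEMO_STATS_NUM };
struct demo_stats {
	struct percpu_counter counter[DEMO_STATS_NUM];
};
static int demo_stats_init(struct demo_stats *stats)
{
	/* Initializes every counter; unwinds already-initialized ones on failure. */
	return nfsd_percpu_counters_init(stats->counter, DEMO_STATS_NUM);
}
static void demo_stats_shutdown(struct demo_stats *stats)
{
	nfsd_percpu_counters_destroy(stats->counter, DEMO_STATS_NUM);
}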
......@@ -8,37 +8,91 @@
#define _NFSD_STATS_H
#include <uapi/linux/nfsd/stats.h>
#include <linux/percpu_counter.h>
struct nfsd_stats {
unsigned int rchits; /* repcache hits */
unsigned int rcmisses; /* repcache misses */
unsigned int rcnocache; /* uncached reqs */
unsigned int fh_stale; /* FH stale error */
unsigned int fh_lookup; /* dentry cached */
unsigned int fh_anon; /* anon file dentry returned */
unsigned int fh_nocache_dir; /* filehandle not found in dcache */
unsigned int fh_nocache_nondir; /* filehandle not found in dcache */
unsigned int io_read; /* bytes returned to read requests */
unsigned int io_write; /* bytes passed in write requests */
unsigned int th_cnt; /* number of available threads */
unsigned int th_usage[10]; /* number of ticks during which n perdeciles
* of available threads were in use */
unsigned int th_fullcnt; /* number of times last free thread was used */
unsigned int ra_size; /* size of ra cache */
unsigned int ra_depth[11]; /* number of times ra entry was found that deep
* in the cache (10 percentiles). [10] = not found */
enum {
NFSD_STATS_RC_HITS, /* repcache hits */
NFSD_STATS_RC_MISSES, /* repcache misses */
NFSD_STATS_RC_NOCACHE, /* uncached reqs */
NFSD_STATS_FH_STALE, /* FH stale error */
NFSD_STATS_IO_READ, /* bytes returned to read requests */
NFSD_STATS_IO_WRITE, /* bytes passed in write requests */
#ifdef CONFIG_NFSD_V4
unsigned int nfs4_opcount[LAST_NFS4_OP + 1]; /* count of individual nfsv4 operations */
NFSD_STATS_FIRST_NFS4_OP, /* count of individual nfsv4 operations */
NFSD_STATS_LAST_NFS4_OP = NFSD_STATS_FIRST_NFS4_OP + LAST_NFS4_OP,
#define NFSD_STATS_NFS4_OP(op) (NFSD_STATS_FIRST_NFS4_OP + (op))
#endif
NFSD_STATS_COUNTERS_NUM
};
struct nfsd_stats {
struct percpu_counter counter[NFSD_STATS_COUNTERS_NUM];
/* Protected by nfsd_mutex */
unsigned int th_cnt; /* number of available threads */
};
extern struct nfsd_stats nfsdstats;
extern struct svc_stat nfsd_svcstats;
void nfsd_stat_init(void);
void nfsd_stat_shutdown(void);
int nfsd_percpu_counters_init(struct percpu_counter counters[], int num);
void nfsd_percpu_counters_reset(struct percpu_counter counters[], int num);
void nfsd_percpu_counters_destroy(struct percpu_counter counters[], int num);
int nfsd_stat_init(void);
void nfsd_stat_shutdown(void);
static inline void nfsd_stats_rc_hits_inc(void)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_HITS]);
}
static inline void nfsd_stats_rc_misses_inc(void)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_MISSES]);
}
static inline void nfsd_stats_rc_nocache_inc(void)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_RC_NOCACHE]);
}
static inline void nfsd_stats_fh_stale_inc(struct svc_export *exp)
{
percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_FH_STALE]);
if (exp)
percpu_counter_inc(&exp->ex_stats.counter[EXP_STATS_FH_STALE]);
}
static inline void nfsd_stats_io_read_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_READ], amount);
if (exp)
percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_READ], amount);
}
static inline void nfsd_stats_io_write_add(struct svc_export *exp, s64 amount)
{
percpu_counter_add(&nfsdstats.counter[NFSD_STATS_IO_WRITE], amount);
if (exp)
percpu_counter_add(&exp->ex_stats.counter[EXP_STATS_IO_WRITE], amount);
}
static inline void nfsd_stats_payload_misses_inc(struct nfsd_net *nn)
{
percpu_counter_inc(&nn->counter[NFSD_NET_PAYLOAD_MISSES]);
}
static inline void nfsd_stats_drc_mem_usage_add(struct nfsd_net *nn, s64 amount)
{
percpu_counter_add(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}
static inline void nfsd_stats_drc_mem_usage_sub(struct nfsd_net *nn, s64 amount)
{
percpu_counter_sub(&nn->counter[NFSD_NET_DRC_MEM_USAGE], amount);
}
#endif /* _NFSD_STATS_H */
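Call sites bump these counters only through the inline wrappers above. For the NFSv4 operation counters, the NFSD_STATS_NFS4_OP() macro maps an operation number onto the shared counter array; a hedged sketch of such an increment (this helper is illustrative, not part of the header):
#ifdef CONFIG_NFSD_V4
/* Sketch only: count one execution of NFSv4 operation 'opnum'. */
static inline void nfsd_stats_nfs4_op_inc(unsigned int opnum)
{
	percpu_counter_inc(&nfsdstats.counter[NFSD_STATS_NFS4_OP(opnum)]);
}
#endif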
......@@ -889,7 +889,7 @@ static __be32 nfsd_finish_read(struct svc_rqst *rqstp, struct svc_fh *fhp,
unsigned long *count, u32 *eof, ssize_t host_err)
{
if (host_err >= 0) {
nfsdstats.io_read += host_err;
nfsd_stats_io_read_add(fhp->fh_export, host_err);
*eof = nfsd_eof_on_read(file, offset, host_err, *count);
*count = host_err;
fsnotify_access(file);
......@@ -1040,7 +1040,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct nfsd_file *nf,
goto out_nfserr;
}
*cnt = host_err;
nfsdstats.io_write += *cnt;
nfsd_stats_io_write_add(exp, *cnt);
fsnotify_modify(file);
if (stable && use_wgather) {
......
......@@ -27,7 +27,6 @@ struct nfsd_readargs {
struct svc_fh fh;
__u32 offset;
__u32 count;
int vlen;
};
struct nfsd_writeargs {
......@@ -53,11 +52,6 @@ struct nfsd_renameargs {
unsigned int tlen;
};
struct nfsd_readlinkargs {
struct svc_fh fh;
char * buffer;
};
struct nfsd_linkargs {
struct svc_fh ffh;
struct svc_fh tfh;
......@@ -79,7 +73,6 @@ struct nfsd_readdirargs {
struct svc_fh fh;
__u32 cookie;
__u32 count;
__be32 * buffer;
};
struct nfsd_stat {
......@@ -144,14 +137,13 @@ union nfsd_xdrstore {
#define NFS2_SVC_XDRSIZE sizeof(union nfsd_xdrstore)
int nfssvc_decode_fhandle(struct svc_rqst *, __be32 *);
int nfssvc_decode_fhandleargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_sattrargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_diropargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_readargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_writeargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_createargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_renameargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_readlinkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_linkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_symlinkargs(struct svc_rqst *, __be32 *);
int nfssvc_decode_readdirargs(struct svc_rqst *, __be32 *);
......@@ -172,6 +164,6 @@ void nfssvc_release_readres(struct svc_rqst *rqstp);
/* Helper functions for NFSv2 ACL code */
__be32 *nfs2svc_encode_fattr(struct svc_rqst *rqstp, __be32 *p, struct svc_fh *fhp, struct kstat *stat);
__be32 *nfs2svc_decode_fh(__be32 *p, struct svc_fh *fhp);
bool svcxdr_decode_fhandle(struct xdr_stream *xdr, struct svc_fh *fhp);
#endif /* LINUX_NFSD_H */
......@@ -25,14 +25,13 @@ struct nfsd3_diropargs {
struct nfsd3_accessargs {
struct svc_fh fh;
unsigned int access;
__u32 access;
};
struct nfsd3_readargs {
struct svc_fh fh;
__u64 offset;
__u32 count;
int vlen;
};
struct nfsd3_writeargs {
......@@ -71,11 +70,6 @@ struct nfsd3_renameargs {
unsigned int tlen;
};
struct nfsd3_readlinkargs {
struct svc_fh fh;
char * buffer;
};
struct nfsd3_linkargs {
struct svc_fh ffh;
struct svc_fh tfh;
......@@ -96,10 +90,8 @@ struct nfsd3_symlinkargs {
struct nfsd3_readdirargs {
struct svc_fh fh;
__u64 cookie;
__u32 dircount;
__u32 count;
__be32 * verf;
__be32 * buffer;
};
struct nfsd3_commitargs {
......@@ -110,13 +102,13 @@ struct nfsd3_commitargs {
struct nfsd3_getaclargs {
struct svc_fh fh;
int mask;
__u32 mask;
};
struct posix_acl;
struct nfsd3_setaclargs {
struct svc_fh fh;
int mask;
__u32 mask;
struct posix_acl *acl_access;
struct posix_acl *acl_default;
};
......@@ -273,7 +265,7 @@ union nfsd3_xdrstore {
#define NFS3_SVC_XDRSIZE sizeof(union nfsd3_xdrstore)
int nfs3svc_decode_fhandle(struct svc_rqst *, __be32 *);
int nfs3svc_decode_fhandleargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_sattrargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_diropargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_accessargs(struct svc_rqst *, __be32 *);
......@@ -283,7 +275,6 @@ int nfs3svc_decode_createargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_mkdirargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_mknodargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_renameargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_readlinkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_linkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_symlinkargs(struct svc_rqst *, __be32 *);
int nfs3svc_decode_readdirargs(struct svc_rqst *, __be32 *);
......@@ -316,7 +307,6 @@ int nfs3svc_encode_entry_plus(void *, const char *name,
/* Helper functions for NFSv3 ACL code */
__be32 *nfs3svc_encode_post_op_attr(struct svc_rqst *rqstp, __be32 *p,
struct svc_fh *fhp);
__be32 *nfs3svc_decode_fh(__be32 *p, struct svc_fh *fhp);
bool svcxdr_decode_nfs_fh3(struct xdr_stream *xdr, struct svc_fh *fhp);
#endif /* _LINUX_NFSD_XDR3_H */
......@@ -213,6 +213,7 @@ struct export_operations {
bool write, u32 *device_generation);
int (*commit_blocks)(struct inode *inode, struct iomap *iomaps,
int nr_iomaps, struct iattr *iattr);
u64 (*fetch_iversion)(struct inode *);
#define EXPORT_OP_NOWCC (0x1) /* don't collect v3 wcc data */
#define EXPORT_OP_NOSUBTREECHK (0x2) /* no subtree checking */
#define EXPORT_OP_CLOSE_BEFORE_UNLINK (0x4) /* close files before unlink */
......
......@@ -38,5 +38,8 @@ nfsacl_encode(struct xdr_buf *buf, unsigned int base, struct inode *inode,
extern int
nfsacl_decode(struct xdr_buf *buf, unsigned int base, unsigned int *aclcnt,
struct posix_acl **pacl);
extern bool
nfs_stream_decode_acl(struct xdr_stream *xdr, unsigned int *aclcnt,
struct posix_acl **pacl);
#endif /* __LINUX_NFSACL_H */
......@@ -10,9 +10,6 @@
#define RPC_VERSION 2
/* size of an XDR encoding unit in bytes, i.e. 32bit */
#define XDR_UNIT (4)
/* spec defines authentication flavor as an unsigned 32 bit integer */
typedef u32 rpc_authflavor_t;
......
......@@ -463,6 +463,7 @@ struct svc_procedure {
unsigned int pc_ressize; /* result struct size */
unsigned int pc_cachetype; /* cache info (NFS) */
unsigned int pc_xdrressize; /* maximum size of XDR reply */
const char * pc_name; /* for display */
};
/*
......
......@@ -49,6 +49,7 @@
#include <linux/sunrpc/rpc_rdma_cid.h>
#include <linux/sunrpc/svc_rdma_pcl.h>
#include <linux/percpu_counter.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
......@@ -65,15 +66,10 @@ extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_bc_requests;
extern unsigned int svcrdma_max_req_size;
extern atomic_t rdma_stat_recv;
extern atomic_t rdma_stat_read;
extern atomic_t rdma_stat_write;
extern atomic_t rdma_stat_sq_starve;
extern atomic_t rdma_stat_rq_starve;
extern atomic_t rdma_stat_rq_poll;
extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;
extern struct percpu_counter svcrdma_stat_read;
extern struct percpu_counter svcrdma_stat_recv;
extern struct percpu_counter svcrdma_stat_sq_starve;
extern struct percpu_counter svcrdma_stat_write;
struct svcxprt_rdma {
struct svc_xprt sc_xprt; /* SVC transport structure */
......@@ -108,6 +104,7 @@ struct svcxprt_rdma {
wait_queue_head_t sc_send_wait; /* SQ exhaustion waitlist */
unsigned long sc_flags;
u32 sc_pending_recvs;
struct list_head sc_read_complete_q;
struct work_struct sc_work;
......
......@@ -19,6 +19,13 @@
struct bio_vec;
struct rpc_rqst;
/*
* Size of an XDR encoding unit in bytes, i.e. 32 bits,
* as defined in Section 3 of RFC 4506. All encoded
* XDR data items are aligned on a boundary of 32 bits.
*/
#define XDR_UNIT sizeof(__be32)
/*
* Buffer adjustment
*/
......@@ -329,7 +336,7 @@ ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
static inline size_t
xdr_align_size(size_t n)
{
const size_t mask = sizeof(__u32) - 1;
const size_t mask = XDR_UNIT - 1;
return (n + mask) & ~mask;
}
......@@ -359,7 +366,7 @@ static inline size_t xdr_pad_size(size_t n)
*/
static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
{
const size_t len = sizeof(__be32);
const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
......@@ -378,7 +385,7 @@ static inline ssize_t xdr_stream_encode_item_present(struct xdr_stream *xdr)
*/
static inline int xdr_stream_encode_item_absent(struct xdr_stream *xdr)
{
const size_t len = sizeof(__be32);
const size_t len = XDR_UNIT;
__be32 *p = xdr_reserve_space(xdr, len);
if (unlikely(!p))
......
......@@ -1603,6 +1603,7 @@ TRACE_EVENT(svc_process,
__field(u32, vers)
__field(u32, proc)
__string(service, name)
__string(procedure, rqst->rq_procinfo->pc_name)
__string(addr, rqst->rq_xprt ?
rqst->rq_xprt->xpt_remotebuf : "(null)")
),
......@@ -1612,13 +1613,16 @@ TRACE_EVENT(svc_process,
__entry->vers = rqst->rq_vers;
__entry->proc = rqst->rq_proc;
__assign_str(service, name);
__assign_str(procedure, rqst->rq_procinfo->pc_name);
__assign_str(addr, rqst->rq_xprt ?
rqst->rq_xprt->xpt_remotebuf : "(null)");
),
TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%u",
TP_printk("addr=%s xid=0x%08x service=%s vers=%u proc=%s",
__get_str(addr), __entry->xid,
__get_str(service), __entry->vers, __entry->proc)
__get_str(service), __entry->vers,
__get_str(procedure)
)
);
DECLARE_EVENT_CLASS(svc_rqst_event,
......@@ -1874,6 +1878,7 @@ TRACE_EVENT(svc_stats_latency,
TP_STRUCT__entry(
__field(u32, xid)
__field(unsigned long, execute)
__string(procedure, rqst->rq_procinfo->pc_name)
__string(addr, rqst->rq_xprt->xpt_remotebuf)
),
......@@ -1881,11 +1886,13 @@ TRACE_EVENT(svc_stats_latency,
__entry->xid = be32_to_cpu(rqst->rq_xid);
__entry->execute = ktime_to_us(ktime_sub(ktime_get(),
rqst->rq_stime));
__assign_str(procedure, rqst->rq_procinfo->pc_name);
__assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
),
TP_printk("addr=%s xid=0x%08x execute-us=%lu",
__get_str(addr), __entry->xid, __entry->execute)
TP_printk("addr=%s xid=0x%08x proc=%s execute-us=%lu",
__get_str(addr), __entry->xid, __get_str(procedure),
__entry->execute)
);
DECLARE_EVENT_CLASS(svc_deferred_event,
......
......@@ -63,6 +63,12 @@ enum nfs3_ftype {
NF3BAD = 8
};
enum nfs3_time_how {
DONT_CHANGE = 0,
SET_TO_SERVER_TIME = 1,
SET_TO_CLIENT_TIME = 2,
};
struct nfs3_fh {
unsigned short size;
unsigned char data[NFS3_FHSIZE];
......
......@@ -559,7 +559,7 @@ EXPORT_SYMBOL_GPL(svc_destroy);
/*
* Allocate an RPC server's buffer space.
* We allocate pages and place them in rq_argpages.
* We allocate pages and place them in rq_pages.
*/
static int
svc_init_buffer(struct svc_rqst *rqstp, unsigned int size, int node)
......
......@@ -62,51 +62,47 @@ static unsigned int max_max_requests = 16384;
unsigned int svcrdma_max_req_size = RPCRDMA_DEF_INLINE_THRESH;
static unsigned int min_max_inline = RPCRDMA_DEF_INLINE_THRESH;
static unsigned int max_max_inline = RPCRDMA_MAX_INLINE_THRESH;
static unsigned int svcrdma_stat_unused;
static unsigned int zero;
atomic_t rdma_stat_recv;
atomic_t rdma_stat_read;
atomic_t rdma_stat_write;
atomic_t rdma_stat_sq_starve;
atomic_t rdma_stat_rq_starve;
atomic_t rdma_stat_rq_poll;
atomic_t rdma_stat_rq_prod;
atomic_t rdma_stat_sq_poll;
atomic_t rdma_stat_sq_prod;
struct percpu_counter svcrdma_stat_read;
struct percpu_counter svcrdma_stat_recv;
struct percpu_counter svcrdma_stat_sq_starve;
struct percpu_counter svcrdma_stat_write;
/*
* This function implements reading and resetting an atomic_t stat
* variable through read/write to a proc file. Any write to the file
* resets the associated statistic to zero. Any read returns its
* current value.
*/
static int read_reset_stat(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
enum {
SVCRDMA_COUNTER_BUFSIZ = sizeof(unsigned long long),
};
static int svcrdma_counter_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos)
{
atomic_t *stat = (atomic_t *)table->data;
if (!stat)
return -EINVAL;
if (write)
atomic_set(stat, 0);
else {
char str_buf[32];
int len = snprintf(str_buf, 32, "%d\n", atomic_read(stat));
if (len >= 32)
return -EFAULT;
len = strlen(str_buf);
if (*ppos > len) {
*lenp = 0;
return 0;
}
len -= *ppos;
if (len > *lenp)
len = *lenp;
if (len)
memcpy(buffer, str_buf, len);
*lenp = len;
*ppos += len;
struct percpu_counter *stat = (struct percpu_counter *)table->data;
char tmp[SVCRDMA_COUNTER_BUFSIZ + 1];
int len;
if (write) {
percpu_counter_set(stat, 0);
return 0;
}
len = snprintf(tmp, SVCRDMA_COUNTER_BUFSIZ, "%lld\n",
percpu_counter_sum_positive(stat));
if (len >= SVCRDMA_COUNTER_BUFSIZ)
return -EFAULT;
len = strlen(tmp);
if (*ppos > len) {
*lenp = 0;
return 0;
}
len -= *ppos;
if (len > *lenp)
len = *lenp;
if (len)
memcpy(buffer, tmp, len);
*lenp = len;
*ppos += len;
return 0;
}
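From user space each converted statistic still behaves like an ordinary sysctl file: a read returns the summed counter value, and any write resets it to zero. Assuming these entries remain under /proc/sys/sunrpc/svc_rdma/ (the path here is an assumption for illustration), a minimal C sketch:
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Path assumed from the existing svcrdma sysctl layout. */
	const char *path = "/proc/sys/sunrpc/svc_rdma/rdma_stat_recv";
	char buf[32];
	ssize_t n;
	int fd;

	fd = open(path, O_RDONLY);
	if (fd < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		printf("rdma_stat_recv: %s", buf);
	}
	close(fd);

	fd = open(path, O_WRONLY);
	if (fd >= 0) {
		write(fd, "0\n", 2);	/* any write zeroes the counter */
		close(fd);
	}
	return 0;
}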
......@@ -142,66 +138,76 @@ static struct ctl_table svcrdma_parm_table[] = {
{
.procname = "rdma_stat_read",
.data = &rdma_stat_read,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_read,
.maxlen = SVCRDMA_COUNTER_BUFSIZ,
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = svcrdma_counter_handler,
},
{
.procname = "rdma_stat_recv",
.data = &rdma_stat_recv,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_recv,
.maxlen = SVCRDMA_COUNTER_BUFSIZ,
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = svcrdma_counter_handler,
},
{
.procname = "rdma_stat_write",
.data = &rdma_stat_write,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_write,
.maxlen = SVCRDMA_COUNTER_BUFSIZ,
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = svcrdma_counter_handler,
},
{
.procname = "rdma_stat_sq_starve",
.data = &rdma_stat_sq_starve,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_sq_starve,
.maxlen = SVCRDMA_COUNTER_BUFSIZ,
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = svcrdma_counter_handler,
},
{
.procname = "rdma_stat_rq_starve",
.data = &rdma_stat_rq_starve,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_unused,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &zero,
},
{
.procname = "rdma_stat_rq_poll",
.data = &rdma_stat_rq_poll,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_unused,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &zero,
},
{
.procname = "rdma_stat_rq_prod",
.data = &rdma_stat_rq_prod,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_unused,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &zero,
},
{
.procname = "rdma_stat_sq_poll",
.data = &rdma_stat_sq_poll,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_unused,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &zero,
},
{
.procname = "rdma_stat_sq_prod",
.data = &rdma_stat_sq_prod,
.maxlen = sizeof(atomic_t),
.data = &svcrdma_stat_unused,
.maxlen = sizeof(unsigned int),
.mode = 0644,
.proc_handler = read_reset_stat,
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
.extra2 = &zero,
},
{ },
};
......@@ -224,27 +230,69 @@ static struct ctl_table svcrdma_root_table[] = {
{ },
};
static void svc_rdma_proc_cleanup(void)
{
if (!svcrdma_table_header)
return;
unregister_sysctl_table(svcrdma_table_header);
svcrdma_table_header = NULL;
percpu_counter_destroy(&svcrdma_stat_write);
percpu_counter_destroy(&svcrdma_stat_sq_starve);
percpu_counter_destroy(&svcrdma_stat_recv);
percpu_counter_destroy(&svcrdma_stat_read);
}
static int svc_rdma_proc_init(void)
{
int rc;
if (svcrdma_table_header)
return 0;
rc = percpu_counter_init(&svcrdma_stat_read, 0, GFP_KERNEL);
if (rc)
goto out_err;
rc = percpu_counter_init(&svcrdma_stat_recv, 0, GFP_KERNEL);
if (rc)
goto out_err;
rc = percpu_counter_init(&svcrdma_stat_sq_starve, 0, GFP_KERNEL);
if (rc)
goto out_err;
rc = percpu_counter_init(&svcrdma_stat_write, 0, GFP_KERNEL);
if (rc)
goto out_err;
svcrdma_table_header = register_sysctl_table(svcrdma_root_table);
return 0;
out_err:
percpu_counter_destroy(&svcrdma_stat_sq_starve);
percpu_counter_destroy(&svcrdma_stat_recv);
percpu_counter_destroy(&svcrdma_stat_read);
return rc;
}
void svc_rdma_cleanup(void)
{
dprintk("SVCRDMA Module Removed, deregister RPC RDMA transport\n");
if (svcrdma_table_header) {
unregister_sysctl_table(svcrdma_table_header);
svcrdma_table_header = NULL;
}
svc_unreg_xprt_class(&svc_rdma_class);
svc_rdma_proc_cleanup();
}
int svc_rdma_init(void)
{
int rc;
dprintk("SVCRDMA Module Init, register RPC RDMA transport\n");
dprintk("\tsvcrdma_ord : %d\n", svcrdma_ord);
dprintk("\tmax_requests : %u\n", svcrdma_max_requests);
dprintk("\tmax_bc_requests : %u\n", svcrdma_max_bc_requests);
dprintk("\tmax_inline : %d\n", svcrdma_max_req_size);
if (!svcrdma_table_header)
svcrdma_table_header =
register_sysctl_table(svcrdma_root_table);
rc = svc_rdma_proc_init();
if (rc)
return rc;
/* Register RDMA with the SVC transport switch */
svc_reg_xprt_class(&svc_rdma_class);
......
......@@ -266,33 +266,46 @@ void svc_rdma_release_rqst(struct svc_rqst *rqstp)
svc_rdma_recv_ctxt_put(rdma, ctxt);
}
static int __svc_rdma_post_recv(struct svcxprt_rdma *rdma,
struct svc_rdma_recv_ctxt *ctxt)
static bool svc_rdma_refresh_recvs(struct svcxprt_rdma *rdma,
unsigned int wanted, bool temp)
{
const struct ib_recv_wr *bad_wr = NULL;
struct svc_rdma_recv_ctxt *ctxt;
struct ib_recv_wr *recv_chain;
int ret;
trace_svcrdma_post_recv(ctxt);
ret = ib_post_recv(rdma->sc_qp, &ctxt->rc_recv_wr, NULL);
recv_chain = NULL;
while (wanted--) {
ctxt = svc_rdma_recv_ctxt_get(rdma);
if (!ctxt)
break;
trace_svcrdma_post_recv(ctxt);
ctxt->rc_temp = temp;
ctxt->rc_recv_wr.next = recv_chain;
recv_chain = &ctxt->rc_recv_wr;
rdma->sc_pending_recvs++;
}
if (!recv_chain)
return false;
ret = ib_post_recv(rdma->sc_qp, recv_chain, &bad_wr);
if (ret)
goto err_post;
return 0;
return true;
err_post:
trace_svcrdma_rq_post_err(rdma, ret);
svc_rdma_recv_ctxt_put(rdma, ctxt);
return ret;
}
static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
{
struct svc_rdma_recv_ctxt *ctxt;
while (bad_wr) {
ctxt = container_of(bad_wr, struct svc_rdma_recv_ctxt,
rc_recv_wr);
bad_wr = bad_wr->next;
svc_rdma_recv_ctxt_put(rdma, ctxt);
}
if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
return 0;
ctxt = svc_rdma_recv_ctxt_get(rdma);
if (!ctxt)
return -ENOMEM;
return __svc_rdma_post_recv(rdma, ctxt);
trace_svcrdma_rq_post_err(rdma, ret);
/* Since we're destroying the xprt, no need to reset
* sc_pending_recvs. */
return false;
}
/**
......@@ -303,20 +316,7 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
*/
bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
{
struct svc_rdma_recv_ctxt *ctxt;
unsigned int i;
int ret;
for (i = 0; i < rdma->sc_max_requests; i++) {
ctxt = svc_rdma_recv_ctxt_get(rdma);
if (!ctxt)
return false;
ctxt->rc_temp = true;
ret = __svc_rdma_post_recv(rdma, ctxt);
if (ret)
return false;
}
return true;
return svc_rdma_refresh_recvs(rdma, rdma->sc_max_requests, true);
}
/**
......@@ -324,8 +324,6 @@ bool svc_rdma_post_recvs(struct svcxprt_rdma *rdma)
* @cq: Completion Queue context
* @wc: Work Completion object
*
* NB: The svc_xprt/svcxprt_rdma is pinned whenever it's possible that
* the Receive completion handler could be running.
*/
static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
......@@ -333,6 +331,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
struct ib_cqe *cqe = wc->wr_cqe;
struct svc_rdma_recv_ctxt *ctxt;
rdma->sc_pending_recvs--;
/* WARNING: Only wc->wr_cqe and wc->status are reliable */
ctxt = container_of(cqe, struct svc_rdma_recv_ctxt, rc_cqe);
......@@ -340,14 +340,8 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
if (wc->status != IB_WC_SUCCESS)
goto flushed;
if (svc_rdma_post_recv(rdma))
goto post_err;
/* All wc fields are now known to be valid */
ctxt->rc_byte_len = wc->byte_len;
ib_dma_sync_single_for_cpu(rdma->sc_pd->device,
ctxt->rc_recv_sge.addr,
wc->byte_len, DMA_FROM_DEVICE);
spin_lock(&rdma->sc_rq_dto_lock);
list_add_tail(&ctxt->rc_list, &rdma->sc_rq_dto_q);
......@@ -356,11 +350,18 @@ static void svc_rdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
spin_unlock(&rdma->sc_rq_dto_lock);
if (!test_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags))
svc_xprt_enqueue(&rdma->sc_xprt);
if (!test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags) &&
rdma->sc_pending_recvs < rdma->sc_max_requests)
if (!svc_rdma_refresh_recvs(rdma, RPCRDMA_MAX_RECV_BATCH,
false))
goto post_err;
return;
flushed:
post_err:
svc_rdma_recv_ctxt_put(rdma, ctxt);
post_err:
set_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags);
svc_xprt_enqueue(&rdma->sc_xprt);
}
......@@ -845,9 +846,11 @@ int svc_rdma_recvfrom(struct svc_rqst *rqstp)
}
list_del(&ctxt->rc_list);
spin_unlock(&rdma_xprt->sc_rq_dto_lock);
percpu_counter_inc(&svcrdma_stat_recv);
atomic_inc(&rdma_stat_recv);
ib_dma_sync_single_for_cpu(rdma_xprt->sc_pd->device,
ctxt->rc_recv_sge.addr, ctxt->rc_byte_len,
DMA_FROM_DEVICE);
svc_rdma_build_arg_xdr(rqstp, ctxt);
/* Prevent svc_xprt_release from releasing pages in rq_pages
......
......@@ -364,6 +364,7 @@ static int svc_rdma_post_chunk_ctxt(struct svc_rdma_chunk_ctxt *cc)
return 0;
}
percpu_counter_inc(&svcrdma_stat_sq_starve);
trace_svcrdma_sq_full(rdma);
atomic_add(cc->cc_sqecount, &rdma->sc_sq_avail);
wait_event(rdma->sc_send_wait,
......@@ -468,6 +469,7 @@ svc_rdma_build_writes(struct svc_rdma_write_info *info,
DMA_TO_DEVICE);
if (ret < 0)
return -EIO;
percpu_counter_inc(&svcrdma_stat_write);
list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret;
......@@ -718,6 +720,7 @@ static int svc_rdma_build_read_segment(struct svc_rdma_read_info *info,
segment->rs_handle, DMA_FROM_DEVICE);
if (ret < 0)
return -EIO;
percpu_counter_inc(&svcrdma_stat_read);
list_add(&ctxt->rw_list, &cc->cc_rwctxts);
cc->cc_sqecount += ret;
......
......@@ -317,7 +317,7 @@ int svc_rdma_send(struct svcxprt_rdma *rdma, struct svc_rdma_send_ctxt *ctxt)
/* If the SQ is full, wait until an SQ entry is available */
while (1) {
if ((atomic_dec_return(&rdma->sc_sq_avail) < 0)) {
atomic_inc(&rdma_stat_sq_starve);
percpu_counter_inc(&svcrdma_stat_sq_starve);
trace_svcrdma_sq_full(rdma);
atomic_inc(&rdma->sc_sq_avail);
wait_event(rdma->sc_send_wait,
......