Commit 233607db authored by Trond Myklebust

Merge branch 'devel'

parents 3dc50637 b48633bd
......@@ -1664,30 +1664,6 @@ config NFS_V4
If unsure, say N.
config NFS_DIRECTIO
bool "Allow direct I/O on NFS files"
depends on NFS_FS
help
This option enables applications to perform uncached I/O on files
in NFS file systems using the O_DIRECT open() flag. When O_DIRECT
is set for a file, its data is not cached in the system's page
cache. Data is moved to and from user-level application buffers
directly. Unlike local disk-based file systems, NFS O_DIRECT has
no alignment restrictions.
Unless your program is designed to use O_DIRECT properly, you are
much better off allowing the NFS client to manage data caching for
you. Misusing O_DIRECT can cause poor server performance or network
storms. This kernel build option defaults OFF to avoid exposing
system administrators unwittingly to a potentially hazardous
feature.
For more details on NFS O_DIRECT, see fs/nfs/direct.c.
If unsure, say N. This reduces the size of the NFS client, and
causes open() to return EINVAL if a file residing in NFS is
opened with the O_DIRECT flag.
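
As an aside, here is a minimal user-space sketch of the uncached I/O pattern this (now-removed) help text describes; the path and buffer size are illustrative only:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
            /* O_DIRECT bypasses the NFS client's page cache entirely */
            int fd = open("/mnt/nfs/data.bin", O_RDWR | O_DIRECT);
            if (fd < 0)
                    return 1;

            /* NFS O_DIRECT has no alignment restrictions, but an aligned
             * buffer keeps the example valid on local file systems too. */
            void *buf;
            if (posix_memalign(&buf, 4096, 4096)) {
                    close(fd);
                    return 1;
            }

            /* data moves directly between the server and this buffer */
            ssize_t n = pread(fd, buf, 4096, 0);

            free(buf);
            close(fd);
            return n < 0;
    }
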
config NFSD
tristate "NFS server support"
depends on INET
......@@ -1808,15 +1784,33 @@ config SUNRPC_XPRT_RDMA
tristate
depends on SUNRPC && INFINIBAND && EXPERIMENTAL
default SUNRPC && INFINIBAND
help
This option enables an RPC client transport capability that
allows the NFS client to mount servers via an RDMA-enabled
transport.
To compile RPC client RDMA transport support as a module,
choose M here: the module will be called xprtrdma.
If unsure, say N.
config SUNRPC_BIND34
bool "Support for rpcbind versions 3 & 4 (EXPERIMENTAL)"
depends on SUNRPC && EXPERIMENTAL
default n
help
Provides kernel support for querying rpcbind servers via versions 3
and 4 of the rpcbind protocol. The kernel automatically falls back
to version 2 if a remote rpcbind service does not support versions
3 or 4.
RPC requests over IPv6 networks require support for larger
addresses when performing an RPC bind. Sun added support for
IPv6 addressing by creating two new versions of the rpcbind
protocol (RFC 1833).
This option enables support in the kernel RPC client for
querying rpcbind servers via versions 3 and 4 of the rpcbind
protocol. The kernel automatically falls back to version 2
if a remote rpcbind service does not support versions 3 or 4.
By themselves, these new versions do not provide support for
RPC over IPv6, but the new protocol versions are necessary to
support it.
If unsure, say N to get traditional behavior (version 2 rpcbind
requests only).
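
(For context, and not part of the patch: rpcbind versions 3 and 4 return transport endpoints as universal address strings rather than the bare port number used by version 2, for example "192.0.2.5.0.111" for IPv4 port 111 or "::1.0.111" for IPv6, which is why the newer protocol versions are a prerequisite for RPC over IPv6.)
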
......@@ -1830,12 +1824,13 @@ config RPCSEC_GSS_KRB5
select CRYPTO_DES
select CRYPTO_CBC
help
Provides for secure RPC calls by means of a gss-api
mechanism based on Kerberos V5. This is required for
NFSv4.
Choose Y here to enable Secure RPC using the Kerberos version 5
GSS-API mechanism (RFC 1964).
Note: Requires an auxiliary userspace daemon which may be found on
http://www.citi.umich.edu/projects/nfsv4/
Secure RPC calls with Kerberos require an auxiliary user-space
daemon which may be found in the Linux nfs-utils package
available from http://linux-nfs.org/. In addition, user-space
Kerberos support should be installed.
If unsure, say N.
......@@ -1849,11 +1844,12 @@ config RPCSEC_GSS_SPKM3
select CRYPTO_CAST5
select CRYPTO_CBC
help
Provides for secure RPC calls by means of a gss-api
mechanism based on the SPKM3 public-key mechanism.
Choose Y here to enable Secure RPC using the SPKM3 public key
GSS-API mechanism (RFC 2025).
Note: Requires an auxiliary userspace daemon which may be found on
http://www.citi.umich.edu/projects/nfsv4/
Secure RPC calls with SPKM3 require an auxiliary userspace
daemon which may be found in the Linux nfs-utils package
available from http://linux-nfs.org/.
If unsure, say N.
......
......@@ -42,9 +42,10 @@ static struct nsm_handle * nsm_find(const struct sockaddr_in *sin,
/*
* Common host lookup routine for server & client
*/
static struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
int proto, int version, const char *hostname,
static struct nlm_host *nlm_lookup_host(int server,
const struct sockaddr_in *sin,
int proto, u32 version,
const char *hostname,
unsigned int hostname_len,
const struct sockaddr_in *ssin)
{
......@@ -55,7 +56,7 @@ nlm_lookup_host(int server, const struct sockaddr_in *sin,
int hash;
dprintk("lockd: nlm_lookup_host("NIPQUAD_FMT"->"NIPQUAD_FMT
", p=%d, v=%d, my role=%s, name=%.*s)\n",
", p=%d, v=%u, my role=%s, name=%.*s)\n",
NIPQUAD(ssin->sin_addr.s_addr),
NIPQUAD(sin->sin_addr.s_addr), proto, version,
server? "server" : "client",
......@@ -175,9 +176,10 @@ nlm_destroy_host(struct nlm_host *host)
/*
* Find an NLM server handle in the cache. If there is none, create it.
*/
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
const char *hostname, unsigned int hostname_len)
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin,
int proto, u32 version,
const char *hostname,
unsigned int hostname_len)
{
struct sockaddr_in ssin = {0};
......
......@@ -18,6 +18,8 @@
#define NLMDBG_FACILITY NLMDBG_MONITOR
#define XDR_ADDRBUF_LEN (20)
static struct rpc_clnt * nsm_create(void);
static struct rpc_program nsm_program;
......@@ -147,28 +149,55 @@ nsm_create(void)
/*
* XDR functions for NSM.
*
* See http://www.opengroup.org/ for details on the Network
* Status Monitor wire protocol.
*/
static __be32 *
xdr_encode_common(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
static __be32 *xdr_encode_nsm_string(__be32 *p, char *string)
{
char buffer[20], *name;
size_t len = strlen(string);
if (len > SM_MAXSTRLEN)
len = SM_MAXSTRLEN;
return xdr_encode_opaque(p, string, len);
}
/*
* Use the dotted-quad IP address of the remote host as
* identifier. Linux statd always looks up the canonical
* hostname first for whatever remote hostname it receives,
* so this works alright.
/*
* "mon_name" specifies the host to be monitored.
*
* Linux uses a text version of the IP address of the remote
* host as the host identifier (the "mon_name" argument).
*
* Linux statd always looks up the canonical hostname first for
* whatever remote hostname it receives, so this works alright.
*/
if (nsm_use_hostnames) {
name = argp->mon_name;
} else {
sprintf(buffer, "%u.%u.%u.%u", NIPQUAD(argp->addr));
static __be32 *xdr_encode_mon_name(__be32 *p, struct nsm_args *argp)
{
char buffer[XDR_ADDRBUF_LEN + 1];
char *name = argp->mon_name;
if (!nsm_use_hostnames) {
snprintf(buffer, XDR_ADDRBUF_LEN,
NIPQUAD_FMT, NIPQUAD(argp->addr));
name = buffer;
}
if (!(p = xdr_encode_string(p, name))
|| !(p = xdr_encode_string(p, utsname()->nodename)))
return xdr_encode_nsm_string(p, name);
}
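
(Illustration only, not part of the patch: with nsm_use_hostnames disabled, a peer at, say, 192.0.2.5 is encoded as the opaque string "192.0.2.5", that is, a four-byte length word of 9 followed by the nine characters padded to the next four-byte XDR boundary, for 16 bytes on the wire in total.)
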
/*
* The "my_id" argument specifies the hostname and RPC procedure
* to be called when the status manager receives notification
* (via the SM_NOTIFY call) that the state of host "mon_name"
* has changed.
*/
static __be32 *xdr_encode_my_id(__be32 *p, struct nsm_args *argp)
{
p = xdr_encode_nsm_string(p, utsname()->nodename);
if (!p)
return ERR_PTR(-EIO);
*p++ = htonl(argp->prog);
*p++ = htonl(argp->vers);
*p++ = htonl(argp->proc);
......@@ -176,18 +205,48 @@ xdr_encode_common(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
return p;
}
static int
xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
/*
* The "mon_id" argument specifies the non-private arguments
* of an SM_MON or SM_UNMON call.
*/
static __be32 *xdr_encode_mon_id(__be32 *p, struct nsm_args *argp)
{
p = xdr_encode_common(rqstp, p, argp);
if (IS_ERR(p))
return PTR_ERR(p);
p = xdr_encode_mon_name(p, argp);
if (!p)
return ERR_PTR(-EIO);
return xdr_encode_my_id(p, argp);
}
/* Surprise - there may even be room for an IPv6 address now */
/*
* The "priv" argument may contain private information required
* by the SM_MON call. This information will be supplied in the
* SM_NOTIFY call.
*
* Linux provides the raw IP address of the monitored host,
* left in network byte order.
*/
static __be32 *xdr_encode_priv(__be32 *p, struct nsm_args *argp)
{
*p++ = argp->addr;
*p++ = 0;
*p++ = 0;
*p++ = 0;
return p;
}
static int
xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
{
p = xdr_encode_mon_id(p, argp);
if (IS_ERR(p))
return PTR_ERR(p);
p = xdr_encode_priv(p, argp);
if (IS_ERR(p))
return PTR_ERR(p);
rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
return 0;
}
......@@ -195,7 +254,7 @@ xdr_encode_mon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
static int
xdr_encode_unmon(struct rpc_rqst *rqstp, __be32 *p, struct nsm_args *argp)
{
p = xdr_encode_common(rqstp, p, argp);
p = xdr_encode_mon_id(p, argp);
if (IS_ERR(p))
return PTR_ERR(p);
rqstp->rq_slen = xdr_adjust_iovec(rqstp->rq_svec, p);
......@@ -220,9 +279,11 @@ xdr_decode_stat(struct rpc_rqst *rqstp, __be32 *p, struct nsm_res *resp)
}
#define SM_my_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_my_id_sz (3+1+SM_my_name_sz)
#define SM_mon_id_sz (1+XDR_QUADLEN(20)+SM_my_id_sz)
#define SM_mon_sz (SM_mon_id_sz+4)
#define SM_my_id_sz (SM_my_name_sz+3)
#define SM_mon_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN))
#define SM_mon_id_sz (SM_mon_name_sz+SM_my_id_sz)
#define SM_priv_sz (XDR_QUADLEN(SM_PRIV_SIZE))
#define SM_mon_sz (SM_mon_id_sz+SM_priv_sz)
#define SM_monres_sz 2
#define SM_unmonres_sz 1
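
(For reference, assuming the in-tree constants SM_MAXSTRLEN = 1024 and SM_PRIV_SIZE = 16: XDR_QUADLEN(1024) = 256, so SM_my_name_sz = 257, SM_my_id_sz = 260, SM_mon_name_sz = 257, SM_mon_id_sz = 517, SM_priv_sz = 4, and SM_mon_sz = 521 32-bit words, the worst-case size of an SM_MON argument buffer.)
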
......
......@@ -74,7 +74,9 @@ static const unsigned long nlm_timeout_min = 3;
static const unsigned long nlm_timeout_max = 20;
static const int nlm_port_min = 0, nlm_port_max = 65535;
#ifdef CONFIG_SYSCTL
static struct ctl_table_header * nlm_sysctl_table;
#endif
static unsigned long get_lockd_grace_period(void)
{
......@@ -359,6 +361,8 @@ lockd_down(void)
}
EXPORT_SYMBOL(lockd_down);
#ifdef CONFIG_SYSCTL
/*
* Sysctl parameters (same as module parameters, different interface).
*/
......@@ -443,6 +447,8 @@ static ctl_table nlm_sysctl_root[] = {
{ .ctl_name = 0 }
};
#endif /* CONFIG_SYSCTL */
/*
* Module (and sysfs) parameters.
*/
......@@ -516,15 +522,21 @@ module_param(nsm_use_hostnames, bool, 0644);
static int __init init_nlm(void)
{
#ifdef CONFIG_SYSCTL
nlm_sysctl_table = register_sysctl_table(nlm_sysctl_root);
return nlm_sysctl_table ? 0 : -ENOMEM;
#else
return 0;
#endif
}
static void __exit exit_nlm(void)
{
/* FIXME: delete all NLM clients */
nlm_shutdown_hosts();
#ifdef CONFIG_SYSCTL
unregister_sysctl_table(nlm_sysctl_table);
#endif
}
module_init(init_nlm);
......
......@@ -5,7 +5,7 @@
obj-$(CONFIG_NFS_FS) += nfs.o
nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \
pagelist.o proc.o read.o symlink.o unlink.o \
direct.o pagelist.o proc.o read.o symlink.o unlink.o \
write.o namespace.o mount_clnt.o
nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
......@@ -14,5 +14,4 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
......@@ -112,6 +112,7 @@ struct nfs_client_initdata {
static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init)
{
struct nfs_client *clp;
struct rpc_cred *cred;
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL)
goto error_0;
......@@ -150,6 +151,9 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
#endif
cred = rpc_lookup_machine_cred();
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
return clp;
......@@ -170,6 +174,8 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
BUG_ON(!RB_EMPTY_ROOT(&clp->cl_state_owners));
if (__test_and_clear_bit(NFS_CS_IDMAP, &clp->cl_res_state))
nfs_idmap_delete(clp);
rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
#endif
}
......@@ -189,6 +195,9 @@ static void nfs_free_client(struct nfs_client *clp)
if (__test_and_clear_bit(NFS_CS_CALLBACK, &clp->cl_res_state))
nfs_callback_down();
if (clp->cl_machine_cred != NULL)
put_rpccred(clp->cl_machine_cred);
kfree(clp->cl_hostname);
kfree(clp);
......@@ -680,10 +689,22 @@ static int nfs_init_server(struct nfs_server *server,
if (error < 0)
goto error;
server->port = data->nfs_server.port;
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
if (error < 0)
goto error;
/* Preserve the values of mount_server-related mount options */
if (data->mount_server.addrlen) {
memcpy(&server->mountd_address, &data->mount_server.address,
data->mount_server.addrlen);
server->mountd_addrlen = data->mount_server.addrlen;
}
server->mountd_version = data->mount_server.version;
server->mountd_port = data->mount_server.port;
server->mountd_protocol = data->mount_server.protocol;
server->namelen = data->namlen;
/* Create a client RPC handle for the NFSv3 ACL management interface */
nfs_init_server_aclclient(server);
......@@ -1062,6 +1083,8 @@ static int nfs4_init_server(struct nfs_server *server,
server->acdirmin = data->acdirmin * HZ;
server->acdirmax = data->acdirmax * HZ;
server->port = data->nfs_server.port;
error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
error:
......
......@@ -1967,7 +1967,7 @@ int nfs_permission(struct inode *inode, int mask, struct nameidata *nd)
if (!NFS_PROTO(inode)->access)
goto out_notsup;
cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
cred = rpc_lookup_cred();
if (!IS_ERR(cred)) {
res = nfs_do_access(inode, cred, mask);
put_rpccred(cred);
......
......@@ -229,14 +229,20 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
if (nfs_readpage_result(task, data) != 0)
return;
nfs_readpage_result(task, data);
}
static void nfs_direct_read_release(void *calldata)
{
struct nfs_read_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
int status = data->task.tk_status;
spin_lock(&dreq->lock);
if (unlikely(task->tk_status < 0)) {
dreq->error = task->tk_status;
if (unlikely(status < 0)) {
dreq->error = status;
spin_unlock(&dreq->lock);
} else {
dreq->count += data->res.count;
......@@ -249,11 +255,12 @@ static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
if (put_dreq(dreq))
nfs_direct_complete(dreq);
nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_direct_ops = {
.rpc_call_done = nfs_direct_read_result,
.rpc_release = nfs_readdata_release,
.rpc_release = nfs_direct_read_release,
};
/*
......@@ -280,6 +287,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_read_direct_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
unsigned int pgbase;
......@@ -323,7 +331,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
data->args.context = get_nfs_open_context(ctx);
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
......@@ -339,7 +347,8 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->read_setup(data, &msg);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
if (IS_ERR(task))
break;
rpc_put_task(task);
dprintk("NFS: %5u initiated direct read call "
......@@ -446,6 +455,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.callback_ops = &nfs_write_direct_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
......@@ -499,27 +509,34 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
/* Call the NFS version-specific code */
if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
return;
if (unlikely(task->tk_status < 0)) {
NFS_PROTO(data->inode)->commit_done(task, data);
}
static void nfs_direct_commit_release(void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
int status = data->task.tk_status;
if (status < 0) {
dprintk("NFS: %5u commit failed with error %d.\n",
task->tk_pid, task->tk_status);
data->task.tk_pid, status);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
nfs_direct_write_complete(dreq, data->inode);
nfs_commitdata_release(calldata);
}
static const struct rpc_call_ops nfs_commit_direct_ops = {
.rpc_call_done = nfs_direct_commit_result,
.rpc_release = nfs_commit_release,
.rpc_release = nfs_direct_commit_release,
};
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
......@@ -537,6 +554,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
.rpc_message = &msg,
.callback_ops = &nfs_commit_direct_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
......@@ -546,6 +564,7 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->args.fh = NFS_FH(data->inode);
data->args.offset = 0;
data->args.count = 0;
data->args.context = get_nfs_open_context(dreq->ctx);
data->res.count = 0;
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
......@@ -585,7 +604,7 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
{
dreq->commit_data = nfs_commit_alloc();
dreq->commit_data = nfs_commitdata_alloc();
if (dreq->commit_data != NULL)
dreq->commit_data->req = (struct nfs_page *) dreq;
}
......@@ -606,11 +625,20 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
int status = task->tk_status;
if (nfs_writeback_done(task, data) != 0)
return;
}
/*
* NB: Return the value of the first error return code. Subsequent
* errors after the first one are ignored.
*/
static void nfs_direct_write_release(void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
int status = data->task.tk_status;
spin_lock(&dreq->lock);
......@@ -632,23 +660,13 @@ static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
break;
case NFS_ODIRECT_DO_COMMIT:
if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
dprintk("NFS: %5u write verify failed\n", task->tk_pid);
dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
}
}
}
out_unlock:
spin_unlock(&dreq->lock);
}
/*
* NB: Return the value of the first error return code. Subsequent
* errors after the first one are ignored.
*/
static void nfs_direct_write_release(void *calldata)
{
struct nfs_write_data *data = calldata;
struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
if (put_dreq(dreq))
nfs_direct_write_complete(dreq, data->inode);
......@@ -682,6 +700,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_write_direct_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
size_t wsize = NFS_SERVER(inode)->wsize;
......@@ -728,7 +747,7 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
data->args.context = get_nfs_open_context(ctx);
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
......@@ -745,7 +764,8 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->write_setup(data, &msg);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
if (IS_ERR(task))
break;
rpc_put_task(task);
dprintk("NFS: %5u initiated direct write call "
......
......@@ -238,10 +238,8 @@ nfs_file_read(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
size_t count = iov_length(iov, nr_segs);
#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_read(iocb, iov, nr_segs, pos);
#endif
dfprintk(VFS, "nfs: read(%s/%s, %lu@%lu)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
......@@ -387,9 +385,7 @@ const struct address_space_operations nfs_file_aops = {
.write_end = nfs_write_end,
.invalidatepage = nfs_invalidate_page,
.releasepage = nfs_release_page,
#ifdef CONFIG_NFS_DIRECTIO
.direct_IO = nfs_direct_IO,
#endif
.launder_page = nfs_launder_page,
};
......@@ -447,10 +443,8 @@ static ssize_t nfs_file_write(struct kiocb *iocb, const struct iovec *iov,
ssize_t result;
size_t count = iov_length(iov, nr_segs);
#ifdef CONFIG_NFS_DIRECTIO
if (iocb->ki_filp->f_flags & O_DIRECT)
return nfs_file_direct_write(iocb, iov, nr_segs, pos);
#endif
dfprintk(VFS, "nfs: write(%s/%s(%ld), %lu@%Ld)\n",
dentry->d_parent->d_name.name, dentry->d_name.name,
......@@ -576,17 +570,9 @@ static int do_setlk(struct file *filp, int cmd, struct file_lock *fl)
lock_kernel();
/* Use local locking if mounted with "-onolock" */
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM)) {
if (!(NFS_SERVER(inode)->flags & NFS_MOUNT_NONLM))
status = NFS_PROTO(inode)->lock(filp, cmd, fl);
/* If we were signalled we still need to ensure that
* we clean up any state on the server. We therefore
* record the lock call as having succeeded in order to
* ensure that locks_remove_posix() cleans it out when
* the process exits.
*/
if (status == -EINTR || status == -ERESTARTSYS)
do_vfs_lock(filp, fl);
} else
else
status = do_vfs_lock(filp, fl);
unlock_kernel();
if (status < 0)
......
......@@ -523,8 +523,12 @@ struct nfs_open_context *get_nfs_open_context(struct nfs_open_context *ctx)
static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
{
struct inode *inode = ctx->path.dentry->d_inode;
struct inode *inode;
if (ctx == NULL)
return;
inode = ctx->path.dentry->d_inode;
if (!atomic_dec_and_lock(&ctx->count, &inode->i_lock))
return;
list_del(&ctx->list);
......@@ -610,7 +614,7 @@ int nfs_open(struct inode *inode, struct file *filp)
struct nfs_open_context *ctx;
struct rpc_cred *cred;
cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
ctx = alloc_nfs_open_context(filp->f_path.mnt, filp->f_path.dentry, cred);
......@@ -1218,6 +1222,36 @@ static void nfs_destroy_inodecache(void)
kmem_cache_destroy(nfs_inode_cachep);
}
struct workqueue_struct *nfsiod_workqueue;
/*
* start up the nfsiod workqueue
*/
static int nfsiod_start(void)
{
struct workqueue_struct *wq;
dprintk("RPC: creating workqueue nfsiod\n");
wq = create_singlethread_workqueue("nfsiod");
if (wq == NULL)
return -ENOMEM;
nfsiod_workqueue = wq;
return 0;
}
/*
* Destroy the nfsiod workqueue
*/
static void nfsiod_stop(void)
{
struct workqueue_struct *wq;
wq = nfsiod_workqueue;
if (wq == NULL)
return;
nfsiod_workqueue = NULL;
destroy_workqueue(wq);
}
/*
* Initialize NFS
*/
......@@ -1225,6 +1259,10 @@ static int __init init_nfs_fs(void)
{
int err;
err = nfsiod_start();
if (err)
goto out6;
err = nfs_fs_proc_init();
if (err)
goto out5;
......@@ -1271,6 +1309,8 @@ static int __init init_nfs_fs(void)
out4:
nfs_fs_proc_exit();
out5:
nfsiod_stop();
out6:
return err;
}
......@@ -1286,6 +1326,7 @@ static void __exit exit_nfs_fs(void)
#endif
unregister_nfs_fs();
nfs_fs_proc_exit();
nfsiod_stop();
}
/* Not quite true; I just maintain it */
......
......@@ -46,9 +46,9 @@ struct nfs_parsed_mount_data {
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
unsigned int version;
u32 version;
unsigned short port;
int protocol;
unsigned short protocol;
} mount_server;
struct {
......@@ -56,7 +56,8 @@ struct nfs_parsed_mount_data {
size_t addrlen;
char *hostname;
char *export_path;
int protocol;
unsigned short port;
unsigned short protocol;
} nfs_server;
struct security_mnt_opts lsm_opts;
......@@ -115,13 +116,8 @@ extern void nfs_destroy_readpagecache(void);
extern int __init nfs_init_writepagecache(void);
extern void nfs_destroy_writepagecache(void);
#ifdef CONFIG_NFS_DIRECTIO
extern int __init nfs_init_directcache(void);
extern void nfs_destroy_directcache(void);
#else
#define nfs_init_directcache() (0)
#define nfs_destroy_directcache() do {} while(0)
#endif
/* nfs2xdr.c */
extern int nfs_stat_to_errno(int);
......@@ -146,6 +142,7 @@ extern struct rpc_procinfo nfs4_procedures[];
extern int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask);
/* inode.c */
extern struct workqueue_struct *nfsiod_workqueue;
extern struct inode *nfs_alloc_inode(struct super_block *sb);
extern void nfs_destroy_inode(struct inode *);
extern int nfs_write_inode(struct inode *,int);
......
......@@ -20,7 +20,7 @@
static void nfs_expire_automounts(struct work_struct *work);
LIST_HEAD(nfs_automount_list);
static LIST_HEAD(nfs_automount_list);
static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
int nfs_mountpoint_expiry_timeout = 500 * HZ;
......
......@@ -267,7 +267,7 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
p = xdr_decode_fattr(p, res->fattr);
count = ntohl(*p++);
......@@ -428,11 +428,11 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
size_t hdrlen;
unsigned int pglen, recvd;
u32 len;
int status, nr;
int status, nr = 0;
__be32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
......@@ -452,7 +452,12 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
kaddr = p = kmap_atomic(*page, KM_USER0);
end = (__be32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
/* Make sure the packet actually has a value_follows and EOF entry */
if ((entry + 1) > end)
goto short_pkt;
for (; *p++; nr++) {
if (p + 2 > end)
goto short_pkt;
p++; /* fileid */
......@@ -467,18 +472,32 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
goto short_pkt;
entry = p;
}
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
/*
* Apparently some server sends responses that are a valid size, but
* contain no entries, and have value_follows==0 and EOF==0. For
* those, just set the EOF marker.
*/
if (!nr && entry[1] == 0) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
out:
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
/*
* When we get a short packet there are 2 possibilities. We can
* return an error, or fix up the response to look like a valid
* response and return what we have so far. If there are no
* entries and the packet was short, then return -EIO. If there
* are valid entries in the response, return them and pretend that
* the call was successful, but incomplete. The caller can retry the
* readdir starting at the last cookie.
*/
entry[0] = entry[1] = 0;
/* truncate listing ? */
if (!nr) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
if (!nr)
nr = -errno_NFSERR_IO;
goto out;
err_unmap:
nr = -errno_NFSERR_IO;
......@@ -518,7 +537,7 @@ nfs_xdr_stat(struct rpc_rqst *req, __be32 *p, void *dummy)
int status;
if ((status = ntohl(*p++)) != 0)
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
return status;
}
......@@ -532,7 +551,7 @@ nfs_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
xdr_decode_fattr(p, fattr);
return 0;
}
......@@ -547,7 +566,7 @@ nfs_xdr_diropres(struct rpc_rqst *req, __be32 *p, struct nfs_diropok *res)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
p = xdr_decode_fhandle(p, res->fh);
xdr_decode_fattr(p, res->fattr);
return 0;
......@@ -585,7 +604,7 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
if (len >= rcvbuf->page_len) {
......@@ -634,7 +653,7 @@ nfs_xdr_statfsres(struct rpc_rqst *req, __be32 *p, struct nfs2_fsstat *res)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->tsize = ntohl(*p++);
res->bsize = ntohl(*p++);
......@@ -653,39 +672,39 @@ static struct {
int errno;
} nfs_errtbl[] = {
{ NFS_OK, 0 },
{ NFSERR_PERM, EPERM },
{ NFSERR_NOENT, ENOENT },
{ NFSERR_IO, errno_NFSERR_IO },
{ NFSERR_NXIO, ENXIO },
/* { NFSERR_EAGAIN, EAGAIN }, */
{ NFSERR_ACCES, EACCES },
{ NFSERR_EXIST, EEXIST },
{ NFSERR_XDEV, EXDEV },
{ NFSERR_NODEV, ENODEV },
{ NFSERR_NOTDIR, ENOTDIR },
{ NFSERR_ISDIR, EISDIR },
{ NFSERR_INVAL, EINVAL },
{ NFSERR_FBIG, EFBIG },
{ NFSERR_NOSPC, ENOSPC },
{ NFSERR_ROFS, EROFS },
{ NFSERR_MLINK, EMLINK },
{ NFSERR_NAMETOOLONG, ENAMETOOLONG },
{ NFSERR_NOTEMPTY, ENOTEMPTY },
{ NFSERR_DQUOT, EDQUOT },
{ NFSERR_STALE, ESTALE },
{ NFSERR_REMOTE, EREMOTE },
{ NFSERR_PERM, -EPERM },
{ NFSERR_NOENT, -ENOENT },
{ NFSERR_IO, -errno_NFSERR_IO},
{ NFSERR_NXIO, -ENXIO },
/* { NFSERR_EAGAIN, -EAGAIN }, */
{ NFSERR_ACCES, -EACCES },
{ NFSERR_EXIST, -EEXIST },
{ NFSERR_XDEV, -EXDEV },
{ NFSERR_NODEV, -ENODEV },
{ NFSERR_NOTDIR, -ENOTDIR },
{ NFSERR_ISDIR, -EISDIR },
{ NFSERR_INVAL, -EINVAL },
{ NFSERR_FBIG, -EFBIG },
{ NFSERR_NOSPC, -ENOSPC },
{ NFSERR_ROFS, -EROFS },
{ NFSERR_MLINK, -EMLINK },
{ NFSERR_NAMETOOLONG, -ENAMETOOLONG },
{ NFSERR_NOTEMPTY, -ENOTEMPTY },
{ NFSERR_DQUOT, -EDQUOT },
{ NFSERR_STALE, -ESTALE },
{ NFSERR_REMOTE, -EREMOTE },
#ifdef EWFLUSH
{ NFSERR_WFLUSH, EWFLUSH },
{ NFSERR_WFLUSH, -EWFLUSH },
#endif
{ NFSERR_BADHANDLE, EBADHANDLE },
{ NFSERR_NOT_SYNC, ENOTSYNC },
{ NFSERR_BAD_COOKIE, EBADCOOKIE },
{ NFSERR_NOTSUPP, ENOTSUPP },
{ NFSERR_TOOSMALL, ETOOSMALL },
{ NFSERR_SERVERFAULT, ESERVERFAULT },
{ NFSERR_BADTYPE, EBADTYPE },
{ NFSERR_JUKEBOX, EJUKEBOX },
{ -1, EIO }
{ NFSERR_BADHANDLE, -EBADHANDLE },
{ NFSERR_NOT_SYNC, -ENOTSYNC },
{ NFSERR_BAD_COOKIE, -EBADCOOKIE },
{ NFSERR_NOTSUPP, -ENOTSUPP },
{ NFSERR_TOOSMALL, -ETOOSMALL },
{ NFSERR_SERVERFAULT, -ESERVERFAULT },
{ NFSERR_BADTYPE, -EBADTYPE },
{ NFSERR_JUKEBOX, -EJUKEBOX },
{ -1, -EIO }
};
/*
......
......@@ -508,14 +508,14 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
struct page **page;
size_t hdrlen;
u32 len, recvd, pglen;
int status, nr;
int status, nr = 0;
__be32 *entry, *end, *kaddr;
status = ntohl(*p++);
/* Decode post_op_attrs */
p = xdr_decode_post_op_attr(p, res->dir_attr);
if (status)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
/* Decode verifier cookie */
if (res->verf) {
res->verf[0] = *p++;
......@@ -542,7 +542,12 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
kaddr = p = kmap_atomic(*page, KM_USER0);
end = (__be32 *)((char *)p + pglen);
entry = p;
for (nr = 0; *p++; nr++) {
/* Make sure the packet actually has a value_follows and EOF entry */
if ((entry + 1) > end)
goto short_pkt;
for (; *p++; nr++) {
if (p + 3 > end)
goto short_pkt;
p += 2; /* inode # */
......@@ -581,18 +586,32 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
goto short_pkt;
entry = p;
}
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
/*
* Apparently some server sends responses that are a valid size, but
* contain no entries, and have value_follows==0 and EOF==0. For
* those, just set the EOF marker.
*/
if (!nr && entry[1] == 0) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
out:
kunmap_atomic(kaddr, KM_USER0);
return nr;
short_pkt:
/*
* When we get a short packet there are 2 possibilities. We can
* return an error, or fix up the response to look like a valid
* response and return what we have so far. If there are no
* entries and the packet was short, then return -EIO. If there
* are valid entries in the response, return them and pretend that
* the call was successful, but incomplete. The caller can retry the
* readdir starting at the last cookie.
*/
entry[0] = entry[1] = 0;
/* truncate listing ? */
if (!nr) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
if (!nr)
nr = -errno_NFSERR_IO;
goto out;
err_unmap:
nr = -errno_NFSERR_IO;
......@@ -732,7 +751,7 @@ nfs3_xdr_attrstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
xdr_decode_fattr(p, fattr);
return 0;
}
......@@ -747,7 +766,7 @@ nfs3_xdr_wccstat(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status;
if ((status = ntohl(*p++)))
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
xdr_decode_wcc_data(p, fattr);
return status;
}
......@@ -767,7 +786,7 @@ nfs3_xdr_lookupres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
int status;
if ((status = ntohl(*p++))) {
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
} else {
if (!(p = xdr_decode_fhandle(p, res->fh)))
return -errno_NFSERR_IO;
......@@ -787,7 +806,7 @@ nfs3_xdr_accessres(struct rpc_rqst *req, __be32 *p, struct nfs3_accessres *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->access = ntohl(*p++);
return 0;
}
......@@ -824,7 +843,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
p = xdr_decode_post_op_attr(p, fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
......@@ -872,7 +891,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
* in that it puts the count both in the res struct and in the
......@@ -922,7 +941,7 @@ nfs3_xdr_writeres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
p = xdr_decode_wcc_data(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->count = ntohl(*p++);
res->verf->committed = (enum nfs3_stable_how)ntohl(*p++);
......@@ -953,7 +972,7 @@ nfs3_xdr_createres(struct rpc_rqst *req, __be32 *p, struct nfs3_diropres *res)
res->fattr->valid = 0;
}
} else {
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
}
p = xdr_decode_wcc_data(p, res->dir_attr);
return status;
......@@ -968,7 +987,7 @@ nfs3_xdr_renameres(struct rpc_rqst *req, __be32 *p, struct nfs3_renameres *res)
int status;
if ((status = ntohl(*p++)) != 0)
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
p = xdr_decode_wcc_data(p, res->fromattr);
p = xdr_decode_wcc_data(p, res->toattr);
return status;
......@@ -983,7 +1002,7 @@ nfs3_xdr_linkres(struct rpc_rqst *req, __be32 *p, struct nfs3_linkres *res)
int status;
if ((status = ntohl(*p++)) != 0)
status = -nfs_stat_to_errno(status);
status = nfs_stat_to_errno(status);
p = xdr_decode_post_op_attr(p, res->fattr);
p = xdr_decode_wcc_data(p, res->dir_attr);
return status;
......@@ -1001,7 +1020,7 @@ nfs3_xdr_fsstatres(struct rpc_rqst *req, __be32 *p, struct nfs_fsstat *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
p = xdr_decode_hyper(p, &res->tbytes);
p = xdr_decode_hyper(p, &res->fbytes);
......@@ -1026,7 +1045,7 @@ nfs3_xdr_fsinfores(struct rpc_rqst *req, __be32 *p, struct nfs_fsinfo *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->rtmax = ntohl(*p++);
res->rtpref = ntohl(*p++);
......@@ -1054,7 +1073,7 @@ nfs3_xdr_pathconfres(struct rpc_rqst *req, __be32 *p, struct nfs_pathconf *res)
p = xdr_decode_post_op_attr(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->max_link = ntohl(*p++);
res->max_namelen = ntohl(*p++);
......@@ -1073,7 +1092,7 @@ nfs3_xdr_commitres(struct rpc_rqst *req, __be32 *p, struct nfs_writeres *res)
status = ntohl(*p++);
p = xdr_decode_wcc_data(p, res->fattr);
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
res->verf->verifier[0] = *p++;
res->verf->verifier[1] = *p++;
......@@ -1095,7 +1114,7 @@ nfs3_xdr_getaclres(struct rpc_rqst *req, __be32 *p,
int err, base;
if (status != 0)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
p = xdr_decode_post_op_attr(p, res->fattr);
res->mask = ntohl(*p++);
if (res->mask & ~(NFS_ACL|NFS_ACLCNT|NFS_DFACL|NFS_DFACLCNT))
......@@ -1122,7 +1141,7 @@ nfs3_xdr_setaclres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
int status = ntohl(*p++);
if (status)
return -nfs_stat_to_errno(status);
return nfs_stat_to_errno(status);
xdr_decode_post_op_attr(p, fattr);
return 0;
}
......
......@@ -51,6 +51,7 @@
#include "nfs4_fs.h"
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#define NFSDBG_FACILITY NFSDBG_PROC
......@@ -239,6 +240,8 @@ static void nfs4_init_opendata_res(struct nfs4_opendata *p)
{
p->o_res.f_attr = &p->f_attr;
p->o_res.dir_attr = &p->dir_attr;
p->o_res.seqid = p->o_arg.seqid;
p->c_res.seqid = p->c_arg.seqid;
p->o_res.server = p->o_arg.server;
nfs_fattr_init(&p->f_attr);
nfs_fattr_init(&p->dir_attr);
......@@ -729,7 +732,6 @@ static void nfs4_open_confirm_done(struct rpc_task *task, void *calldata)
renew_lease(data->o_res.server, data->timestamp);
data->rpc_done = 1;
}
nfs_increment_open_seqid(data->rpc_status, data->c_arg.seqid);
}
static void nfs4_open_confirm_release(void *calldata)
......@@ -773,6 +775,7 @@ static int _nfs4_proc_open_confirm(struct nfs4_opendata *data)
.rpc_message = &msg,
.callback_ops = &nfs4_open_confirm_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
......@@ -858,7 +861,6 @@ static void nfs4_open_done(struct rpc_task *task, void *calldata)
if (!(data->o_res.rflags & NFS4_OPEN_RESULT_CONFIRM))
nfs_confirm_seqid(&data->owner->so_seqid, 0);
}
nfs_increment_open_seqid(data->rpc_status, data->o_arg.seqid);
data->rpc_done = 1;
}
......@@ -910,6 +912,7 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
.rpc_message = &msg,
.callback_ops = &nfs4_open_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status;
......@@ -979,11 +982,8 @@ static int _nfs4_open_expired(struct nfs_open_context *ctx, struct nfs4_state *s
if (IS_ERR(opendata))
return PTR_ERR(opendata);
ret = nfs4_open_recover(opendata, state);
if (ret == -ESTALE) {
/* Invalidate the state owner so we don't ever use it again */
nfs4_drop_state_owner(state->owner);
if (ret == -ESTALE)
d_drop(ctx->path.dentry);
}
nfs4_opendata_put(opendata);
return ret;
}
......@@ -1226,7 +1226,6 @@ static void nfs4_close_done(struct rpc_task *task, void *data)
/* hmm. we are done with the inode, and in the process of freeing
* the state_owner. we keep this around to process errors
*/
nfs_increment_open_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
nfs_set_open_stateid(state, &calldata->res.stateid, 0);
......@@ -1315,6 +1314,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
.rpc_client = server->client,
.rpc_message = &msg,
.callback_ops = &nfs4_close_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int status = -ENOMEM;
......@@ -1332,6 +1332,7 @@ int nfs4_do_close(struct path *path, struct nfs4_state *state, int wait)
goto out_free_calldata;
calldata->arg.bitmask = server->attr_bitmask;
calldata->res.fattr = &calldata->fattr;
calldata->res.seqid = calldata->arg.seqid;
calldata->res.server = server;
calldata->path.mnt = mntget(path->mnt);
calldata->path.dentry = dget(path->dentry);
......@@ -1404,7 +1405,7 @@ nfs4_atomic_open(struct inode *dir, struct dentry *dentry, struct nameidata *nd)
BUG_ON(nd->intent.open.flags & O_CREAT);
}
cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return (struct dentry *)cred;
parent = dentry->d_parent;
......@@ -1439,7 +1440,7 @@ nfs4_open_revalidate(struct inode *dir, struct dentry *dentry, int openflags, st
struct rpc_cred *cred;
struct nfs4_state *state;
cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
state = nfs4_do_open(dir, &path, openflags, NULL, cred);
......@@ -1656,7 +1657,7 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
nfs_fattr_init(fattr);
cred = rpcauth_lookupcred(NFS_CLIENT(inode)->cl_auth, 0);
cred = rpc_lookup_cred();
if (IS_ERR(cred))
return PTR_ERR(cred);
......@@ -1892,7 +1893,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
struct rpc_cred *cred;
int status = 0;
cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
cred = rpc_lookup_cred();
if (IS_ERR(cred)) {
status = PTR_ERR(cred);
goto out;
......@@ -2761,10 +2762,10 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server)
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_STALE_STATEID:
case -NFS4ERR_EXPIRED:
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL, NULL);
rpc_sleep_on(&clp->cl_rpcwaitq, task, NULL);
nfs4_schedule_state_recovery(clp);
if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
rpc_wake_up_task(task);
rpc_wake_up_queued_task(&clp->cl_rpcwaitq, task);
task->tk_status = 0;
return -EAGAIN;
case -NFS4ERR_DELAY:
......@@ -2884,7 +2885,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program, unsigned short po
RPC_DISPLAY_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_PROTO),
cred->cr_ops->cr_name,
clp->cl_rpcclient->cl_auth->au_ops->au_name,
clp->cl_id_uniquifier);
setclientid.sc_netid_len = scnprintf(setclientid.sc_netid,
sizeof(setclientid.sc_netid),
......@@ -3158,6 +3159,7 @@ static struct nfs4_unlockdata *nfs4_alloc_unlockdata(struct file_lock *fl,
p->arg.fh = NFS_FH(inode);
p->arg.fl = &p->fl;
p->arg.seqid = seqid;
p->res.seqid = seqid;
p->arg.stateid = &lsp->ls_stateid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
......@@ -3183,7 +3185,6 @@ static void nfs4_locku_done(struct rpc_task *task, void *data)
if (RPC_ASSASSINATED(task))
return;
nfs_increment_lock_seqid(task->tk_status, calldata->arg.seqid);
switch (task->tk_status) {
case 0:
memcpy(calldata->lsp->ls_stateid.data,
......@@ -3235,6 +3236,7 @@ static struct rpc_task *nfs4_do_unlck(struct file_lock *fl,
.rpc_client = NFS_CLIENT(lsp->ls_state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_locku_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
......@@ -3261,6 +3263,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
struct nfs4_lock_state *lsp;
struct rpc_task *task;
int status = 0;
unsigned char fl_flags = request->fl_flags;
status = nfs4_set_lock_state(state, request);
/* Unlock _before_ we do the RPC call */
......@@ -3284,6 +3287,7 @@ static int nfs4_proc_unlck(struct nfs4_state *state, int cmd, struct file_lock *
status = nfs4_wait_for_completion_rpc_task(task);
rpc_put_task(task);
out:
request->fl_flags = fl_flags;
return status;
}
......@@ -3320,6 +3324,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
p->arg.lock_stateid = &lsp->ls_stateid;
p->arg.lock_owner.clientid = server->nfs_client->cl_clientid;
p->arg.lock_owner.id = lsp->ls_id.id;
p->res.lock_seqid = p->arg.lock_seqid;
p->lsp = lsp;
atomic_inc(&lsp->ls_count);
p->ctx = get_nfs_open_context(ctx);
......@@ -3346,6 +3351,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
return;
data->arg.open_stateid = &state->stateid;
data->arg.new_lock_owner = 1;
data->res.open_seqid = data->arg.open_seqid;
} else
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
......@@ -3363,7 +3369,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
if (RPC_ASSASSINATED(task))
goto out;
if (data->arg.new_lock_owner != 0) {
nfs_increment_open_seqid(data->rpc_status, data->arg.open_seqid);
if (data->rpc_status == 0)
nfs_confirm_seqid(&data->lsp->ls_seqid, 0);
else
......@@ -3375,7 +3380,6 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
data->lsp->ls_flags |= NFS_LOCK_INITIALIZED;
renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
}
nfs_increment_lock_seqid(data->rpc_status, data->arg.lock_seqid);
out:
dprintk("%s: done, ret = %d!\n", __FUNCTION__, data->rpc_status);
}
......@@ -3419,6 +3423,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
.rpc_client = NFS_CLIENT(state->inode),
.rpc_message = &msg,
.callback_ops = &nfs4_lock_ops,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC,
};
int ret;
......
......@@ -71,6 +71,29 @@ static int nfs4_init_client(struct nfs_client *clp, struct rpc_cred *cred)
return status;
}
static struct rpc_cred *nfs4_get_machine_cred(struct nfs_client *clp)
{
struct rpc_cred *cred = NULL;
spin_lock(&clp->cl_lock);
if (clp->cl_machine_cred != NULL)
cred = get_rpccred(clp->cl_machine_cred);
spin_unlock(&clp->cl_lock);
return cred;
}
static void nfs4_clear_machine_cred(struct nfs_client *clp)
{
struct rpc_cred *cred;
spin_lock(&clp->cl_lock);
cred = clp->cl_machine_cred;
clp->cl_machine_cred = NULL;
spin_unlock(&clp->cl_lock);
if (cred != NULL)
put_rpccred(cred);
}
struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
......@@ -91,13 +114,18 @@ static struct rpc_cred *nfs4_get_setclientid_cred(struct nfs_client *clp)
{
struct nfs4_state_owner *sp;
struct rb_node *pos;
struct rpc_cred *cred;
cred = nfs4_get_machine_cred(clp);
if (cred != NULL)
goto out;
pos = rb_first(&clp->cl_state_owners);
if (pos != NULL) {
sp = rb_entry(pos, struct nfs4_state_owner, so_client_node);
return get_rpccred(sp->so_cred);
cred = get_rpccred(sp->so_cred);
}
return NULL;
out:
return cred;
}
static void nfs_alloc_unique_id(struct rb_root *root, struct nfs_unique_id *new,
......@@ -292,8 +320,10 @@ struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct
spin_unlock(&clp->cl_lock);
if (sp == new)
get_rpccred(cred);
else
else {
rpc_destroy_wait_queue(&new->so_sequence.wait);
kfree(new);
}
return sp;
}
......@@ -310,6 +340,7 @@ void nfs4_put_state_owner(struct nfs4_state_owner *sp)
return;
nfs4_remove_state_owner(clp, sp);
spin_unlock(&clp->cl_lock);
rpc_destroy_wait_queue(&sp->so_sequence.wait);
put_rpccred(cred);
kfree(sp);
}
......@@ -529,6 +560,7 @@ static void nfs4_free_lock_state(struct nfs4_lock_state *lsp)
spin_lock(&clp->cl_lock);
nfs_free_unique_id(&clp->cl_lockowner_id, &lsp->ls_id);
spin_unlock(&clp->cl_lock);
rpc_destroy_wait_queue(&lsp->ls_sequence.wait);
kfree(lsp);
}
......@@ -731,7 +763,7 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
list_add_tail(&seqid->list, &sequence->list);
if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
goto unlock;
rpc_sleep_on(&sequence->wait, task, NULL, NULL);
rpc_sleep_on(&sequence->wait, task, NULL);
status = -EAGAIN;
unlock:
spin_unlock(&sequence->lock);
......@@ -920,10 +952,10 @@ static int reclaimer(void *ptr)
if (cred != NULL) {
/* Yes there are: try to renew the old lease */
status = nfs4_proc_renew(clp, cred);
put_rpccred(cred);
switch (status) {
case 0:
case -NFS4ERR_CB_PATH_DOWN:
put_rpccred(cred);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
......@@ -932,14 +964,19 @@ static int reclaimer(void *ptr)
} else {
/* "reboot" to ensure we clear all state on the server */
clp->cl_boot_time = CURRENT_TIME;
cred = nfs4_get_setclientid_cred(clp);
}
/* We're going to have to re-establish a clientid */
nfs4_state_mark_reclaim(clp);
status = -ENOENT;
cred = nfs4_get_setclientid_cred(clp);
if (cred != NULL) {
status = nfs4_init_client(clp, cred);
put_rpccred(cred);
/* Handle case where the user hasn't set up machine creds */
if (status == -EACCES && cred == clp->cl_machine_cred) {
nfs4_clear_machine_cred(clp);
goto restart_loop;
}
}
if (status)
goto out_error;
......
......@@ -110,7 +110,7 @@ static int nfs4_stat_to_errno(int);
#define decode_savefh_maxsz (op_decode_hdr_maxsz)
#define encode_restorefh_maxsz (op_encode_hdr_maxsz)
#define decode_restorefh_maxsz (op_decode_hdr_maxsz)
#define encode_fsinfo_maxsz (op_encode_hdr_maxsz + 2)
#define encode_fsinfo_maxsz (encode_getattr_maxsz)
#define decode_fsinfo_maxsz (op_decode_hdr_maxsz + 11)
#define encode_renew_maxsz (op_encode_hdr_maxsz + 3)
#define decode_renew_maxsz (op_decode_hdr_maxsz)
......@@ -1191,8 +1191,8 @@ static int encode_readdir(struct xdr_stream *xdr, const struct nfs4_readdir_arg
attrs[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
WRITE32(attrs[0] & readdir->bitmask[0]);
WRITE32(attrs[1] & readdir->bitmask[1]);
dprintk("%s: cookie = %Lu, verifier = 0x%x%x, bitmap = 0x%x%x\n",
__FUNCTION__,
dprintk("%s: cookie = %Lu, verifier = %08x:%08x, bitmap = %08x:%08x\n",
__func__,
(unsigned long long)readdir->cookie,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1],
......@@ -2241,7 +2241,7 @@ static int decode_op_hdr(struct xdr_stream *xdr, enum nfs_opnum4 expected)
}
READ32(nfserr);
if (nfserr != NFS_OK)
return -nfs4_stat_to_errno(nfserr);
return nfs4_stat_to_errno(nfserr);
return 0;
}
......@@ -2291,7 +2291,7 @@ static int decode_attr_supported(struct xdr_stream *xdr, uint32_t *bitmap, uint3
bitmap[0] &= ~FATTR4_WORD0_SUPPORTED_ATTRS;
} else
bitmask[0] = bitmask[1] = 0;
dprintk("%s: bitmask=0x%x%x\n", __FUNCTION__, bitmask[0], bitmask[1]);
dprintk("%s: bitmask=%08x:%08x\n", __func__, bitmask[0], bitmask[1]);
return 0;
}
......@@ -3005,6 +3005,8 @@ static int decode_close(struct xdr_stream *xdr, struct nfs_closeres *res)
int status;
status = decode_op_hdr(xdr, OP_CLOSE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
......@@ -3296,11 +3298,17 @@ static int decode_lock(struct xdr_stream *xdr, struct nfs_lock_res *res)
int status;
status = decode_op_hdr(xdr, OP_LOCK);
if (status == -EIO)
goto out;
if (status == 0) {
READ_BUF(NFS4_STATEID_SIZE);
COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
} else if (status == -NFS4ERR_DENIED)
return decode_lock_denied(xdr, NULL);
status = decode_lock_denied(xdr, NULL);
if (res->open_seqid != NULL)
nfs_increment_open_seqid(status, res->open_seqid);
nfs_increment_lock_seqid(status, res->lock_seqid);
out:
return status;
}
......@@ -3319,6 +3327,8 @@ static int decode_locku(struct xdr_stream *xdr, struct nfs_locku_res *res)
int status;
status = decode_op_hdr(xdr, OP_LOCKU);
if (status != -EIO)
nfs_increment_lock_seqid(status, res->seqid);
if (status == 0) {
READ_BUF(NFS4_STATEID_SIZE);
COPYMEM(res->stateid.data, NFS4_STATEID_SIZE);
......@@ -3384,6 +3394,8 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
int status;
status = decode_op_hdr(xdr, OP_OPEN);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
......@@ -3416,6 +3428,8 @@ static int decode_open_confirm(struct xdr_stream *xdr, struct nfs_open_confirmre
int status;
status = decode_op_hdr(xdr, OP_OPEN_CONFIRM);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
......@@ -3429,6 +3443,8 @@ static int decode_open_downgrade(struct xdr_stream *xdr, struct nfs_closeres *re
int status;
status = decode_op_hdr(xdr, OP_OPEN_DOWNGRADE);
if (status != -EIO)
nfs_increment_open_seqid(status, res->seqid);
if (status)
return status;
READ_BUF(NFS4_STATEID_SIZE);
......@@ -3481,7 +3497,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
size_t hdrlen;
u32 recvd, pglen = rcvbuf->page_len;
__be32 *end, *entry, *p, *kaddr;
unsigned int nr;
unsigned int nr = 0;
int status;
status = decode_op_hdr(xdr, OP_READDIR);
......@@ -3489,8 +3505,8 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
return status;
READ_BUF(8);
COPYMEM(readdir->verifier.data, 8);
dprintk("%s: verifier = 0x%x%x\n",
__FUNCTION__,
dprintk("%s: verifier = %08x:%08x\n",
__func__,
((u32 *)readdir->verifier.data)[0],
((u32 *)readdir->verifier.data)[1]);
......@@ -3505,7 +3521,12 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
kaddr = p = kmap_atomic(page, KM_USER0);
end = p + ((pglen + readdir->pgbase) >> 2);
entry = p;
for (nr = 0; *p++; nr++) {
/* Make sure the packet actually has a value_follows and EOF entry */
if ((entry + 1) > end)
goto short_pkt;
for (; *p++; nr++) {
u32 len, attrlen, xlen;
if (end - p < 3)
goto short_pkt;
......@@ -3532,19 +3553,31 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
p += attrlen; /* attributes */
entry = p;
}
if (!nr && (entry[0] != 0 || entry[1] == 0))
goto short_pkt;
/*
* Apparently some server sends responses that are a valid size, but
* contain no entries, and have value_follows==0 and EOF==0. For
* those, just set the EOF marker.
*/
if (!nr && entry[1] == 0) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
out:
kunmap_atomic(kaddr, KM_USER0);
return 0;
short_pkt:
/*
* When we get a short packet there are 2 possibilities. We can
* return an error, or fix up the response to look like a valid
* response and return what we have so far. If there are no
* entries and the packet was short, then return -EIO. If there
* are valid entries in the response, return them and pretend that
* the call was successful, but incomplete. The caller can retry the
* readdir starting at the last cookie.
*/
dprintk("%s: short packet at entry %d\n", __FUNCTION__, nr);
entry[0] = entry[1] = 0;
/* truncate listing ? */
if (!nr) {
dprintk("NFS: readdir reply truncated!\n");
entry[1] = 1;
}
if (nr)
goto out;
err_unmap:
kunmap_atomic(kaddr, KM_USER0);
......@@ -3727,7 +3760,7 @@ static int decode_setclientid(struct xdr_stream *xdr, struct nfs_client *clp)
READ_BUF(len);
return -NFSERR_CLID_INUSE;
} else
return -nfs4_stat_to_errno(nfserr);
return nfs4_stat_to_errno(nfserr);
return 0;
}
......@@ -4389,7 +4422,7 @@ static int nfs4_xdr_dec_fsinfo(struct rpc_rqst *req, __be32 *p, struct nfs_fsinf
if (!status)
status = decode_fsinfo(&xdr, fsinfo);
if (!status)
status = -nfs4_stat_to_errno(hdr.status);
status = nfs4_stat_to_errno(hdr.status);
return status;
}
......@@ -4479,7 +4512,7 @@ static int nfs4_xdr_dec_setclientid(struct rpc_rqst *req, __be32 *p,
if (!status)
status = decode_setclientid(&xdr, clp);
if (!status)
status = -nfs4_stat_to_errno(hdr.status);
status = nfs4_stat_to_errno(hdr.status);
return status;
}
......@@ -4501,7 +4534,7 @@ static int nfs4_xdr_dec_setclientid_confirm(struct rpc_rqst *req, __be32 *p, str
if (!status)
status = decode_fsinfo(&xdr, fsinfo);
if (!status)
status = -nfs4_stat_to_errno(hdr.status);
status = nfs4_stat_to_errno(hdr.status);
return status;
}
......@@ -4611,42 +4644,42 @@ static struct {
int errno;
} nfs_errtbl[] = {
{ NFS4_OK, 0 },
{ NFS4ERR_PERM, EPERM },
{ NFS4ERR_NOENT, ENOENT },
{ NFS4ERR_IO, errno_NFSERR_IO },
{ NFS4ERR_NXIO, ENXIO },
{ NFS4ERR_ACCESS, EACCES },
{ NFS4ERR_EXIST, EEXIST },
{ NFS4ERR_XDEV, EXDEV },
{ NFS4ERR_NOTDIR, ENOTDIR },
{ NFS4ERR_ISDIR, EISDIR },
{ NFS4ERR_INVAL, EINVAL },
{ NFS4ERR_FBIG, EFBIG },
{ NFS4ERR_NOSPC, ENOSPC },
{ NFS4ERR_ROFS, EROFS },
{ NFS4ERR_MLINK, EMLINK },
{ NFS4ERR_NAMETOOLONG, ENAMETOOLONG },
{ NFS4ERR_NOTEMPTY, ENOTEMPTY },
{ NFS4ERR_DQUOT, EDQUOT },
{ NFS4ERR_STALE, ESTALE },
{ NFS4ERR_BADHANDLE, EBADHANDLE },
{ NFS4ERR_BADOWNER, EINVAL },
{ NFS4ERR_BADNAME, EINVAL },
{ NFS4ERR_BAD_COOKIE, EBADCOOKIE },
{ NFS4ERR_NOTSUPP, ENOTSUPP },
{ NFS4ERR_TOOSMALL, ETOOSMALL },
{ NFS4ERR_SERVERFAULT, ESERVERFAULT },
{ NFS4ERR_BADTYPE, EBADTYPE },
{ NFS4ERR_LOCKED, EAGAIN },
{ NFS4ERR_RESOURCE, EREMOTEIO },
{ NFS4ERR_SYMLINK, ELOOP },
{ NFS4ERR_OP_ILLEGAL, EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, EDEADLK },
{ NFS4ERR_WRONGSEC, EPERM }, /* FIXME: this needs
{ NFS4ERR_PERM, -EPERM },
{ NFS4ERR_NOENT, -ENOENT },
{ NFS4ERR_IO, -errno_NFSERR_IO},
{ NFS4ERR_NXIO, -ENXIO },
{ NFS4ERR_ACCESS, -EACCES },
{ NFS4ERR_EXIST, -EEXIST },
{ NFS4ERR_XDEV, -EXDEV },
{ NFS4ERR_NOTDIR, -ENOTDIR },
{ NFS4ERR_ISDIR, -EISDIR },
{ NFS4ERR_INVAL, -EINVAL },
{ NFS4ERR_FBIG, -EFBIG },
{ NFS4ERR_NOSPC, -ENOSPC },
{ NFS4ERR_ROFS, -EROFS },
{ NFS4ERR_MLINK, -EMLINK },
{ NFS4ERR_NAMETOOLONG, -ENAMETOOLONG },
{ NFS4ERR_NOTEMPTY, -ENOTEMPTY },
{ NFS4ERR_DQUOT, -EDQUOT },
{ NFS4ERR_STALE, -ESTALE },
{ NFS4ERR_BADHANDLE, -EBADHANDLE },
{ NFS4ERR_BADOWNER, -EINVAL },
{ NFS4ERR_BADNAME, -EINVAL },
{ NFS4ERR_BAD_COOKIE, -EBADCOOKIE },
{ NFS4ERR_NOTSUPP, -ENOTSUPP },
{ NFS4ERR_TOOSMALL, -ETOOSMALL },
{ NFS4ERR_SERVERFAULT, -ESERVERFAULT },
{ NFS4ERR_BADTYPE, -EBADTYPE },
{ NFS4ERR_LOCKED, -EAGAIN },
{ NFS4ERR_RESOURCE, -EREMOTEIO },
{ NFS4ERR_SYMLINK, -ELOOP },
{ NFS4ERR_OP_ILLEGAL, -EOPNOTSUPP },
{ NFS4ERR_DEADLOCK, -EDEADLK },
{ NFS4ERR_WRONGSEC, -EPERM }, /* FIXME: this needs
* to be handled by a
* middle-layer.
*/
{ -1, EIO }
{ -1, -EIO }
};
/*
......@@ -4663,14 +4696,14 @@ nfs4_stat_to_errno(int stat)
}
if (stat <= 10000 || stat > 10100) {
/* The server is looney tunes. */
return ESERVERFAULT;
return -ESERVERFAULT;
}
/* If we cannot translate the error, the recovery routines should
* handle it.
* Note: remaining NFSv4 error codes have values > 10000, so should
* not conflict with native Linux error codes.
*/
return stat;
return -stat;
}
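The hunks above implement a single convention change: nfs4_stat_to_errno() now returns a negative errno itself, so its callers drop their own negation. A minimal illustration (not a literal excerpt from the patch):
/* before: the table held positive values, so callers negated the result */
status = -nfs4_stat_to_errno(hdr.status);
/* after: the table already stores -EPERM, -ENOENT, ..., so */
status = nfs4_stat_to_errno(hdr.status);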
#define PROC(proc, argtype, restype) \
......
......@@ -58,22 +58,19 @@ struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
return p;
}
static void nfs_readdata_rcu_free(struct rcu_head *head)
static void nfs_readdata_free(struct nfs_read_data *p)
{
struct nfs_read_data *p = container_of(head, struct nfs_read_data, task.u.tk_rcu);
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
}
static void nfs_readdata_free(struct nfs_read_data *rdata)
{
call_rcu_bh(&rdata->task.u.tk_rcu, nfs_readdata_rcu_free);
}
void nfs_readdata_release(void *data)
{
nfs_readdata_free(data);
struct nfs_read_data *rdata = data;
put_nfs_open_context(rdata->args.context);
nfs_readdata_free(rdata);
}
static
......@@ -156,7 +153,7 @@ static void nfs_readpage_release(struct nfs_page *req)
/*
* Set up the NFS read request struct
*/
static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
static int nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
const struct rpc_call_ops *call_ops,
unsigned int count, unsigned int offset)
{
......@@ -174,6 +171,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
.workqueue = nfsiod_workqueue,
.flags = RPC_TASK_ASYNC | swap_flags,
};
......@@ -186,7 +184,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
data->args.pgbase = req->wb_pgbase + offset;
data->args.pages = data->pagevec;
data->args.count = count;
data->args.context = req->wb_context;
data->args.context = get_nfs_open_context(req->wb_context);
data->res.fattr = &data->fattr;
data->res.count = count;
......@@ -204,8 +202,10 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
if (IS_ERR(task))
return PTR_ERR(task);
rpc_put_task(task);
return 0;
}
static void
......@@ -242,6 +242,7 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
size_t rsize = NFS_SERVER(inode)->rsize, nbytes;
unsigned int offset;
int requests = 0;
int ret = 0;
LIST_HEAD(list);
nfs_list_remove_request(req);
......@@ -253,7 +254,6 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
data = nfs_readdata_alloc(1);
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
list_add(&data->pages, &list);
requests++;
nbytes -= len;
......@@ -264,6 +264,8 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
offset = 0;
nbytes = count;
do {
int ret2;
data = list_entry(list.next, struct nfs_read_data, pages);
list_del_init(&data->pages);
......@@ -271,13 +273,15 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
if (nbytes < rsize)
rsize = nbytes;
nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
ret2 = nfs_read_rpcsetup(req, data, &nfs_read_partial_ops,
rsize, offset);
if (ret == 0)
ret = ret2;
offset += rsize;
nbytes -= rsize;
} while (nbytes != 0);
return 0;
return ret;
out_bad:
while (!list_empty(&list)) {
......@@ -295,12 +299,12 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
struct nfs_page *req;
struct page **pages;
struct nfs_read_data *data;
int ret = -ENOMEM;
data = nfs_readdata_alloc(npages);
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
pages = data->pagevec;
while (!list_empty(head)) {
req = nfs_list_entry(head->next);
......@@ -311,11 +315,10 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
}
req = nfs_list_entry(data->pages.next);
nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
return 0;
return nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
out_bad:
nfs_async_read_error(head);
return -ENOMEM;
return ret;
}
/*
......@@ -342,26 +345,25 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
return 0;
}
static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
{
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
if (resp->eof || resp->count == argp->count)
return 0;
return;
/* This is a short read! */
nfs_inc_stats(data->inode, NFSIOS_SHORTREAD);
/* Has the server at least made some progress? */
if (resp->count == 0)
return 0;
return;
/* Yes, so retry the read at the end of the data */
argp->offset += resp->count;
argp->pgbase += resp->count;
argp->count -= resp->count;
rpc_restart_call(task);
return -EAGAIN;
}
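As a worked example of the retry arithmetic above (numbers invented for illustration): a 16384-byte read that the server answers with only 4096 bytes is re-issued for the remainder rather than completed.
/* example only: argp->count = 16384, resp->count = 4096 */
argp->offset += resp->count;	/* file offset advances by 4096 */
argp->pgbase += resp->count;	/* so does the offset into the page vector */
argp->count -= resp->count;	/* 12288 bytes still outstanding */
rpc_restart_call(task);	/* resend instead of completing the task */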
/*
......@@ -370,29 +372,37 @@ static int nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data)
static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = calldata;
struct nfs_page *req = data->req;
struct page *page = req->wb_page;
if (nfs_readpage_result(task, data) != 0)
return;
if (task->tk_status < 0)
return;
if (likely(task->tk_status >= 0)) {
nfs_readpage_truncate_uninitialised_page(data);
if (nfs_readpage_retry(task, data) != 0)
return;
}
if (unlikely(task->tk_status < 0))
nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_partial(void *calldata)
{
struct nfs_read_data *data = calldata;
struct nfs_page *req = data->req;
struct page *page = req->wb_page;
int status = data->task.tk_status;
if (status < 0)
SetPageError(page);
if (atomic_dec_and_test(&req->wb_complete)) {
if (!PageError(page))
SetPageUptodate(page);
nfs_readpage_release(req);
}
nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_partial_ops = {
.rpc_call_done = nfs_readpage_result_partial,
.rpc_release = nfs_readdata_release,
.rpc_release = nfs_readpage_release_partial,
};
static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
......@@ -427,29 +437,35 @@ static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
if (nfs_readpage_result(task, data) != 0)
return;
if (task->tk_status < 0)
return;
/*
* Note: nfs_readpage_retry may change the values of
* data->args. In the multi-page case, we therefore need
* to ensure that we call nfs_readpage_set_pages_uptodate()
* first.
*/
if (likely(task->tk_status >= 0)) {
nfs_readpage_truncate_uninitialised_page(data);
nfs_readpage_set_pages_uptodate(data);
if (nfs_readpage_retry(task, data) != 0)
return;
}
nfs_readpage_retry(task, data);
}
static void nfs_readpage_release_full(void *calldata)
{
struct nfs_read_data *data = calldata;
while (!list_empty(&data->pages)) {
struct nfs_page *req = nfs_list_entry(data->pages.next);
nfs_list_remove_request(req);
nfs_readpage_release(req);
}
nfs_readdata_release(calldata);
}
static const struct rpc_call_ops nfs_read_full_ops = {
.rpc_call_done = nfs_readpage_result_full,
.rpc_release = nfs_readdata_release,
.rpc_release = nfs_readpage_release_full,
};
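Taken together, the read-side hunks above pair a reference acquired at setup time with one dropped in the new release callbacks; shown side by side for clarity (both lines are lifted from the hunks above):
data->args.context = get_nfs_open_context(req->wb_context);	/* in nfs_read_rpcsetup() */
put_nfs_open_context(rdata->args.context);	/* in nfs_readdata_release(), reached via the *_release_partial/full callbacks */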
/*
......
......@@ -441,10 +441,52 @@ static const char *nfs_pseudoflavour_to_name(rpc_authflavor_t flavour)
return sec_flavours[i].str;
}
static void nfs_show_mountd_options(struct seq_file *m, struct nfs_server *nfss,
int showdefaults)
{
struct sockaddr *sap = (struct sockaddr *)&nfss->mountd_address;
switch (sap->sa_family) {
case AF_INET: {
struct sockaddr_in *sin = (struct sockaddr_in *)sap;
seq_printf(m, ",mountaddr=" NIPQUAD_FMT,
NIPQUAD(sin->sin_addr.s_addr));
break;
}
case AF_INET6: {
struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)sap;
seq_printf(m, ",mountaddr=" NIP6_FMT,
NIP6(sin6->sin6_addr));
break;
}
default:
if (showdefaults)
seq_printf(m, ",mountaddr=unspecified");
}
if (nfss->mountd_version || showdefaults)
seq_printf(m, ",mountvers=%u", nfss->mountd_version);
if (nfss->mountd_port || showdefaults)
seq_printf(m, ",mountport=%u", nfss->mountd_port);
switch (nfss->mountd_protocol) {
case IPPROTO_UDP:
seq_printf(m, ",mountproto=udp");
break;
case IPPROTO_TCP:
seq_printf(m, ",mountproto=tcp");
break;
default:
if (showdefaults)
seq_printf(m, ",mountproto=auto");
}
}
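For illustration only (the address and values are made up), an NFSv3 line in /proc/mounts can now end with the mountd options emitted above, e.g.:
...,proto=tcp,mountaddr=192.0.2.1,mountvers=3,mountport=20048,mountproto=udp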
/*
* Describe the mount options in force on this server representation
*/
static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss, int showdefaults)
static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
int showdefaults)
{
static const struct proc_nfs_info {
int flag;
......@@ -452,6 +494,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
const char *nostr;
} nfs_info[] = {
{ NFS_MOUNT_SOFT, ",soft", ",hard" },
{ NFS_MOUNT_INTR, ",intr", ",nointr" },
{ NFS_MOUNT_POSIX, ",posix", "" },
{ NFS_MOUNT_NOCTO, ",nocto", "" },
{ NFS_MOUNT_NOAC, ",noac", "" },
{ NFS_MOUNT_NONLM, ",nolock", "" },
......@@ -462,18 +506,22 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
};
const struct proc_nfs_info *nfs_infop;
struct nfs_client *clp = nfss->nfs_client;
seq_printf(m, ",vers=%d", clp->rpc_ops->version);
seq_printf(m, ",rsize=%d", nfss->rsize);
seq_printf(m, ",wsize=%d", nfss->wsize);
u32 version = clp->rpc_ops->version;
seq_printf(m, ",vers=%u", version);
seq_printf(m, ",rsize=%u", nfss->rsize);
seq_printf(m, ",wsize=%u", nfss->wsize);
if (nfss->bsize != 0)
seq_printf(m, ",bsize=%u", nfss->bsize);
seq_printf(m, ",namlen=%u", nfss->namelen);
if (nfss->acregmin != 3*HZ || showdefaults)
seq_printf(m, ",acregmin=%d", nfss->acregmin/HZ);
seq_printf(m, ",acregmin=%u", nfss->acregmin/HZ);
if (nfss->acregmax != 60*HZ || showdefaults)
seq_printf(m, ",acregmax=%d", nfss->acregmax/HZ);
seq_printf(m, ",acregmax=%u", nfss->acregmax/HZ);
if (nfss->acdirmin != 30*HZ || showdefaults)
seq_printf(m, ",acdirmin=%d", nfss->acdirmin/HZ);
seq_printf(m, ",acdirmin=%u", nfss->acdirmin/HZ);
if (nfss->acdirmax != 60*HZ || showdefaults)
seq_printf(m, ",acdirmax=%d", nfss->acdirmax/HZ);
seq_printf(m, ",acdirmax=%u", nfss->acdirmax/HZ);
for (nfs_infop = nfs_info; nfs_infop->flag; nfs_infop++) {
if (nfss->flags & nfs_infop->flag)
seq_puts(m, nfs_infop->str);
......@@ -482,9 +530,24 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
}
seq_printf(m, ",proto=%s",
rpc_peeraddr2str(nfss->client, RPC_DISPLAY_PROTO));
if (version == 4) {
if (nfss->port != NFS_PORT)
seq_printf(m, ",port=%u", nfss->port);
} else
if (nfss->port)
seq_printf(m, ",port=%u", nfss->port);
seq_printf(m, ",timeo=%lu", 10U * nfss->client->cl_timeout->to_initval / HZ);
seq_printf(m, ",retrans=%u", nfss->client->cl_timeout->to_retries);
seq_printf(m, ",sec=%s", nfs_pseudoflavour_to_name(nfss->client->cl_auth->au_flavor));
if (version != 4)
nfs_show_mountd_options(m, nfss, showdefaults);
#ifdef CONFIG_NFS_V4
if (clp->rpc_ops->version == 4)
seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
#endif
}
/*
......@@ -529,10 +592,10 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "\n\tcaps:\t");
seq_printf(m, "caps=0x%x", nfss->caps);
seq_printf(m, ",wtmult=%d", nfss->wtmult);
seq_printf(m, ",dtsize=%d", nfss->dtsize);
seq_printf(m, ",bsize=%d", nfss->bsize);
seq_printf(m, ",namelen=%d", nfss->namelen);
seq_printf(m, ",wtmult=%u", nfss->wtmult);
seq_printf(m, ",dtsize=%u", nfss->dtsize);
seq_printf(m, ",bsize=%u", nfss->bsize);
seq_printf(m, ",namlen=%u", nfss->namelen);
#ifdef CONFIG_NFS_V4
if (nfss->nfs_client->rpc_ops->version == 4) {
......@@ -546,9 +609,9 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
/*
* Display security flavor in effect for this mount
*/
seq_printf(m, "\n\tsec:\tflavor=%d", auth->au_ops->au_flavor);
seq_printf(m, "\n\tsec:\tflavor=%u", auth->au_ops->au_flavor);
if (auth->au_flavor)
seq_printf(m, ",pseudoflavor=%d", auth->au_flavor);
seq_printf(m, ",pseudoflavor=%u", auth->au_flavor);
/*
* Display superblock I/O counters
......@@ -683,7 +746,6 @@ static int nfs_parse_mount_options(char *raw,
struct nfs_parsed_mount_data *mnt)
{
char *p, *string, *secdata;
unsigned short port = 0;
int rc;
if (!raw) {
......@@ -798,7 +860,7 @@ static int nfs_parse_mount_options(char *raw,
return 0;
if (option < 0 || option > 65535)
return 0;
port = option;
mnt->nfs_server.port = option;
break;
case Opt_rsize:
if (match_int(args, &mnt->rsize))
......@@ -1048,7 +1110,8 @@ static int nfs_parse_mount_options(char *raw,
}
}
nfs_set_port((struct sockaddr *)&mnt->nfs_server.address, port);
nfs_set_port((struct sockaddr *)&mnt->nfs_server.address,
mnt->nfs_server.port);
return 1;
......@@ -1169,7 +1232,9 @@ static int nfs_validate_mount_data(void *options,
args->acregmax = 60;
args->acdirmin = 30;
args->acdirmax = 60;
args->mount_server.port = 0; /* autobind unless user sets port */
args->mount_server.protocol = XPRT_TRANSPORT_UDP;
args->nfs_server.port = 0; /* autobind unless user sets port */
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
switch (data->version) {
......@@ -1208,7 +1273,6 @@ static int nfs_validate_mount_data(void *options,
args->flags = data->flags;
args->rsize = data->rsize;
args->wsize = data->wsize;
args->flags = data->flags;
args->timeo = data->timeo;
args->retrans = data->retrans;
args->acregmin = data->acregmin;
......@@ -1230,6 +1294,8 @@ static int nfs_validate_mount_data(void *options,
args->namlen = data->namlen;
args->bsize = data->bsize;
args->auth_flavors[0] = data->pseudoflavor;
if (!args->nfs_server.hostname)
goto out_nomem;
/*
* The legacy version 6 binary mount data from userspace has a
......@@ -1276,6 +1342,8 @@ static int nfs_validate_mount_data(void *options,
len = c - dev_name;
/* N.B. caller will free nfs_server.hostname in all cases */
args->nfs_server.hostname = kstrndup(dev_name, len, GFP_KERNEL);
if (!args->nfs_server.hostname)
goto out_nomem;
c++;
if (strlen(c) > NFS_MAXPATHLEN)
......@@ -1319,6 +1387,10 @@ static int nfs_validate_mount_data(void *options,
return -EPROTONOSUPPORT;
#endif /* !CONFIG_NFS_V3 */
out_nomem:
dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
return -ENOMEM;
out_no_address:
dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
return -EINVAL;
......@@ -1705,28 +1777,6 @@ static void nfs4_fill_super(struct super_block *sb)
nfs_initialise_sb(sb);
}
/*
* If the user didn't specify a port, set the port number to
* the NFS version 4 default port.
*/
static void nfs4_default_port(struct sockaddr *sap)
{
switch (sap->sa_family) {
case AF_INET: {
struct sockaddr_in *ap = (struct sockaddr_in *)sap;
if (ap->sin_port == 0)
ap->sin_port = htons(NFS_PORT);
break;
}
case AF_INET6: {
struct sockaddr_in6 *ap = (struct sockaddr_in6 *)sap;
if (ap->sin6_port == 0)
ap->sin6_port = htons(NFS_PORT);
break;
}
}
}
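With this helper removed, the NFSv4 default is seeded before option parsing instead (see nfs4_validate_mount_data() below), and nfs_set_port() simply applies whatever ends up in nfs_server.port; roughly:
args->nfs_server.port = NFS_PORT;	/* default, may be overridden by a port= option */
nfs_set_port((struct sockaddr *)&mnt->nfs_server.address, mnt->nfs_server.port);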
/*
* Validate NFSv4 mount options
*/
......@@ -1751,6 +1801,7 @@ static int nfs4_validate_mount_data(void *options,
args->acregmax = 60;
args->acdirmin = 30;
args->acdirmax = 60;
args->nfs_server.port = NFS_PORT; /* 2049 unless user set port= */
args->nfs_server.protocol = XPRT_TRANSPORT_TCP;
switch (data->version) {
......@@ -1767,9 +1818,6 @@ static int nfs4_validate_mount_data(void *options,
&args->nfs_server.address))
goto out_no_address;
nfs4_default_port((struct sockaddr *)
&args->nfs_server.address);
switch (data->auth_flavourlen) {
case 0:
args->auth_flavors[0] = RPC_AUTH_UNIX;
......@@ -1827,9 +1875,6 @@ static int nfs4_validate_mount_data(void *options,
&args->nfs_server.address))
return -EINVAL;
nfs4_default_port((struct sockaddr *)
&args->nfs_server.address);
switch (args->auth_flavor_len) {
case 0:
args->auth_flavors[0] = RPC_AUTH_UNIX;
......@@ -1852,12 +1897,16 @@ static int nfs4_validate_mount_data(void *options,
return -ENAMETOOLONG;
/* N.B. caller will free nfs_server.hostname in all cases */
args->nfs_server.hostname = kstrndup(dev_name, len, GFP_KERNEL);
if (!args->nfs_server.hostname)
goto out_nomem;
c++; /* step over the ':' */
len = strlen(c);
if (len > NFS4_MAXPATHLEN)
return -ENAMETOOLONG;
args->nfs_server.export_path = kstrndup(c, len, GFP_KERNEL);
if (!args->nfs_server.export_path)
goto out_nomem;
dprintk("NFS: MNTPATH: '%s'\n", args->nfs_server.export_path);
......@@ -1879,6 +1928,10 @@ static int nfs4_validate_mount_data(void *options,
data->auth_flavourlen);
return -EINVAL;
out_nomem:
dfprintk(MOUNT, "NFS4: not enough memory to handle mount options\n");
return -ENOMEM;
out_no_address:
dfprintk(MOUNT, "NFS4: mount program didn't pass remote address\n");
return -EINVAL;
......
......@@ -234,7 +234,7 @@ nfs_async_unlink(struct inode *dir, struct dentry *dentry)
if (data == NULL)
goto out;
data->cred = rpcauth_lookupcred(NFS_CLIENT(dir)->cl_auth, 0);
data->cred = rpc_lookup_cred();
if (IS_ERR(data->cred)) {
status = PTR_ERR(data->cred);
goto out_free;
......
......@@ -91,6 +91,7 @@ struct nlm_wait;
*/
#define NLMCLNT_OHSIZE ((__NEW_UTS_LEN) + 10u)
struct nlm_rqst {
atomic_t a_count;
unsigned int a_flags; /* initial RPC task flags */
struct nlm_host * a_host; /* host handle */
struct nlm_args a_args; /* arguments */
......@@ -173,8 +174,10 @@ void nlmclnt_next_cookie(struct nlm_cookie *);
/*
* Host cache
*/
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *, int, int,
const char *, unsigned int);
struct nlm_host *nlmclnt_lookup_host(const struct sockaddr_in *sin,
int proto, u32 version,
const char *hostname,
unsigned int hostname_len);
struct nlm_host *nlmsvc_lookup_host(struct svc_rqst *, const char *,
unsigned int);
struct rpc_clnt * nlm_bind_host(struct nlm_host *);
......@@ -217,8 +220,7 @@ void nlmsvc_mark_resources(void);
void nlmsvc_free_host_resources(struct nlm_host *);
void nlmsvc_invalidate_all(void);
static __inline__ struct inode *
nlmsvc_file_inode(struct nlm_file *file)
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
return file->f_file->f_path.dentry->d_inode;
}
......@@ -226,8 +228,8 @@ nlmsvc_file_inode(struct nlm_file *file)
/*
* Compare two host addresses (needs modifying for ipv6)
*/
static __inline__ int
nlm_cmp_addr(const struct sockaddr_in *sin1, const struct sockaddr_in *sin2)
static inline int nlm_cmp_addr(const struct sockaddr_in *sin1,
const struct sockaddr_in *sin2)
{
return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr;
}
......@@ -236,8 +238,8 @@ nlm_cmp_addr(const struct sockaddr_in *sin1, const struct sockaddr_in *sin2)
* Compare two NLM locks.
* When the second lock is of type F_UNLCK, this acts like a wildcard.
*/
static __inline__ int
nlm_compare_locks(const struct file_lock *fl1, const struct file_lock *fl2)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
return fl1->fl_pid == fl2->fl_pid
&& fl1->fl_owner == fl2->fl_owner
......
......@@ -19,6 +19,7 @@
#define SM_NOTIFY 6
#define SM_MAXSTRLEN 1024
#define SM_PRIV_SIZE 16
/*
* Arguments for all calls to statd
......
......@@ -430,7 +430,6 @@ extern void nfs_unregister_sysctl(void);
/*
* linux/fs/nfs/namespace.c
*/
extern struct list_head nfs_automount_list;
extern const struct inode_operations nfs_mountpoint_inode_operations;
extern const struct inode_operations nfs_referral_inode_operations;
extern int nfs_mountpoint_expiry_timeout;
......@@ -466,9 +465,9 @@ extern int nfs_wb_page(struct inode *inode, struct page* page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern int nfs_commit_inode(struct inode *, int);
extern struct nfs_write_data *nfs_commit_alloc(void);
extern struct nfs_write_data *nfs_commitdata_alloc(void);
extern void nfs_commit_free(struct nfs_write_data *wdata);
extern void nfs_commit_release(void *wdata);
extern void nfs_commitdata_release(void *wdata);
#else
static inline int
nfs_commit_inode(struct inode *inode, int how)
......
......@@ -32,6 +32,8 @@ struct nfs_client {
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
int cl_proto; /* Network transport protocol */
struct rpc_cred *cl_machine_cred;
#ifdef CONFIG_NFS_V4
u64 cl_clientid; /* constant */
nfs4_verifier cl_confirm;
......@@ -93,6 +95,7 @@ struct nfs_server {
unsigned int wpages; /* write size (in pages) */
unsigned int wtmult; /* server disk block size */
unsigned int dtsize; /* readdir size */
unsigned short port; /* "port=" setting */
unsigned int bsize; /* server block size */
unsigned int acregmin; /* attr cache timeouts */
unsigned int acregmax;
......@@ -117,6 +120,13 @@ struct nfs_server {
atomic_t active; /* Keep trace of any activity to this server */
wait_queue_head_t active_wq; /* Wait for any activity to stop */
/* mountd-related mount options */
struct sockaddr_storage mountd_address;
size_t mountd_addrlen;
u32 mountd_version;
unsigned short mountd_port;
unsigned short mountd_protocol;
};
/* Server capabilities */
......
......@@ -140,6 +140,7 @@ struct nfs_openres {
__u32 rflags;
struct nfs_fattr * f_attr;
struct nfs_fattr * dir_attr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
int delegation_type;
nfs4_stateid delegation;
......@@ -159,6 +160,7 @@ struct nfs_open_confirmargs {
struct nfs_open_confirmres {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
};
/*
......@@ -175,6 +177,7 @@ struct nfs_closeargs {
struct nfs_closeres {
nfs4_stateid stateid;
struct nfs_fattr * fattr;
struct nfs_seqid * seqid;
const struct nfs_server *server;
};
/*
......@@ -200,6 +203,8 @@ struct nfs_lock_args {
struct nfs_lock_res {
nfs4_stateid stateid;
struct nfs_seqid * lock_seqid;
struct nfs_seqid * open_seqid;
};
struct nfs_locku_args {
......@@ -211,6 +216,7 @@ struct nfs_locku_args {
struct nfs_locku_res {
nfs4_stateid stateid;
struct nfs_seqid * seqid;
};
struct nfs_lockt_args {
......
......@@ -26,6 +26,7 @@ struct auth_cred {
uid_t uid;
gid_t gid;
struct group_info *group_info;
unsigned char machine_cred : 1;
};
/*
......@@ -59,8 +60,8 @@ struct rpc_cred {
/*
* Client authentication handle
*/
#define RPC_CREDCACHE_NR 8
#define RPC_CREDCACHE_MASK (RPC_CREDCACHE_NR - 1)
#define RPC_CREDCACHE_HASHBITS 4
#define RPC_CREDCACHE_NR (1 << RPC_CREDCACHE_HASHBITS)
struct rpc_cred_cache {
struct hlist_head hashtable[RPC_CREDCACHE_NR];
spinlock_t lock;
......@@ -89,7 +90,6 @@ struct rpc_auth {
/* Flags for rpcauth_lookupcred() */
#define RPCAUTH_LOOKUP_NEW 0x01 /* Accept an uninitialised cred */
#define RPCAUTH_LOOKUP_ROOTCREDS 0x02 /* This really ought to go! */
/*
* Client authentication ops
......@@ -97,9 +97,7 @@ struct rpc_auth {
struct rpc_authops {
struct module *owner;
rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */
#ifdef RPC_DEBUG
char * au_name;
#endif
struct rpc_auth * (*create)(struct rpc_clnt *, rpc_authflavor_t);
void (*destroy)(struct rpc_auth *);
......@@ -113,6 +111,7 @@ struct rpc_credops {
void (*crdestroy)(struct rpc_cred *);
int (*crmatch)(struct auth_cred *, struct rpc_cred *, int);
void (*crbind)(struct rpc_task *, struct rpc_cred *);
__be32 * (*crmarshal)(struct rpc_task *, __be32 *);
int (*crrefresh)(struct rpc_task *);
__be32 * (*crvalidate)(struct rpc_task *, __be32 *);
......@@ -126,9 +125,13 @@ extern const struct rpc_authops authunix_ops;
extern const struct rpc_authops authnull_ops;
void __init rpc_init_authunix(void);
void __init rpc_init_generic_auth(void);
void __init rpcauth_init_module(void);
void __exit rpcauth_remove_module(void);
void __exit rpc_destroy_generic_auth(void);
struct rpc_cred * rpc_lookup_cred(void);
struct rpc_cred * rpc_lookup_machine_cred(void);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
struct rpc_auth * rpcauth_create(rpc_authflavor_t, struct rpc_clnt *);
......@@ -136,8 +139,8 @@ void rpcauth_release(struct rpc_auth *);
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
struct rpc_cred * rpcauth_bindcred(struct rpc_task *);
void rpcauth_holdcred(struct rpc_task *);
void rpcauth_bindcred(struct rpc_task *, struct rpc_cred *, int);
void rpcauth_generic_bind_cred(struct rpc_task *, struct rpc_cred *);
void put_rpccred(struct rpc_cred *);
void rpcauth_unbindcred(struct rpc_task *);
__be32 * rpcauth_marshcred(struct rpc_task *, __be32 *);
......
......@@ -84,6 +84,7 @@ struct gss_cred {
enum rpc_gss_svc gc_service;
struct gss_cl_ctx *gc_ctx;
struct gss_upcall_msg *gc_upcall;
unsigned char gc_machine_cred : 1;
};
#endif /* __KERNEL__ */
......
......@@ -127,11 +127,12 @@ int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void rpcb_getport_async(struct rpc_task *);
void rpc_call_start(struct rpc_task *);
int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags, const struct rpc_call_ops *tk_ops,
int rpc_call_async(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags,
const struct rpc_call_ops *tk_ops,
void *calldata);
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags);
int rpc_call_sync(struct rpc_clnt *clnt,
const struct rpc_message *msg, int flags);
struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred,
int flags);
void rpc_restart_call(struct rpc_task *);
......
......@@ -11,7 +11,6 @@
#include <linux/timer.h>
#include <linux/sunrpc/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
......@@ -33,7 +32,8 @@ struct rpc_wait_queue;
struct rpc_wait {
struct list_head list; /* wait queue links */
struct list_head links; /* Links to related tasks */
struct rpc_wait_queue * rpc_waitq; /* RPC wait queue we're on */
struct list_head timer_list; /* Timer list */
unsigned long expires;
};
/*
......@@ -57,33 +57,25 @@ struct rpc_task {
__u8 tk_cred_retry;
/*
* timeout_fn to be executed by timer bottom half
* callback to be executed after waking up
* action next procedure for async tasks
* tk_ops caller callbacks
*/
void (*tk_timeout_fn)(struct rpc_task *);
void (*tk_callback)(struct rpc_task *);
void (*tk_action)(struct rpc_task *);
const struct rpc_call_ops *tk_ops;
void * tk_calldata;
/*
* tk_timer is used for async processing by the RPC scheduling
* primitives. You should not access this directly unless
* you have a pathological interest in kernel oopses.
*/
struct timer_list tk_timer; /* kernel timer */
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
* be any workqueue
*/
struct rpc_wait_queue *tk_waitqueue; /* RPC wait queue we're on */
union {
struct work_struct tk_work; /* Async task work queue */
struct rpc_wait tk_wait; /* RPC wait */
struct rcu_head tk_rcu; /* for task deletion */
} u;
unsigned short tk_timeouts; /* maj timeouts */
......@@ -123,6 +115,7 @@ struct rpc_task_setup {
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
struct workqueue_struct *workqueue;
unsigned short flags;
signed char priority;
};
......@@ -147,9 +140,7 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_WAKEUP 2
#define RPC_TASK_HAS_TIMER 3
#define RPC_TASK_ACTIVE 4
#define RPC_TASK_ACTIVE 2
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
......@@ -171,15 +162,6 @@ struct rpc_task_setup {
smp_mb__after_clear_bit(); \
} while (0)
#define rpc_start_wakeup(t) \
(test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
do { \
smp_mb__before_clear_bit(); \
clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
smp_mb__after_clear_bit(); \
} while (0)
#define RPC_IS_ACTIVATED(t) test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)
/*
......@@ -192,6 +174,12 @@ struct rpc_task_setup {
#define RPC_PRIORITY_HIGH (1)
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
struct rpc_timer {
struct timer_list timer;
struct list_head list;
unsigned long expires;
};
/*
* RPC synchronization objects
*/
......@@ -204,6 +192,7 @@ struct rpc_wait_queue {
unsigned char count; /* # task groups remaining serviced so far */
unsigned char nr; /* # tasks remaining for cookie */
unsigned short qlen; /* total # tasks waiting in queue */
struct rpc_timer timer_list;
#ifdef RPC_DEBUG
const char * name;
#endif
......@@ -229,9 +218,11 @@ void rpc_killall_tasks(struct rpc_clnt *);
void rpc_execute(struct rpc_task *);
void rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void rpc_destroy_wait_queue(struct rpc_wait_queue *);
void rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
rpc_action action, rpc_action timer);
void rpc_wake_up_task(struct rpc_task *);
rpc_action action);
void rpc_wake_up_queued_task(struct rpc_wait_queue *,
struct rpc_task *);
void rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void rpc_wake_up_status(struct rpc_wait_queue *, int);
......
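The sched.h changes above ripple through every call site later in this diff: rpc_sleep_on() loses its per-task timer callback (timeouts are now driven by the queue's rpc_timer), and direct task wake-ups go through the owning wait queue. A before/after sketch built from lines that appear in the xprt.c hunks below:
rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);	/* old */
rpc_wake_up_task(task);	/* old */
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);	/* new */
rpc_wake_up_queued_task(&xprt->pending, task);	/* new */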
......@@ -86,6 +86,10 @@ struct rpc_rqst {
unsigned long rq_majortimeo; /* major timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
unsigned int rq_retries; /* # of retries */
unsigned int rq_connect_cookie;
/* A cookie used to track the
state of the transport
connection */
/*
* Partial send handling
......@@ -152,6 +156,9 @@ struct rpc_xprt {
unsigned long connect_timeout,
bind_timeout,
reestablish_timeout;
unsigned int connect_cookie; /* A cookie that gets bumped
every time the transport
is reconnected */
/*
* Disconnection of idle transports
......@@ -232,7 +239,7 @@ int xprt_unregister_transport(struct xprt_class *type);
void xprt_set_retrans_timeout_def(struct rpc_task *task);
void xprt_set_retrans_timeout_rtt(struct rpc_task *task);
void xprt_wake_pending_tasks(struct rpc_xprt *xprt, int status);
void xprt_wait_for_buffer_space(struct rpc_task *task);
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action);
void xprt_write_space(struct rpc_xprt *xprt);
void xprt_update_rtt(struct rpc_task *task);
void xprt_adjust_cwnd(struct rpc_task *task, int result);
......@@ -241,6 +248,7 @@ void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
/*
* Reserved bit positions in xprt->state
......
......@@ -8,7 +8,7 @@ obj-$(CONFIG_SUNRPC_GSS) += auth_gss/
obj-$(CONFIG_SUNRPC_XPRT_RDMA) += xprtrdma/
sunrpc-y := clnt.o xprt.o socklib.o xprtsock.o sched.o \
auth.o auth_null.o auth_unix.o \
auth.o auth_null.o auth_unix.o auth_generic.o \
svc.o svcsock.o svcauth.o svcauth_unix.o \
rpcb_clnt.o timer.o xdr.o \
sunrpc_syms.o cache.o rpc_pipe.o \
......
......@@ -11,6 +11,7 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/hash.h>
#include <linux/sunrpc/clnt.h>
#include <linux/spinlock.h>
......@@ -219,6 +220,9 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
}
EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
#define RPC_AUTH_EXPIRY_MORATORIUM (60 * HZ)
/*
* Remove stale credentials. Avoid sleeping inside the loop.
*/
......@@ -227,6 +231,7 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
{
spinlock_t *cache_lock;
struct rpc_cred *cred;
unsigned long expired = jiffies - RPC_AUTH_EXPIRY_MORATORIUM;
while (!list_empty(&cred_unused)) {
cred = list_entry(cred_unused.next, struct rpc_cred, cr_lru);
......@@ -234,6 +239,10 @@ rpcauth_prune_expired(struct list_head *free, int nr_to_scan)
number_cred_unused--;
if (atomic_read(&cred->cr_count) != 0)
continue;
/* Enforce a 60 second garbage collection moratorium */

if (time_in_range(cred->cr_expire, expired, jiffies) &&
test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) != 0)
continue;
cache_lock = &cred->cr_auth->au_credcache->lock;
spin_lock(cache_lock);
if (atomic_read(&cred->cr_count) == 0) {
......@@ -280,10 +289,9 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
struct hlist_node *pos;
struct rpc_cred *cred = NULL,
*entry, *new;
int nr = 0;
unsigned int nr;
if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS))
nr = acred->uid & RPC_CREDCACHE_MASK;
nr = hash_long(acred->uid, RPC_CREDCACHE_HASHBITS);
rcu_read_lock();
hlist_for_each_entry_rcu(entry, pos, &cache->hashtable[nr], cr_hash) {
......@@ -356,7 +364,6 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
put_group_info(acred.group_info);
return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
void
rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
......@@ -375,41 +382,58 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
}
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
struct rpc_cred *
rpcauth_bindcred(struct rpc_task *task)
void
rpcauth_generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
{
task->tk_msg.rpc_cred = get_rpccred(cred);
dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
cred->cr_auth->au_ops->au_name, cred);
}
EXPORT_SYMBOL_GPL(rpcauth_generic_bind_cred);
static void
rpcauth_bind_root_cred(struct rpc_task *task)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred acred = {
.uid = current->fsuid,
.gid = current->fsgid,
.group_info = current->group_info,
.uid = 0,
.gid = 0,
};
struct rpc_cred *ret;
int flags = 0;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, task->tk_client->cl_auth->au_ops->au_name);
get_group_info(acred.group_info);
if (task->tk_flags & RPC_TASK_ROOTCREDS)
flags |= RPCAUTH_LOOKUP_ROOTCREDS;
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
ret = auth->au_ops->lookup_cred(auth, &acred, 0);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
task->tk_status = PTR_ERR(ret);
}
static void
rpcauth_bind_new_cred(struct rpc_task *task)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct rpc_cred *ret;
dprintk("RPC: %5u looking up %s cred\n",
task->tk_pid, auth->au_ops->au_name);
ret = rpcauth_lookupcred(auth, 0);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
task->tk_status = PTR_ERR(ret);
put_group_info(acred.group_info);
return ret;
}
void
rpcauth_holdcred(struct rpc_task *task)
rpcauth_bindcred(struct rpc_task *task, struct rpc_cred *cred, int flags)
{
struct rpc_cred *cred = task->tk_msg.rpc_cred;
if (cred != NULL) {
get_rpccred(cred);
dprintk("RPC: %5u holding %s cred %p\n", task->tk_pid,
cred->cr_auth->au_ops->au_name, cred);
}
if (cred != NULL)
cred->cr_ops->crbind(task, cred);
else if (flags & RPC_TASK_ROOTCREDS)
rpcauth_bind_root_cred(task);
else
rpcauth_bind_new_cred(task);
}
void
......@@ -550,6 +574,7 @@ static struct shrinker rpc_cred_shrinker = {
void __init rpcauth_init_module(void)
{
rpc_init_authunix();
rpc_init_generic_auth();
register_shrinker(&rpc_cred_shrinker);
}
......
/*
* Generic RPC credential
*
* Copyright (C) 2008, Trond Myklebust <Trond.Myklebust@netapp.com>
*/
#include <linux/err.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/debug.h>
#include <linux/sunrpc/sched.h>
#ifdef RPC_DEBUG
# define RPCDBG_FACILITY RPCDBG_AUTH
#endif
#define RPC_ANONYMOUS_USERID ((uid_t)-2)
#define RPC_ANONYMOUS_GROUPID ((gid_t)-2)
struct generic_cred {
struct rpc_cred gc_base;
struct auth_cred acred;
};
static struct rpc_auth generic_auth;
static struct rpc_cred_cache generic_cred_cache;
static const struct rpc_credops generic_credops;
/*
* Public call interface
*/
struct rpc_cred *rpc_lookup_cred(void)
{
return rpcauth_lookupcred(&generic_auth, 0);
}
EXPORT_SYMBOL_GPL(rpc_lookup_cred);
/*
* Public call interface for looking up machine creds.
*/
struct rpc_cred *rpc_lookup_machine_cred(void)
{
struct auth_cred acred = {
.uid = RPC_ANONYMOUS_USERID,
.gid = RPC_ANONYMOUS_GROUPID,
.machine_cred = 1,
};
dprintk("RPC: looking up machine cred\n");
return generic_auth.au_ops->lookup_cred(&generic_auth, &acred, 0);
}
EXPORT_SYMBOL_GPL(rpc_lookup_machine_cred);
static void
generic_bind_cred(struct rpc_task *task, struct rpc_cred *cred)
{
struct rpc_auth *auth = task->tk_client->cl_auth;
struct auth_cred *acred = &container_of(cred, struct generic_cred, gc_base)->acred;
struct rpc_cred *ret;
ret = auth->au_ops->lookup_cred(auth, acred, 0);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
task->tk_status = PTR_ERR(ret);
}
/*
* Lookup generic creds for current process
*/
static struct rpc_cred *
generic_lookup_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
return rpcauth_lookup_credcache(&generic_auth, acred, flags);
}
static struct rpc_cred *
generic_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
struct generic_cred *gcred;
gcred = kmalloc(sizeof(*gcred), GFP_KERNEL);
if (gcred == NULL)
return ERR_PTR(-ENOMEM);
rpcauth_init_cred(&gcred->gc_base, acred, &generic_auth, &generic_credops);
gcred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
gcred->acred.uid = acred->uid;
gcred->acred.gid = acred->gid;
gcred->acred.group_info = acred->group_info;
if (gcred->acred.group_info != NULL)
get_group_info(gcred->acred.group_info);
gcred->acred.machine_cred = acred->machine_cred;
dprintk("RPC: allocated %s cred %p for uid %d gid %d\n",
gcred->acred.machine_cred ? "machine" : "generic",
gcred, acred->uid, acred->gid);
return &gcred->gc_base;
}
static void
generic_free_cred(struct rpc_cred *cred)
{
struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
dprintk("RPC: generic_free_cred %p\n", gcred);
if (gcred->acred.group_info != NULL)
put_group_info(gcred->acred.group_info);
kfree(gcred);
}
static void
generic_free_cred_callback(struct rcu_head *head)
{
struct rpc_cred *cred = container_of(head, struct rpc_cred, cr_rcu);
generic_free_cred(cred);
}
static void
generic_destroy_cred(struct rpc_cred *cred)
{
call_rcu(&cred->cr_rcu, generic_free_cred_callback);
}
/*
* Match credentials against current process creds.
*/
static int
generic_match(struct auth_cred *acred, struct rpc_cred *cred, int flags)
{
struct generic_cred *gcred = container_of(cred, struct generic_cred, gc_base);
if (gcred->acred.uid != acred->uid ||
gcred->acred.gid != acred->gid ||
gcred->acred.group_info != acred->group_info ||
gcred->acred.machine_cred != acred->machine_cred)
return 0;
return 1;
}
void __init rpc_init_generic_auth(void)
{
spin_lock_init(&generic_cred_cache.lock);
}
void __exit rpc_destroy_generic_auth(void)
{
rpcauth_clear_credcache(&generic_cred_cache);
}
static struct rpc_cred_cache generic_cred_cache = {
{{ NULL, },},
};
static const struct rpc_authops generic_auth_ops = {
.owner = THIS_MODULE,
.au_name = "Generic",
.lookup_cred = generic_lookup_cred,
.crcreate = generic_create_cred,
};
static struct rpc_auth generic_auth = {
.au_ops = &generic_auth_ops,
.au_count = ATOMIC_INIT(0),
.au_credcache = &generic_cred_cache,
};
static const struct rpc_credops generic_credops = {
.cr_name = "Generic cred",
.crdestroy = generic_destroy_cred,
.crbind = generic_bind_cred,
.crmatch = generic_match,
};
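The new generic credentials are consumed by ordinary kernel callers; a minimal usage sketch, mirroring the fs/nfs/unlink.c hunk earlier in this diff:
data->cred = rpc_lookup_cred();	/* generic cred; crbind resolves it to the transport's flavour at rpc_run_task() time */
if (IS_ERR(data->cred))
	status = PTR_ERR(data->cred);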
......@@ -114,27 +114,14 @@ static void
gss_cred_set_ctx(struct rpc_cred *cred, struct gss_cl_ctx *ctx)
{
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
struct gss_cl_ctx *old;
old = gss_cred->gc_ctx;
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
return;
gss_get_ctx(ctx);
rcu_assign_pointer(gss_cred->gc_ctx, ctx);
set_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags);
smp_mb__before_clear_bit();
clear_bit(RPCAUTH_CRED_NEW, &cred->cr_flags);
if (old)
gss_put_ctx(old);
}
static int
gss_cred_is_uptodate_ctx(struct rpc_cred *cred)
{
struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
int res = 0;
rcu_read_lock();
if (test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) && gss_cred->gc_ctx)
res = 1;
rcu_read_unlock();
return res;
}
static const void *
......@@ -266,6 +253,7 @@ gss_release_msg(struct gss_upcall_msg *gss_msg)
BUG_ON(!list_empty(&gss_msg->list));
if (gss_msg->ctx != NULL)
gss_put_ctx(gss_msg->ctx);
rpc_destroy_wait_queue(&gss_msg->rpc_waitqueue);
kfree(gss_msg);
}
......@@ -339,7 +327,7 @@ gss_upcall_callback(struct rpc_task *task)
spin_lock(&inode->i_lock);
if (gss_msg->ctx)
gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_get_ctx(gss_msg->ctx));
gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
else
task->tk_status = gss_msg->msg.errno;
gss_cred->gc_upcall = NULL;
......@@ -370,9 +358,16 @@ gss_alloc_msg(struct gss_auth *gss_auth, uid_t uid)
static struct gss_upcall_msg *
gss_setup_upcall(struct rpc_clnt *clnt, struct gss_auth *gss_auth, struct rpc_cred *cred)
{
struct gss_cred *gss_cred = container_of(cred,
struct gss_cred, gc_base);
struct gss_upcall_msg *gss_new, *gss_msg;
uid_t uid = cred->cr_uid;
/* Special case: rpc.gssd assumes that uid == 0 implies machine creds */
if (gss_cred->gc_machine_cred != 0)
uid = 0;
gss_new = gss_alloc_msg(gss_auth, cred->cr_uid);
gss_new = gss_alloc_msg(gss_auth, uid);
if (gss_new == NULL)
return ERR_PTR(-ENOMEM);
gss_msg = gss_add_msg(gss_auth, gss_new);
......@@ -408,13 +403,17 @@ gss_refresh_upcall(struct rpc_task *task)
}
spin_lock(&inode->i_lock);
if (gss_cred->gc_upcall != NULL)
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL, NULL);
else if (gss_msg->ctx == NULL && gss_msg->msg.errno >= 0) {
rpc_sleep_on(&gss_cred->gc_upcall->rpc_waitqueue, task, NULL);
else if (gss_msg->ctx != NULL) {
gss_cred_set_ctx(task->tk_msg.rpc_cred, gss_msg->ctx);
gss_cred->gc_upcall = NULL;
rpc_wake_up_status(&gss_msg->rpc_waitqueue, gss_msg->msg.errno);
} else if (gss_msg->msg.errno >= 0) {
task->tk_timeout = 0;
gss_cred->gc_upcall = gss_msg;
/* gss_upcall_callback will release the reference to gss_upcall_msg */
atomic_inc(&gss_msg->count);
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback, NULL);
rpc_sleep_on(&gss_msg->rpc_waitqueue, task, gss_upcall_callback);
} else
err = gss_msg->msg.errno;
spin_unlock(&inode->i_lock);
......@@ -454,7 +453,7 @@ gss_create_upcall(struct gss_auth *gss_auth, struct gss_cred *gss_cred)
schedule();
}
if (gss_msg->ctx)
gss_cred_set_ctx(cred, gss_get_ctx(gss_msg->ctx));
gss_cred_set_ctx(cred, gss_msg->ctx);
else
err = gss_msg->msg.errno;
spin_unlock(&inode->i_lock);
......@@ -709,7 +708,7 @@ gss_destroying_context(struct rpc_cred *cred)
struct rpc_task *task;
if (gss_cred->gc_ctx == NULL ||
gss_cred->gc_ctx->gc_proc == RPC_GSS_PROC_DESTROY)
test_and_clear_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags) == 0)
return 0;
gss_cred->gc_ctx->gc_proc = RPC_GSS_PROC_DESTROY;
......@@ -719,7 +718,7 @@ gss_destroying_context(struct rpc_cred *cred)
* by the RPC call or by the put_rpccred() below */
get_rpccred(cred);
task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC);
task = rpc_call_null(gss_auth->client, cred, RPC_TASK_ASYNC|RPC_TASK_SOFT);
if (!IS_ERR(task))
rpc_put_task(task);
......@@ -817,6 +816,7 @@ gss_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
*/
cred->gc_base.cr_flags = 1UL << RPCAUTH_CRED_NEW;
cred->gc_service = gss_auth->service;
cred->gc_machine_cred = acred->machine_cred;
kref_get(&gss_auth->kref);
return &cred->gc_base;
......@@ -843,17 +843,16 @@ gss_match(struct auth_cred *acred, struct rpc_cred *rc, int flags)
{
struct gss_cred *gss_cred = container_of(rc, struct gss_cred, gc_base);
/*
* If the searchflags have set RPCAUTH_LOOKUP_NEW, then
* we don't really care if the credential has expired or not,
* since the caller should be prepared to reinitialise it.
*/
if ((flags & RPCAUTH_LOOKUP_NEW) && test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
if (test_bit(RPCAUTH_CRED_NEW, &rc->cr_flags))
goto out;
/* Don't match with creds that have expired. */
if (gss_cred->gc_ctx && time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
if (time_after(jiffies, gss_cred->gc_ctx->gc_expiry))
return 0;
if (!test_bit(RPCAUTH_CRED_UPTODATE, &rc->cr_flags))
return 0;
out:
if (acred->machine_cred != gss_cred->gc_machine_cred)
return 0;
return (rc->cr_uid == acred->uid);
}
......@@ -917,16 +916,48 @@ gss_marshal(struct rpc_task *task, __be32 *p)
return NULL;
}
static int gss_renew_cred(struct rpc_task *task)
{
struct rpc_cred *oldcred = task->tk_msg.rpc_cred;
struct gss_cred *gss_cred = container_of(oldcred,
struct gss_cred,
gc_base);
struct rpc_auth *auth = oldcred->cr_auth;
struct auth_cred acred = {
.uid = oldcred->cr_uid,
.machine_cred = gss_cred->gc_machine_cred,
};
struct rpc_cred *new;
new = gss_lookup_cred(auth, &acred, RPCAUTH_LOOKUP_NEW);
if (IS_ERR(new))
return PTR_ERR(new);
task->tk_msg.rpc_cred = new;
put_rpccred(oldcred);
return 0;
}
/*
* Refresh credentials. XXX - finish
*/
static int
gss_refresh(struct rpc_task *task)
{
struct rpc_cred *cred = task->tk_msg.rpc_cred;
int ret = 0;
if (!gss_cred_is_uptodate_ctx(task->tk_msg.rpc_cred))
return gss_refresh_upcall(task);
return 0;
if (!test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags) &&
!test_bit(RPCAUTH_CRED_UPTODATE, &cred->cr_flags)) {
ret = gss_renew_cred(task);
if (ret < 0)
goto out;
cred = task->tk_msg.rpc_cred;
}
if (test_bit(RPCAUTH_CRED_NEW, &cred->cr_flags))
ret = gss_refresh_upcall(task);
out:
return ret;
}
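Read together with the call_timeout() hunk later in this diff (which now calls rpcauth_invalcred()), the refresh path behaves roughly as follows (simplified, for orientation only):
/* 1. call_timeout() clears RPCAUTH_CRED_UPTODATE via rpcauth_invalcred(task).
 * 2. On the retransmit, gss_refresh() sees a cred that is neither NEW nor
 *    UPTODATE and swaps in a freshly looked-up one (gss_renew_cred()).
 * 3. A NEW cred then goes through the normal gssd upcall
 *    (gss_refresh_upcall()) before the request is resent. */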
/* Dummy refresh routine: used only when destroying the context */
......@@ -1286,9 +1317,7 @@ gss_unwrap_resp(struct rpc_task *task,
static const struct rpc_authops authgss_ops = {
.owner = THIS_MODULE,
.au_flavor = RPC_AUTH_GSS,
#ifdef RPC_DEBUG
.au_name = "RPCSEC_GSS",
#endif
.create = gss_create,
.destroy = gss_destroy,
.lookup_cred = gss_lookup_cred,
......@@ -1299,6 +1328,7 @@ static const struct rpc_credops gss_credops = {
.cr_name = "AUTH_GSS",
.crdestroy = gss_destroy_cred,
.cr_init = gss_cred_init,
.crbind = rpcauth_generic_bind_cred,
.crmatch = gss_match,
.crmarshal = gss_marshal,
.crrefresh = gss_refresh,
......@@ -1310,6 +1340,7 @@ static const struct rpc_credops gss_credops = {
static const struct rpc_credops gss_nullops = {
.cr_name = "AUTH_GSS",
.crdestroy = gss_destroy_cred,
.crbind = rpcauth_generic_bind_cred,
.crmatch = gss_match,
.crmarshal = gss_marshal,
.crrefresh = gss_refresh_null,
......
......@@ -104,9 +104,7 @@ nul_validate(struct rpc_task *task, __be32 *p)
const struct rpc_authops authnull_ops = {
.owner = THIS_MODULE,
.au_flavor = RPC_AUTH_NULL,
#ifdef RPC_DEBUG
.au_name = "NULL",
#endif
.create = nul_create,
.destroy = nul_destroy,
.lookup_cred = nul_lookup_cred,
......@@ -125,6 +123,7 @@ static
const struct rpc_credops null_credops = {
.cr_name = "AUTH_NULL",
.crdestroy = nul_destroy_cred,
.crbind = rpcauth_generic_bind_cred,
.crmatch = nul_match,
.crmarshal = nul_marshal,
.crrefresh = nul_refresh,
......
......@@ -60,7 +60,8 @@ static struct rpc_cred *
unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
{
struct unx_cred *cred;
int i;
unsigned int groups = 0;
unsigned int i;
dprintk("RPC: allocating UNIX cred for uid %d gid %d\n",
acred->uid, acred->gid);
......@@ -70,12 +71,9 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
rpcauth_init_cred(&cred->uc_base, acred, auth, &unix_credops);
cred->uc_base.cr_flags = 1UL << RPCAUTH_CRED_UPTODATE;
if (flags & RPCAUTH_LOOKUP_ROOTCREDS) {
cred->uc_uid = 0;
cred->uc_gid = 0;
cred->uc_gids[0] = NOGROUP;
} else {
int groups = acred->group_info->ngroups;
if (acred->group_info != NULL)
groups = acred->group_info->ngroups;
if (groups > NFS_NGROUPS)
groups = NFS_NGROUPS;
......@@ -84,7 +82,6 @@ unx_create_cred(struct rpc_auth *auth, struct auth_cred *acred, int flags)
cred->uc_gids[i] = GROUP_AT(acred->group_info, i);
if (i < NFS_NGROUPS)
cred->uc_gids[i] = NOGROUP;
}
return &cred->uc_base;
}
......@@ -118,15 +115,14 @@ static int
unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
{
struct unx_cred *cred = container_of(rcred, struct unx_cred, uc_base);
int i;
unsigned int groups = 0;
unsigned int i;
if (!(flags & RPCAUTH_LOOKUP_ROOTCREDS)) {
int groups;
if (cred->uc_uid != acred->uid
|| cred->uc_gid != acred->gid)
if (cred->uc_uid != acred->uid || cred->uc_gid != acred->gid)
return 0;
if (acred->group_info != NULL)
groups = acred->group_info->ngroups;
if (groups > NFS_NGROUPS)
groups = NFS_NGROUPS;
......@@ -134,10 +130,6 @@ unx_match(struct auth_cred *acred, struct rpc_cred *rcred, int flags)
if (cred->uc_gids[i] != GROUP_AT(acred->group_info, i))
return 0;
return 1;
}
return (cred->uc_uid == 0
&& cred->uc_gid == 0
&& cred->uc_gids[0] == (gid_t) NOGROUP);
}
/*
......@@ -218,9 +210,7 @@ void __init rpc_init_authunix(void)
const struct rpc_authops authunix_ops = {
.owner = THIS_MODULE,
.au_flavor = RPC_AUTH_UNIX,
#ifdef RPC_DEBUG
.au_name = "UNIX",
#endif
.create = unx_create,
.destroy = unx_destroy,
.lookup_cred = unx_lookup_cred,
......@@ -245,6 +235,7 @@ static
const struct rpc_credops unix_credops = {
.cr_name = "AUTH_UNIX",
.crdestroy = unx_destroy_cred,
.crbind = rpcauth_generic_bind_cred,
.crmatch = unx_match,
.crmarshal = unx_marshal,
.crrefresh = unx_refresh,
......
......@@ -544,7 +544,7 @@ EXPORT_SYMBOL_GPL(rpc_run_task);
* @msg: RPC call parameters
* @flags: RPC call flags
*/
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg, int flags)
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
struct rpc_task *task;
struct rpc_task_setup task_setup_data = {
......@@ -575,7 +575,7 @@ EXPORT_SYMBOL_GPL(rpc_call_sync);
* @data: user call data
*/
int
rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg, int flags,
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
const struct rpc_call_ops *tk_ops, void *data)
{
struct rpc_task *task;
......@@ -1062,7 +1062,7 @@ call_transmit(struct rpc_task *task)
if (task->tk_msg.rpc_proc->p_decode != NULL)
return;
task->tk_action = rpc_exit_task;
rpc_wake_up_task(task);
rpc_wake_up_queued_task(&task->tk_xprt->pending, task);
}
/*
......@@ -1116,7 +1116,8 @@ call_status(struct rpc_task *task)
case -ETIMEDOUT:
task->tk_action = call_timeout;
if (task->tk_client->cl_discrtry)
xprt_force_disconnect(task->tk_xprt);
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
break;
case -ECONNREFUSED:
case -ENOTCONN:
......@@ -1168,6 +1169,11 @@ call_timeout(struct rpc_task *task)
clnt->cl_protname, clnt->cl_server);
}
rpc_force_rebind(clnt);
/*
* Did our request time out due to an RPCSEC_GSS out-of-sequence
* event? RFC2203 requires the server to drop all such requests.
*/
rpcauth_invalcred(task);
retry:
clnt->cl_stats->rpcretrans++;
......@@ -1195,18 +1201,6 @@ call_decode(struct rpc_task *task)
task->tk_flags &= ~RPC_CALL_MAJORSEEN;
}
if (task->tk_status < 12) {
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
clnt->cl_stats->rpcretrans++;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
clnt->cl_protname, task->tk_status);
task->tk_action = call_timeout;
goto out_retry;
}
/*
* Ensure that we see all writes made by xprt_complete_rqst()
* before it changed req->rq_received.
......@@ -1218,6 +1212,18 @@ call_decode(struct rpc_task *task)
WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf,
sizeof(req->rq_rcv_buf)) != 0);
if (req->rq_rcv_buf.len < 12) {
if (!RPC_IS_SOFT(task)) {
task->tk_action = call_bind;
clnt->cl_stats->rpcretrans++;
goto out_retry;
}
dprintk("RPC: %s: too small RPC reply size (%d bytes)\n",
clnt->cl_protname, task->tk_status);
task->tk_action = call_timeout;
goto out_retry;
}
/* Verify the RPC header */
p = call_verify(task);
if (IS_ERR(p)) {
......@@ -1236,10 +1242,14 @@ call_decode(struct rpc_task *task)
task->tk_status);
return;
out_retry:
req->rq_received = req->rq_private_buf.len = 0;
task->tk_status = 0;
/* Note: call_verify() may have freed the RPC slot */
if (task->tk_rqstp == req) {
req->rq_received = req->rq_rcv_buf.len = 0;
if (task->tk_client->cl_discrtry)
xprt_force_disconnect(task->tk_xprt);
xprt_conditional_disconnect(task->tk_xprt,
req->rq_connect_cookie);
}
}
/*
......@@ -1531,7 +1541,7 @@ void rpc_show_tasks(void)
proc = -1;
if (RPC_IS_QUEUED(t))
rpc_waitq = rpc_qname(t->u.tk_wait.rpc_waitq);
rpc_waitq = rpc_qname(t->tk_waitqueue);
printk("%5u %04d %04x %6d %8p %6d %8p %8ld %8s %8p %8p\n",
t->tk_pid, proc,
......
......@@ -298,7 +298,7 @@ void rpcb_getport_async(struct rpc_task *task)
/* Put self on queue before sending rpcbind request, in case
* rpcb_getport_done completes before we return from rpc_run_task */
rpc_sleep_on(&xprt->binding, task, NULL, NULL);
rpc_sleep_on(&xprt->binding, task, NULL);
/* Someone else may have bound if we slept */
if (xprt_bound(xprt)) {
......
......@@ -188,9 +188,9 @@ int xprt_reserve_xprt(struct rpc_task *task)
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (req && req->rq_ntrans)
rpc_sleep_on(&xprt->resend, task, NULL, NULL);
rpc_sleep_on(&xprt->resend, task, NULL);
else
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt);
......@@ -238,9 +238,9 @@ int xprt_reserve_xprt_cong(struct rpc_task *task)
task->tk_timeout = 0;
task->tk_status = -EAGAIN;
if (req && req->rq_ntrans)
rpc_sleep_on(&xprt->resend, task, NULL, NULL);
rpc_sleep_on(&xprt->resend, task, NULL);
else
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
rpc_sleep_on(&xprt->sending, task, NULL);
return 0;
}
EXPORT_SYMBOL_GPL(xprt_reserve_xprt_cong);
......@@ -447,13 +447,13 @@ EXPORT_SYMBOL_GPL(xprt_wake_pending_tasks);
* @task: task to be put to sleep
*
*/
void xprt_wait_for_buffer_space(struct rpc_task *task)
void xprt_wait_for_buffer_space(struct rpc_task *task, rpc_action action)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
task->tk_timeout = req->rq_timeout;
rpc_sleep_on(&xprt->pending, task, NULL, NULL);
rpc_sleep_on(&xprt->pending, task, action);
}
EXPORT_SYMBOL_GPL(xprt_wait_for_buffer_space);
......@@ -472,7 +472,7 @@ void xprt_write_space(struct rpc_xprt *xprt)
if (xprt->snd_task) {
dprintk("RPC: write space: waking waiting task on "
"xprt %p\n", xprt);
rpc_wake_up_task(xprt->snd_task);
rpc_wake_up_queued_task(&xprt->pending, xprt->snd_task);
}
spin_unlock_bh(&xprt->transport_lock);
}
......@@ -602,11 +602,37 @@ void xprt_force_disconnect(struct rpc_xprt *xprt)
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(rpciod_workqueue, &xprt->task_cleanup);
else if (xprt->snd_task != NULL)
rpc_wake_up_task(xprt->snd_task);
xprt_wake_pending_tasks(xprt, -ENOTCONN);
spin_unlock_bh(&xprt->transport_lock);
}
/**
* xprt_conditional_disconnect - force a transport to disconnect
* @xprt: transport to disconnect
* @cookie: 'connection cookie'
*
* This attempts to break the connection if and only if 'cookie' matches
* the current transport 'connection cookie'. It ensures that we don't
* try to break the connection more than once when we need to retransmit
* a batch of RPC requests.
*
*/
void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie)
{
/* Don't race with the test_bit() in xprt_clear_locked() */
spin_lock_bh(&xprt->transport_lock);
if (cookie != xprt->connect_cookie)
goto out;
if (test_bit(XPRT_CLOSING, &xprt->state) || !xprt_connected(xprt))
goto out;
set_bit(XPRT_CLOSE_WAIT, &xprt->state);
/* Try to schedule an autoclose RPC call */
if (test_and_set_bit(XPRT_LOCKED, &xprt->state) == 0)
queue_work(rpciod_workqueue, &xprt->task_cleanup);
xprt_wake_pending_tasks(xprt, -ENOTCONN);
out:
spin_unlock_bh(&xprt->transport_lock);
}
EXPORT_SYMBOL_GPL(xprt_force_disconnect);
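A condensed view of how the new connection cookie is used (both lines appear elsewhere in this diff, in xprt_transmit() and call_status() respectively):
req->rq_connect_cookie = xprt->connect_cookie;	/* snapshot taken when the request is transmitted */
xprt_conditional_disconnect(task->tk_xprt, req->rq_connect_cookie);	/* disconnects only if the transport has not already been cycled */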
static void
xprt_init_autodisconnect(unsigned long data)
......@@ -653,7 +679,7 @@ void xprt_connect(struct rpc_task *task)
task->tk_rqstp->rq_bytes_sent = 0;
task->tk_timeout = xprt->connect_timeout;
rpc_sleep_on(&xprt->pending, task, xprt_connect_status, NULL);
rpc_sleep_on(&xprt->pending, task, xprt_connect_status);
xprt->stat.connect_start = jiffies;
xprt->ops->connect(task);
}
......@@ -749,18 +775,20 @@ EXPORT_SYMBOL_GPL(xprt_update_rtt);
void xprt_complete_rqst(struct rpc_task *task, int copied)
{
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
dprintk("RPC: %5u xid %08x complete (%d bytes received)\n",
task->tk_pid, ntohl(req->rq_xid), copied);
task->tk_xprt->stat.recvs++;
xprt->stat.recvs++;
task->tk_rtt = (long)jiffies - req->rq_xtime;
list_del_init(&req->rq_list);
req->rq_private_buf.len = copied;
/* Ensure all writes are done before we update req->rq_received */
smp_wmb();
req->rq_received = req->rq_private_buf.len = copied;
rpc_wake_up_task(task);
req->rq_received = copied;
rpc_wake_up_queued_task(&xprt->pending, task);
}
EXPORT_SYMBOL_GPL(xprt_complete_rqst);
......@@ -769,17 +797,17 @@ static void xprt_timer(struct rpc_task *task)
struct rpc_rqst *req = task->tk_rqstp;
struct rpc_xprt *xprt = req->rq_xprt;
if (task->tk_status != -ETIMEDOUT)
return;
dprintk("RPC: %5u xprt_timer\n", task->tk_pid);
spin_lock(&xprt->transport_lock);
spin_lock_bh(&xprt->transport_lock);
if (!req->rq_received) {
if (xprt->ops->timer)
xprt->ops->timer(task);
task->tk_status = -ETIMEDOUT;
}
task->tk_timeout = 0;
rpc_wake_up_task(task);
spin_unlock(&xprt->transport_lock);
} else
task->tk_status = 0;
spin_unlock_bh(&xprt->transport_lock);
}
/**
......@@ -849,6 +877,7 @@ void xprt_transmit(struct rpc_task *task)
} else if (!req->rq_bytes_sent)
return;
req->rq_connect_cookie = xprt->connect_cookie;
status = xprt->ops->send_request(task);
if (status == 0) {
dprintk("RPC: %5u xmit complete\n", task->tk_pid);
......@@ -864,7 +893,7 @@ void xprt_transmit(struct rpc_task *task)
if (!xprt_connected(xprt))
task->tk_status = -ENOTCONN;
else if (!req->rq_received)
rpc_sleep_on(&xprt->pending, task, NULL, xprt_timer);
rpc_sleep_on(&xprt->pending, task, xprt_timer);
spin_unlock_bh(&xprt->transport_lock);
return;
}
......@@ -875,7 +904,7 @@ void xprt_transmit(struct rpc_task *task)
*/
task->tk_status = status;
if (status == -ECONNREFUSED)
rpc_sleep_on(&xprt->sending, task, NULL, NULL);
rpc_sleep_on(&xprt->sending, task, NULL);
}
static inline void do_xprt_reserve(struct rpc_task *task)
......@@ -895,7 +924,7 @@ static inline void do_xprt_reserve(struct rpc_task *task)
dprintk("RPC: waiting for request slot\n");
task->tk_status = -EAGAIN;
task->tk_timeout = 0;
rpc_sleep_on(&xprt->backlog, task, NULL, NULL);
rpc_sleep_on(&xprt->backlog, task, NULL);
}
/**
......@@ -1052,6 +1081,11 @@ static void xprt_destroy(struct kref *kref)
xprt->shutdown = 1;
del_timer_sync(&xprt->timer);
rpc_destroy_wait_queue(&xprt->binding);
rpc_destroy_wait_queue(&xprt->pending);
rpc_destroy_wait_queue(&xprt->sending);
rpc_destroy_wait_queue(&xprt->resend);
rpc_destroy_wait_queue(&xprt->backlog);
/*
* Tear down transport state and free the rpc_xprt
*/
......