Commit 47853e7f authored by Linus Torvalds
parents 221fc10e 9e56904e
......@@ -910,6 +910,14 @@ running once the system is up.
nfsroot= [NFS] nfs root filesystem for disk-less boxes.
See Documentation/nfsroot.txt.
nfs.callback_tcpport=
[NFS] set the TCP port on which the NFSv4 callback
channel should listen.
nfs.idmap_cache_timeout=
[NFS] set the maximum lifetime for idmapper cache
entries.
nmi_watchdog= [KNL,BUGS=IA-32] Debugging features for SMP kernels
no387 [BUGS=IA-32] Tells the kernel to use the 387 maths
......
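An illustrative use of the two new parameters documented above (the values are examples, not part of this commit): with NFS built into the kernel they go on the boot command line, e.g. nfs.callback_tcpport=4048 nfs.idmap_cache_timeout=300, while on a modular build the prefix is dropped, e.g. modprobe nfs callback_tcpport=4048 idmap_cache_timeout=300. The timeout is given in seconds; the conversion to jiffies happens in the parameter setter added later in this commit.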
......@@ -26,11 +26,12 @@
static int nlmclnt_test(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_lock(struct nlm_rqst *, struct file_lock *);
static int nlmclnt_unlock(struct nlm_rqst *, struct file_lock *);
static void nlmclnt_unlock_callback(struct rpc_task *);
static void nlmclnt_cancel_callback(struct rpc_task *);
static int nlm_stat_to_errno(u32 stat);
static void nlmclnt_locks_init_private(struct file_lock *fl, struct nlm_host *host);
static const struct rpc_call_ops nlmclnt_unlock_ops;
static const struct rpc_call_ops nlmclnt_cancel_ops;
/*
* Cookie counter for NLM requests
*/
......@@ -222,7 +223,6 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
}
clnt->cl_softrtry = nfssrv->client->cl_softrtry;
clnt->cl_intr = nfssrv->client->cl_intr;
clnt->cl_chatty = nfssrv->client->cl_chatty;
}
/* Keep the old signal mask */
......@@ -399,8 +399,7 @@ nlmclnt_call(struct nlm_rqst *req, u32 proc)
/*
* Generic NLM call, async version.
*/
int
nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
int nlmsvc_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
......@@ -419,13 +418,12 @@ nlmsvc_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
msg.rpc_proc = &clnt->cl_procinfo[proc];
/* bootstrap and kick off the async RPC call */
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
return status;
}
static int
nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
static int nlmclnt_async_call(struct nlm_rqst *req, u32 proc, const struct rpc_call_ops *tk_ops)
{
struct nlm_host *host = req->a_host;
struct rpc_clnt *clnt;
......@@ -448,7 +446,7 @@ nlmclnt_async_call(struct nlm_rqst *req, u32 proc, rpc_action callback)
/* Increment host refcount */
nlm_get_host(host);
/* bootstrap and kick off the async RPC call */
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, callback, req);
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, tk_ops, req);
if (status < 0)
nlm_release_host(host);
return status;
......@@ -664,7 +662,7 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
if (req->a_flags & RPC_TASK_ASYNC) {
status = nlmclnt_async_call(req, NLMPROC_UNLOCK,
nlmclnt_unlock_callback);
&nlmclnt_unlock_ops);
/* Hrmf... Do the unlock early since locks_remove_posix()
* really expects us to free the lock synchronously */
do_vfs_lock(fl);
......@@ -692,10 +690,9 @@ nlmclnt_unlock(struct nlm_rqst *req, struct file_lock *fl)
return -ENOLCK;
}
static void
nlmclnt_unlock_callback(struct rpc_task *task)
static void nlmclnt_unlock_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
struct nlm_rqst *req = data;
int status = req->a_res.status;
if (RPC_ASSASSINATED(task))
......@@ -722,6 +719,10 @@ nlmclnt_unlock_callback(struct rpc_task *task)
rpc_restart_call(task);
}
static const struct rpc_call_ops nlmclnt_unlock_ops = {
.rpc_call_done = nlmclnt_unlock_callback,
};
/*
* Cancel a blocked lock request.
* We always use an async RPC call for this in order not to hang a
......@@ -750,8 +751,7 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
nlmclnt_setlockargs(req, fl);
status = nlmclnt_async_call(req, NLMPROC_CANCEL,
nlmclnt_cancel_callback);
status = nlmclnt_async_call(req, NLMPROC_CANCEL, &nlmclnt_cancel_ops);
if (status < 0) {
nlmclnt_release_lockargs(req);
kfree(req);
......@@ -765,10 +765,9 @@ nlmclnt_cancel(struct nlm_host *host, struct file_lock *fl)
return status;
}
static void
nlmclnt_cancel_callback(struct rpc_task *task)
static void nlmclnt_cancel_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *req = (struct nlm_rqst *) task->tk_calldata;
struct nlm_rqst *req = data;
if (RPC_ASSASSINATED(task))
goto die;
......@@ -807,6 +806,10 @@ nlmclnt_cancel_callback(struct rpc_task *task)
rpc_delay(task, 30 * HZ);
}
static const struct rpc_call_ops nlmclnt_cancel_ops = {
.rpc_call_done = nlmclnt_cancel_callback,
};
/*
* Convert an NLM status code to a generic kernel errno
*/
......
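The conversion above is the pattern this commit applies throughout lockd and the NFS client: the bare rpc_action completion callback, which had to dig its private data out of task->tk_calldata, is replaced by a const struct rpc_call_ops whose handlers receive the calldata pointer directly. A minimal sketch of the two shapes, with made-up names (my_done, my_ops, struct my_req) standing in for the real ones:

    /* old style: a bare function pointer; data recovered from the task */
    static void my_done(struct rpc_task *task)
    {
            struct my_req *req = (struct my_req *) task->tk_calldata;
            /* inspect task->tk_status, then clean up req */
    }
    /* rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, my_done, req); */

    /* new style: an ops table; the calldata arrives as a second argument */
    static void my_done(struct rpc_task *task, void *data)
    {
            struct my_req *req = data;
            /* inspect task->tk_status, then clean up req */
    }
    static const struct rpc_call_ops my_ops = {
            .rpc_call_done  = my_done,
            /* .rpc_call_prepare and .rpc_release are further optional hooks */
    };
    /* rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &my_ops, req); */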
......@@ -177,7 +177,7 @@ nlm_bind_host(struct nlm_host *host)
if ((clnt = host->h_rpcclnt) != NULL) {
xprt = clnt->cl_xprt;
if (time_after_eq(jiffies, host->h_nextrebind)) {
clnt->cl_port = 0;
rpc_force_rebind(clnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
dprintk("lockd: next rebind in %ld jiffies\n",
host->h_nextrebind - jiffies);
......@@ -217,7 +217,7 @@ nlm_rebind_host(struct nlm_host *host)
{
dprintk("lockd: rebind host %s\n", host->h_name);
if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
host->h_rpcclnt->cl_port = 0;
rpc_force_rebind(host->h_rpcclnt);
host->h_nextrebind = jiffies + NLM_HOST_REBIND;
}
}
......
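Both hunks above stop clearing clnt->cl_port (or h_rpcclnt->cl_port) by hand and call the new rpc_force_rebind() helper instead. A sketch of what the helper is assumed to do, not the actual net/sunrpc implementation:

    /* conceptually: forget the cached server port so that the next RPC call
     * re-queries the remote portmapper instead of reusing a stale binding */
    static inline void force_rebind_sketch(struct rpc_clnt *clnt)
    {
            clnt->cl_port = 0;      /* 0 means unbound; exactly what the old code set */
    }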
......@@ -123,7 +123,6 @@ nsm_create(void)
if (IS_ERR(clnt))
goto out_err;
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
return clnt;
......
......@@ -178,6 +178,8 @@ lockd(struct svc_rqst *rqstp)
}
flush_signals(current);
/*
* Check whether there's a new lockd process before
* shutting down the hosts and clearing the slot.
......@@ -192,8 +194,6 @@ lockd(struct svc_rqst *rqstp)
"lockd: new process, skipping host shutdown\n");
wake_up(&lockd_exit);
flush_signals(current);
/* Exit the RPC thread */
svc_exit_thread(rqstp);
......
......@@ -22,7 +22,8 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlm4svc_callback(struct svc_rqst *, u32, struct nlm_res *);
static void nlm4svc_callback_exit(struct rpc_task *);
static const struct rpc_call_ops nlm4svc_callback_ops;
/*
* Obtain client and file from arguments
......@@ -470,7 +471,6 @@ nlm4svc_proc_granted_res(struct svc_rqst *rqstp, struct nlm_res *argp,
}
/*
* This is the generic lockd callback for async RPC calls
*/
......@@ -494,7 +494,7 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, nlm4svc_callback_exit) < 0)
if (nlmsvc_async_call(call, proc, &nlm4svc_callback_ops) < 0)
goto error;
return rpc_success;
......@@ -504,10 +504,9 @@ nlm4svc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
return rpc_system_err;
}
static void
nlm4svc_callback_exit(struct rpc_task *task)
static void nlm4svc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
......@@ -517,6 +516,10 @@ nlm4svc_callback_exit(struct rpc_task *task)
kfree(call);
}
static const struct rpc_call_ops nlm4svc_callback_ops = {
.rpc_call_done = nlm4svc_callback_exit,
};
/*
* NLM Server procedures.
*/
......
......@@ -41,7 +41,8 @@
static void nlmsvc_insert_block(struct nlm_block *block, unsigned long);
static int nlmsvc_remove_block(struct nlm_block *block);
static void nlmsvc_grant_callback(struct rpc_task *task);
static const struct rpc_call_ops nlmsvc_grant_ops;
/*
* The list of blocked locks to retry
......@@ -226,31 +227,27 @@ nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_file *file,
* It is the caller's responsibility to check whether the file
* can be closed hereafter.
*/
static void
static int
nlmsvc_delete_block(struct nlm_block *block, int unlock)
{
struct file_lock *fl = &block->b_call.a_args.lock.fl;
struct nlm_file *file = block->b_file;
struct nlm_block **bp;
int status = 0;
dprintk("lockd: deleting block %p...\n", block);
/* Remove block from list */
nlmsvc_remove_block(block);
if (fl->fl_next)
posix_unblock_lock(file->f_file, fl);
if (unlock) {
fl->fl_type = F_UNLCK;
posix_lock_file(file->f_file, fl);
block->b_granted = 0;
}
if (unlock)
status = posix_unblock_lock(file->f_file, fl);
/* If the block is in the middle of a GRANT callback,
* don't kill it yet. */
if (block->b_incall) {
nlmsvc_insert_block(block, NLM_NEVER);
block->b_done = 1;
return;
return status;
}
/* Remove block from file's list of blocks */
......@@ -265,6 +262,7 @@ nlmsvc_delete_block(struct nlm_block *block, int unlock)
nlm_release_host(block->b_host);
nlmclnt_freegrantargs(&block->b_call);
kfree(block);
return status;
}
/*
......@@ -275,6 +273,7 @@ int
nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, int action)
{
struct nlm_block *block, *next;
/* XXX: Will everything get cleaned up if we don't unlock here? */
down(&file->f_sema);
for (block = file->f_blocks; block; block = next) {
......@@ -444,6 +443,7 @@ u32
nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
{
struct nlm_block *block;
int status = 0;
dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n",
file->f_file->f_dentry->d_inode->i_sb->s_id,
......@@ -454,9 +454,9 @@ nlmsvc_cancel_blocked(struct nlm_file *file, struct nlm_lock *lock)
down(&file->f_sema);
if ((block = nlmsvc_lookup_block(file, lock, 1)) != NULL)
nlmsvc_delete_block(block, 1);
status = nlmsvc_delete_block(block, 1);
up(&file->f_sema);
return nlm_granted;
return status ? nlm_lck_denied : nlm_granted;
}
/*
......@@ -562,7 +562,7 @@ nlmsvc_grant_blocked(struct nlm_block *block)
/* Call the client */
nlm_get_host(block->b_call.a_host);
if (nlmsvc_async_call(&block->b_call, NLMPROC_GRANTED_MSG,
nlmsvc_grant_callback) < 0)
&nlmsvc_grant_ops) < 0)
nlm_release_host(block->b_call.a_host);
up(&file->f_sema);
}
......@@ -575,10 +575,9 @@ nlmsvc_grant_blocked(struct nlm_block *block)
* chain once more in order to have it removed by lockd itself (which can
* then sleep on the file semaphore without disrupting e.g. the nfs client).
*/
static void
nlmsvc_grant_callback(struct rpc_task *task)
static void nlmsvc_grant_callback(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
struct nlm_rqst *call = data;
struct nlm_block *block;
unsigned long timeout;
struct sockaddr_in *peer_addr = RPC_PEERADDR(task->tk_client);
......@@ -614,6 +613,10 @@ nlmsvc_grant_callback(struct rpc_task *task)
nlm_release_host(call->a_host);
}
static const struct rpc_call_ops nlmsvc_grant_ops = {
.rpc_call_done = nlmsvc_grant_callback,
};
/*
* We received a GRANT_RES callback. Try to find the corresponding
* block.
......@@ -633,11 +636,12 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status
file->f_count++;
down(&file->f_sema);
if ((block = nlmsvc_find_block(cookie,&rqstp->rq_addr)) != NULL) {
block = nlmsvc_find_block(cookie, &rqstp->rq_addr);
if (block) {
if (status == NLM_LCK_DENIED_GRACE_PERIOD) {
/* Try again in a couple of seconds */
nlmsvc_insert_block(block, 10 * HZ);
block = NULL;
up(&file->f_sema);
} else {
/* Lock is now held by client, or has been rejected.
* In both cases, the block should be removed. */
......@@ -648,8 +652,6 @@ nlmsvc_grant_reply(struct svc_rqst *rqstp, struct nlm_cookie *cookie, u32 status
nlmsvc_delete_block(block, 1);
}
}
if (!block)
up(&file->f_sema);
nlm_release_file(file);
}
......
......@@ -23,7 +23,8 @@
#define NLMDBG_FACILITY NLMDBG_CLIENT
static u32 nlmsvc_callback(struct svc_rqst *, u32, struct nlm_res *);
static void nlmsvc_callback_exit(struct rpc_task *);
static const struct rpc_call_ops nlmsvc_callback_ops;
#ifdef CONFIG_LOCKD_V4
static u32
......@@ -518,7 +519,7 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
call->a_host = host;
memcpy(&call->a_args, resp, sizeof(*resp));
if (nlmsvc_async_call(call, proc, nlmsvc_callback_exit) < 0)
if (nlmsvc_async_call(call, proc, &nlmsvc_callback_ops) < 0)
goto error;
return rpc_success;
......@@ -528,10 +529,9 @@ nlmsvc_callback(struct svc_rqst *rqstp, u32 proc, struct nlm_res *resp)
return rpc_system_err;
}
static void
nlmsvc_callback_exit(struct rpc_task *task)
static void nlmsvc_callback_exit(struct rpc_task *task, void *data)
{
struct nlm_rqst *call = (struct nlm_rqst *) task->tk_calldata;
struct nlm_rqst *call = data;
if (task->tk_status < 0) {
dprintk("lockd: %4d callback failed (errno = %d)\n",
......@@ -541,6 +541,10 @@ nlmsvc_callback_exit(struct rpc_task *task)
kfree(call);
}
static const struct rpc_call_ops nlmsvc_callback_ops = {
.rpc_call_done = nlmsvc_callback_exit,
};
/*
* NLM Server procedures.
*/
......
......@@ -355,6 +355,8 @@ nlm4svc_decode_reboot(struct svc_rqst *rqstp, u32 *p, struct nlm_reboot *argp)
argp->state = ntohl(*p++);
/* Preserve the address in network byte order */
argp->addr = *p++;
argp->vers = *p++;
argp->proto = *p++;
return xdr_argsize_check(rqstp, p);
}
......
......@@ -1958,22 +1958,18 @@ EXPORT_SYMBOL(posix_block_lock);
*
* lockd needs to block waiting for locks.
*/
void
int
posix_unblock_lock(struct file *filp, struct file_lock *waiter)
{
/*
* A remote machine may cancel the lock request after it's been
* granted locally. If that happens, we need to delete the lock.
*/
int status = 0;
lock_kernel();
if (waiter->fl_next) {
if (waiter->fl_next)
__locks_delete_block(waiter);
else
status = -ENOENT;
unlock_kernel();
} else {
unlock_kernel();
waiter->fl_type = F_UNLCK;
posix_lock_file(filp, waiter);
}
return status;
}
EXPORT_SYMBOL(posix_unblock_lock);
......
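With posix_unblock_lock() returning a status, lockd can now tell whether the waiter was actually still blocked. The resulting error propagation, condensed from the fs/lockd/svclock.c hunks earlier in this commit:

    /* nlmsvc_cancel_blocked()
     *   -> nlmsvc_delete_block(block, 1)
     *        -> posix_unblock_lock(file->f_file, fl)   returns 0 or -ENOENT
     *   return status ? nlm_lck_denied : nlm_granted;
     */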
......@@ -13,4 +13,5 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
delegation.o idmap.o \
callback.o callback_xdr.o callback_proc.o
nfs-$(CONFIG_NFS_DIRECTIO) += direct.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
nfs-objs := $(nfs-y)
......@@ -34,6 +34,7 @@ static struct nfs_callback_data nfs_callback_info;
static DECLARE_MUTEX(nfs_callback_sema);
static struct svc_program nfs4_callback_program;
unsigned int nfs_callback_set_tcpport;
unsigned short nfs_callback_tcpport;
/*
......@@ -98,7 +99,7 @@ int nfs_callback_up(void)
if (!serv)
goto out_err;
/* FIXME: We don't want to register this socket with the portmapper */
ret = svc_makesock(serv, IPPROTO_TCP, 0);
ret = svc_makesock(serv, IPPROTO_TCP, nfs_callback_set_tcpport);
if (ret < 0)
goto out_destroy;
if (!list_empty(&serv->sv_permsocks)) {
......
......@@ -65,6 +65,7 @@ extern unsigned nfs4_callback_recall(struct cb_recallargs *args, void *dummy);
extern int nfs_callback_up(void);
extern int nfs_callback_down(void);
extern unsigned int nfs_callback_set_tcpport;
extern unsigned short nfs_callback_tcpport;
#endif /* __LINUX_FS_NFS_CALLBACK_H */
......@@ -35,7 +35,9 @@ unsigned nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres
if (delegation == NULL || (delegation->type & FMODE_WRITE) == 0)
goto out_iput;
res->size = i_size_read(inode);
res->change_attr = NFS_CHANGE_ATTR(inode);
res->change_attr = delegation->change_attr;
if (nfsi->npages != 0)
res->change_attr++;
res->ctime = inode->i_ctime;
res->mtime = inode->i_mtime;
res->bitmap[0] = (FATTR4_WORD0_CHANGE|FATTR4_WORD0_SIZE) &
......
......@@ -8,6 +8,7 @@
*/
#include <linux/config.h>
#include <linux/completion.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
......@@ -130,6 +131,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
sizeof(delegation->stateid.data));
delegation->type = res->delegation_type;
delegation->maxsize = res->maxsize;
delegation->change_attr = nfsi->change_attr;
delegation->cred = get_rpccred(cred);
delegation->inode = inode;
......@@ -157,8 +159,6 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
{
int res = 0;
__nfs_revalidate_inode(NFS_SERVER(inode), inode);
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
nfs_free_delegation(delegation);
return res;
......@@ -231,6 +231,49 @@ void nfs_return_all_delegations(struct super_block *sb)
spin_unlock(&clp->cl_lock);
}
int nfs_do_expire_all_delegations(void *ptr)
{
struct nfs4_client *clp = ptr;
struct nfs_delegation *delegation;
struct inode *inode;
allow_signal(SIGKILL);
restart:
spin_lock(&clp->cl_lock);
if (test_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) != 0)
goto out;
if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0)
goto out;
list_for_each_entry(delegation, &clp->cl_delegations, super_list) {
inode = igrab(delegation->inode);
if (inode == NULL)
continue;
spin_unlock(&clp->cl_lock);
nfs_inode_return_delegation(inode);
iput(inode);
goto restart;
}
out:
spin_unlock(&clp->cl_lock);
nfs4_put_client(clp);
module_put_and_exit(0);
}
void nfs_expire_all_delegations(struct nfs4_client *clp)
{
struct task_struct *task;
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(nfs_do_expire_all_delegations, clp,
"%u.%u.%u.%u-delegreturn",
NIPQUAD(clp->cl_addr));
if (!IS_ERR(task))
return;
nfs4_put_client(clp);
module_put(THIS_MODULE);
}
/*
* Return all delegations following an NFS4ERR_CB_PATH_DOWN error.
*/
......
......@@ -21,6 +21,7 @@ struct nfs_delegation {
#define NFS_DELEGATION_NEED_RECLAIM 1
long flags;
loff_t maxsize;
__u64 change_attr;
};
int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
......@@ -30,6 +31,7 @@ int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *s
struct inode *nfs_delegation_find_inode(struct nfs4_client *clp, const struct nfs_fh *fhandle);
void nfs_return_all_delegations(struct super_block *sb);
void nfs_expire_all_delegations(struct nfs4_client *clp);
void nfs_handle_cb_pathdown(struct nfs4_client *clp);
void nfs_delegation_mark_reclaim(struct nfs4_client *clp);
......
......@@ -1550,8 +1550,10 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
}
nfs_inode_return_delegation(old_inode);
if (new_inode)
if (new_inode != NULL) {
nfs_inode_return_delegation(new_inode);
d_delete(new_dentry);
}
nfs_begin_data_update(old_dir);
nfs_begin_data_update(new_dir);
......
......@@ -122,9 +122,10 @@ nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
{
int i;
for (i = 0; i < npages; i++) {
if (do_dirty)
set_page_dirty_lock(pages[i]);
page_cache_release(pages[i]);
struct page *page = pages[i];
if (do_dirty && !PageCompound(page))
set_page_dirty_lock(page);
page_cache_release(page);
}
kfree(pages);
}
......@@ -154,6 +155,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int
struct list_head *list;
struct nfs_direct_req *dreq;
unsigned int reads = 0;
unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
if (!dreq)
......@@ -167,7 +169,7 @@ static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, unsigned int
list = &dreq->list;
for(;;) {
struct nfs_read_data *data = nfs_readdata_alloc();
struct nfs_read_data *data = nfs_readdata_alloc(rpages);
if (unlikely(!data)) {
while (!list_empty(list)) {
......@@ -268,8 +270,6 @@ static void nfs_direct_read_schedule(struct nfs_direct_req *dreq,
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long) inode;
data->task.tk_calldata = data;
data->task.tk_release = nfs_readdata_release;
data->complete = nfs_direct_read_result;
lock_kernel();
......@@ -433,7 +433,7 @@ static ssize_t nfs_direct_write_seg(struct inode *inode,
struct nfs_writeverf first_verf;
struct nfs_write_data *wdata;
wdata = nfs_writedata_alloc();
wdata = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
if (!wdata)
return -ENOMEM;
......@@ -662,10 +662,10 @@ nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
.iov_len = count,
};
dprintk("nfs: direct read(%s/%s, %lu@%lu)\n",
dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
file->f_dentry->d_parent->d_name.name,
file->f_dentry->d_name.name,
(unsigned long) count, (unsigned long) pos);
(unsigned long) count, (long long) pos);
if (!is_sync_kiocb(iocb))
goto out;
......@@ -718,9 +718,7 @@ nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t
ssize_t
nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
{
ssize_t retval = -EINVAL;
loff_t *ppos = &iocb->ki_pos;
unsigned long limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
ssize_t retval;
struct file *file = iocb->ki_filp;
struct nfs_open_context *ctx =
(struct nfs_open_context *) file->private_data;
......@@ -728,35 +726,32 @@ nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count,
struct inode *inode = mapping->host;
struct iovec iov = {
.iov_base = (char __user *)buf,
.iov_len = count,
};
dfprintk(VFS, "nfs: direct write(%s/%s(%ld), %lu@%lu)\n",
dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
file->f_dentry->d_parent->d_name.name,
file->f_dentry->d_name.name, inode->i_ino,
(unsigned long) count, (unsigned long) pos);
file->f_dentry->d_name.name,
(unsigned long) count, (long long) pos);
retval = -EINVAL;
if (!is_sync_kiocb(iocb))
goto out;
if (count < 0)
goto out;
if (pos < 0)
goto out;
retval = -EFAULT;
if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
retval = generic_write_checks(file, &pos, &count, 0);
if (retval)
goto out;
retval = -EFBIG;
if (limit != RLIM_INFINITY) {
if (pos >= limit) {
send_sig(SIGXFSZ, current, 0);
retval = -EINVAL;
if ((ssize_t) count < 0)
goto out;
}
if (count > limit - (unsigned long) pos)
count = limit - (unsigned long) pos;
}
retval = 0;
if (!count)
goto out;
iov.iov_len = count,
retval = -EFAULT;
if (!access_ok(VERIFY_READ, iov.iov_base, iov.iov_len))
goto out;
retval = nfs_sync_mapping(mapping);
if (retval)
......@@ -766,7 +761,7 @@ nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count,
if (mapping->nrpages)
invalidate_inode_pages2(mapping);
if (retval > 0)
*ppos = pos + retval;
iocb->ki_pos = pos + retval;
out:
return retval;
......
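The nfs_file_direct_write() rewrite above drops the open-coded RLIMIT_FSIZE and SIGXFSZ handling and lets generic_write_checks() clamp the byte count and enforce the limits. The new sequence of checks, condensed from the hunk:

    /* retval = generic_write_checks(file, &pos, &count, 0);   may shrink count
     * if (retval) goto out;
     * if ((ssize_t) count < 0)                  -> -EINVAL
     * if (count == 0)                           -> return 0, nothing to write
     * if (!access_ok(VERIFY_READ, buf, count))  -> -EFAULT
     * ... do the write ...
     * if (retval > 0) iocb->ki_pos = pos + retval;
     */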
......@@ -54,7 +54,11 @@
#define IDMAP_HASH_SZ 128
/* Default cache timeout is 10 minutes */
unsigned int nfs_idmap_cache_timeout = 600 * HZ;
struct idmap_hashent {
unsigned long ih_expires;
__u32 ih_id;
int ih_namelen;
char ih_name[IDMAP_NAMESZ];
......@@ -149,6 +153,8 @@ idmap_lookup_name(struct idmap_hashtable *h, const char *name, size_t len)
if (he->ih_namelen != len || memcmp(he->ih_name, name, len) != 0)
return NULL;
if (time_after(jiffies, he->ih_expires))
return NULL;
return he;
}
......@@ -164,6 +170,8 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
struct idmap_hashent *he = idmap_id_hash(h, id);
if (he->ih_id != id || he->ih_namelen == 0)
return NULL;
if (time_after(jiffies, he->ih_expires))
return NULL;
return he;
}
......@@ -192,6 +200,7 @@ idmap_update_entry(struct idmap_hashent *he, const char *name,
memcpy(he->ih_name, name, namelen);
he->ih_name[namelen] = '\0';
he->ih_namelen = namelen;
he->ih_expires = jiffies + nfs_idmap_cache_timeout;
}
/*
......
......@@ -40,6 +40,7 @@
#include <asm/uaccess.h>
#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_VFS
......@@ -221,10 +222,10 @@ nfs_calc_block_size(u64 tsize)
static inline unsigned long
nfs_block_size(unsigned long bsize, unsigned char *nrbitsp)
{
if (bsize < 1024)
bsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
else if (bsize >= NFS_MAX_FILE_IO_BUFFER_SIZE)
bsize = NFS_MAX_FILE_IO_BUFFER_SIZE;
if (bsize < NFS_MIN_FILE_IO_SIZE)
bsize = NFS_DEF_FILE_IO_SIZE;
else if (bsize >= NFS_MAX_FILE_IO_SIZE)
bsize = NFS_MAX_FILE_IO_SIZE;
return nfs_block_bits(bsize, nrbitsp);
}
......@@ -307,20 +308,15 @@ nfs_sb_init(struct super_block *sb, rpc_authflavor_t authflavor)
max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
if (server->rsize > max_rpc_payload)
server->rsize = max_rpc_payload;
if (server->wsize > max_rpc_payload)
server->wsize = max_rpc_payload;
if (server->rsize > NFS_MAX_FILE_IO_SIZE)
server->rsize = NFS_MAX_FILE_IO_SIZE;
server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (server->rpages > NFS_READ_MAXIOV) {
server->rpages = NFS_READ_MAXIOV;
server->rsize = server->rpages << PAGE_CACHE_SHIFT;
}
if (server->wsize > max_rpc_payload)
server->wsize = max_rpc_payload;
if (server->wsize > NFS_MAX_FILE_IO_SIZE)
server->wsize = NFS_MAX_FILE_IO_SIZE;
server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (server->wpages > NFS_WRITE_MAXIOV) {
server->wpages = NFS_WRITE_MAXIOV;
server->wsize = server->wpages << PAGE_CACHE_SHIFT;
}
if (sb->s_blocksize == 0)
sb->s_blocksize = nfs_block_bits(server->wsize,
......@@ -417,7 +413,6 @@ nfs_create_client(struct nfs_server *server, const struct nfs_mount_data *data)
clnt->cl_intr = 1;
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
return clnt;
......@@ -575,11 +570,10 @@ nfs_statfs(struct super_block *sb, struct kstatfs *buf)
buf->f_namelen = server->namelen;
out:
unlock_kernel();
return 0;
out_err:
printk(KERN_WARNING "nfs_statfs: statfs error = %d\n", -error);
dprintk("%s: statfs error = %d\n", __FUNCTION__, -error);
buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1;
goto out;
......@@ -958,6 +952,8 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err;
/* Flush out writes to the server in order to update c/mtime */
nfs_sync_inode(inode, 0, 0, FLUSH_WAIT|FLUSH_NOCOMMIT);
if (__IS_FLG(inode, MS_NOATIME))
need_atime = 0;
else if (__IS_FLG(inode, MS_NODIRATIME) && S_ISDIR(inode->i_mode))
......@@ -1252,6 +1248,33 @@ void nfs_end_data_update(struct inode *inode)
atomic_dec(&nfsi->data_updates);
}
static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0
&& nfsi->change_attr == fattr->pre_change_attr) {
nfsi->change_attr = fattr->change_attr;
nfsi->cache_change_attribute = jiffies;
}
/* If we have atomic WCC data, we may update some attributes */
if ((fattr->valid & NFS_ATTR_WCC) != 0) {
if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime)) {
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
nfsi->cache_change_attribute = jiffies;
}
if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
nfsi->cache_change_attribute = jiffies;
}
if (inode->i_size == fattr->pre_size && nfsi->npages == 0) {
inode->i_size = fattr->size;
nfsi->cache_change_attribute = jiffies;
}
}
}
/**
* nfs_check_inode_attributes - verify consistency of the inode attribute cache
* @inode - pointer to inode
......@@ -1268,23 +1291,21 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
int data_unstable;
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
/* Are we in the process of updating data on the server? */
data_unstable = nfs_caches_unstable(inode);
if (fattr->valid & NFS_ATTR_FATTR_V4) {
if ((fattr->valid & NFS_ATTR_PRE_CHANGE) != 0
&& nfsi->change_attr == fattr->pre_change_attr)
nfsi->change_attr = fattr->change_attr;
if (nfsi->change_attr != fattr->change_attr) {
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
if ((fattr->valid & NFS_ATTR_FATTR_V4) != 0 &&
nfsi->change_attr != fattr->change_attr) {
nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
if (!data_unstable)
nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE;
}
}
if ((fattr->valid & NFS_ATTR_FATTR) == 0) {
return 0;
}
/* Has the inode gone and changed behind our back? */
if (nfsi->fileid != fattr->fileid
......@@ -1295,14 +1316,6 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
cur_size = i_size_read(inode);
new_isize = nfs_size_to_loff_t(fattr->size);
/* If we have atomic WCC data, we may update some attributes */
if ((fattr->valid & NFS_ATTR_WCC) != 0) {
if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime))
memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
}
/* Verify a few of the more important attributes */
if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
......@@ -1410,14 +1423,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if ((fattr->valid & NFS_ATTR_FATTR) == 0)
return 0;
if (nfsi->fileid != fattr->fileid) {
printk(KERN_ERR "%s: inode number mismatch\n"
"expected (%s/0x%Lx), got (%s/0x%Lx)\n",
__FUNCTION__,
inode->i_sb->s_id, (long long)nfsi->fileid,
inode->i_sb->s_id, (long long)fattr->fileid);
goto out_err;
}
if (nfsi->fileid != fattr->fileid)
goto out_fileid;
/*
* Make sure the inode's type hasn't changed.
......@@ -1436,6 +1443,9 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (data_stable)
nfsi->cache_validity &= ~(NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ATIME);
/* Do atomic weak cache consistency updates */
nfs_wcc_update_inode(inode, fattr);
/* Check if our cached file size is stale */
new_isize = nfs_size_to_loff_t(fattr->size);
cur_isize = i_size_read(inode);
......@@ -1539,6 +1549,13 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
*/
nfs_invalidate_inode(inode);
return -ESTALE;
out_fileid:
printk(KERN_ERR "NFS: server %s error: fileid changed\n"
"fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
NFS_SERVER(inode)->hostname, inode->i_sb->s_id,
(long long)nfsi->fileid, (long long)fattr->fileid);
goto out_err;
}
/*
......@@ -1820,25 +1837,10 @@ static int nfs4_fill_super(struct super_block *sb, struct nfs4_mount_data *data,
}
clnt->cl_intr = 1;
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clp->cl_rpcclient = clnt;
clp->cl_cred = rpcauth_lookupcred(clnt->cl_auth, 0);
if (IS_ERR(clp->cl_cred)) {
up_write(&clp->cl_sem);
err = PTR_ERR(clp->cl_cred);
clp->cl_cred = NULL;
goto out_fail;
}
memcpy(clp->cl_ipaddr, server->ip_addr, sizeof(clp->cl_ipaddr));
nfs_idmap_new(clp);
}
if (list_empty(&clp->cl_superblocks)) {
err = nfs4_init_client(clp);
if (err != 0) {
up_write(&clp->cl_sem);
goto out_fail;
}
}
list_add_tail(&server->nfs4_siblings, &clp->cl_superblocks);
clnt = rpc_clone_client(clp->cl_rpcclient);
if (!IS_ERR(clnt))
......@@ -2033,6 +2035,35 @@ static struct file_system_type nfs4_fs_type = {
.fs_flags = FS_ODD_RENAME|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
};
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;
static int param_set_port(const char *val, struct kernel_param *kp)
{
char *endp;
int num = simple_strtol(val, &endp, 0);
if (endp == val || *endp || num < nfs_set_port_min || num > nfs_set_port_max)
return -EINVAL;
*((int *)kp->arg) = num;
return 0;
}
module_param_call(callback_tcpport, param_set_port, param_get_int,
&nfs_callback_set_tcpport, 0644);
static int param_set_idmap_timeout(const char *val, struct kernel_param *kp)
{
char *endp;
int num = simple_strtol(val, &endp, 0);
int jif = num * HZ;
if (endp == val || *endp || num < 0 || jif < num)
return -EINVAL;
*((int *)kp->arg) = jif;
return 0;
}
module_param_call(idmap_cache_timeout, param_set_idmap_timeout, param_get_int,
&nfs_idmap_cache_timeout, 0644);
#define nfs4_init_once(nfsi) \
do { \
INIT_LIST_HEAD(&(nfsi)->open_states); \
......@@ -2040,8 +2071,25 @@ static struct file_system_type nfs4_fs_type = {
nfsi->delegation_state = 0; \
init_rwsem(&nfsi->rwsem); \
} while(0)
#define register_nfs4fs() register_filesystem(&nfs4_fs_type)
#define unregister_nfs4fs() unregister_filesystem(&nfs4_fs_type)
static inline int register_nfs4fs(void)
{
int ret;
ret = nfs_register_sysctl();
if (ret != 0)
return ret;
ret = register_filesystem(&nfs4_fs_type);
if (ret != 0)
nfs_unregister_sysctl();
return ret;
}
static inline void unregister_nfs4fs(void)
{
unregister_filesystem(&nfs4_fs_type);
nfs_unregister_sysctl();
}
#else
#define nfs4_init_once(nfsi) \
do { } while (0)
......@@ -2166,11 +2214,11 @@ static int __init init_nfs_fs(void)
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
nfs_destroy_writepagecache();
#ifdef CONFIG_NFS_DIRECTIO
out0:
nfs_destroy_directcache();
out0:
#endif
nfs_destroy_writepagecache();
out1:
nfs_destroy_readpagecache();
out2:
......
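A quick worked example of the validation in the param_set_port() and param_set_idmap_timeout() setters added above: nfs.callback_tcpport=70000 is rejected with -EINVAL because it falls outside the 0..65535 range, and nfs.idmap_cache_timeout rejects negative values as well as values large enough to wrap during the seconds-to-jiffies conversion (num * HZ), which the jif < num test guards against. With 0644 permissions the two parameters should also be writable at run time under /sys/module/nfs/parameters/ on a modular build.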
......@@ -82,7 +82,6 @@ mnt_create(char *hostname, struct sockaddr_in *srvaddr, int version,
RPC_AUTH_UNIX);
if (!IS_ERR(clnt)) {
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
clnt->cl_oneshot = 1;
clnt->cl_intr = 1;
}
......
......@@ -146,23 +146,23 @@ xdr_decode_fattr(u32 *p, struct nfs_fattr *fattr)
return p;
}
#define SATTR(p, attr, flag, field) \
*p++ = (attr->ia_valid & flag) ? htonl(attr->field) : ~(u32) 0
static inline u32 *
xdr_encode_sattr(u32 *p, struct iattr *attr)
{
SATTR(p, attr, ATTR_MODE, ia_mode);
SATTR(p, attr, ATTR_UID, ia_uid);
SATTR(p, attr, ATTR_GID, ia_gid);
SATTR(p, attr, ATTR_SIZE, ia_size);
const u32 not_set = __constant_htonl(0xFFFFFFFF);
*p++ = (attr->ia_valid & ATTR_MODE) ? htonl(attr->ia_mode) : not_set;
*p++ = (attr->ia_valid & ATTR_UID) ? htonl(attr->ia_uid) : not_set;
*p++ = (attr->ia_valid & ATTR_GID) ? htonl(attr->ia_gid) : not_set;
*p++ = (attr->ia_valid & ATTR_SIZE) ? htonl(attr->ia_size) : not_set;
if (attr->ia_valid & ATTR_ATIME_SET) {
p = xdr_encode_time(p, &attr->ia_atime);
} else if (attr->ia_valid & ATTR_ATIME) {
p = xdr_encode_current_server_time(p, &attr->ia_atime);
} else {
*p++ = ~(u32) 0;
*p++ = ~(u32) 0;
*p++ = not_set;
*p++ = not_set;
}
if (attr->ia_valid & ATTR_MTIME_SET) {
......@@ -170,12 +170,11 @@ xdr_encode_sattr(u32 *p, struct iattr *attr)
} else if (attr->ia_valid & ATTR_MTIME) {
p = xdr_encode_current_server_time(p, &attr->ia_mtime);
} else {
*p++ = ~(u32) 0;
*p++ = ~(u32) 0;
*p++ = not_set;
*p++ = not_set;
}
return p;
}
#undef SATTR
/*
* NFS encode functions
......
......@@ -68,26 +68,38 @@ nfs3_async_handle_jukebox(struct rpc_task *task)
return 1;
}
/*
* Bare-bones access to getattr: this is for nfs_read_super.
*/
static int
nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int status;
dprintk("%s: call fsinfo\n", __FUNCTION__);
nfs_fattr_init(info->fattr);
status = rpc_call(server->client_sys, NFS3PROC_FSINFO, fhandle, info, 0);
status = rpc_call(client, NFS3PROC_FSINFO, fhandle, info, 0);
dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
status = rpc_call(server->client_sys, NFS3PROC_GETATTR, fhandle, info->fattr, 0);
status = rpc_call(client, NFS3PROC_GETATTR, fhandle, info->fattr, 0);
dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
}
return status;
}
/*
* Bare-bones access to getattr: this is for nfs_read_super.
*/
static int
nfs3_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
struct nfs_fsinfo *info)
{
int status;
status = do_proc_get_root(server->client, fhandle, info);
if (status && server->client_sys != server->client)
status = do_proc_get_root(server->client_sys, fhandle, info);
return status;
}
/*
* One function for each procedure in the NFS protocol.
*/
......@@ -732,19 +744,23 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
extern u32 *nfs3_decode_dirent(u32 *, struct nfs_entry *, int);
static void
nfs3_read_done(struct rpc_task *task)
static void nfs3_read_done(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
struct nfs_read_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
/* Call back common NFS readpage processing */
if (task->tk_status >= 0)
nfs_refresh_inode(data->inode, &data->fattr);
nfs_readpage_result(task);
nfs_readpage_result(task, calldata);
}
static const struct rpc_call_ops nfs3_read_ops = {
.rpc_call_done = nfs3_read_done,
.rpc_release = nfs_readdata_release,
};
static void
nfs3_proc_read_setup(struct nfs_read_data *data)
{
......@@ -762,23 +778,26 @@ nfs3_proc_read_setup(struct nfs_read_data *data)
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
/* Finalize the task. */
rpc_init_task(task, NFS_CLIENT(inode), nfs3_read_done, flags);
rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_read_ops, data);
rpc_call_setup(task, &msg, 0);
}
static void
nfs3_write_done(struct rpc_task *task)
static void nfs3_write_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data;
struct nfs_write_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_writeback_done(task);
nfs_writeback_done(task, calldata);
}
static const struct rpc_call_ops nfs3_write_ops = {
.rpc_call_done = nfs3_write_done,
.rpc_release = nfs_writedata_release,
};
static void
nfs3_proc_write_setup(struct nfs_write_data *data, int how)
{
......@@ -806,23 +825,26 @@ nfs3_proc_write_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
rpc_init_task(task, NFS_CLIENT(inode), nfs3_write_done, flags);
rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_write_ops, data);
rpc_call_setup(task, &msg, 0);
}
static void
nfs3_commit_done(struct rpc_task *task)
static void nfs3_commit_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data;
struct nfs_write_data *data = calldata;
if (nfs3_async_handle_jukebox(task))
return;
data = (struct nfs_write_data *)task->tk_calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_commit_done(task);
nfs_commit_done(task, calldata);
}
static const struct rpc_call_ops nfs3_commit_ops = {
.rpc_call_done = nfs3_commit_done,
.rpc_release = nfs_commit_release,
};
static void
nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
{
......@@ -840,7 +862,7 @@ nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
rpc_init_task(task, NFS_CLIENT(inode), nfs3_commit_done, flags);
rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs3_commit_ops, data);
rpc_call_setup(task, &msg, 0);
}
......
......@@ -182,7 +182,7 @@ xdr_encode_sattr(u32 *p, struct iattr *attr)
{
if (attr->ia_valid & ATTR_MODE) {
*p++ = xdr_one;
*p++ = htonl(attr->ia_mode);
*p++ = htonl(attr->ia_mode & S_IALLUGO);
} else {
*p++ = xdr_zero;
}
......
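Both this hunk and the nfs_proc_setattr() change later in the commit mask ia_mode with S_IALLUGO (07777, i.e. the setuid, setgid and sticky bits plus rwx for user, group and other), so the S_IFMT file-type bits can never leak into the mode that goes out in a v2 SETATTR request.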
......@@ -38,7 +38,8 @@ struct idmap;
((err) != NFSERR_NOFILEHANDLE))
enum nfs4_client_state {
NFS4CLNT_OK = 0,
NFS4CLNT_STATE_RECOVER = 0,
NFS4CLNT_LEASE_EXPIRED,
};
/*
......@@ -67,7 +68,6 @@ struct nfs4_client {
atomic_t cl_count;
struct rpc_clnt * cl_rpcclient;
struct rpc_cred * cl_cred;
struct list_head cl_superblocks; /* List of nfs_server structs */
......@@ -76,7 +76,6 @@ struct nfs4_client {
struct work_struct cl_renewd;
struct work_struct cl_recoverd;
wait_queue_head_t cl_waitq;
struct rpc_wait_queue cl_rpcwaitq;
/* used for the setclientid verifier */
......@@ -182,8 +181,9 @@ struct nfs4_state {
nfs4_stateid stateid;
unsigned int nreaders;
unsigned int nwriters;
unsigned int n_rdonly;
unsigned int n_wronly;
unsigned int n_rdwr;
int state; /* State on the server (R,W, or RW) */
atomic_t count;
};
......@@ -210,10 +210,10 @@ extern ssize_t nfs4_listxattr(struct dentry *, char *, size_t);
/* nfs4proc.c */
extern int nfs4_map_errors(int err);
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short);
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *);
extern int nfs4_proc_async_renew(struct nfs4_client *);
extern int nfs4_proc_renew(struct nfs4_client *);
extern int nfs4_proc_setclientid(struct nfs4_client *, u32, unsigned short, struct rpc_cred *);
extern int nfs4_proc_setclientid_confirm(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_proc_async_renew(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_proc_renew(struct nfs4_client *, struct rpc_cred *);
extern int nfs4_do_close(struct inode *inode, struct nfs4_state *state);
extern struct dentry *nfs4_atomic_open(struct inode *, struct dentry *, struct nameidata *);
extern int nfs4_open_revalidate(struct inode *, struct dentry *, int, struct nameidata *);
......@@ -237,8 +237,8 @@ extern void init_nfsv4_state(struct nfs_server *);
extern void destroy_nfsv4_state(struct nfs_server *);
extern struct nfs4_client *nfs4_get_client(struct in_addr *);
extern void nfs4_put_client(struct nfs4_client *clp);
extern int nfs4_init_client(struct nfs4_client *clp);
extern struct nfs4_client *nfs4_find_client(struct in_addr *);
struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp);
extern u32 nfs4_alloc_lockowner_id(struct nfs4_client *);
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
......
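The three counters replacing nreaders/nwriters above are per open mode: read-only opens are expected to bump n_rdonly, write-only opens n_wronly, and read/write opens n_rdwr (the increment side is not visible in this excerpt). The nfs4_close_state() hunk later in this commit shows the matching decrement: FMODE_READ is only dropped from state->state once both n_rdwr and n_rdonly have reached zero, and FMODE_WRITE once n_rdwr and n_wronly have.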
......@@ -54,6 +54,7 @@
#include <linux/nfs4.h>
#include <linux/nfs_fs.h>
#include "nfs4_fs.h"
#include "delegation.h"
#define NFSDBG_FACILITY NFSDBG_PROC
......@@ -61,6 +62,7 @@ void
nfs4_renew_state(void *data)
{
struct nfs4_client *clp = (struct nfs4_client *)data;
struct rpc_cred *cred;
long lease, timeout;
unsigned long last, now;
......@@ -76,9 +78,17 @@ nfs4_renew_state(void *data)
timeout = (2 * lease) / 3 + (long)last - (long)now;
/* Are we close to a lease timeout? */
if (time_after(now, last + lease/3)) {
cred = nfs4_get_renew_cred(clp);
if (cred == NULL) {
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
spin_unlock(&clp->cl_lock);
nfs_expire_all_delegations(clp);
goto out;
}
spin_unlock(&clp->cl_lock);
/* Queue an asynchronous RENEW. */
nfs4_proc_async_renew(clp);
nfs4_proc_async_renew(clp, cred);
put_rpccred(cred);
timeout = (2 * lease) / 3;
spin_lock(&clp->cl_lock);
} else
......
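The effect of the hunk above: when the client holds no open state there is no credential left to renew the lease with, so instead of renewing, the daemon marks the lease expired (NFS4CLNT_LEASE_EXPIRED) and starts nfs_expire_all_delegations() to hand the outstanding delegations back to the server; otherwise it issues an asynchronous RENEW with a credential borrowed from an existing state owner.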
......@@ -43,6 +43,8 @@
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>
......@@ -57,8 +59,6 @@ const nfs4_stateid zero_stateid;
static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);
static void nfs4_recover_state(void *);
void
init_nfsv4_state(struct nfs_server *server)
{
......@@ -91,11 +91,10 @@ nfs4_alloc_client(struct in_addr *addr)
if (nfs_callback_up() < 0)
return NULL;
if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
nfs_callback_down();
return NULL;
}
memset(clp, 0, sizeof(*clp));
memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
init_rwsem(&clp->cl_sem);
INIT_LIST_HEAD(&clp->cl_delegations);
......@@ -103,14 +102,12 @@ nfs4_alloc_client(struct in_addr *addr)
INIT_LIST_HEAD(&clp->cl_unused);
spin_lock_init(&clp->cl_lock);
atomic_set(&clp->cl_count, 1);
INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
INIT_LIST_HEAD(&clp->cl_superblocks);
init_waitqueue_head(&clp->cl_waitq);
rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
clp->cl_rpcclient = ERR_PTR(-EINVAL);
clp->cl_boot_time = CURRENT_TIME;
clp->cl_state = 1 << NFS4CLNT_OK;
clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
return clp;
}
......@@ -127,8 +124,6 @@ nfs4_free_client(struct nfs4_client *clp)
kfree(sp);
}
BUG_ON(!list_empty(&clp->cl_state_owners));
if (clp->cl_cred)
put_rpccred(clp->cl_cred);
nfs_idmap_delete(clp);
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
......@@ -193,27 +188,22 @@ nfs4_put_client(struct nfs4_client *clp)
list_del(&clp->cl_servers);
spin_unlock(&state_spinlock);
BUG_ON(!list_empty(&clp->cl_superblocks));
wake_up_all(&clp->cl_waitq);
rpc_wake_up(&clp->cl_rpcwaitq);
nfs4_kill_renewd(clp);
nfs4_free_client(clp);
}
static int __nfs4_init_client(struct nfs4_client *clp)
static int nfs4_init_client(struct nfs4_client *clp, struct rpc_cred *cred)
{
int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK,
nfs_callback_tcpport, cred);
if (status == 0)
status = nfs4_proc_setclientid_confirm(clp);
status = nfs4_proc_setclientid_confirm(clp, cred);
if (status == 0)
nfs4_schedule_state_renewal(clp);
return status;
}
int nfs4_init_client(struct nfs4_client *clp)
{
return nfs4_map_errors(__nfs4_init_client(clp));
}
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
......@@ -235,6 +225,32 @@ nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
return sp;
}
struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
{
struct nfs4_state_owner *sp;
struct rpc_cred *cred = NULL;
list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
if (list_empty(&sp->so_states))
continue;
cred = get_rpccred(sp->so_cred);
break;
}
return cred;
}
struct rpc_cred *nfs4_get_setclientid_cred(struct nfs4_client *clp)
{
struct nfs4_state_owner *sp;
if (!list_empty(&clp->cl_state_owners)) {
sp = list_entry(clp->cl_state_owners.next,
struct nfs4_state_owner, so_list);
return get_rpccred(sp->so_cred);
}
return NULL;
}
static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
......@@ -349,14 +365,9 @@ nfs4_alloc_open_state(void)
{
struct nfs4_state *state;
state = kmalloc(sizeof(*state), GFP_KERNEL);
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (!state)
return NULL;
state->state = 0;
state->nreaders = 0;
state->nwriters = 0;
state->flags = 0;
memset(state->stateid.data, 0, sizeof(state->stateid.data));
atomic_set(&state->count, 1);
INIT_LIST_HEAD(&state->lock_states);
spin_lock_init(&state->state_lock);
......@@ -475,15 +486,23 @@ void nfs4_close_state(struct nfs4_state *state, mode_t mode)
/* Protect against nfs4_find_state() */
spin_lock(&owner->so_lock);
spin_lock(&inode->i_lock);
if (mode & FMODE_READ)
state->nreaders--;
if (mode & FMODE_WRITE)
state->nwriters--;
switch (mode & (FMODE_READ | FMODE_WRITE)) {
case FMODE_READ:
state->n_rdonly--;
break;
case FMODE_WRITE:
state->n_wronly--;
break;
case FMODE_READ|FMODE_WRITE:
state->n_rdwr--;
}
oldstate = newstate = state->state;
if (state->nreaders == 0)
if (state->n_rdwr == 0) {
if (state->n_rdonly == 0)
newstate &= ~FMODE_READ;
if (state->nwriters == 0)
if (state->n_wronly == 0)
newstate &= ~FMODE_WRITE;
}
if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
nfs4_state_set_mode_locked(state, newstate);
oldstate = newstate;
......@@ -733,45 +752,43 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
}
static int reclaimer(void *);
struct reclaimer_args {
struct nfs4_client *clp;
struct completion complete;
};
static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
{
smp_mb__before_clear_bit();
clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
smp_mb__after_clear_bit();
wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
rpc_wake_up(&clp->cl_rpcwaitq);
}
/*
* State recovery routine
*/
void
nfs4_recover_state(void *data)
static void nfs4_recover_state(struct nfs4_client *clp)
{
struct nfs4_client *clp = (struct nfs4_client *)data;
struct reclaimer_args args = {
.clp = clp,
};
might_sleep();
struct task_struct *task;
init_completion(&args.complete);
if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
goto out_failed_clear;
wait_for_completion(&args.complete);
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
NIPQUAD(clp->cl_addr));
if (!IS_ERR(task))
return;
out_failed_clear:
set_bit(NFS4CLNT_OK, &clp->cl_state);
wake_up_all(&clp->cl_waitq);
rpc_wake_up(&clp->cl_rpcwaitq);
nfs4_clear_recover_bit(clp);
nfs4_put_client(clp);
module_put(THIS_MODULE);
}
/*
* Schedule a state recovery attempt
*/
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
void nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
if (!clp)
return;
if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
schedule_work(&clp->cl_recoverd);
if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
nfs4_recover_state(clp);
}
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
......@@ -887,18 +904,14 @@ static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
static int reclaimer(void *ptr)
{
struct reclaimer_args *args = (struct reclaimer_args *)ptr;
struct nfs4_client *clp = args->clp;
struct nfs4_client *clp = ptr;
struct nfs4_state_owner *sp;
struct nfs4_state_recovery_ops *ops;
struct rpc_cred *cred;
int status = 0;
daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
allow_signal(SIGKILL);
atomic_inc(&clp->cl_count);
complete(&args->complete);
/* Ensure exclusive access to NFSv4 state */
lock_kernel();
down_write(&clp->cl_sem);
......@@ -906,20 +919,33 @@ static int reclaimer(void *ptr)
if (list_empty(&clp->cl_superblocks))
goto out;
restart_loop:
status = nfs4_proc_renew(clp);
ops = &nfs4_network_partition_recovery_ops;
/* Are there any open files on this volume? */
cred = nfs4_get_renew_cred(clp);
if (cred != NULL) {
/* Yes there are: try to renew the old lease */
status = nfs4_proc_renew(clp, cred);
switch (status) {
case 0:
case -NFS4ERR_CB_PATH_DOWN:
put_rpccred(cred);
goto out;
case -NFS4ERR_STALE_CLIENTID:
case -NFS4ERR_LEASE_MOVED:
ops = &nfs4_reboot_recovery_ops;
break;
default:
ops = &nfs4_network_partition_recovery_ops;
};
}
} else {
/* "reboot" to ensure we clear all state on the server */
clp->cl_boot_time = CURRENT_TIME;
cred = nfs4_get_setclientid_cred(clp);
}
/* We're going to have to re-establish a clientid */
nfs4_state_mark_reclaim(clp);
status = __nfs4_init_client(clp);
status = -ENOENT;
if (cred != NULL) {
status = nfs4_init_client(clp, cred);
put_rpccred(cred);
}
if (status)
goto out_error;
/* Mark all delegations for reclaim */
......@@ -940,14 +966,13 @@ static int reclaimer(void *ptr)
}
nfs_delegation_reap_unclaimed(clp);
out:
set_bit(NFS4CLNT_OK, &clp->cl_state);
up_write(&clp->cl_sem);
unlock_kernel();
wake_up_all(&clp->cl_waitq);
rpc_wake_up(&clp->cl_rpcwaitq);
if (status == -NFS4ERR_CB_PATH_DOWN)
nfs_handle_cb_pathdown(clp);
nfs4_clear_recover_bit(clp);
nfs4_put_client(clp);
module_put_and_exit(0);
return 0;
out_error:
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
......
......@@ -296,8 +296,8 @@ static int __init root_nfs_name(char *name)
nfs_port = -1;
nfs_data.version = NFS_MOUNT_VERSION;
nfs_data.flags = NFS_MOUNT_NONLM; /* No lockd in nfs root yet */
nfs_data.rsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
nfs_data.wsize = NFS_DEF_FILE_IO_BUFFER_SIZE;
nfs_data.rsize = NFS_DEF_FILE_IO_SIZE;
nfs_data.wsize = NFS_DEF_FILE_IO_SIZE;
nfs_data.acregmin = 3;
nfs_data.acregmax = 60;
nfs_data.acdirmin = 30;
......
......@@ -111,6 +111,9 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
};
int status;
/* Mask out the non-modebit related stuff from attr->ia_mode */
sattr->ia_mode &= S_IALLUGO;
dprintk("NFS call setattr\n");
nfs_fattr_init(fattr);
status = rpc_call(NFS_CLIENT(inode), NFSPROC_SETATTR, &arg, fattr, 0);
......@@ -547,10 +550,9 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
extern u32 * nfs_decode_dirent(u32 *, struct nfs_entry *, int);
static void
nfs_read_done(struct rpc_task *task)
static void nfs_read_done(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = (struct nfs_read_data *) task->tk_calldata;
struct nfs_read_data *data = calldata;
if (task->tk_status >= 0) {
nfs_refresh_inode(data->inode, data->res.fattr);
......@@ -560,9 +562,14 @@ nfs_read_done(struct rpc_task *task)
if (data->args.offset + data->args.count >= data->res.fattr->size)
data->res.eof = 1;
}
nfs_readpage_result(task);
nfs_readpage_result(task, calldata);
}
static const struct rpc_call_ops nfs_read_ops = {
.rpc_call_done = nfs_read_done,
.rpc_release = nfs_readdata_release,
};
static void
nfs_proc_read_setup(struct nfs_read_data *data)
{
......@@ -580,20 +587,24 @@ nfs_proc_read_setup(struct nfs_read_data *data)
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
/* Finalize the task. */
rpc_init_task(task, NFS_CLIENT(inode), nfs_read_done, flags);
rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_read_ops, data);
rpc_call_setup(task, &msg, 0);
}
static void
nfs_write_done(struct rpc_task *task)
static void nfs_write_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
struct nfs_write_data *data = calldata;
if (task->tk_status >= 0)
nfs_post_op_update_inode(data->inode, data->res.fattr);
nfs_writeback_done(task);
nfs_writeback_done(task, calldata);
}
static const struct rpc_call_ops nfs_write_ops = {
.rpc_call_done = nfs_write_done,
.rpc_release = nfs_writedata_release,
};
static void
nfs_proc_write_setup(struct nfs_write_data *data, int how)
{
......@@ -614,7 +625,7 @@ nfs_proc_write_setup(struct nfs_write_data *data, int how)
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
/* Finalize the task. */
rpc_init_task(task, NFS_CLIENT(inode), nfs_write_done, flags);
rpc_init_task(task, NFS_CLIENT(inode), flags, &nfs_write_ops, data);
rpc_call_setup(task, &msg, 0);
}
......
......@@ -42,9 +42,8 @@ mempool_t *nfs_rdata_mempool;
#define MIN_POOL_READ (32)
void nfs_readdata_release(struct rpc_task *task)
void nfs_readdata_release(void *data)
{
struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
nfs_readdata_free(data);
}
......@@ -84,7 +83,7 @@ static int nfs_readpage_sync(struct nfs_open_context *ctx, struct inode *inode,
int result;
struct nfs_read_data *rdata;
rdata = nfs_readdata_alloc();
rdata = nfs_readdata_alloc(1);
if (!rdata)
return -ENOMEM;
......@@ -220,9 +219,6 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long)inode;
data->task.tk_calldata = data;
/* Release requests */
data->task.tk_release = nfs_readdata_release;
dprintk("NFS: %4d initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
......@@ -287,7 +283,7 @@ static int nfs_pagein_multi(struct list_head *head, struct inode *inode)
nbytes = req->wb_bytes;
for(;;) {
data = nfs_readdata_alloc();
data = nfs_readdata_alloc(1);
if (!data)
goto out_bad;
INIT_LIST_HEAD(&data->pages);
......@@ -343,7 +339,7 @@ static int nfs_pagein_one(struct list_head *head, struct inode *inode)
if (NFS_SERVER(inode)->rsize < PAGE_CACHE_SIZE)
return nfs_pagein_multi(head, inode);
data = nfs_readdata_alloc();
data = nfs_readdata_alloc(NFS_SERVER(inode)->rpages);
if (!data)
goto out_bad;
......@@ -452,9 +448,9 @@ static void nfs_readpage_result_full(struct nfs_read_data *data, int status)
* This is the callback from RPC telling us whether a reply was
* received or some error occurred (timeout or socket shutdown).
*/
void nfs_readpage_result(struct rpc_task *task)
void nfs_readpage_result(struct rpc_task *task, void *calldata)
{
struct nfs_read_data *data = (struct nfs_read_data *)task->tk_calldata;
struct nfs_read_data *data = calldata;
struct nfs_readargs *argp = &data->args;
struct nfs_readres *resp = &data->res;
int status = task->tk_status;
......
/*
* linux/fs/nfs/sysctl.c
*
* Sysctl interface to NFS parameters
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/ctype.h>
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/nfs4.h>
#include <linux/nfs_idmap.h>
#include "callback.h"
static const int nfs_set_port_min = 0;
static const int nfs_set_port_max = 65535;
static struct ctl_table_header *nfs_callback_sysctl_table;
/*
* Something that isn't CTL_ANY, CTL_NONE or a value that may clash.
* Use the same values as fs/lockd/svc.c
*/
#define CTL_UNNUMBERED -2
static ctl_table nfs_cb_sysctls[] = {
#ifdef CONFIG_NFS_V4
{
.ctl_name = CTL_UNNUMBERED,
.procname = "nfs_callback_tcpport",
.data = &nfs_callback_set_tcpport,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_minmax,
.extra1 = (int *)&nfs_set_port_min,
.extra2 = (int *)&nfs_set_port_max,
},
{
.ctl_name = CTL_UNNUMBERED,
.procname = "idmap_cache_timeout",
.data = &nfs_idmap_cache_timeout,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec_jiffies,
.strategy = &sysctl_jiffies,
},
#endif
{ .ctl_name = 0 }
};
static ctl_table nfs_cb_sysctl_dir[] = {
{
.ctl_name = CTL_UNNUMBERED,
.procname = "nfs",
.mode = 0555,
.child = nfs_cb_sysctls,
},
{ .ctl_name = 0 }
};
static ctl_table nfs_cb_sysctl_root[] = {
{
.ctl_name = CTL_FS,
.procname = "fs",
.mode = 0555,
.child = nfs_cb_sysctl_dir,
},
{ .ctl_name = 0 }
};
int nfs_register_sysctl(void)
{
nfs_callback_sysctl_table = register_sysctl_table(nfs_cb_sysctl_root, 0);
if (nfs_callback_sysctl_table == NULL)
return -ENOMEM;
return 0;
}
void nfs_unregister_sysctl(void)
{
unregister_sysctl_table(nfs_callback_sysctl_table);
nfs_callback_sysctl_table = NULL;
}
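Given the nesting in nfs_cb_sysctl_root above (fs -> nfs -> table entries), the two knobs should appear as /proc/sys/fs/nfs/nfs_callback_tcpport and /proc/sys/fs/nfs/idmap_cache_timeout, the latter expressed in seconds since proc_dointvec_jiffies converts to and from jiffies, e.g. echo 300 > /proc/sys/fs/nfs/idmap_cache_timeout (the value 300 is just an example).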
......@@ -87,10 +87,9 @@ nfs_copy_dname(struct dentry *dentry, struct nfs_unlinkdata *data)
* We delay initializing RPC info until after the call to dentry_iput()
* in order to minimize races against rename().
*/
static void
nfs_async_unlink_init(struct rpc_task *task)
static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
{
struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
struct nfs_unlinkdata *data = calldata;
struct dentry *dir = data->dir;
struct rpc_message msg = {
.rpc_cred = data->cred,
......@@ -116,10 +115,9 @@ nfs_async_unlink_init(struct rpc_task *task)
*
* Do the directory attribute update.
*/
static void
nfs_async_unlink_done(struct rpc_task *task)
static void nfs_async_unlink_done(struct rpc_task *task, void *calldata)
{
struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
struct nfs_unlinkdata *data = calldata;
struct dentry *dir = data->dir;
struct inode *dir_i;
......@@ -141,13 +139,18 @@ nfs_async_unlink_done(struct rpc_task *task)
* We need to call nfs_put_unlinkdata as a 'tk_release' task since the
* rpc_task would be freed too.
*/
static void
nfs_async_unlink_release(struct rpc_task *task)
static void nfs_async_unlink_release(void *calldata)
{
struct nfs_unlinkdata *data = (struct nfs_unlinkdata *)task->tk_calldata;
struct nfs_unlinkdata *data = calldata;
nfs_put_unlinkdata(data);
}
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_prepare = nfs_async_unlink_init,
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
};
/**
* nfs_async_unlink - asynchronous unlinking of a file
* @dentry: dentry to unlink
......@@ -157,7 +160,6 @@ nfs_async_unlink(struct dentry *dentry)
{
struct dentry *dir = dentry->d_parent;
struct nfs_unlinkdata *data;
struct rpc_task *task;
struct rpc_clnt *clnt = NFS_CLIENT(dir->d_inode);
int status = -ENOMEM;
......@@ -178,17 +180,13 @@ nfs_async_unlink(struct dentry *dentry)
nfs_deletes = data;
data->count = 1;
task = &data->task;
rpc_init_task(task, clnt, nfs_async_unlink_done , RPC_TASK_ASYNC);
task->tk_calldata = data;
task->tk_action = nfs_async_unlink_init;
task->tk_release = nfs_async_unlink_release;
rpc_init_task(&data->task, clnt, RPC_TASK_ASYNC, &nfs_unlink_ops, data);
spin_lock(&dentry->d_lock);
dentry->d_flags |= DCACHE_NFSFS_RENAMED;
spin_unlock(&dentry->d_lock);
rpc_sleep_on(&nfs_delete_queue, task, NULL, NULL);
rpc_sleep_on(&nfs_delete_queue, &data->task, NULL, NULL);
status = 0;
out:
return status;
......
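The unlink.c conversion above shows the pattern this commit applies throughout: instead of poking tk_calldata, tk_action and tk_release on the task, callers hand a const struct rpc_call_ops plus a calldata pointer to rpc_init_task() or rpc_call_async(). A minimal sketch of a caller using the new interface follows; the nfs_demo_* names are hypothetical, while the callback signatures, member names and the rpc_call_async() prototype are taken from the hunks in this diff.
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
#include <linux/slab.h>
struct nfs_demo_data {
	int res;		/* RPC outcome recorded by the done callback */
};
static void nfs_demo_done(struct rpc_task *task, void *calldata)
{
	struct nfs_demo_data *data = calldata;
	data->res = task->tk_status;	/* record the RPC outcome */
}
static void nfs_demo_release(void *calldata)
{
	kfree(calldata);	/* freed only once the rpc_task is done with it */
}
static const struct rpc_call_ops nfs_demo_ops = {
	.rpc_call_done	= nfs_demo_done,
	.rpc_release	= nfs_demo_release,
};
/* Kick off the async call: calldata now travels alongside the ops table. */
static int nfs_demo_call(struct rpc_clnt *clnt, struct rpc_message *msg,
			 struct nfs_demo_data *data)
{
	return rpc_call_async(clnt, msg, RPC_TASK_ASYNC, &nfs_demo_ops, data);
}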
......@@ -89,24 +89,38 @@ static mempool_t *nfs_commit_mempool;
static DECLARE_WAIT_QUEUE_HEAD(nfs_write_congestion);
static inline struct nfs_write_data *nfs_commit_alloc(void)
static inline struct nfs_write_data *nfs_commit_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, SLAB_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
if (pagecount < NFS_PAGEVEC_SIZE)
p->pagevec = &p->page_array[0];
else {
size_t size = ++pagecount * sizeof(struct page *);
p->pagevec = kmalloc(size, GFP_NOFS);
if (p->pagevec) {
memset(p->pagevec, 0, size);
} else {
mempool_free(p, nfs_commit_mempool);
p = NULL;
}
}
}
return p;
}
static inline void nfs_commit_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_commit_mempool);
}
static void nfs_writedata_release(struct rpc_task *task)
void nfs_writedata_release(void *wdata)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
nfs_writedata_free(wdata);
}
......@@ -168,7 +182,7 @@ static int nfs_writepage_sync(struct nfs_open_context *ctx, struct inode *inode,
int result, written = 0;
struct nfs_write_data *wdata;
wdata = nfs_writedata_alloc();
wdata = nfs_writedata_alloc(1);
if (!wdata)
return -ENOMEM;
......@@ -232,19 +246,16 @@ static int nfs_writepage_async(struct nfs_open_context *ctx,
unsigned int offset, unsigned int count)
{
struct nfs_page *req;
int status;
req = nfs_update_request(ctx, inode, page, offset, count);
status = (IS_ERR(req)) ? PTR_ERR(req) : 0;
if (status < 0)
goto out;
if (IS_ERR(req))
return PTR_ERR(req);
/* Update file length */
nfs_grow_file(page, offset, count);
/* Set the PG_uptodate flag? */
nfs_mark_uptodate(page, offset, count);
nfs_unlock_request(req);
out:
return status;
return 0;
}
static int wb_priority(struct writeback_control *wbc)
......@@ -304,11 +315,8 @@ int nfs_writepage(struct page *page, struct writeback_control *wbc)
lock_kernel();
if (!IS_SYNC(inode) && inode_referenced) {
err = nfs_writepage_async(ctx, inode, page, 0, offset);
if (err >= 0) {
err = 0;
if (wbc->for_reclaim)
nfs_flush_inode(inode, 0, 0, FLUSH_STABLE);
}
if (!wbc->for_writepages)
nfs_flush_inode(inode, 0, 0, wb_priority(wbc));
} else {
err = nfs_writepage_sync(ctx, inode, page, 0,
offset, priority);
......@@ -877,9 +885,6 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
data->task.tk_calldata = data;
/* Release requests */
data->task.tk_release = nfs_writedata_release;
dprintk("NFS: %4d initiated write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
......@@ -919,7 +924,7 @@ static int nfs_flush_multi(struct list_head *head, struct inode *inode, int how)
nbytes = req->wb_bytes;
for (;;) {
data = nfs_writedata_alloc();
data = nfs_writedata_alloc(1);
if (!data)
goto out_bad;
list_add(&data->pages, &list);
......@@ -983,7 +988,7 @@ static int nfs_flush_one(struct list_head *head, struct inode *inode, int how)
if (NFS_SERVER(inode)->wsize < PAGE_CACHE_SIZE)
return nfs_flush_multi(head, inode, how);
data = nfs_writedata_alloc();
data = nfs_writedata_alloc(NFS_SERVER(inode)->wpages);
if (!data)
goto out_bad;
......@@ -1137,9 +1142,9 @@ static void nfs_writeback_done_full(struct nfs_write_data *data, int status)
/*
* This function is called when the WRITE call is complete.
*/
void nfs_writeback_done(struct rpc_task *task)
void nfs_writeback_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = (struct nfs_write_data *) task->tk_calldata;
struct nfs_write_data *data = calldata;
struct nfs_writeargs *argp = &data->args;
struct nfs_writeres *resp = &data->res;
......@@ -1206,9 +1211,8 @@ void nfs_writeback_done(struct rpc_task *task)
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_commit_release(struct rpc_task *task)
void nfs_commit_release(void *wdata)
{
struct nfs_write_data *wdata = (struct nfs_write_data *)task->tk_calldata;
nfs_commit_free(wdata);
}
......@@ -1244,9 +1248,6 @@ static void nfs_commit_rpcsetup(struct list_head *head,
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
data->task.tk_calldata = data;
/* Release requests */
data->task.tk_release = nfs_commit_release;
dprintk("NFS: %4d initiated commit call\n", data->task.tk_pid);
}
......@@ -1255,12 +1256,12 @@ static void nfs_commit_rpcsetup(struct list_head *head,
* Commit dirty pages
*/
static int
nfs_commit_list(struct list_head *head, int how)
nfs_commit_list(struct inode *inode, struct list_head *head, int how)
{
struct nfs_write_data *data;
struct nfs_page *req;
data = nfs_commit_alloc();
data = nfs_commit_alloc(NFS_SERVER(inode)->wpages);
if (!data)
goto out_bad;
......@@ -1283,10 +1284,9 @@ nfs_commit_list(struct list_head *head, int how)
/*
* COMMIT call returned
*/
void
nfs_commit_done(struct rpc_task *task)
void nfs_commit_done(struct rpc_task *task, void *calldata)
{
struct nfs_write_data *data = (struct nfs_write_data *)task->tk_calldata;
struct nfs_write_data *data = calldata;
struct nfs_page *req;
int res = 0;
......@@ -1366,7 +1366,7 @@ int nfs_commit_inode(struct inode *inode, int how)
res = nfs_scan_commit(inode, &head, 0, 0);
spin_unlock(&nfsi->req_lock);
if (res) {
error = nfs_commit_list(&head, how);
error = nfs_commit_list(inode, &head, how);
if (error < 0)
return error;
}
......@@ -1377,22 +1377,23 @@ int nfs_commit_inode(struct inode *inode, int how)
int nfs_sync_inode(struct inode *inode, unsigned long idx_start,
unsigned int npages, int how)
{
int error,
wait;
int nocommit = how & FLUSH_NOCOMMIT;
int wait = how & FLUSH_WAIT;
int error;
wait = how & FLUSH_WAIT;
how &= ~FLUSH_WAIT;
how &= ~(FLUSH_WAIT|FLUSH_NOCOMMIT);
do {
error = 0;
if (wait)
if (wait) {
error = nfs_wait_on_requests(inode, idx_start, npages);
if (error == 0)
if (error != 0)
continue;
}
error = nfs_flush_inode(inode, idx_start, npages, how);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
if (error == 0)
if (error != 0)
continue;
if (!nocommit)
error = nfs_commit_inode(inode, how);
#endif
} while (error > 0);
return error;
}
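The reworked nfs_sync_inode() above folds the wait and commit behaviour into flag bits, so a caller that wants to flush dirty pages without issuing the NFSv3/v4 COMMIT can now say so directly. An illustrative, hypothetical call site, assuming the FLUSH_WAIT and FLUSH_NOCOMMIT flags defined elsewhere in this diff (idx_start and npages of 0 request a whole-file flush):
/* Illustration only: write everything back and wait, but skip COMMIT. */
static int nfs_demo_flush_nocommit(struct inode *inode)
{
	return nfs_sync_inode(inode, 0, 0, FLUSH_WAIT | FLUSH_NOCOMMIT);
}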
......
......@@ -53,7 +53,7 @@
#define NFSPROC4_CB_COMPOUND 1
/* declarations */
static void nfs4_cb_null(struct rpc_task *task);
static const struct rpc_call_ops nfs4_cb_null_ops;
/* Index of predefined Linux callback client operations */
......@@ -431,7 +431,6 @@ nfsd4_probe_callback(struct nfs4_client *clp)
}
clnt->cl_intr = 0;
clnt->cl_softrtry = 1;
clnt->cl_chatty = 1;
/* Kick rpciod, put the call on the wire. */
......@@ -447,7 +446,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
msg.rpc_cred = nfsd4_lookupcred(clp,0);
if (IS_ERR(msg.rpc_cred))
goto out_rpciod;
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, nfs4_cb_null, NULL);
status = rpc_call_async(clnt, &msg, RPC_TASK_ASYNC, &nfs4_cb_null_ops, NULL);
put_rpccred(msg.rpc_cred);
if (status != 0) {
......@@ -469,7 +468,7 @@ nfsd4_probe_callback(struct nfs4_client *clp)
}
static void
nfs4_cb_null(struct rpc_task *task)
nfs4_cb_null(struct rpc_task *task, void *dummy)
{
struct nfs4_client *clp = (struct nfs4_client *)task->tk_msg.rpc_argp;
struct nfs4_callback *cb = &clp->cl_callback;
......@@ -488,6 +487,10 @@ nfs4_cb_null(struct rpc_task *task)
put_nfs4_client(clp);
}
static const struct rpc_call_ops nfs4_cb_null_ops = {
.rpc_call_done = nfs4_cb_null,
};
/*
* called with dp->dl_count inc'ed.
* nfs4_lock_state() may or may not have been called.
......
......@@ -760,7 +760,7 @@ extern struct file_lock *posix_test_lock(struct file *, struct file_lock *);
extern int posix_lock_file(struct file *, struct file_lock *);
extern int posix_lock_file_wait(struct file *, struct file_lock *);
extern void posix_block_lock(struct file_lock *, struct file_lock *);
extern void posix_unblock_lock(struct file *, struct file_lock *);
extern int posix_unblock_lock(struct file *, struct file_lock *);
extern int posix_locks_deadlock(struct file_lock *, struct file_lock *);
extern int flock_lock_file_wait(struct file *filp, struct file_lock *fl);
extern int __break_lease(struct inode *inode, unsigned int flags);
......
......@@ -172,7 +172,7 @@ extern struct nlm_host *nlm_find_client(void);
/*
* Server-side lock handling
*/
int nlmsvc_async_call(struct nlm_rqst *, u32, rpc_action);
int nlmsvc_async_call(struct nlm_rqst *, u32, const struct rpc_call_ops *);
u32 nlmsvc_lock(struct svc_rqst *, struct nlm_file *,
struct nlm_lock *, int, struct nlm_cookie *);
u32 nlmsvc_unlock(struct nlm_file *, struct nlm_lock *);
......
......@@ -38,9 +38,6 @@
# define NFS_DEBUG
#endif
#define NFS_MAX_FILE_IO_BUFFER_SIZE 32768
#define NFS_DEF_FILE_IO_BUFFER_SIZE 4096
/* Default timeout values */
#define NFS_MAX_UDP_TIMEOUT (60*HZ)
#define NFS_MAX_TCP_TIMEOUT (600*HZ)
......@@ -65,6 +62,7 @@
#define FLUSH_STABLE 4 /* commit to stable storage */
#define FLUSH_LOWPRI 8 /* low priority background flush */
#define FLUSH_HIGHPRI 16 /* high priority memory reclaim flush */
#define FLUSH_NOCOMMIT 32 /* Don't send the NFSv3/v4 COMMIT */
#ifdef __KERNEL__
......@@ -393,6 +391,17 @@ extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_
*/
extern struct inode_operations nfs_symlink_inode_operations;
/*
* linux/fs/nfs/sysctl.c
*/
#ifdef CONFIG_SYSCTL
extern int nfs_register_sysctl(void);
extern void nfs_unregister_sysctl(void);
#else
#define nfs_register_sysctl() do { } while(0)
#define nfs_unregister_sysctl() do { } while(0)
#endif
/*
* linux/fs/nfs/unlink.c
*/
......@@ -406,10 +415,12 @@ extern int nfs_writepage(struct page *page, struct writeback_control *wbc);
extern int nfs_writepages(struct address_space *, struct writeback_control *);
extern int nfs_flush_incompatible(struct file *file, struct page *page);
extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned int);
extern void nfs_writeback_done(struct rpc_task *task);
extern void nfs_writeback_done(struct rpc_task *task, void *data);
extern void nfs_writedata_release(void *data);
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
extern void nfs_commit_done(struct rpc_task *);
extern void nfs_commit_done(struct rpc_task *, void *data);
extern void nfs_commit_release(void *data);
#endif
/*
......@@ -460,18 +471,33 @@ static inline int nfs_wb_page(struct inode *inode, struct page* page)
*/
extern mempool_t *nfs_wdata_mempool;
static inline struct nfs_write_data *nfs_writedata_alloc(void)
static inline struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
{
struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, SLAB_NOFS);
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
if (pagecount < NFS_PAGEVEC_SIZE)
p->pagevec = &p->page_array[0];
else {
size_t size = ++pagecount * sizeof(struct page *);
p->pagevec = kmalloc(size, GFP_NOFS);
if (p->pagevec) {
memset(p->pagevec, 0, size);
} else {
mempool_free(p, nfs_wdata_mempool);
p = NULL;
}
}
}
return p;
}
static inline void nfs_writedata_free(struct nfs_write_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_wdata_mempool);
}
......@@ -481,28 +507,45 @@ static inline void nfs_writedata_free(struct nfs_write_data *p)
extern int nfs_readpage(struct file *, struct page *);
extern int nfs_readpages(struct file *, struct address_space *,
struct list_head *, unsigned);
extern void nfs_readpage_result(struct rpc_task *);
extern void nfs_readpage_result(struct rpc_task *, void *);
extern void nfs_readdata_release(void *data);
/*
* Allocate and free nfs_read_data structures
*/
extern mempool_t *nfs_rdata_mempool;
static inline struct nfs_read_data *nfs_readdata_alloc(void)
static inline struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount)
{
struct nfs_read_data *p = mempool_alloc(nfs_rdata_mempool, SLAB_NOFS);
if (p)
if (p) {
memset(p, 0, sizeof(*p));
INIT_LIST_HEAD(&p->pages);
if (pagecount < NFS_PAGEVEC_SIZE)
p->pagevec = &p->page_array[0];
else {
size_t size = ++pagecount * sizeof(struct page *);
p->pagevec = kmalloc(size, GFP_NOFS);
if (p->pagevec) {
memset(p->pagevec, 0, size);
} else {
mempool_free(p, nfs_rdata_mempool);
p = NULL;
}
}
}
return p;
}
static inline void nfs_readdata_free(struct nfs_read_data *p)
{
if (p && (p->pagevec != &p->page_array[0]))
kfree(p->pagevec);
mempool_free(p, nfs_rdata_mempool);
}
extern void nfs_readdata_release(struct rpc_task *task);
/*
* linux/fs/nfs3proc.c
*/
......
......@@ -71,6 +71,8 @@ int nfs_map_name_to_uid(struct nfs4_client *, const char *, size_t, __u32 *);
int nfs_map_group_to_gid(struct nfs4_client *, const char *, size_t, __u32 *);
int nfs_map_uid_to_name(struct nfs4_client *, __u32, char *);
int nfs_map_gid_to_group(struct nfs4_client *, __u32, char *);
extern unsigned int nfs_idmap_cache_timeout;
#endif /* __KERNEL__ */
#endif /* NFS_IDMAP_H */
......@@ -79,9 +79,7 @@ extern void nfs_clear_page_writeback(struct nfs_page *req);
static inline int
nfs_lock_request_dontget(struct nfs_page *req)
{
if (test_and_set_bit(PG_BUSY, &req->wb_flags))
return 0;
return 1;
return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
/*
......@@ -125,9 +123,7 @@ nfs_list_remove_request(struct nfs_page *req)
static inline int
nfs_defer_commit(struct nfs_page *req)
{
if (test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags))
return 0;
return 1;
return !test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags);
}
static inline void
......@@ -141,9 +137,7 @@ nfs_clear_commit(struct nfs_page *req)
static inline int
nfs_defer_reschedule(struct nfs_page *req)
{
if (test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags))
return 0;
return 1;
return !test_and_set_bit(PG_NEED_RESCHED, &req->wb_flags);
}
static inline void
......
......@@ -4,6 +4,16 @@
#include <linux/sunrpc/xprt.h>
#include <linux/nfsacl.h>
/*
* To change the maximum rsize and wsize supported by the NFS client, adjust
* NFS_MAX_FILE_IO_SIZE. 64KB is a typical maximum, but some servers can
* support a megabyte or more. The default is left at 4096 bytes, which is
* reasonable for NFS over UDP.
*/
#define NFS_MAX_FILE_IO_SIZE (1048576U)
#define NFS_DEF_FILE_IO_SIZE (4096U)
#define NFS_MIN_FILE_IO_SIZE (1024U)
struct nfs4_fsid {
__u64 major;
__u64 minor;
......@@ -137,7 +147,7 @@ struct nfs_openres {
*/
struct nfs_open_confirmargs {
const struct nfs_fh * fh;
nfs4_stateid stateid;
nfs4_stateid * stateid;
struct nfs_seqid * seqid;
};
......@@ -169,62 +179,58 @@ struct nfs_lowner {
u32 id;
};
struct nfs_lock_opargs {
struct nfs_lock_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * lock_seqid;
nfs4_stateid * lock_stateid;
struct nfs_seqid * open_seqid;
nfs4_stateid * open_stateid;
struct nfs_lowner lock_owner;
__u32 reclaim;
__u32 new_lock_owner;
unsigned char block : 1;
unsigned char reclaim : 1;
unsigned char new_lock_owner : 1;
};
struct nfs_lock_res {
nfs4_stateid stateid;
};
struct nfs_locku_opargs {
struct nfs_locku_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_seqid * seqid;
nfs4_stateid * stateid;
};
struct nfs_lockargs {
struct nfs_fh * fh;
__u32 type;
__u64 offset;
__u64 length;
union {
struct nfs_lock_opargs *lock; /* LOCK */
struct nfs_lowner *lockt; /* LOCKT */
struct nfs_locku_opargs *locku; /* LOCKU */
} u;
struct nfs_locku_res {
nfs4_stateid stateid;
};
struct nfs_lock_denied {
__u64 offset;
__u64 length;
__u32 type;
struct nfs_lowner owner;
struct nfs_lockt_args {
struct nfs_fh * fh;
struct file_lock * fl;
struct nfs_lowner lock_owner;
};
struct nfs_lockres {
union {
nfs4_stateid stateid;/* LOCK success, LOCKU */
struct nfs_lock_denied denied; /* LOCK failed, LOCKT success */
} u;
const struct nfs_server * server;
struct nfs_lockt_res {
struct file_lock * denied; /* LOCK, LOCKT failed */
};
struct nfs4_delegreturnargs {
const struct nfs_fh *fhandle;
const nfs4_stateid *stateid;
const u32 * bitmask;
};
struct nfs4_delegreturnres {
struct nfs_fattr * fattr;
const struct nfs_server *server;
};
/*
* Arguments to the read call.
*/
#define NFS_READ_MAXIOV (9U)
#if (NFS_READ_MAXIOV > (MAX_IOVEC -2))
#error "NFS_READ_MAXIOV is too large"
#endif
struct nfs_readargs {
struct nfs_fh * fh;
struct nfs_open_context *context;
......@@ -243,11 +249,6 @@ struct nfs_readres {
/*
* Arguments to the write call.
*/
#define NFS_WRITE_MAXIOV (9U)
#if (NFS_WRITE_MAXIOV > (MAX_IOVEC -2))
#error "NFS_WRITE_MAXIOV is too large"
#endif
struct nfs_writeargs {
struct nfs_fh * fh;
struct nfs_open_context *context;
......@@ -678,6 +679,8 @@ struct nfs4_server_caps_res {
struct nfs_page;
#define NFS_PAGEVEC_SIZE (8U)
struct nfs_read_data {
int flags;
struct rpc_task task;
......@@ -686,13 +689,14 @@ struct nfs_read_data {
struct nfs_fattr fattr; /* fattr storage */
struct list_head pages; /* Coalesced read requests */
struct nfs_page *req; /* multi ops per nfs_page */
struct page *pagevec[NFS_READ_MAXIOV];
struct page **pagevec;
struct nfs_readargs args;
struct nfs_readres res;
#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
#endif
void (*complete) (struct nfs_read_data *, int);
struct page *page_array[NFS_PAGEVEC_SIZE + 1];
};
struct nfs_write_data {
......@@ -704,13 +708,14 @@ struct nfs_write_data {
struct nfs_writeverf verf;
struct list_head pages; /* Coalesced requests we wish to flush */
struct nfs_page *req; /* multi ops per nfs_page */
struct page *pagevec[NFS_WRITE_MAXIOV];
struct page **pagevec;
struct nfs_writeargs args; /* argument struct */
struct nfs_writeres res; /* result struct */
#ifdef CONFIG_NFS_V4
unsigned long timestamp; /* For lease renewal */
#endif
void (*complete) (struct nfs_write_data *, int);
struct page *page_array[NFS_PAGEVEC_SIZE + 1];
};
struct nfs_access_entry;
......
......@@ -49,7 +49,6 @@ struct rpc_clnt {
unsigned int cl_softrtry : 1,/* soft timeouts */
cl_intr : 1,/* interruptible */
cl_chatty : 1,/* be verbose */
cl_autobind : 1,/* use getport() */
cl_oneshot : 1,/* dispose after use */
cl_dead : 1;/* abandoned */
......@@ -126,7 +125,8 @@ int rpc_register(u32, u32, int, unsigned short, int *);
void rpc_call_setup(struct rpc_task *, struct rpc_message *, int);
int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags, rpc_action callback, void *clntdata);
int flags, const struct rpc_call_ops *tk_ops,
void *calldata);
int rpc_call_sync(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags);
void rpc_restart_call(struct rpc_task *);
......@@ -134,6 +134,7 @@ void rpc_clnt_sigmask(struct rpc_clnt *clnt, sigset_t *oldset);
void rpc_clnt_sigunmask(struct rpc_clnt *clnt, sigset_t *oldset);
void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
void rpc_force_rebind(struct rpc_clnt *);
int rpc_ping(struct rpc_clnt *clnt, int flags);
static __inline__
......
......@@ -48,7 +48,7 @@ u32 spkm3_read_token(struct spkm3_ctx *ctx, struct xdr_netobj *read_token, struc
#define CKSUMTYPE_RSA_MD5 0x0007
s32 make_checksum(s32 cksumtype, char *header, int hdrlen, struct xdr_buf *body,
struct xdr_netobj *cksum);
int body_offset, struct xdr_netobj *cksum);
void asn1_bitstring_len(struct xdr_netobj *in, int *enclen, int *zerobits);
int decode_asn1_bitstring(struct xdr_netobj *out, char *in, int enclen,
int explen);
......
......@@ -91,7 +91,6 @@ struct xdr_buf {
u32 * xdr_encode_opaque_fixed(u32 *p, const void *ptr, unsigned int len);
u32 * xdr_encode_opaque(u32 *p, const void *ptr, unsigned int len);
u32 * xdr_encode_string(u32 *p, const char *s);
u32 * xdr_decode_string(u32 *p, char **sp, int *lenp, int maxlen);
u32 * xdr_decode_string_inplace(u32 *p, char **sp, int *lenp, int maxlen);
u32 * xdr_encode_netobj(u32 *p, const struct xdr_netobj *);
u32 * xdr_decode_netobj(u32 *p, struct xdr_netobj *);
......@@ -134,11 +133,6 @@ xdr_adjust_iovec(struct kvec *iov, u32 *p)
return iov->iov_len = ((u8 *) p - (u8 *) iov->iov_base);
}
/*
* Maximum number of iov's we use.
*/
#define MAX_IOVEC (12)
/*
* XDR buffer helper functions
*/
......
......@@ -79,21 +79,19 @@ struct rpc_rqst {
void (*rq_release_snd_buf)(struct rpc_rqst *); /* release rq_enc_pages */
struct list_head rq_list;
__u32 * rq_buffer; /* XDR encode buffer */
size_t rq_bufsize;
struct xdr_buf rq_private_buf; /* The receive buffer
* used in the softirq.
*/
unsigned long rq_majortimeo; /* major timeout alarm */
unsigned long rq_timeout; /* Current timeout value */
unsigned int rq_retries; /* # of retries */
/*
* For authentication (e.g. auth_des)
*/
u32 rq_creddata[2];
/*
* Partial send handling
*/
u32 rq_bytes_sent; /* Bytes we have sent */
unsigned long rq_xtime; /* when transmitted */
......@@ -106,7 +104,10 @@ struct rpc_xprt_ops {
void (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
int (*reserve_xprt)(struct rpc_task *task);
void (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
void (*connect)(struct rpc_task *task);
void * (*buf_alloc)(struct rpc_task *task, size_t size);
void (*buf_free)(struct rpc_task *task);
int (*send_request)(struct rpc_task *task);
void (*set_retrans_timeout)(struct rpc_task *task);
void (*timer)(struct rpc_task *task);
......@@ -253,6 +254,7 @@ int xs_setup_tcp(struct rpc_xprt *xprt, struct rpc_timeout *to);
#define XPRT_LOCKED (0)
#define XPRT_CONNECTED (1)
#define XPRT_CONNECTING (2)
#define XPRT_CLOSE_WAIT (3)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
......