Commit 85004cc3 authored by Linus Torvalds

Merge git://git.linux-nfs.org/pub/linux/nfs-2.6

* git://git.linux-nfs.org/pub/linux/nfs-2.6: (118 commits)
  NFSv4: Iterate through all nfs_clients when the server recalls a delegation
  NFSv4: Deal more correctly with duplicate delegations
  NFS: Fix a potential race between umount and nfs_access_cache_shrinker()
  NFS: Add an asynchronous delegreturn operation for use in nfs_clear_inode
  nfs: convert NFS_*(inode) helpers to static inline
  nfs: obliterate NFS_FLAGS macro
  NFS: Address memory leaks in the NFS client mount option parser
  nfs4: allow nfsv4 acls on non-regular-files
  NFS: Optimise away the sigmask code in aio/dio reads and writes
  SUNRPC: Don't bother changing the sigmask for asynchronous RPC calls
  SUNRPC: rpcb_getport_sync() passes incorrect address size to rpc_create()
  SUNRPC: Clean up block comment preceding rpcb_getport_sync()
  SUNRPC: Use appropriate argument types in rpcb client
  SUNRPC: rpcb_getport_sync() should use built-in hostname generator
  SUNRPC: Clean up functions that free address_strings array
  NFS: NFS version number is unsigned
  NLM: Fix a bogus 'return' in nlmclnt_rpc_release
  NLM: Introduce an arguments structure for nlmclnt_init()
  NLM/NFS: Use cached nlm_host when calling nlmclnt_proc()
  NFS: Invoke nlmclnt_init during NFS mount processing
  ...
parents 149a051f 3fbd67ad
......@@ -41,6 +41,48 @@ struct nlm_wait {
static LIST_HEAD(nlm_blocked);
/**
* nlmclnt_init - Set up per-NFS mount point lockd data structures
* @nlm_init: pointer to arguments structure
*
* Returns pointer to an appropriate nlm_host struct,
* or an ERR_PTR value.
*/
struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
{
struct nlm_host *host;
u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
int status;
status = lockd_up(nlm_init->protocol);
if (status < 0)
return ERR_PTR(status);
host = nlmclnt_lookup_host((struct sockaddr_in *)nlm_init->address,
nlm_init->protocol, nlm_version,
nlm_init->hostname,
strlen(nlm_init->hostname));
if (host == NULL) {
lockd_down();
return ERR_PTR(-ENOLCK);
}
return host;
}
EXPORT_SYMBOL_GPL(nlmclnt_init);
/**
* nlmclnt_done - Release resources allocated by nlmclnt_init()
* @host: nlm_host structure reserved by nlmclnt_init()
*
*/
void nlmclnt_done(struct nlm_host *host)
{
nlm_release_host(host);
lockd_down();
}
EXPORT_SYMBOL_GPL(nlmclnt_done);
/*
* Queue up a lock for blocking so that the GRANTED request can see it
*/
......
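For orientation, a minimal hypothetical sketch of how a mount-time caller might use the nlmclnt_init()/nlmclnt_done() pair added above. Only the nlmclnt_initdata fields, the two exported functions, and the nfs_server->nlm_host field (used later in this merge) come from the diff; the helper name, transport choice, and error handling are illustrative assumptions.

/*
 * Hypothetical usage sketch -- not part of this merge.  Field names are
 * taken from the diff above; the helper itself is assumed.
 */
static int example_mount_lockd_setup(struct nfs_server *server,
				     struct sockaddr *addr,
				     const char *hostname)
{
	struct nlmclnt_initdata nlm_init = {
		.hostname	= hostname,
		.address	= addr,
		.protocol	= IPPROTO_TCP,	/* assumed transport */
		.nfs_version	= 3,		/* selects NLM version 4 */
	};
	struct nlm_host *host = nlmclnt_init(&nlm_init);

	if (IS_ERR(host))
		return PTR_ERR(host);
	server->nlm_host = host;	/* released later via nlmclnt_done(host) */
	return 0;
}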
......@@ -145,34 +145,21 @@ static void nlmclnt_release_lockargs(struct nlm_rqst *req)
BUG_ON(req->a_args.lock.fl.fl_ops != NULL);
}
/*
* This is the main entry point for the NLM client.
/**
* nlmclnt_proc - Perform a single client-side lock request
* @host: address of a valid nlm_host context representing the NLM server
* @cmd: fcntl-style file lock operation to perform
* @fl: address of arguments for the lock operation
*
*/
int
nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl)
{
struct rpc_clnt *client = NFS_CLIENT(inode);
struct sockaddr_in addr;
struct nfs_server *nfssrv = NFS_SERVER(inode);
struct nlm_host *host;
struct nlm_rqst *call;
sigset_t oldset;
unsigned long flags;
int status, vers;
vers = (NFS_PROTO(inode)->version == 3) ? 4 : 1;
if (NFS_PROTO(inode)->version > 3) {
printk(KERN_NOTICE "NFSv4 file locking not implemented!\n");
return -ENOLCK;
}
rpc_peeraddr(client, (struct sockaddr *) &addr, sizeof(addr));
host = nlmclnt_lookup_host(&addr, client->cl_xprt->prot, vers,
nfssrv->nfs_client->cl_hostname,
strlen(nfssrv->nfs_client->cl_hostname));
if (host == NULL)
return -ENOLCK;
int status;
nlm_get_host(host);
call = nlm_alloc_call(host);
if (call == NULL)
return -ENOMEM;
......@@ -219,7 +206,7 @@ nlmclnt_proc(struct inode *inode, int cmd, struct file_lock *fl)
dprintk("lockd: clnt proc returns %d\n", status);
return status;
}
EXPORT_SYMBOL(nlmclnt_proc);
EXPORT_SYMBOL_GPL(nlmclnt_proc);
/*
* Allocate an NLM RPC call struct
......@@ -257,7 +244,7 @@ void nlm_release_call(struct nlm_rqst *call)
static void nlmclnt_rpc_release(void *data)
{
return nlm_release_call(data);
nlm_release_call(data);
}
static int nlm_wait_on_grace(wait_queue_head_t *queue)
......
......@@ -612,8 +612,7 @@ const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie)
* called with BKL held.
*/
static char buf[2*NLM_MAXCOOKIELEN+1];
int i;
int len = sizeof(buf);
unsigned int i, len = sizeof(buf);
char *p = buf;
len--; /* allow for trailing \0 */
......
......@@ -73,8 +73,6 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
complete(&nfs_callback_info.started);
for(;;) {
char buf[RPC_MAX_ADDRBUFLEN];
if (signalled()) {
if (nfs_callback_info.users == 0)
break;
......@@ -92,8 +90,6 @@ static void nfs_callback_svc(struct svc_rqst *rqstp)
__FUNCTION__, -err);
break;
}
dprintk("%s: request from %s\n", __FUNCTION__,
svc_print_addr(rqstp, buf, sizeof(buf)));
svc_process(rqstp);
}
......@@ -168,12 +164,11 @@ void nfs_callback_down(void)
static int nfs_callback_authenticate(struct svc_rqst *rqstp)
{
struct sockaddr_in *addr = svc_addr_in(rqstp);
struct nfs_client *clp;
char buf[RPC_MAX_ADDRBUFLEN];
/* Don't talk to strangers */
clp = nfs_find_client(addr, 4);
clp = nfs_find_client(svc_addr(rqstp), 4);
if (clp == NULL)
return SVC_DROP;
......
......@@ -38,7 +38,7 @@ struct cb_compound_hdr_res {
};
struct cb_getattrargs {
struct sockaddr_in *addr;
struct sockaddr *addr;
struct nfs_fh fh;
uint32_t bitmap[2];
};
......@@ -53,7 +53,7 @@ struct cb_getattrres {
};
struct cb_recallargs {
struct sockaddr_in *addr;
struct sockaddr *addr;
struct nfs_fh fh;
nfs4_stateid stateid;
uint32_t truncate;
......
......@@ -12,7 +12,9 @@
#include "delegation.h"
#include "internal.h"
#ifdef NFS_DEBUG
#define NFSDBG_FACILITY NFSDBG_CALLBACK
#endif
__be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *res)
{
......@@ -26,6 +28,10 @@ __be32 nfs4_callback_getattr(struct cb_getattrargs *args, struct cb_getattrres *
clp = nfs_find_client(args->addr, 4);
if (clp == NULL)
goto out;
dprintk("NFS: GETATTR callback request from %s\n",
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
inode = nfs_delegation_find_inode(clp, &args->fh);
if (inode == NULL)
goto out_putclient;
......@@ -65,23 +71,32 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
clp = nfs_find_client(args->addr, 4);
if (clp == NULL)
goto out;
dprintk("NFS: RECALL callback request from %s\n",
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
do {
struct nfs_client *prev = clp;
inode = nfs_delegation_find_inode(clp, &args->fh);
if (inode == NULL)
goto out_putclient;
if (inode != NULL) {
/* Set up a helper thread to actually return the delegation */
switch(nfs_async_inode_return_delegation(inode, &args->stateid)) {
case 0:
res = 0;
break;
case -ENOENT:
if (res != 0)
res = htonl(NFS4ERR_BAD_STATEID);
break;
default:
res = htonl(NFS4ERR_RESOURCE);
}
iput(inode);
out_putclient:
nfs_put_client(clp);
}
clp = nfs_find_client_next(prev);
nfs_put_client(prev);
} while (clp != NULL);
out:
dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res));
return res;
......
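The recall handler above now walks every matching nfs_client instead of stopping at the first one. A condensed, illustrative fragment of that iteration idiom (all calls appear in the hunk above or in the nfs_find_client_next() declaration added in internal.h later in this merge):

	/* Illustrative fragment, distilled from nfs4_callback_recall() above. */
	struct nfs_client *clp = nfs_find_client(args->addr, 4);

	while (clp != NULL) {
		struct nfs_client *prev = clp;

		/* ... handle any delegation for args->fh held by this client ... */

		clp = nfs_find_client_next(prev);	/* next matching nfs_client */
		nfs_put_client(prev);			/* drop the old reference afterwards */
	}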
......@@ -139,7 +139,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
if (unlikely(status != 0))
return status;
/* We do not like overly long tags! */
if (hdr->taglen > CB_OP_TAGLEN_MAXSZ-12 || hdr->taglen < 0) {
if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) {
printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
__FUNCTION__, hdr->taglen);
return htonl(NFS4ERR_RESOURCE);
......@@ -176,7 +176,7 @@ static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr
status = decode_fh(xdr, &args->fh);
if (unlikely(status != 0))
goto out;
args->addr = svc_addr_in(rqstp);
args->addr = svc_addr(rqstp);
status = decode_bitmap(xdr, args->bitmap);
out:
dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
......@@ -188,7 +188,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
__be32 *p;
__be32 status;
args->addr = svc_addr_in(rqstp);
args->addr = svc_addr(rqstp);
status = decode_stateid(xdr, &args->stateid);
if (unlikely(status != 0))
goto out;
......
......@@ -125,6 +125,32 @@ void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, st
put_rpccred(oldcred);
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
int res = 0;
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync);
nfs_free_delegation(delegation);
return res;
}
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
{
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
if (delegation == NULL)
goto nomatch;
if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
sizeof(delegation->stateid.data)) != 0)
goto nomatch;
list_del_rcu(&delegation->super_list);
nfsi->delegation_state = 0;
rcu_assign_pointer(nfsi->delegation, NULL);
return delegation;
nomatch:
return NULL;
}
/*
* Set up a delegation on an inode
*/
......@@ -133,6 +159,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
struct nfs_delegation *freeme = NULL;
int status = 0;
delegation = kmalloc(sizeof(*delegation), GFP_KERNEL);
......@@ -147,41 +174,45 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
delegation->inode = inode;
spin_lock(&clp->cl_lock);
if (rcu_dereference(nfsi->delegation) == NULL) {
if (rcu_dereference(nfsi->delegation) != NULL) {
if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
sizeof(delegation->stateid)) == 0 &&
delegation->type == nfsi->delegation->type) {
goto out;
}
/*
* Deal with broken servers that hand out two
* delegations for the same file.
*/
dfprintk(FILE, "%s: server %s handed out "
"a duplicate delegation!\n",
__FUNCTION__, clp->cl_hostname);
if (delegation->type <= nfsi->delegation->type) {
freeme = delegation;
delegation = NULL;
goto out;
}
freeme = nfs_detach_delegation_locked(nfsi, NULL);
}
list_add_rcu(&delegation->super_list, &clp->cl_delegations);
nfsi->delegation_state = delegation->type;
rcu_assign_pointer(nfsi->delegation, delegation);
delegation = NULL;
} else {
if (memcmp(&delegation->stateid, &nfsi->delegation->stateid,
sizeof(delegation->stateid)) != 0 ||
delegation->type != nfsi->delegation->type) {
printk("%s: server %u.%u.%u.%u, handed out a duplicate delegation!\n",
__FUNCTION__, NIPQUAD(clp->cl_addr.sin_addr));
status = -EIO;
}
}
/* Ensure we revalidate the attributes and page cache! */
spin_lock(&inode->i_lock);
nfsi->cache_validity |= NFS_INO_REVAL_FORCED;
spin_unlock(&inode->i_lock);
out:
spin_unlock(&clp->cl_lock);
if (delegation != NULL)
nfs_free_delegation(delegation);
if (freeme != NULL)
nfs_do_return_delegation(inode, freeme, 0);
return status;
}
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation)
{
int res = 0;
res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid);
nfs_free_delegation(delegation);
return res;
}
/* Sync all data to disk upon delegation return */
static void nfs_msync_inode(struct inode *inode)
{
......@@ -207,24 +238,28 @@ static int __nfs_inode_return_delegation(struct inode *inode, struct nfs_delegat
up_read(&clp->cl_sem);
nfs_msync_inode(inode);
return nfs_do_return_delegation(inode, delegation);
return nfs_do_return_delegation(inode, delegation, 1);
}
static struct nfs_delegation *nfs_detach_delegation_locked(struct nfs_inode *nfsi, const nfs4_stateid *stateid)
/*
* This function returns the delegation without reclaiming opens
* or protecting against delegation reclaims.
* It is therefore really only safe to be called from
* nfs4_clear_inode()
*/
void nfs_inode_return_delegation_noreclaim(struct inode *inode)
{
struct nfs_delegation *delegation = rcu_dereference(nfsi->delegation);
struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_delegation *delegation;
if (delegation == NULL)
goto nomatch;
if (stateid != NULL && memcmp(delegation->stateid.data, stateid->data,
sizeof(delegation->stateid.data)) != 0)
goto nomatch;
list_del_rcu(&delegation->super_list);
nfsi->delegation_state = 0;
rcu_assign_pointer(nfsi->delegation, NULL);
return delegation;
nomatch:
return NULL;
if (rcu_dereference(nfsi->delegation) != NULL) {
spin_lock(&clp->cl_lock);
delegation = nfs_detach_delegation_locked(nfsi, NULL);
spin_unlock(&clp->cl_lock);
if (delegation != NULL)
nfs_do_return_delegation(inode, delegation, 0);
}
}
int nfs_inode_return_delegation(struct inode *inode)
......@@ -314,8 +349,9 @@ void nfs_expire_all_delegations(struct nfs_client *clp)
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(nfs_do_expire_all_delegations, clp,
"%u.%u.%u.%u-delegreturn",
NIPQUAD(clp->cl_addr.sin_addr));
"%s-delegreturn",
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR));
if (!IS_ERR(task))
return;
nfs_put_client(clp);
......@@ -386,7 +422,7 @@ static int recall_thread(void *data)
nfs_msync_inode(inode);
if (delegation != NULL)
nfs_do_return_delegation(inode, delegation);
nfs_do_return_delegation(inode, delegation, 1);
iput(inode);
module_put_and_exit(0);
}
......
......@@ -29,6 +29,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
void nfs_inode_reclaim_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
int nfs_inode_return_delegation(struct inode *inode);
int nfs_async_inode_return_delegation(struct inode *inode, const nfs4_stateid *stateid);
void nfs_inode_return_delegation_noreclaim(struct inode *inode);
struct inode *nfs_delegation_find_inode(struct nfs_client *clp, const struct nfs_fh *fhandle);
void nfs_return_all_delegations(struct super_block *sb);
......@@ -39,7 +40,7 @@ void nfs_delegation_mark_reclaim(struct nfs_client *clp);
void nfs_delegation_reap_unclaimed(struct nfs_client *clp);
/* NFSv4 delegation-related procedures */
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid);
int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync);
int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid);
int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl);
int nfs4_copy_delegation_stateid(nfs4_stateid *dst, struct inode *inode);
......
......@@ -192,7 +192,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
/* We requested READDIRPLUS, but the server doesn't grok it */
if (error == -ENOTSUPP && desc->plus) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_READDIRPLUS;
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
desc->plus = 0;
goto again;
}
......@@ -537,12 +537,6 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
lock_kernel();
res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
if (res < 0) {
unlock_kernel();
return res;
}
/*
* filp->f_pos points to the dirent entry number.
* *desc->dir_cookie has the cookie for the next entry. We have
......@@ -564,6 +558,10 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
desc->entry = &my_entry;
nfs_block_sillyrename(dentry);
res = nfs_revalidate_mapping_nolock(inode, filp->f_mapping);
if (res < 0)
goto out;
while(!desc->entry->eof) {
res = readdir_search_pagecache(desc);
......@@ -579,7 +577,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
break;
}
if (res == -ETOOSMALL && desc->plus) {
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
nfs_zap_caches(inode);
desc->plus = 0;
desc->entry->eof = 0;
......@@ -594,6 +592,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
break;
}
}
out:
nfs_unblock_sillyrename(dentry);
unlock_kernel();
if (res > 0)
......@@ -639,6 +638,21 @@ static int nfs_fsync_dir(struct file *filp, struct dentry *dentry, int datasync)
return 0;
}
/**
* nfs_force_lookup_revalidate - Mark the directory as having changed
* @dir - pointer to directory inode
*
* This forces the revalidation code in nfs_lookup_revalidate() to do a
* full lookup on all child dentries of 'dir' whenever a change occurs
* on the server that might have invalidated our dcache.
*
* The caller should be holding dir->i_lock
*/
void nfs_force_lookup_revalidate(struct inode *dir)
{
NFS_I(dir)->cache_change_attribute = jiffies;
}
/*
* A check for whether or not the parent directory has changed.
* In the case it has, we assume that the dentries are untrustworthy
......@@ -827,6 +841,10 @@ static int nfs_dentry_delete(struct dentry *dentry)
dentry->d_parent->d_name.name, dentry->d_name.name,
dentry->d_flags);
/* Unhash any dentry with a stale inode */
if (dentry->d_inode != NULL && NFS_STALE(dentry->d_inode))
return 1;
if (dentry->d_flags & DCACHE_NFSFS_RENAMED) {
/* Unhash it, so that ->d_iput() would be called */
return 1;
......@@ -846,7 +864,6 @@ static int nfs_dentry_delete(struct dentry *dentry)
*/
static void nfs_dentry_iput(struct dentry *dentry, struct inode *inode)
{
nfs_inode_return_delegation(inode);
if (S_ISDIR(inode->i_mode))
/* drop any readdir cache as it could easily be old */
NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
......@@ -1268,6 +1285,12 @@ static int nfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
return error;
}
static void nfs_dentry_handle_enoent(struct dentry *dentry)
{
if (dentry->d_inode != NULL && !d_unhashed(dentry))
d_delete(dentry);
}
static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
{
int error;
......@@ -1280,6 +1303,8 @@ static int nfs_rmdir(struct inode *dir, struct dentry *dentry)
/* Ensure the VFS deletes this inode */
if (error == 0 && dentry->d_inode != NULL)
clear_nlink(dentry->d_inode);
else if (error == -ENOENT)
nfs_dentry_handle_enoent(dentry);
unlock_kernel();
return error;
......@@ -1386,6 +1411,8 @@ static int nfs_safe_remove(struct dentry *dentry)
nfs_mark_for_revalidate(inode);
} else
error = NFS_PROTO(dir)->remove(dir, &dentry->d_name);
if (error == -ENOENT)
nfs_dentry_handle_enoent(dentry);
out:
return error;
}
......@@ -1422,7 +1449,7 @@ static int nfs_unlink(struct inode *dir, struct dentry *dentry)
spin_unlock(&dentry->d_lock);
spin_unlock(&dcache_lock);
error = nfs_safe_remove(dentry);
if (!error) {
if (!error || error == -ENOENT) {
nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
} else if (need_rehash)
d_rehash(dentry);
......@@ -1635,7 +1662,8 @@ static int nfs_rename(struct inode *old_dir, struct dentry *old_dentry,
d_move(old_dentry, new_dentry);
nfs_set_verifier(new_dentry,
nfs_save_change_attribute(new_dir));
}
} else if (error == -ENOENT)
nfs_dentry_handle_enoent(old_dentry);
/* new dentry created? */
if (dentry)
......@@ -1666,13 +1694,19 @@ int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
restart:
spin_lock(&nfs_access_lru_lock);
list_for_each_entry(nfsi, &nfs_access_lru_list, access_cache_inode_lru) {
struct rw_semaphore *s_umount;
struct inode *inode;
if (nr_to_scan-- == 0)
break;
s_umount = &nfsi->vfs_inode.i_sb->s_umount;
if (!down_read_trylock(s_umount))
continue;
inode = igrab(&nfsi->vfs_inode);
if (inode == NULL)
if (inode == NULL) {
up_read(s_umount);
continue;
}
spin_lock(&inode->i_lock);
if (list_empty(&nfsi->access_cache_entry_lru))
goto remove_lru_entry;
......@@ -1691,6 +1725,7 @@ int nfs_access_cache_shrinker(int nr_to_scan, gfp_t gfp_mask)
spin_unlock(&inode->i_lock);
spin_unlock(&nfs_access_lru_lock);
iput(inode);
up_read(s_umount);
goto restart;
}
spin_unlock(&nfs_access_lru_lock);
......@@ -1731,7 +1766,7 @@ static void __nfs_access_zap_cache(struct inode *inode)
void nfs_access_zap_cache(struct inode *inode)
{
/* Remove from global LRU init */
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
if (test_and_clear_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
spin_lock(&nfs_access_lru_lock);
list_del_init(&NFS_I(inode)->access_cache_inode_lru);
spin_unlock(&nfs_access_lru_lock);
......@@ -1845,7 +1880,7 @@ static void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *s
smp_mb__after_atomic_inc();
/* Add inode to global LRU list */
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_FLAGS(inode))) {
if (!test_and_set_bit(NFS_INO_ACL_LRU_SET, &NFS_I(inode)->flags)) {
spin_lock(&nfs_access_lru_lock);
list_add_tail(&NFS_I(inode)->access_cache_inode_lru, &nfs_access_lru_list);
spin_unlock(&nfs_access_lru_lock);
......
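The umount race fix in nfs_access_cache_shrinker() above reduces to an ordering rule: pin the superblock before igrab() and keep it pinned until after the final iput(). Condensed fragment of the loop body from the hunk above, with comments added:

		/* Condensed from the shrinker hunk above (fragment, loop body only). */
		s_umount = &nfsi->vfs_inode.i_sb->s_umount;
		if (!down_read_trylock(s_umount))
			continue;			/* umount already in progress, skip */
		inode = igrab(&nfsi->vfs_inode);
		if (inode == NULL) {
			up_read(s_umount);		/* inode is going away */
			continue;
		}
		/* ... scan and prune this inode's access cache entries ... */
		iput(inode);
		up_read(s_umount);			/* only after the iput() */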
......@@ -188,12 +188,17 @@ static void nfs_direct_req_release(struct nfs_direct_req *dreq)
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
ssize_t result = -EIOCBQUEUED;
struct rpc_clnt *clnt;
sigset_t oldset;
/* Async requests don't wait here */
if (dreq->iocb)
goto out;
clnt = NFS_CLIENT(dreq->inode);
rpc_clnt_sigmask(clnt, &oldset);
result = wait_for_completion_interruptible(&dreq->completion);
rpc_clnt_sigunmask(clnt, &oldset);
if (!result)
result = dreq->error;
......@@ -272,6 +277,16 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
size_t rsize = NFS_SERVER(inode)->rsize;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_read_direct_ops,
.flags = RPC_TASK_ASYNC,
};
unsigned int pgbase;
int result;
ssize_t started = 0;
......@@ -311,7 +326,7 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->req = (struct nfs_page *) dreq;
data->inode = inode;
data->cred = ctx->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
data->args.offset = pos;
......@@ -321,14 +336,16 @@ static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq,
data->res.fattr = &data->fattr;
data->res.eof = 0;
data->res.count = bytes;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
&nfs_read_direct_ops, data);
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long) inode;
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
NFS_PROTO(inode)->read_setup(data, &msg);
rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
dprintk("NFS: %5u initiated direct read call "
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
......@@ -391,9 +408,7 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
unsigned long nr_segs, loff_t pos)
{
ssize_t result = 0;
sigset_t oldset;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct rpc_clnt *clnt = NFS_CLIENT(inode);
struct nfs_direct_req *dreq;
dreq = nfs_direct_req_alloc();
......@@ -405,11 +420,9 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
rpc_clnt_sigmask(clnt, &oldset);
result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
nfs_direct_req_release(dreq);
return result;
......@@ -431,6 +444,15 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
struct inode *inode = dreq->inode;
struct list_head *p;
struct nfs_write_data *data;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = dreq->ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.callback_ops = &nfs_write_direct_ops,
.flags = RPC_TASK_ASYNC,
};
dreq->count = 0;
get_dreq(dreq);
......@@ -440,6 +462,9 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
get_dreq(dreq);
/* Use stable writes */
data->args.stable = NFS_FILE_SYNC;
/*
* Reset data->res.
*/
......@@ -451,17 +476,18 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
* Reuse data->task; data->args should not have changed
* since the original request was sent.
*/
rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
&nfs_write_direct_ops, data);
NFS_PROTO(inode)->write_setup(data, FLUSH_STABLE);
data->task.tk_priority = RPC_PRIORITY_NORMAL;
data->task.tk_cookie = (unsigned long) inode;
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
NFS_PROTO(inode)->write_setup(data, &msg);
/*
* We're called via an RPC callback, so BKL is already held.
*/
rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
......@@ -504,9 +530,23 @@ static const struct rpc_call_ops nfs_commit_direct_ops = {
static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
struct nfs_write_data *data = dreq->commit_data;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = dreq->ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(dreq->inode),
.rpc_message = &msg,
.callback_ops = &nfs_commit_direct_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC,
};
data->inode = dreq->inode;
data->cred = dreq->ctx->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(data->inode);
data->args.offset = 0;
......@@ -515,18 +555,16 @@ static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
data->res.fattr = &data->fattr;
data->res.verf = &data->verf;
rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
&nfs_commit_direct_ops, data);
NFS_PROTO(data->inode)->commit_setup(data, 0);
NFS_PROTO(data->inode)->commit_setup(data, &msg);
data->task.tk_priority = RPC_PRIORITY_NORMAL;
data->task.tk_cookie = (unsigned long)data->inode;
/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
dreq->commit_data = NULL;
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
}
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
......@@ -641,6 +679,16 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
struct inode *inode = ctx->path.dentry->d_inode;
unsigned long user_addr = (unsigned long)iov->iov_base;
size_t count = iov->iov_len;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_cred = ctx->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_write_direct_ops,
.flags = RPC_TASK_ASYNC,
};
size_t wsize = NFS_SERVER(inode)->wsize;
unsigned int pgbase;
int result;
......@@ -683,25 +731,27 @@ static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq,
data->req = (struct nfs_page *) dreq;
data->inode = inode;
data->cred = ctx->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.context = ctx;
data->args.offset = pos;
data->args.pgbase = pgbase;
data->args.pages = data->pagevec;
data->args.count = bytes;
data->args.stable = sync;
data->res.fattr = &data->fattr;
data->res.count = bytes;
data->res.verf = &data->verf;
rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
&nfs_write_direct_ops, data);
NFS_PROTO(inode)->write_setup(data, sync);
data->task.tk_priority = RPC_PRIORITY_NORMAL;
data->task.tk_cookie = (unsigned long) inode;
task_setup_data.task = &data->task;
task_setup_data.callback_data = data;
msg.rpc_argp = &data->args;
msg.rpc_resp = &data->res;
NFS_PROTO(inode)->write_setup(data, &msg);
rpc_execute(&data->task);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
dprintk("NFS: %5u initiated direct write call "
"(req %s/%Ld, %zu bytes @ offset %Lu)\n",
......@@ -767,12 +817,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
size_t count)
{
ssize_t result = 0;
sigset_t oldset;
struct inode *inode = iocb->ki_filp->f_mapping->host;
struct rpc_clnt *clnt = NFS_CLIENT(inode);
struct nfs_direct_req *dreq;
size_t wsize = NFS_SERVER(inode)->wsize;
int sync = 0;
int sync = NFS_UNSTABLE;
dreq = nfs_direct_req_alloc();
if (!dreq)
......@@ -780,18 +828,16 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
nfs_alloc_commit_data(dreq);
if (dreq->commit_data == NULL || count < wsize)
sync = FLUSH_STABLE;
sync = NFS_FILE_SYNC;
dreq->inode = inode;
dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
if (!is_sync_kiocb(iocb))
dreq->iocb = iocb;
rpc_clnt_sigmask(clnt, &oldset);
result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync);
if (!result)
result = nfs_direct_wait(dreq);
rpc_clnt_sigunmask(clnt, &oldset);
nfs_direct_req_release(dreq);
return result;
......
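All three call sites in this file converge on the same dispatch pattern: build an rpc_message and an rpc_task_setup, let the per-version ->read_setup()/->write_setup()/->commit_setup() hook fill in rpc_proc, then fire the task with rpc_run_task() and drop the local reference. An illustrative composite of the hunks above (not additional new code; it assumes data->cred and data->args/res have already been filled in as in the diff):

/* Illustrative composite of the direct-read dispatch shown above. */
static void example_start_direct_read(struct nfs_read_data *data,
				      struct inode *inode)
{
	struct rpc_task *task;
	struct rpc_message msg = {
		.rpc_argp	= &data->args,
		.rpc_resp	= &data->res,
		.rpc_cred	= data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task		= &data->task,
		.rpc_client	= NFS_CLIENT(inode),
		.rpc_message	= &msg,
		.callback_ops	= &nfs_read_direct_ops,
		.callback_data	= data,
		.flags		= RPC_TASK_ASYNC,
	};

	NFS_PROTO(inode)->read_setup(data, &msg);	/* fills in msg.rpc_proc */
	task = rpc_run_task(&task_setup_data);
	if (!IS_ERR(task))
		rpc_put_task(task);	/* the RPC layer keeps its own reference */
}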
......@@ -349,7 +349,9 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
unlock_page(page);
page_cache_release(page);
return status < 0 ? status : copied;
if (status < 0)
return status;
return copied;
}
static void nfs_invalidate_page(struct page *page, unsigned long offset)
......@@ -392,35 +394,27 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
struct file *filp = vma->vm_file;
unsigned pagelen;
int ret = -EINVAL;
void *fsdata;
struct address_space *mapping;
loff_t offset;
lock_page(page);
mapping = page->mapping;
if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping) {
unlock_page(page);
return -EINVAL;
}
if (mapping != vma->vm_file->f_path.dentry->d_inode->i_mapping)
goto out_unlock;
ret = 0;
pagelen = nfs_page_length(page);
offset = (loff_t)page->index << PAGE_CACHE_SHIFT;
unlock_page(page);
if (pagelen == 0)
goto out_unlock;
/*
* we can use mapping after releasing the page lock, because:
* we hold mmap_sem on the fault path, which should pin the vma
* which should pin the file, which pins the dentry which should
* hold a reference on inode.
*/
ret = nfs_flush_incompatible(filp, page);
if (ret != 0)
goto out_unlock;
if (pagelen) {
struct page *page2 = NULL;
ret = nfs_write_begin(filp, mapping, offset, pagelen,
0, &page2, &fsdata);
if (!ret)
ret = nfs_write_end(filp, mapping, offset, pagelen,
pagelen, page2, fsdata);
}
ret = nfs_updatepage(filp, page, 0, pagelen);
if (ret == 0)
ret = pagelen;
out_unlock:
unlock_page(page);
return ret;
}
......
......@@ -74,7 +74,7 @@ module_param_call(idmap_cache_timeout, param_set_idmap_timeout, param_get_int,
struct idmap_hashent {
unsigned long ih_expires;
__u32 ih_id;
int ih_namelen;
size_t ih_namelen;
char ih_name[IDMAP_NAMESZ];
};
......@@ -115,7 +115,8 @@ nfs_idmap_new(struct nfs_client *clp)
BUG_ON(clp->cl_idmap != NULL);
if ((idmap = kzalloc(sizeof(*idmap), GFP_KERNEL)) == NULL)
idmap = kzalloc(sizeof(*idmap), GFP_KERNEL);
if (idmap == NULL)
return -ENOMEM;
idmap->idmap_dentry = rpc_mkpipe(clp->cl_rpcclient->cl_dentry, "idmap",
......@@ -192,7 +193,7 @@ idmap_lookup_id(struct idmap_hashtable *h, __u32 id)
* pretty trivial.
*/
static inline struct idmap_hashent *
idmap_alloc_name(struct idmap_hashtable *h, char *name, unsigned len)
idmap_alloc_name(struct idmap_hashtable *h, char *name, size_t len)
{
return idmap_name_hash(h, name, len);
}
......@@ -285,7 +286,7 @@ nfs_idmap_id(struct idmap *idmap, struct idmap_hashtable *h,
memset(im, 0, sizeof(*im));
mutex_unlock(&idmap->idmap_im_lock);
mutex_unlock(&idmap->idmap_lock);
return (ret);
return ret;
}
/*
......@@ -357,17 +358,15 @@ idmap_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len - msg->copied;
ssize_t left;
if (mlen > buflen)
mlen = buflen;
size_t mlen = min(msg->len, buflen);
unsigned long left;
left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
return left;
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
......@@ -382,14 +381,14 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
struct idmap_msg im_in, *im = &idmap->idmap_im;
struct idmap_hashtable *h;
struct idmap_hashent *he = NULL;
int namelen_in;
size_t namelen_in;
int ret;
if (mlen != sizeof(im_in))
return (-ENOSPC);
return -ENOSPC;
if (copy_from_user(&im_in, src, mlen) != 0)
return (-EFAULT);
return -EFAULT;
mutex_lock(&idmap->idmap_im_lock);
......@@ -487,7 +486,7 @@ static unsigned int fnvhash32(const void *buf, size_t buflen)
hash ^= (unsigned int)*p;
}
return (hash);
return hash;
}
int nfs_map_name_to_uid(struct nfs_client *clp, const char *name, size_t namelen, __u32 *uid)
......
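The idmap_pipe_upcall() change above hinges on a property of copy_to_user(): it returns the number of bytes left uncopied (an unsigned count), never a negative errno, so the old `left < 0` test could never fire. A minimal sketch of the corrected pattern, with an assumed helper name:

/*
 * Minimal sketch, not from the diff: treat a completely failed
 * copy_to_user() as -EFAULT and a partial copy as a short transfer.
 */
static ssize_t example_copy_out(char __user *dst, const char *src,
				size_t len, size_t buflen)
{
	size_t mlen = min(len, buflen);
	unsigned long left = copy_to_user(dst, src, mlen);

	if (left == mlen)
		return -EFAULT;		/* nothing reached userspace */
	return mlen - left;		/* bytes actually copied */
}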
......@@ -192,7 +192,7 @@ void nfs_invalidate_atime(struct inode *inode)
*/
static void nfs_invalidate_inode(struct inode *inode)
{
set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
nfs_zap_caches_locked(inode);
}
......@@ -229,7 +229,7 @@ nfs_init_locked(struct inode *inode, void *opaque)
struct nfs_find_desc *desc = (struct nfs_find_desc *)opaque;
struct nfs_fattr *fattr = desc->fattr;
NFS_FILEID(inode) = fattr->fileid;
set_nfs_fileid(inode, fattr->fileid);
nfs_copy_fh(NFS_FH(inode), desc->fh);
return 0;
}
......@@ -291,7 +291,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
inode->i_fop = &nfs_dir_operations;
if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS)
&& fattr->size <= NFS_LIMIT_READDIRPLUS)
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
/* Deal with crossing mountpoints */
if (!nfs_fsid_equal(&NFS_SB(sb)->fsid, &fattr->fsid)) {
if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
......@@ -461,9 +461,18 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
int err;
/* Flush out writes to the server in order to update c/mtime */
if (S_ISREG(inode->i_mode))
/*
* Flush out writes to the server in order to update c/mtime.
*
* Hold the i_mutex to suspend application writes temporarily;
* this prevents long-running writing applications from blocking
* nfs_wb_nocommit.
*/
if (S_ISREG(inode->i_mode)) {
mutex_lock(&inode->i_mutex);
nfs_wb_nocommit(inode);
mutex_unlock(&inode->i_mutex);
}
/*
* We may force a getattr if the user cares about atime.
......@@ -659,7 +668,7 @@ __nfs_revalidate_inode(struct nfs_server *server, struct inode *inode)
if (status == -ESTALE) {
nfs_zap_caches(inode);
if (!S_ISDIR(inode->i_mode))
set_bit(NFS_INO_STALE, &NFS_FLAGS(inode));
set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
goto out;
}
......@@ -814,8 +823,9 @@ static void nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (S_ISDIR(inode->i_mode))
nfsi->cache_validity |= NFS_INO_INVALID_DATA;
}
if (inode->i_size == fattr->pre_size && nfsi->npages == 0)
inode->i_size = fattr->size;
if (inode->i_size == nfs_size_to_loff_t(fattr->pre_size) &&
nfsi->npages == 0)
inode->i_size = nfs_size_to_loff_t(fattr->size);
}
}
......@@ -1019,7 +1029,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
dprintk("NFS: mtime change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
nfsi->cache_change_attribute = now;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
/* If ctime has changed we should definitely clear access+acl caches */
if (!timespec_equal(&inode->i_ctime, &fattr->ctime))
......@@ -1028,7 +1039,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
nfsi->cache_change_attribute = now;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
/* Check if our cached file size is stale */
......@@ -1133,7 +1145,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
void nfs4_clear_inode(struct inode *inode)
{
/* If we are holding a delegation, return it! */
nfs_inode_return_delegation(inode);
nfs_inode_return_delegation_noreclaim(inode);
/* First call standard NFS clear_inode() code */
nfs_clear_inode(inode);
}
......
......@@ -21,7 +21,8 @@ struct nfs_clone_mount {
struct nfs_fattr *fattr;
char *hostname;
char *mnt_path;
struct sockaddr_in *addr;
struct sockaddr *addr;
size_t addrlen;
rpc_authflavor_t authflavor;
};
......@@ -41,19 +42,19 @@ struct nfs_parsed_mount_data {
char *client_address;
struct {
struct sockaddr_in address;
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
unsigned int program;
unsigned int version;
unsigned short port;
int protocol;
} mount_server;
struct {
struct sockaddr_in address;
struct sockaddr_storage address;
size_t addrlen;
char *hostname;
char *export_path;
unsigned int program;
int protocol;
} nfs_server;
};
......@@ -62,7 +63,8 @@ struct nfs_parsed_mount_data {
extern struct rpc_program nfs_program;
extern void nfs_put_client(struct nfs_client *);
extern struct nfs_client *nfs_find_client(const struct sockaddr_in *, int);
extern struct nfs_client *nfs_find_client(const struct sockaddr *, u32);
extern struct nfs_client *nfs_find_client_next(struct nfs_client *);
extern struct nfs_server *nfs_create_server(
const struct nfs_parsed_mount_data *,
struct nfs_fh *);
......@@ -160,6 +162,8 @@ extern struct rpc_stat nfs_rpcstat;
extern int __init register_nfs_fs(void);
extern void __exit unregister_nfs_fs(void);
extern void nfs_sb_active(struct nfs_server *server);
extern void nfs_sb_deactive(struct nfs_server *server);
/* namespace.c */
extern char *nfs_path(const char *base,
......
......@@ -188,7 +188,7 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
{
#ifdef CONFIG_NFS_V4
struct vfsmount *mnt = NULL;
switch (server->nfs_client->cl_nfsversion) {
switch (server->nfs_client->rpc_ops->version) {
case 2:
case 3:
mnt = vfs_kern_mount(&nfs_xdev_fs_type, 0, devname, mountdata);
......
......@@ -262,7 +262,9 @@ static int
nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
{
struct kvec *iov = req->rq_rcv_buf.head;
int status, count, recvd, hdrlen;
size_t hdrlen;
u32 count, recvd;
int status;
if ((status = ntohl(*p++)))
return -nfs_stat_to_errno(status);
......@@ -273,7 +275,7 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READ reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
......@@ -283,11 +285,11 @@ nfs_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %d > recvd %d\n", count, recvd);
"count %u > recvd %u\n", count, recvd);
count = recvd;
}
dprintk("RPC: readres OK count %d\n", count);
dprintk("RPC: readres OK count %u\n", count);
if (count < res->count)
res->count = count;
......@@ -423,9 +425,10 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
size_t hdrlen;
unsigned int pglen, recvd;
u32 len;
int status, nr;
unsigned int len, pglen;
__be32 *end, *entry, *kaddr;
if ((status = ntohl(*p++)))
......@@ -434,7 +437,7 @@ nfs_xdr_readdirres(struct rpc_rqst *req, __be32 *p, void *dummy)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READDIR reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
......@@ -576,7 +579,8 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
int hdrlen, len, recvd;
size_t hdrlen;
u32 len, recvd;
char *kaddr;
int status;
......@@ -584,14 +588,14 @@ nfs_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, void *dummy)
return -nfs_stat_to_errno(status);
/* Convert length of symlink */
len = ntohl(*p++);
if (len >= rcvbuf->page_len || len <= 0) {
if (len >= rcvbuf->page_len) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
}
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READLINK reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READLINK header is short. iovec will be shifted.\n");
......
......@@ -732,16 +732,9 @@ static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
return 0;
}
static void nfs3_proc_read_setup(struct nfs_read_data *data)
static void nfs3_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_READ],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
rpc_call_setup(&data->task, &msg, 0);
msg->rpc_proc = &nfs3_procedures[NFS3PROC_READ];
}
static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
......@@ -753,24 +746,9 @@ static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
static void nfs3_proc_write_setup(struct nfs_write_data *data, int how)
static void nfs3_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_WRITE],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
data->args.stable = NFS_UNSTABLE;
if (how & FLUSH_STABLE) {
data->args.stable = NFS_FILE_SYNC;
if (NFS_I(data->inode)->ncommit)
data->args.stable = NFS_DATA_SYNC;
}
/* Finalize the task. */
rpc_call_setup(&data->task, &msg, 0);
msg->rpc_proc = &nfs3_procedures[NFS3PROC_WRITE];
}
static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
......@@ -781,22 +759,17 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
static void nfs3_proc_commit_setup(struct nfs_write_data *data, int how)
static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct rpc_message msg = {
.rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
rpc_call_setup(&data->task, &msg, 0);
msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
}
static int
nfs3_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
return nlmclnt_proc(filp->f_path.dentry->d_inode, cmd, fl);
struct inode *inode = filp->f_path.dentry->d_inode;
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}
const struct nfs_rpc_ops nfs_v3_clientops = {
......
......@@ -506,9 +506,9 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
struct page **page;
int hdrlen, recvd;
size_t hdrlen;
u32 len, recvd, pglen;
int status, nr;
unsigned int len, pglen;
__be32 *entry, *end, *kaddr;
status = ntohl(*p++);
......@@ -527,7 +527,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READDIR reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READDIR header is short. iovec will be shifted.\n");
......@@ -549,7 +549,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
len = ntohl(*p++); /* string length */
p += XDR_QUADLEN(len) + 2; /* name + cookie */
if (len > NFS3_MAXNAMLEN) {
dprintk("NFS: giant filename in readdir (len %x)!\n",
dprintk("NFS: giant filename in readdir (len 0x%x)!\n",
len);
goto err_unmap;
}
......@@ -570,7 +570,7 @@ nfs3_xdr_readdirres(struct rpc_rqst *req, __be32 *p, struct nfs3_readdirres *res
len = ntohl(*p++);
if (len > NFS3_FHSIZE) {
dprintk("NFS: giant filehandle in "
"readdir (len %x)!\n", len);
"readdir (len 0x%x)!\n", len);
goto err_unmap;
}
p += XDR_QUADLEN(len);
......@@ -815,7 +815,8 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
int hdrlen, len, recvd;
size_t hdrlen;
u32 len, recvd;
char *kaddr;
int status;
......@@ -827,7 +828,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
/* Convert length of symlink */
len = ntohl(*p++);
if (len >= rcvbuf->page_len || len <= 0) {
if (len >= rcvbuf->page_len) {
dprintk("nfs: server returned giant symlink!\n");
return -ENAMETOOLONG;
}
......@@ -835,7 +836,7 @@ nfs3_xdr_readlinkres(struct rpc_rqst *req, __be32 *p, struct nfs_fattr *fattr)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READLINK reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READLINK header is short. "
......@@ -863,7 +864,9 @@ static int
nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
{
struct kvec *iov = req->rq_rcv_buf.head;
int status, count, ocount, recvd, hdrlen;
size_t hdrlen;
u32 count, ocount, recvd;
int status;
status = ntohl(*p++);
p = xdr_decode_post_op_attr(p, res->fattr);
......@@ -871,7 +874,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
if (status != 0)
return -nfs_stat_to_errno(status);
/* Decode reply could and EOF flag. NFSv3 is somewhat redundant
/* Decode reply count and EOF flag. NFSv3 is somewhat redundant
* in that it puts the count both in the res struct and in the
* opaque data count. */
count = ntohl(*p++);
......@@ -886,7 +889,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
hdrlen = (u8 *) p - (u8 *) iov->iov_base;
if (iov->iov_len < hdrlen) {
dprintk("NFS: READ reply header overflowed:"
"length %d > %Zu\n", hdrlen, iov->iov_len);
"length %Zu > %Zu\n", hdrlen, iov->iov_len);
return -errno_NFSERR_IO;
} else if (iov->iov_len != hdrlen) {
dprintk("NFS: READ header is short. iovec will be shifted.\n");
......@@ -896,7 +899,7 @@ nfs3_xdr_readres(struct rpc_rqst *req, __be32 *p, struct nfs_readres *res)
recvd = req->rq_rcv_buf.len - hdrlen;
if (count > recvd) {
dprintk("NFS: server cheating in read reply: "
"count %d > recvd %d\n", count, recvd);
"count %u > recvd %u\n", count, recvd);
count = recvd;
res->eof = 0;
}
......
......@@ -114,10 +114,7 @@ static inline int valid_ipaddr4(const char *buf)
* nfs_follow_referral - set up mountpoint when hitting a referral on moved error
* @mnt_parent - mountpoint of parent directory
* @dentry - parent directory
* @fspath - fs path returned in fs_locations
* @mntpath - mount path to new server
* @hostname - hostname of new server
* @addr - host addr of new server
* @locations - array of NFSv4 server location information
*
*/
static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
......@@ -131,7 +128,8 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
.authflavor = NFS_SB(mnt_parent->mnt_sb)->client->cl_auth->au_flavor,
};
char *page = NULL, *page2 = NULL;
int loc, s, error;
unsigned int s;
int loc, error;
if (locations == NULL || locations->nlocations <= 0)
goto out;
......@@ -174,7 +172,10 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
s = 0;
while (s < location->nservers) {
struct sockaddr_in addr = {};
struct sockaddr_in addr = {
.sin_family = AF_INET,
.sin_port = htons(NFS_PORT),
};
if (location->servers[s].len <= 0 ||
valid_ipaddr4(location->servers[s].data) < 0) {
......@@ -183,10 +184,9 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
}
mountdata.hostname = location->servers[s].data;
addr.sin_addr.s_addr = in_aton(mountdata.hostname);
addr.sin_family = AF_INET;
addr.sin_port = htons(NFS_PORT);
mountdata.addr = &addr;
addr.sin_addr.s_addr = in_aton(mountdata.hostname),
mountdata.addr = (struct sockaddr *)&addr;
mountdata.addrlen = sizeof(addr);
snprintf(page, PAGE_SIZE, "%s:%s",
mountdata.hostname,
......
This diff is collapsed.
......@@ -644,27 +644,26 @@ void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t f
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
struct rpc_sequence *sequence = counter->sequence;
struct nfs_seqid *new;
new = kmalloc(sizeof(*new), GFP_KERNEL);
if (new != NULL) {
new->sequence = counter;
spin_lock(&sequence->lock);
list_add_tail(&new->list, &sequence->list);
spin_unlock(&sequence->lock);
INIT_LIST_HEAD(&new->list);
}
return new;
}
void nfs_free_seqid(struct nfs_seqid *seqid)
{
if (!list_empty(&seqid->list)) {
struct rpc_sequence *sequence = seqid->sequence->sequence;
spin_lock(&sequence->lock);
list_del(&seqid->list);
spin_unlock(&sequence->lock);
rpc_wake_up(&sequence->wait);
}
kfree(seqid);
}
......@@ -675,6 +674,7 @@ void nfs_free_seqid(struct nfs_seqid *seqid)
*/
static void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
BUG_ON(list_first_entry(&seqid->sequence->sequence->list, struct nfs_seqid, list) != seqid);
switch (status) {
case 0:
break;
......@@ -726,15 +726,15 @@ int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
struct rpc_sequence *sequence = seqid->sequence->sequence;
int status = 0;
if (sequence->list.next == &seqid->list)
goto out;
spin_lock(&sequence->lock);
if (sequence->list.next != &seqid->list) {
if (list_empty(&seqid->list))
list_add_tail(&seqid->list, &sequence->list);
if (list_first_entry(&sequence->list, struct nfs_seqid, list) == seqid)
goto unlock;
rpc_sleep_on(&sequence->wait, task, NULL, NULL);
status = -EAGAIN;
}
unlock:
spin_unlock(&sequence->lock);
out:
return status;
}
......@@ -758,8 +758,9 @@ static void nfs4_recover_state(struct nfs_client *clp)
__module_get(THIS_MODULE);
atomic_inc(&clp->cl_count);
task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
NIPQUAD(clp->cl_addr.sin_addr));
task = kthread_run(reclaimer, clp, "%s-reclaim",
rpc_peeraddr2str(clp->cl_rpcclient,
RPC_DISPLAY_ADDR));
if (!IS_ERR(task))
return;
nfs4_clear_recover_bit(clp);
......@@ -970,8 +971,8 @@ static int reclaimer(void *ptr)
module_put_and_exit(0);
return 0;
out_error:
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
NIPQUAD(clp->cl_addr.sin_addr), -status);
printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %s"
" with error %d\n", clp->cl_hostname, -status);
set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
goto out;
}
......
......@@ -116,10 +116,12 @@ static int nfs4_stat_to_errno(int);
#define decode_renew_maxsz (op_decode_hdr_maxsz)
#define encode_setclientid_maxsz \
(op_encode_hdr_maxsz + \
4 /*server->ip_addr*/ + \
1 /*Netid*/ + \
6 /*uaddr*/ + \
6 + (NFS4_VERIFIER_SIZE >> 2))
XDR_QUADLEN(NFS4_VERIFIER_SIZE) + \
XDR_QUADLEN(NFS4_SETCLIENTID_NAMELEN) + \
1 /* sc_prog */ + \
XDR_QUADLEN(RPCBIND_MAXNETIDLEN) + \
XDR_QUADLEN(RPCBIND_MAXUADDRLEN) + \
1) /* sc_cb_ident */
#define decode_setclientid_maxsz \
(op_decode_hdr_maxsz + \
2 + \
......@@ -2515,14 +2517,12 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
static int decode_pathname(struct xdr_stream *xdr, struct nfs4_pathname *path)
{
int n;
u32 n;
__be32 *p;
int status = 0;
READ_BUF(4);
READ32(n);
if (n < 0)
goto out_eio;
if (n == 0)
goto root_path;
dprintk("path ");
......@@ -2579,13 +2579,11 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
goto out_eio;
res->nlocations = 0;
while (res->nlocations < n) {
int m;
u32 m;
struct nfs4_fs_location *loc = &res->locations[res->nlocations];
READ_BUF(4);
READ32(m);
if (m <= 0)
goto out_eio;
loc->nservers = 0;
dprintk("%s: servers ", __FUNCTION__);
......@@ -2598,8 +2596,12 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
if (loc->nservers < NFS4_FS_LOCATION_MAXSERVERS)
loc->nservers++;
else {
int i;
dprintk("%s: using first %d of %d servers returned for location %d\n", __FUNCTION__, NFS4_FS_LOCATION_MAXSERVERS, m, res->nlocations);
unsigned int i;
dprintk("%s: using first %u of %u servers "
"returned for location %u\n",
__FUNCTION__,
NFS4_FS_LOCATION_MAXSERVERS,
m, res->nlocations);
for (i = loc->nservers; i < m; i++) {
unsigned int len;
char *data;
......@@ -3476,10 +3478,11 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct page *page = *rcvbuf->pages;
struct kvec *iov = rcvbuf->head;
unsigned int nr, pglen = rcvbuf->page_len;
size_t hdrlen;
u32 recvd, pglen = rcvbuf->page_len;
__be32 *end, *entry, *p, *kaddr;
uint32_t len, attrlen, xlen;
int hdrlen, recvd, status;
unsigned int nr;
int status;
status = decode_op_hdr(xdr, OP_READDIR);
if (status)
......@@ -3503,6 +3506,7 @@ static int decode_readdir(struct xdr_stream *xdr, struct rpc_rqst *req, struct n
end = p + ((pglen + readdir->pgbase) >> 2);
entry = p;
for (nr = 0; *p++; nr++) {
u32 len, attrlen, xlen;
if (end - p < 3)
goto short_pkt;
dprintk("cookie = %Lu, ", *((unsigned long long *)p));
......@@ -3551,7 +3555,8 @@ static int decode_readlink(struct xdr_stream *xdr, struct rpc_rqst *req)
{
struct xdr_buf *rcvbuf = &req->rq_rcv_buf;
struct kvec *iov = rcvbuf->head;
int hdrlen, len, recvd;
size_t hdrlen;
u32 len, recvd;
__be32 *p;
char *kaddr;
int status;
......@@ -3646,7 +3651,8 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
if (unlikely(bitmap[0] & (FATTR4_WORD0_ACL - 1U)))
return -EIO;
if (likely(bitmap[0] & FATTR4_WORD0_ACL)) {
int hdrlen, recvd;
size_t hdrlen;
u32 recvd;
/* We ignore &savep and don't do consistency checks on
* the attr length. Let userspace figure it out.... */
......
......@@ -111,12 +111,13 @@ void nfs_unlock_request(struct nfs_page *req)
* nfs_set_page_tag_locked - Tag a request as locked
* @req:
*/
static int nfs_set_page_tag_locked(struct nfs_page *req)
int nfs_set_page_tag_locked(struct nfs_page *req)
{
struct nfs_inode *nfsi = NFS_I(req->wb_context->path.dentry->d_inode);
if (!nfs_lock_request(req))
if (!nfs_lock_request_dontget(req))
return 0;
if (req->wb_page != NULL)
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
return 1;
}
......@@ -132,8 +133,9 @@ void nfs_clear_page_tag_locked(struct nfs_page *req)
if (req->wb_page != NULL) {
spin_lock(&inode->i_lock);
radix_tree_tag_clear(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
nfs_unlock_request(req);
spin_unlock(&inode->i_lock);
}
} else
nfs_unlock_request(req);
}
......@@ -421,6 +423,7 @@ int nfs_scan_list(struct nfs_inode *nfsi,
goto out;
idx_start = req->wb_index + 1;
if (nfs_set_page_tag_locked(req)) {
kref_get(&req->wb_kref);
nfs_list_remove_request(req);
radix_tree_tag_clear(&nfsi->nfs_page_tree,
req->wb_index, tag);
......
......@@ -565,16 +565,9 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
return 0;
}
static void nfs_proc_read_setup(struct nfs_read_data *data)
static void nfs_proc_read_setup(struct nfs_read_data *data, struct rpc_message *msg)
{
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_READ],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
rpc_call_setup(&data->task, &msg, 0);
msg->rpc_proc = &nfs_procedures[NFSPROC_READ];
}
static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
......@@ -584,24 +577,15 @@ static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
return 0;
}
static void nfs_proc_write_setup(struct nfs_write_data *data, int how)
static void nfs_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
struct rpc_message msg = {
.rpc_proc = &nfs_procedures[NFSPROC_WRITE],
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
/* Note: NFSv2 ignores @stable and always uses NFS_FILE_SYNC */
data->args.stable = NFS_FILE_SYNC;
/* Finalize the task. */
rpc_call_setup(&data->task, &msg, 0);
msg->rpc_proc = &nfs_procedures[NFSPROC_WRITE];
}
static void
nfs_proc_commit_setup(struct nfs_write_data *data, int how)
nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg)
{
BUG();
}
......@@ -609,7 +593,9 @@ nfs_proc_commit_setup(struct nfs_write_data *data, int how)
static int
nfs_proc_lock(struct file *filp, int cmd, struct file_lock *fl)
{
return nlmclnt_proc(filp->f_path.dentry->d_inode, cmd, fl);
struct inode *inode = filp->f_path.dentry->d_inode;
return nlmclnt_proc(NFS_SERVER(inode)->nlm_host, cmd, fl);
}
......
......@@ -160,12 +160,26 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
const struct rpc_call_ops *call_ops,
unsigned int count, unsigned int offset)
{
struct inode *inode;
int flags;
struct inode *inode = req->wb_context->path.dentry->d_inode;
int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = req->wb_context->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC | swap_flags,
};
data->req = req;
data->inode = inode = req->wb_context->path.dentry->d_inode;
data->cred = req->wb_context->cred;
data->inode = inode;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
......@@ -180,11 +194,7 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
nfs_fattr_init(&data->fattr);
/* Set up the initial task struct. */
flags = RPC_TASK_ASYNC | (IS_SWAPFILE(inode)? NFS_RPC_SWAPFLAGS : 0);
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
NFS_PROTO(inode)->read_setup(data);
data->task.tk_cookie = (unsigned long)inode;
NFS_PROTO(inode)->read_setup(data, &msg);
dprintk("NFS: %5u initiated read call (req %s/%Ld, %u bytes @ offset %Lu)\n",
data->task.tk_pid,
......@@ -192,6 +202,10 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
(long long)NFS_FILEID(inode),
count,
(unsigned long long)data->args.offset);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
}
static void
......@@ -207,19 +221,6 @@ nfs_async_read_error(struct list_head *head)
}
}
/*
* Start an async read operation
*/
static void nfs_execute_read(struct nfs_read_data *data)
{
struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
sigset_t oldset;
rpc_clnt_sigmask(clnt, &oldset);
rpc_execute(&data->task);
rpc_clnt_sigunmask(clnt, &oldset);
}
/*
* Generate multiple requests to fill a single page.
*
......@@ -274,7 +275,6 @@ static int nfs_pagein_multi(struct inode *inode, struct list_head *head, unsigne
rsize, offset);
offset += rsize;
nbytes -= rsize;
nfs_execute_read(data);
} while (nbytes != 0);
return 0;
......@@ -312,8 +312,6 @@ static int nfs_pagein_one(struct inode *inode, struct list_head *head, unsigned
req = nfs_list_entry(data->pages.next);
nfs_read_rpcsetup(req, data, &nfs_read_full_ops, count, 0);
nfs_execute_read(data);
return 0;
out_bad:
nfs_async_read_error(head);
......@@ -338,7 +336,7 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count);
if (task->tk_status == -ESTALE) {
set_bit(NFS_INO_STALE, &NFS_FLAGS(data->inode));
set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags);
nfs_mark_for_revalidate(data->inode);
}
return 0;
......
......@@ -14,6 +14,8 @@
#include <linux/sched.h>
#include <linux/wait.h>
#include "internal.h"
struct nfs_unlinkdata {
struct hlist_node list;
struct nfs_removeargs args;
......@@ -68,24 +70,6 @@ static void nfs_dec_sillycount(struct inode *dir)
wake_up(&nfsi->waitqueue);
}
/**
* nfs_async_unlink_init - Initialize the RPC info
* task: rpc_task of the sillydelete
*/
static void nfs_async_unlink_init(struct rpc_task *task, void *calldata)
{
struct nfs_unlinkdata *data = calldata;
struct inode *dir = data->dir;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
NFS_PROTO(dir)->unlink_setup(&msg, dir);
rpc_call_setup(task, &msg, 0);
}
/**
* nfs_async_unlink_done - Sillydelete post-processing
* @task: rpc_task of the sillydelete
......@@ -113,32 +97,45 @@ static void nfs_async_unlink_release(void *calldata)
struct nfs_unlinkdata *data = calldata;
nfs_dec_sillycount(data->dir);
nfs_sb_deactive(NFS_SERVER(data->dir));
nfs_free_unlinkdata(data);
}
static const struct rpc_call_ops nfs_unlink_ops = {
.rpc_call_prepare = nfs_async_unlink_init,
.rpc_call_done = nfs_async_unlink_done,
.rpc_release = nfs_async_unlink_release,
};
static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct nfs_unlinkdata *data)
{
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = data->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_message = &msg,
.callback_ops = &nfs_unlink_ops,
.callback_data = data,
.flags = RPC_TASK_ASYNC,
};
struct rpc_task *task;
struct dentry *alias;
alias = d_lookup(parent, &data->args.name);
if (alias != NULL) {
int ret = 0;
/*
* Hey, we raced with lookup... See if we need to transfer
* the sillyrename information to the aliased dentry.
*/
nfs_free_dname(data);
spin_lock(&alias->d_lock);
if (!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
if (alias->d_inode != NULL &&
!(alias->d_flags & DCACHE_NFSFS_RENAMED)) {
alias->d_fsdata = data;
alias->d_flags ^= DCACHE_NFSFS_RENAMED;
alias->d_flags |= DCACHE_NFSFS_RENAMED;
ret = 1;
}
spin_unlock(&alias->d_lock);
......@@ -151,10 +148,14 @@ static int nfs_do_call_unlink(struct dentry *parent, struct inode *dir, struct n
nfs_dec_sillycount(dir);
return 0;
}
nfs_sb_active(NFS_SERVER(dir));
data->args.fh = NFS_FH(dir);
nfs_fattr_init(&data->res.dir_attr);
task = rpc_run_task(NFS_CLIENT(dir), RPC_TASK_ASYNC, &nfs_unlink_ops, data);
NFS_PROTO(dir)->unlink_setup(&msg, dir);
task_setup_data.rpc_client = NFS_CLIENT(dir);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
return 1;
......
......@@ -196,7 +196,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
}
/* Update file length */
nfs_grow_file(page, offset, count);
nfs_unlock_request(req);
nfs_clear_page_tag_locked(req);
return 0;
}
......@@ -252,7 +252,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
struct page *page)
{
struct inode *inode = page->mapping->host;
struct nfs_inode *nfsi = NFS_I(inode);
struct nfs_page *req;
int ret;
......@@ -263,10 +262,10 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
spin_unlock(&inode->i_lock);
return 0;
}
if (nfs_lock_request_dontget(req))
if (nfs_set_page_tag_locked(req))
break;
/* Note: If we hold the page lock, as is the case in nfs_writepage,
* then the call to nfs_lock_request_dontget() will always
* then the call to nfs_set_page_tag_locked() will always
* succeed provided that someone hasn't already marked the
* request as dirty (in which case we don't care).
*/
......@@ -280,7 +279,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
/* This request is marked for commit */
spin_unlock(&inode->i_lock);
nfs_unlock_request(req);
nfs_clear_page_tag_locked(req);
nfs_pageio_complete(pgio);
return 0;
}
......@@ -288,8 +287,6 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
spin_unlock(&inode->i_lock);
BUG();
}
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
NFS_PAGE_TAG_LOCKED);
spin_unlock(&inode->i_lock);
nfs_pageio_add_request(pgio, req);
return 0;
......@@ -381,6 +378,7 @@ static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
set_page_private(req->wb_page, (unsigned long)req);
nfsi->npages++;
kref_get(&req->wb_kref);
radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index, NFS_PAGE_TAG_LOCKED);
return 0;
}
......@@ -596,7 +594,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
spin_lock(&inode->i_lock);
req = nfs_page_find_request_locked(page);
if (req) {
if (!nfs_lock_request_dontget(req)) {
if (!nfs_set_page_tag_locked(req)) {
int error;
spin_unlock(&inode->i_lock);
......@@ -646,7 +644,7 @@ static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
|| req->wb_page != page
|| !nfs_dirty_request(req)
|| offset > rqend || end < req->wb_offset) {
nfs_unlock_request(req);
nfs_clear_page_tag_locked(req);
return ERR_PTR(-EBUSY);
}
......@@ -755,7 +753,7 @@ static void nfs_writepage_release(struct nfs_page *req)
nfs_clear_page_tag_locked(req);
}
static inline int flush_task_priority(int how)
static int flush_task_priority(int how)
{
switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
case FLUSH_HIGHPRI:
......@@ -775,15 +773,31 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
unsigned int count, unsigned int offset,
int how)
{
struct inode *inode;
int flags;
struct inode *inode = req->wb_context->path.dentry->d_inode;
int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = req->wb_context->cred,
};
struct rpc_task_setup task_setup_data = {
.rpc_client = NFS_CLIENT(inode),
.task = &data->task,
.rpc_message = &msg,
.callback_ops = call_ops,
.callback_data = data,
.flags = flags,
.priority = priority,
};
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
data->req = req;
data->inode = inode = req->wb_context->path.dentry->d_inode;
data->cred = req->wb_context->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(inode);
data->args.offset = req_offset(req) + offset;
......@@ -791,6 +805,12 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
data->args.pages = data->pagevec;
data->args.count = count;
data->args.context = req->wb_context;
data->args.stable = NFS_UNSTABLE;
if (how & FLUSH_STABLE) {
data->args.stable = NFS_DATA_SYNC;
if (!NFS_I(inode)->ncommit)
data->args.stable = NFS_FILE_SYNC;
}
data->res.fattr = &data->fattr;
data->res.count = count;
......@@ -798,12 +818,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
nfs_fattr_init(&data->fattr);
/* Set up the initial task struct. */
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
NFS_PROTO(inode)->write_setup(data, how);
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
NFS_PROTO(inode)->write_setup(data, &msg);
dprintk("NFS: %5u initiated write call "
"(req %s/%Ld, %u bytes @ offset %Lu)\n",
......@@ -812,16 +827,10 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
(long long)NFS_FILEID(inode),
count,
(unsigned long long)data->args.offset);
}
static void nfs_execute_write(struct nfs_write_data *data)
{
struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
sigset_t oldset;
rpc_clnt_sigmask(clnt, &oldset);
rpc_execute(&data->task);
rpc_clnt_sigunmask(clnt, &oldset);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
}
/*
......@@ -868,7 +877,6 @@ static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned
wsize, offset, how);
offset += wsize;
nbytes -= wsize;
nfs_execute_write(data);
} while (nbytes != 0);
return 0;
......@@ -916,7 +924,6 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
/* Set up the argument struct */
nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
nfs_execute_write(data);
return 0;
out_bad:
while (!list_empty(head)) {
......@@ -932,7 +939,7 @@ static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned i
static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
struct inode *inode, int ioflags)
{
int wsize = NFS_SERVER(inode)->wsize;
size_t wsize = NFS_SERVER(inode)->wsize;
if (wsize < PAGE_CACHE_SIZE)
nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
......@@ -1146,19 +1153,33 @@ static void nfs_commit_rpcsetup(struct list_head *head,
struct nfs_write_data *data,
int how)
{
struct nfs_page *first;
struct inode *inode;
int flags;
struct nfs_page *first = nfs_list_entry(head->next);
struct inode *inode = first->wb_context->path.dentry->d_inode;
int flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
int priority = flush_task_priority(how);
struct rpc_task *task;
struct rpc_message msg = {
.rpc_argp = &data->args,
.rpc_resp = &data->res,
.rpc_cred = first->wb_context->cred,
};
struct rpc_task_setup task_setup_data = {
.task = &data->task,
.rpc_client = NFS_CLIENT(inode),
.rpc_message = &msg,
.callback_ops = &nfs_commit_ops,
.callback_data = data,
.flags = flags,
.priority = priority,
};
/* Set up the RPC argument and reply structs
* NB: take care not to mess about with data->commit et al. */
list_splice_init(head, &data->pages);
first = nfs_list_entry(data->pages.next);
inode = first->wb_context->path.dentry->d_inode;
data->inode = inode;
data->cred = first->wb_context->cred;
data->cred = msg.rpc_cred;
data->args.fh = NFS_FH(data->inode);
/* Note: we always request a commit of the entire inode */
......@@ -1170,14 +1191,13 @@ static void nfs_commit_rpcsetup(struct list_head *head,
nfs_fattr_init(&data->fattr);
/* Set up the initial task struct. */
flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
NFS_PROTO(inode)->commit_setup(data, how);
data->task.tk_priority = flush_task_priority(how);
data->task.tk_cookie = (unsigned long)inode;
NFS_PROTO(inode)->commit_setup(data, &msg);
dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
task = rpc_run_task(&task_setup_data);
if (!IS_ERR(task))
rpc_put_task(task);
}
/*
......@@ -1197,7 +1217,6 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
/* Set up the argument struct */
nfs_commit_rpcsetup(head, data, how);
nfs_execute_write(data);
return 0;
out_bad:
while (!list_empty(head)) {
......
......@@ -32,10 +32,27 @@ struct nlmsvc_binding {
extern struct nlmsvc_binding * nlmsvc_ops;
/*
* Similar to nfs_client_initdata, but without the NFS-specific
* rpc_ops field.
*/
struct nlmclnt_initdata {
const char *hostname;
const struct sockaddr *address;
size_t addrlen;
unsigned short protocol;
u32 nfs_version;
};
/*
* Functions exported by the lockd module
*/
extern int nlmclnt_proc(struct inode *, int, struct file_lock *);
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
extern int nlmclnt_proc(struct nlm_host *host, int cmd,
struct file_lock *fl);
extern int lockd_up(int proto);
extern void lockd_down(void);
......
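The new lockd client API above replaces the old pattern of looking up an nlm_host on every lock request: a filesystem calls nlmclnt_init() once at mount time, passes the cached nlm_host to nlmclnt_proc() for each lock, and releases it with nlmclnt_done() at umount. The sketch below is illustrative only and is not part of this patch set; the example_mount_ctx structure and its fields are assumptions standing in for the caller's own per-mount state.
/* Illustrative caller sketch; not from this patch set. */
#include <linux/types.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/lockd/bind.h>
struct example_mount_ctx {			/* hypothetical per-mount state */
	struct sockaddr_storage	server_address;
	size_t			server_addrlen;
	const char		*server_hostname;
	unsigned short		transport_protocol;	/* IPPROTO_TCP or IPPROTO_UDP */
	u32			nfs_version;		/* 2 or 3 */
	struct nlm_host		*nlm_host;
};
static int example_mount_lockd(struct example_mount_ctx *ctx)
{
	struct nlmclnt_initdata nlm_init = {
		.hostname	= ctx->server_hostname,
		.address	= (struct sockaddr *)&ctx->server_address,
		.addrlen	= ctx->server_addrlen,
		.protocol	= ctx->transport_protocol,
		.nfs_version	= ctx->nfs_version,
	};
	struct nlm_host *host;
	host = nlmclnt_init(&nlm_init);	/* starts lockd and looks up the NLM host */
	if (IS_ERR(host))
		return PTR_ERR(host);
	ctx->nlm_host = host;
	return 0;
}
static int example_lock(struct example_mount_ctx *ctx, int cmd, struct file_lock *fl)
{
	/* the per-request entry point now takes the cached nlm_host */
	return nlmclnt_proc(ctx->nlm_host, cmd, fl);
}
static void example_umount_lockd(struct example_mount_ctx *ctx)
{
	nlmclnt_done(ctx->nlm_host);	/* releases the host and calls lockd_down() */
}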
......@@ -196,28 +196,67 @@ struct nfs_inode {
#define NFS_INO_STALE (2) /* possible stale inode */
#define NFS_INO_ACL_LRU_SET (3) /* Inode is on the LRU list */
static inline struct nfs_inode *NFS_I(struct inode *inode)
static inline struct nfs_inode *NFS_I(const struct inode *inode)
{
return container_of(inode, struct nfs_inode, vfs_inode);
}
#define NFS_SB(s) ((struct nfs_server *)(s->s_fs_info))
#define NFS_FH(inode) (&NFS_I(inode)->fh)
#define NFS_SERVER(inode) (NFS_SB(inode->i_sb))
#define NFS_CLIENT(inode) (NFS_SERVER(inode)->client)
#define NFS_PROTO(inode) (NFS_SERVER(inode)->nfs_client->rpc_ops)
#define NFS_COOKIEVERF(inode) (NFS_I(inode)->cookieverf)
#define NFS_MINATTRTIMEO(inode) \
(S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmin \
: NFS_SERVER(inode)->acregmin)
#define NFS_MAXATTRTIMEO(inode) \
(S_ISDIR(inode->i_mode)? NFS_SERVER(inode)->acdirmax \
: NFS_SERVER(inode)->acregmax)
static inline struct nfs_server *NFS_SB(const struct super_block *s)
{
return (struct nfs_server *)(s->s_fs_info);
}
static inline struct nfs_fh *NFS_FH(const struct inode *inode)
{
return &NFS_I(inode)->fh;
}
static inline struct nfs_server *NFS_SERVER(const struct inode *inode)
{
return NFS_SB(inode->i_sb);
}
static inline struct rpc_clnt *NFS_CLIENT(const struct inode *inode)
{
return NFS_SERVER(inode)->client;
}
static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
{
return NFS_SERVER(inode)->nfs_client->rpc_ops;
}
static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
{
return NFS_I(inode)->cookieverf;
}
static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
{
struct nfs_server *nfss = NFS_SERVER(inode);
return S_ISDIR(inode->i_mode) ? nfss->acdirmin : nfss->acregmin;
}
#define NFS_FLAGS(inode) (NFS_I(inode)->flags)
#define NFS_STALE(inode) (test_bit(NFS_INO_STALE, &NFS_FLAGS(inode)))
static inline unsigned NFS_MAXATTRTIMEO(const struct inode *inode)
{
struct nfs_server *nfss = NFS_SERVER(inode);
return S_ISDIR(inode->i_mode) ? nfss->acdirmax : nfss->acregmax;
}
#define NFS_FILEID(inode) (NFS_I(inode)->fileid)
static inline int NFS_STALE(const struct inode *inode)
{
return test_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
}
static inline __u64 NFS_FILEID(const struct inode *inode)
{
return NFS_I(inode)->fileid;
}
static inline void set_nfs_fileid(struct inode *inode, __u64 fileid)
{
NFS_I(inode)->fileid = fileid;
}
static inline void nfs_mark_for_revalidate(struct inode *inode)
{
......@@ -237,7 +276,7 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
static inline int NFS_USE_READDIRPLUS(struct inode *inode)
{
return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_FLAGS(inode));
return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
}
static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
......@@ -366,6 +405,7 @@ extern const struct inode_operations nfs3_dir_inode_operations;
extern const struct file_operations nfs_dir_operations;
extern struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
extern int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fh, struct nfs_fattr *fattr);
extern int nfs_may_open(struct inode *inode, struct rpc_cred *cred, int openflags);
extern void nfs_access_zap_cache(struct inode *inode);
......
......@@ -3,8 +3,12 @@
#include <linux/list.h>
#include <linux/backing-dev.h>
#include <linux/wait.h>
#include <asm/atomic.h>
struct nfs_iostats;
struct nlm_host;
/*
* The nfs_client identifies our client state to the server.
......@@ -14,20 +18,19 @@ struct nfs_client {
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
int cl_nfsversion; /* NFS protocol version */
unsigned long cl_res_state; /* NFS resources state */
#define NFS_CS_CALLBACK 1 /* - callback started */
#define NFS_CS_IDMAP 2 /* - idmap started */
#define NFS_CS_RENEWD 3 /* - renewd started */
struct sockaddr_in cl_addr; /* server identifier */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
struct list_head cl_share_link; /* link in global client list */
struct list_head cl_superblocks; /* List of nfs_server structs */
struct rpc_clnt * cl_rpcclient;
const struct nfs_rpc_ops *rpc_ops; /* NFS protocol vector */
unsigned long retrans_timeo; /* retransmit timeout */
unsigned int retrans_count; /* number of retransmit tries */
int cl_proto; /* Network transport protocol */
#ifdef CONFIG_NFS_V4
u64 cl_clientid; /* constant */
......@@ -62,7 +65,7 @@ struct nfs_client {
/* Our own IP address, as a null-terminated string.
* This is used to generate the clientid, and the callback address.
*/
char cl_ipaddr[16];
char cl_ipaddr[48];
unsigned char cl_id_uniquifier;
#endif
};
......@@ -78,6 +81,7 @@ struct nfs_server {
struct list_head master_link; /* link in master servers list */
struct rpc_clnt * client; /* RPC client handle */
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats * io_stats; /* I/O statistics */
struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
......@@ -110,6 +114,9 @@ struct nfs_server {
filesystem */
#endif
void (*destroy)(struct nfs_server *);
atomic_t active; /* Keep track of any activity to this server */
wait_queue_head_t active_wq; /* Wait for any activity to stop */
};
/* Server capabilities */
......
......@@ -83,6 +83,7 @@ extern void nfs_pageio_complete(struct nfs_pageio_descriptor *desc);
extern void nfs_pageio_cond_complete(struct nfs_pageio_descriptor *, pgoff_t);
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern int nfs_set_page_tag_locked(struct nfs_page *req);
extern void nfs_clear_page_tag_locked(struct nfs_page *req);
......@@ -95,18 +96,6 @@ nfs_lock_request_dontget(struct nfs_page *req)
return !test_and_set_bit(PG_BUSY, &req->wb_flags);
}
/*
* Lock the page of an asynchronous request and take a reference
*/
static inline int
nfs_lock_request(struct nfs_page *req)
{
if (test_and_set_bit(PG_BUSY, &req->wb_flags))
return 0;
kref_get(&req->wb_kref);
return 1;
}
/**
* nfs_list_add_request - Insert a request into a list
* @req: request
......
......@@ -666,16 +666,17 @@ struct nfs4_rename_res {
struct nfs_fattr * new_fattr;
};
#define NFS4_SETCLIENTID_NAMELEN (56)
struct nfs4_setclientid {
const nfs4_verifier * sc_verifier; /* request */
const nfs4_verifier * sc_verifier;
unsigned int sc_name_len;
char sc_name[48]; /* request */
u32 sc_prog; /* request */
char sc_name[NFS4_SETCLIENTID_NAMELEN];
u32 sc_prog;
unsigned int sc_netid_len;
char sc_netid[4]; /* request */
char sc_netid[RPCBIND_MAXNETIDLEN];
unsigned int sc_uaddr_len;
char sc_uaddr[24]; /* request */
u32 sc_cb_ident; /* request */
char sc_uaddr[RPCBIND_MAXUADDRLEN];
u32 sc_cb_ident;
};
struct nfs4_statfs_arg {
......@@ -773,7 +774,7 @@ struct nfs_access_entry;
* RPC procedure vector for NFSv2/NFSv3 demuxing
*/
struct nfs_rpc_ops {
int version; /* Protocol version */
u32 version; /* Protocol version */
struct dentry_operations *dentry_ops;
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
......@@ -816,11 +817,11 @@ struct nfs_rpc_ops {
struct nfs_pathconf *);
int (*set_capabilities)(struct nfs_server *, struct nfs_fh *);
__be32 *(*decode_dirent)(__be32 *, struct nfs_entry *, int plus);
void (*read_setup) (struct nfs_read_data *);
void (*read_setup) (struct nfs_read_data *, struct rpc_message *);
int (*read_done) (struct rpc_task *, struct nfs_read_data *);
void (*write_setup) (struct nfs_write_data *, int how);
void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
int (*write_done) (struct rpc_task *, struct nfs_write_data *);
void (*commit_setup) (struct nfs_write_data *, int how);
void (*commit_setup) (struct nfs_write_data *, struct rpc_message *);
int (*commit_done) (struct rpc_task *, struct nfs_write_data *);
int (*file_open) (struct inode *, struct file *);
int (*file_release) (struct inode *, struct file *);
......
......@@ -46,6 +46,7 @@ struct rpc_clnt {
cl_autobind : 1;/* use getport() */
struct rpc_rtt * cl_rtt; /* RTO estimator data */
const struct rpc_timeout *cl_timeout; /* Timeout strategy */
int cl_nodelen; /* nodename length */
char cl_nodename[UNX_MAXNODENAME];
......@@ -54,6 +55,7 @@ struct rpc_clnt {
struct dentry * cl_dentry; /* inode */
struct rpc_clnt * cl_parent; /* Points to parent of clones */
struct rpc_rtt cl_rtt_default;
struct rpc_timeout cl_timeout_default;
struct rpc_program * cl_program;
char cl_inline_name[32];
};
......@@ -99,7 +101,7 @@ struct rpc_create_args {
struct sockaddr *address;
size_t addrsize;
struct sockaddr *saddress;
struct rpc_timeout *timeout;
const struct rpc_timeout *timeout;
char *servername;
struct rpc_program *program;
u32 version;
......@@ -123,11 +125,10 @@ void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
int rpcb_register(u32, u32, int, unsigned short, int *);
int rpcb_getport_sync(struct sockaddr_in *, __u32, __u32, int);
int rpcb_getport_sync(struct sockaddr_in *, u32, u32, int);
void rpcb_getport_async(struct rpc_task *);
void rpc_call_setup(struct rpc_task *, struct rpc_message *, int);
void rpc_call_start(struct rpc_task *);
int rpc_call_async(struct rpc_clnt *clnt, struct rpc_message *msg,
int flags, const struct rpc_call_ops *tk_ops,
void *calldata);
......@@ -142,7 +143,7 @@ void rpc_setbufsize(struct rpc_clnt *, unsigned int, unsigned int);
size_t rpc_max_payload(struct rpc_clnt *);
void rpc_force_rebind(struct rpc_clnt *);
size_t rpc_peeraddr(struct rpc_clnt *, struct sockaddr *, size_t);
char * rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
const char *rpc_peeraddr2str(struct rpc_clnt *, enum rpc_display_format_t);
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
......@@ -152,5 +152,44 @@ typedef __be32 rpc_fraghdr;
*/
#define RPCBIND_MAXNETIDLEN (4u)
/*
* Universal addresses are introduced in RFC 1833 and further spelled
* out in RFC 3530. RPCBIND_MAXUADDRLEN defines a maximum byte length
* of a universal address for use in allocating buffers and character
* arrays.
*
* Quoting RFC 3530, section 2.2:
*
* For TCP over IPv4 and for UDP over IPv4, the format of r_addr is the
* US-ASCII string:
*
* h1.h2.h3.h4.p1.p2
*
* The prefix, "h1.h2.h3.h4", is the standard textual form for
* representing an IPv4 address, which is always four octets long.
* Assuming big-endian ordering, h1, h2, h3, and h4, are respectively,
* the first through fourth octets each converted to ASCII-decimal.
* Assuming big-endian ordering, p1 and p2 are, respectively, the first
* and second octets each converted to ASCII-decimal. For example, if a
* host, in big-endian order, has an address of 0x0A010307 and there is
* a service listening on, in big endian order, port 0x020F (decimal
* 527), then the complete universal address is "10.1.3.7.2.15".
*
* ...
*
* For TCP over IPv6 and for UDP over IPv6, the format of r_addr is the
* US-ASCII string:
*
* x1:x2:x3:x4:x5:x6:x7:x8.p1.p2
*
* The suffix "p1.p2" is the service port, and is computed the same way
* as with universal addresses for TCP and UDP over IPv4. The prefix,
* "x1:x2:x3:x4:x5:x6:x7:x8", is the standard textual form for
* representing an IPv6 address as defined in Section 2.2 of [RFC2373].
* Additionally, the two alternative forms specified in Section 2.2 of
* [RFC2373] are also acceptable.
*/
#define RPCBIND_MAXUADDRLEN (56u)
#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
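For illustration, the IPv4 example quoted in the comment above ("10.1.3.7", port 527, giving "10.1.3.7.2.15") can be reproduced by formatting a sockaddr_in into a buffer sized by RPCBIND_MAXUADDRLEN. The helper below is a sketch rather than an interface from this patch; its name is hypothetical.
/* Hypothetical helper, for illustration only; a caller would pass a
 * char buf[RPCBIND_MAXUADDRLEN]. */
#include <linux/kernel.h>
#include <linux/in.h>
static int example_ipv4_uaddr(const struct sockaddr_in *sin,
			      char buf[RPCBIND_MAXUADDRLEN])
{
	const u8 *a = (const u8 *)&sin->sin_addr.s_addr;	/* network order: h1..h4 */
	u16 port = ntohs(sin->sin_port);
	/* "h1.h2.h3.h4.p1.p2": 10.1.3.7, port 0x020F -> "10.1.3.7.2.15" */
	return snprintf(buf, RPCBIND_MAXUADDRLEN, "%u.%u.%u.%u.%u.%u",
			a[0], a[1], a[2], a[3], port >> 8, port & 0xff);
}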
......@@ -56,8 +56,6 @@ struct rpc_task {
__u8 tk_garb_retry;
__u8 tk_cred_retry;
unsigned long tk_cookie; /* Cookie for batching tasks */
/*
* timeout_fn to be executed by timer bottom half
* callback to be executed after waking up
......@@ -78,7 +76,6 @@ struct rpc_task {
struct timer_list tk_timer; /* kernel timer */
unsigned long tk_timeout; /* timeout for rpc_sleep() */
unsigned short tk_flags; /* misc flags */
unsigned char tk_priority : 2;/* Task priority */
unsigned long tk_runstate; /* Task run status */
struct workqueue_struct *tk_workqueue; /* Normally rpciod, but could
* be any workqueue
......@@ -94,6 +91,9 @@ struct rpc_task {
unsigned long tk_start; /* RPC task init timestamp */
long tk_rtt; /* round-trip time (jiffies) */
pid_t tk_owner; /* Process id for batching tasks */
unsigned char tk_priority : 2;/* Task priority */
#ifdef RPC_DEBUG
unsigned short tk_pid; /* debugging aid */
#endif
......@@ -117,6 +117,15 @@ struct rpc_call_ops {
void (*rpc_release)(void *);
};
struct rpc_task_setup {
struct rpc_task *task;
struct rpc_clnt *rpc_client;
const struct rpc_message *rpc_message;
const struct rpc_call_ops *callback_ops;
void *callback_data;
unsigned short flags;
signed char priority;
};
/*
* RPC task flags
......@@ -180,10 +189,10 @@ struct rpc_call_ops {
* Note: if you change these, you must also change
* the task initialization definitions below.
*/
#define RPC_PRIORITY_LOW 0
#define RPC_PRIORITY_NORMAL 1
#define RPC_PRIORITY_HIGH 2
#define RPC_NR_PRIORITY (RPC_PRIORITY_HIGH+1)
#define RPC_PRIORITY_LOW (-1)
#define RPC_PRIORITY_NORMAL (0)
#define RPC_PRIORITY_HIGH (1)
#define RPC_NR_PRIORITY (1 + RPC_PRIORITY_HIGH - RPC_PRIORITY_LOW)
/*
* RPC synchronization objects
......@@ -191,7 +200,7 @@ struct rpc_call_ops {
struct rpc_wait_queue {
spinlock_t lock;
struct list_head tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
unsigned long cookie; /* cookie of last task serviced */
pid_t owner; /* process id of last task serviced */
unsigned char maxpriority; /* maximum priority (0 if queue is not a priority queue) */
unsigned char priority; /* current priority */
unsigned char count; /* # task groups remaining serviced so far */
......@@ -208,41 +217,13 @@ struct rpc_wait_queue {
* performance of NFS operations such as read/write.
*/
#define RPC_BATCH_COUNT 16
#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
.tasks = { \
[0] = LIST_HEAD_INIT(var.tasks[0]), \
[1] = LIST_HEAD_INIT(var.tasks[1]), \
[2] = LIST_HEAD_INIT(var.tasks[2]), \
}, \
}
#else
# define RPC_WAITQ_INIT(var,qname) { \
.lock = __SPIN_LOCK_UNLOCKED(var.lock), \
.tasks = { \
[0] = LIST_HEAD_INIT(var.tasks[0]), \
[1] = LIST_HEAD_INIT(var.tasks[1]), \
[2] = LIST_HEAD_INIT(var.tasks[2]), \
}, \
.name = qname, \
}
#endif
# define RPC_WAITQ(var,qname) struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)
#define RPC_IS_PRIORITY(q) ((q)->maxpriority > 0)
/*
* Function prototypes
*/
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
const struct rpc_call_ops *ops, void *data);
void rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
int flags, const struct rpc_call_ops *ops,
void *data);
struct rpc_task *rpc_new_task(const struct rpc_task_setup *);
struct rpc_task *rpc_run_task(const struct rpc_task_setup *);
void rpc_put_task(struct rpc_task *);
void rpc_exit_task(struct rpc_task *);
void rpc_release_calldata(const struct rpc_call_ops *, void *);
......
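The rpc_task_setup structure introduced above gathers the per-call arguments that were previously spread across rpc_init_task(), rpc_call_setup() and direct tk_* field assignments, and rpc_run_task() now sets up and starts the task from it in one step. Below is a minimal sketch of the new calling convention for an asynchronous call, mirroring the converted NFS read/write/commit paths; the callback ops, procedure entry and argument/result pointers are assumed to be supplied by the caller, and the function name is illustrative.
/* Sketch of the new rpc_run_task() calling convention; illustrative only. */
#include <linux/err.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/sched.h>
static int example_async_call(struct rpc_clnt *clnt, struct rpc_procinfo *proc,
			      void *argp, void *resp,
			      const struct rpc_call_ops *ops, void *calldata)
{
	struct rpc_message msg = {
		.rpc_proc	= proc,
		.rpc_argp	= argp,
		.rpc_resp	= resp,
	};
	struct rpc_task_setup task_setup_data = {
		.rpc_client	= clnt,
		.rpc_message	= &msg,
		.callback_ops	= ops,
		.callback_data	= calldata,
		.flags		= RPC_TASK_ASYNC,
	};
	struct rpc_task *task;
	task = rpc_run_task(&task_setup_data);	/* allocates, sets up and queues the task */
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);			/* drop the caller's reference */
	return 0;
}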
......@@ -120,7 +120,7 @@ struct rpc_xprt {
struct kref kref; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
struct rpc_timeout timeout; /* timeout parms */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
size_t addrlen; /* size of server address */
int prot; /* IP protocol */
......@@ -183,7 +183,7 @@ struct rpc_xprt {
bklog_u; /* backlog queue utilization */
} stat;
char * address_strings[RPC_DISPLAY_MAX];
const char *address_strings[RPC_DISPLAY_MAX];
};
struct xprt_create {
......@@ -191,7 +191,6 @@ struct xprt_create {
struct sockaddr * srcaddr; /* optional local address */
struct sockaddr * dstaddr; /* remote peer address */
size_t addrlen;
struct rpc_timeout * timeout; /* optional timeout parameters */
};
struct xprt_class {
......@@ -202,11 +201,6 @@ struct xprt_class {
char name[32];
};
/*
* Transport operations used by ULPs
*/
void xprt_set_timeout(struct rpc_timeout *to, unsigned int retr, unsigned long incr);
/*
* Generic internal transport functions
*/
......@@ -245,7 +239,8 @@ void xprt_adjust_cwnd(struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect(struct rpc_xprt *xprt);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
/*
* Reserved bit positions in xprt->state
......@@ -256,6 +251,7 @@ void xprt_disconnect(struct rpc_xprt *xprt);
#define XPRT_CLOSE_WAIT (3)
#define XPRT_BOUND (4)
#define XPRT_BINDING (5)
#define XPRT_CLOSING (6)
static inline void xprt_set_connected(struct rpc_xprt *xprt)
{
......
......@@ -51,6 +51,7 @@ rpcauth_register(const struct rpc_authops *ops)
spin_unlock(&rpc_authflavor_lock);
return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_register);
int
rpcauth_unregister(const struct rpc_authops *ops)
......@@ -68,6 +69,7 @@ rpcauth_unregister(const struct rpc_authops *ops)
spin_unlock(&rpc_authflavor_lock);
return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_unregister);
struct rpc_auth *
rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
......@@ -102,6 +104,7 @@ rpcauth_create(rpc_authflavor_t pseudoflavor, struct rpc_clnt *clnt)
out:
return auth;
}
EXPORT_SYMBOL_GPL(rpcauth_create);
void
rpcauth_release(struct rpc_auth *auth)
......@@ -151,6 +154,7 @@ rpcauth_init_credcache(struct rpc_auth *auth)
auth->au_credcache = new;
return 0;
}
EXPORT_SYMBOL_GPL(rpcauth_init_credcache);
/*
* Destroy a list of credentials
......@@ -213,6 +217,7 @@ rpcauth_destroy_credcache(struct rpc_auth *auth)
kfree(cache);
}
}
EXPORT_SYMBOL_GPL(rpcauth_destroy_credcache);
/*
* Remove stale credentials. Avoid sleeping inside the loop.
......@@ -332,6 +337,7 @@ rpcauth_lookup_credcache(struct rpc_auth *auth, struct auth_cred * acred,
out:
return cred;
}
EXPORT_SYMBOL_GPL(rpcauth_lookup_credcache);
struct rpc_cred *
rpcauth_lookupcred(struct rpc_auth *auth, int flags)
......@@ -350,6 +356,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
put_group_info(acred.group_info);
return ret;
}
EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
void
rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
......@@ -366,7 +373,7 @@ rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
#endif
cred->cr_uid = acred->uid;
}
EXPORT_SYMBOL(rpcauth_init_cred);
EXPORT_SYMBOL_GPL(rpcauth_init_cred);
struct rpc_cred *
rpcauth_bindcred(struct rpc_task *task)
......@@ -378,6 +385,7 @@ rpcauth_bindcred(struct rpc_task *task)
.group_info = current->group_info,
};
struct rpc_cred *ret;
sigset_t oldset;
int flags = 0;
dprintk("RPC: %5u looking up %s cred\n",
......@@ -385,7 +393,9 @@ rpcauth_bindcred(struct rpc_task *task)
get_group_info(acred.group_info);
if (task->tk_flags & RPC_TASK_ROOTCREDS)
flags |= RPCAUTH_LOOKUP_ROOTCREDS;
rpc_clnt_sigmask(task->tk_client, &oldset);
ret = auth->au_ops->lookup_cred(auth, &acred, flags);
rpc_clnt_sigunmask(task->tk_client, &oldset);
if (!IS_ERR(ret))
task->tk_msg.rpc_cred = ret;
else
......@@ -435,6 +445,7 @@ put_rpccred(struct rpc_cred *cred)
out_destroy:
cred->cr_ops->crdestroy(cred);
}
EXPORT_SYMBOL_GPL(put_rpccred);
void
rpcauth_unbindcred(struct rpc_task *task)
......
......@@ -472,16 +472,15 @@ gss_pipe_upcall(struct file *filp, struct rpc_pipe_msg *msg,
char __user *dst, size_t buflen)
{
char *data = (char *)msg->data + msg->copied;
ssize_t mlen = msg->len;
ssize_t left;
size_t mlen = min(msg->len, buflen);
unsigned long left;
if (mlen > buflen)
mlen = buflen;
left = copy_to_user(dst, data, mlen);
if (left < 0) {
msg->errno = left;
return left;
if (left == mlen) {
msg->errno = -EFAULT;
return -EFAULT;
}
mlen -= left;
msg->copied += mlen;
msg->errno = 0;
......
......@@ -76,6 +76,16 @@ rpc_timeout_upcall_queue(struct work_struct *work)
rpc_purge_list(rpci, &free_list, destroy_msg, -ETIMEDOUT);
}
/**
* rpc_queue_upcall
* @inode: inode of upcall pipe on which to queue given message
* @msg: message to queue
*
* Call with an @inode created by rpc_mkpipe() to queue an upcall.
* A userspace process may then later read the upcall by performing a
* read on an open file for this inode. It is up to the caller to
* initialize the fields of @msg (other than @msg->list) appropriately.
*/
int
rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
{
......@@ -103,6 +113,7 @@ rpc_queue_upcall(struct inode *inode, struct rpc_pipe_msg *msg)
wake_up(&rpci->waitq);
return res;
}
EXPORT_SYMBOL(rpc_queue_upcall);
static inline void
rpc_inode_setowner(struct inode *inode, void *private)
......@@ -512,8 +523,8 @@ rpc_get_inode(struct super_block *sb, int mode)
/*
* FIXME: This probably has races.
*/
static void
rpc_depopulate(struct dentry *parent, int start, int eof)
static void rpc_depopulate(struct dentry *parent,
unsigned long start, unsigned long eof)
{
struct inode *dir = parent->d_inode;
struct list_head *pos, *next;
......@@ -663,7 +674,16 @@ rpc_lookup_negative(char *path, struct nameidata *nd)
return dentry;
}
/**
* rpc_mkdir - Create a new directory in rpc_pipefs
* @path: path from the rpc_pipefs root to the new directory
* @rpc_client: rpc client to associate with this directory
*
* This creates a directory at the given @path associated with
* @rpc_client, which will contain a file named "info" with some basic
* information about the client, together with any "pipes" that may
* later be created using rpc_mkpipe().
*/
struct dentry *
rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
{
......@@ -699,6 +719,10 @@ rpc_mkdir(char *path, struct rpc_clnt *rpc_client)
goto out;
}
/**
* rpc_rmdir - Remove a directory created with rpc_mkdir()
* @dentry: directory to remove
*/
int
rpc_rmdir(struct dentry *dentry)
{
......@@ -717,6 +741,25 @@ rpc_rmdir(struct dentry *dentry)
return error;
}
/**
* rpc_mkpipe - make an rpc_pipefs file for kernel<->userspace communication
* @parent: dentry of directory to create new "pipe" in
* @name: name of pipe
* @private: private data to associate with the pipe, for the caller's use
* @ops: operations defining the behavior of the pipe: upcall, downcall,
* release_pipe, and destroy_msg.
*
* Data is made available for userspace to read by calls to
* rpc_queue_upcall(). The actual reads will result in calls to
* @ops->upcall, which will be called with the file pointer,
* message, and userspace buffer to copy to.
*
* Writes can come at any time, and do not necessarily have to be
* responses to upcalls. They will result in calls to @msg->downcall.
*
* The @private argument passed here will be available to all these methods
* from the file pointer, via RPC_I(file->f_dentry->d_inode)->private.
*/
struct dentry *
rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pipe_ops *ops, int flags)
{
......@@ -763,7 +806,16 @@ rpc_mkpipe(struct dentry *parent, const char *name, void *private, struct rpc_pi
-ENOMEM);
goto out;
}
EXPORT_SYMBOL(rpc_mkpipe);
/**
* rpc_unlink - remove a pipe
* @dentry: dentry for the pipe, as returned from rpc_mkpipe
*
* After this call, lookups will no longer find the pipe, and any
* attempts to read or write using preexisting opens of the pipe will
* return -EPIPE.
*/
int
rpc_unlink(struct dentry *dentry)
{
......@@ -785,6 +837,7 @@ rpc_unlink(struct dentry *dentry)
dput(parent);
return error;
}
EXPORT_SYMBOL(rpc_unlink);
/*
* populate the filesystem
......
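Putting the kernel-doc above together: a caller creates a pipe under an existing rpc_pipefs directory with rpc_mkpipe() and then hands messages to userspace with rpc_queue_upcall(). The fragment below is only a sketch of that flow, not code from this patch; the pipe name, the rpc_pipe_ops instance and the payload handling are assumptions, and a real caller must keep the rpc_pipe_msg alive until the upcall has been consumed or destroy_msg is invoked.
/* Illustrative rpc_pipefs usage; not part of this patch. */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
static int example_create_and_upcall(struct dentry *dir,
				     struct rpc_pipe_ops *example_ops,
				     struct rpc_pipe_msg *msg,
				     void *payload, size_t len)
{
	struct dentry *pipe;
	pipe = rpc_mkpipe(dir, "example", NULL, example_ops, 0);
	if (IS_ERR(pipe))
		return PTR_ERR(pipe);
	memset(msg, 0, sizeof(*msg));
	msg->data = payload;	/* handed to userspace via example_ops->upcall */
	msg->len = len;
	return rpc_queue_upcall(pipe->d_inode, msg);
}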
......@@ -72,7 +72,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
struct page **ppage = xdr->pages;
unsigned int len, pglen = xdr->page_len;
ssize_t copied = 0;
int ret;
size_t ret;
len = xdr->head[0].iov_len;
if (base < len) {
......
......@@ -118,7 +118,7 @@ struct rpc_iostats *rpc_alloc_iostats(struct rpc_clnt *clnt)
new = kcalloc(clnt->cl_maxproc, sizeof(struct rpc_iostats), GFP_KERNEL);
return new;
}
EXPORT_SYMBOL(rpc_alloc_iostats);
EXPORT_SYMBOL_GPL(rpc_alloc_iostats);
/**
* rpc_free_iostats - release an rpc_iostats structure
......@@ -129,7 +129,7 @@ void rpc_free_iostats(struct rpc_iostats *stats)
{
kfree(stats);
}
EXPORT_SYMBOL(rpc_free_iostats);
EXPORT_SYMBOL_GPL(rpc_free_iostats);
/**
* rpc_count_iostats - tally up per-task stats
......@@ -215,7 +215,7 @@ void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt)
metrics->om_execute * MILLISECS_PER_JIFFY);
}
}
EXPORT_SYMBOL(rpc_print_iostats);
EXPORT_SYMBOL_GPL(rpc_print_iostats);
/*
* Register/unregister RPC proc files
......@@ -241,12 +241,14 @@ rpc_proc_register(struct rpc_stat *statp)
{
return do_register(statp->program->name, statp, &rpc_proc_fops);
}
EXPORT_SYMBOL_GPL(rpc_proc_register);
void
rpc_proc_unregister(const char *name)
{
remove_proc_entry(name, proc_net_rpc);
}
EXPORT_SYMBOL_GPL(rpc_proc_unregister);
struct proc_dir_entry *
svc_proc_register(struct svc_stat *statp, const struct file_operations *fops)
......
......@@ -23,9 +23,16 @@
* Declare the debug flags here
*/
unsigned int rpc_debug;
EXPORT_SYMBOL_GPL(rpc_debug);
unsigned int nfs_debug;
EXPORT_SYMBOL_GPL(nfs_debug);
unsigned int nfsd_debug;
EXPORT_SYMBOL_GPL(nfsd_debug);
unsigned int nlm_debug;
EXPORT_SYMBOL_GPL(nlm_debug);
#ifdef RPC_DEBUG
......
......@@ -522,7 +522,7 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
struct rpcrdma_create_data_internal *cdata)
{
struct ib_device_attr devattr;
int rc;
int rc, err;
rc = ib_query_device(ia->ri_id->device, &devattr);
if (rc) {
......@@ -648,8 +648,10 @@ rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
return 0;
out2:
if (ib_destroy_cq(ep->rep_cq))
;
err = ib_destroy_cq(ep->rep_cq);
if (err)
dprintk("RPC: %s: ib_destroy_cq returned %i\n",
__func__, err);
out1:
return rc;
}
......