Commit 8e7757d8 authored by Linus Torvalds

Merge tag 'nfs-for-4.14-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs

Pull NFS client updates from Trond Myklebust:
 "Hightlights include:

  Stable bugfixes:
   - Fix mirror allocation in the writeback code to avoid a use after
     free
   - Fix O_DSYNC writes to use the correct byte range
   - Fix 2 use after free issues in the I/O code

  Features:
   - Writeback fixes to split up the inode->i_lock in order to reduce
     contention
   - RPC client receive fixes to reduce the amount of time the
     xprt->transport_lock is held when receiving data from a socket into
     an XDR buffer (a sketch of the resulting pattern follows the
     shortlog below).
   - Similar fixes to reduce contention between call-side users of the
     rdma rb_lock and its use in rpcrdma_reply_handler.
   - Re-arrange rdma stats to reduce false cacheline sharing.
   - Various rdma cleanups and optimisations.
   - Refactor and clean up the NFSv4.1 exchange id code.
   - Const-ify all instances of struct rpc_xprt_ops

  Bugfixes:
   - Fix the NFSv2 'sec=' mount option.
   - NFSv4.1: don't use machine credentials for CLOSE when using
     'sec=sys'
   - Fix the NFSv3 GRANT callback when the port changes on the server.
   - Fix livelock issues with COMMIT
   - NFSv4: Use correct inode in _nfs4_opendata_to_nfs4_state() when
     doing an NFSv4.1 open by filehandle"

* tag 'nfs-for-4.14-1' of git://git.linux-nfs.org/projects/trondmy/linux-nfs: (69 commits)
  NFS: Count the bytes of skipped subrequests in nfs_lock_and_join_requests()
  NFS: Don't hold the group lock when calling nfs_release_request()
  NFS: Remove pnfs_generic_transfer_commit_list()
  NFS: nfs_lock_and_join_requests and nfs_scan_commit_list can deadlock
  NFS: Fix 2 use after free issues in the I/O code
  NFS: Sync the correct byte range during synchronous writes
  lockd: Delete an error message for a failed memory allocation in reclaimer()
  NFS: remove jiffies field from access cache
  NFS: flush data when locking a file to ensure cache coherence for mmap.
  SUNRPC: remove some dead code.
  NFS: don't expect errors from mempool_alloc().
  xprtrdma: Use xprt_pin_rqst in rpcrdma_reply_handler
  xprtrdma: Re-arrange struct rx_stats
  NFS: Fix NFSv2 security settings
  NFSv4.1: don't use machine credentials for CLOSE when using 'sec=sys'
  SUNRPC: ECONNREFUSED should cause a rebind.
  NFS: Remove unused parameter gfp_flags from nfs_pageio_init()
  NFSv4: Fix up mirror allocation
  SUNRPC: Add a separate spinlock to protect the RPC request receive list
  SUNRPC: Cleanup xs_tcp_read_common()
  ...
parents dd198ce7 1bd5d6d0
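
The receive-path shape those lock-narrowing fixes converge on is roughly the following; a minimal sketch assuming only the recv_lock field and the xprt_pin_rqst()/xprt_unpin_rqst() helpers added in the hunks below (demo_read_skb() is hypothetical, not a function from this series):

        static void demo_read_skb(struct rpc_xprt *xprt, struct sk_buff *skb, __be32 xid)
        {
                struct rpc_rqst *req;

                /* take the narrow receive-list lock only for the lookup */
                spin_lock(&xprt->recv_lock);
                req = xprt_lookup_rqst(xprt, xid);
                if (!req) {
                        spin_unlock(&xprt->recv_lock);
                        return;
                }
                xprt_pin_rqst(req);             /* keeps req alive across the copy */
                spin_unlock(&xprt->recv_lock);

                /* copy skb data into req->rq_private_buf with no spinlock held */

                spin_lock(&xprt->recv_lock);
                xprt_complete_rqst(req->rq_task, req->rq_private_buf.len);
                xprt_unpin_rqst(req);
                spin_unlock(&xprt->recv_lock);
        }

xprt->transport_lock is then left for genuinely transport-global state (congestion window, timers), which is where the contention reduction described above comes from.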
......@@ -235,12 +235,8 @@ reclaimer(void *ptr)
struct net *net = host->net;
req = kmalloc(sizeof(*req), GFP_KERNEL);
if (!req) {
printk(KERN_ERR "lockd: reclaimer unable to alloc memory."
" Locks for %s won't be reclaimed!\n",
host->h_name);
if (!req)
return 0;
}
allow_signal(SIGKILL);
......
......@@ -51,7 +51,7 @@ __be32 nfs4_callback_getattr(void *argp, void *resp,
goto out_iput;
res->size = i_size_read(inode);
res->change_attr = delegation->change_attr;
if (nfsi->nrequests != 0)
if (nfs_have_writebacks(inode))
res->change_attr++;
res->ctime = inode->i_ctime;
res->mtime = inode->i_mtime;
......
......@@ -1089,7 +1089,7 @@ bool nfs4_delegation_flush_on_close(const struct inode *inode)
delegation = rcu_dereference(nfsi->delegation);
if (delegation == NULL || !(delegation->type & FMODE_WRITE))
goto out;
if (nfsi->nrequests < delegation->pagemod_limit)
if (atomic_long_read(&nfsi->nrequests) < delegation->pagemod_limit)
ret = false;
out:
rcu_read_unlock();
......
......@@ -2260,7 +2260,6 @@ static int nfs_access_get_cached(struct inode *inode, struct rpc_cred *cred, str
spin_lock(&inode->i_lock);
retry = false;
}
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
list_move_tail(&cache->lru, &nfsi->access_cache_entry_lru);
......@@ -2296,7 +2295,6 @@ static int nfs_access_get_cached_rcu(struct inode *inode, struct rpc_cred *cred,
goto out;
if (nfs_check_cache_invalid(inode, NFS_INO_INVALID_ACCESS))
goto out;
res->jiffies = cache->jiffies;
res->cred = cache->cred;
res->mask = cache->mask;
err = 0;
......@@ -2344,7 +2342,6 @@ void nfs_access_add_cache(struct inode *inode, struct nfs_access_entry *set)
if (cache == NULL)
return;
RB_CLEAR_NODE(&cache->rb_node);
cache->jiffies = set->jiffies;
cache->cred = get_rpccred(set->cred);
cache->mask = set->mask;
......@@ -2432,7 +2429,6 @@ static int nfs_do_access(struct inode *inode, struct rpc_cred *cred, int mask)
cache.mask = NFS_MAY_LOOKUP | NFS_MAY_EXECUTE
| NFS_MAY_WRITE | NFS_MAY_READ;
cache.cred = cred;
cache.jiffies = jiffies;
status = NFS_PROTO(inode)->access(inode, &cache);
if (status != 0) {
if (status == -ESTALE) {
......
......@@ -616,13 +616,13 @@ nfs_direct_write_scan_commit_list(struct inode *inode,
struct list_head *list,
struct nfs_commit_info *cinfo)
{
spin_lock(&cinfo->inode->i_lock);
mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
#ifdef CONFIG_NFS_V4_1
if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
spin_unlock(&cinfo->inode->i_lock);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
......
......@@ -631,11 +631,11 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
if (result <= 0)
goto out;
result = generic_write_sync(iocb, result);
if (result < 0)
goto out;
written = result;
iocb->ki_pos += written;
result = generic_write_sync(iocb, written);
if (result < 0)
goto out;
/* Return error values */
if (nfs_need_check_write(file, inode)) {
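
For context on the reordering above: generic_write_sync() (include/linux/fs.h, roughly as it stood at the time of this series) flushes the byte range that ends at the current file position, so ki_pos must already point past the write when it is called. Calling it before the ki_pos update made O_DSYNC writes sync the range preceding the write:

        static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
        {
                if (iocb->ki_flags & IOCB_DSYNC) {
                        /* [ki_pos - count, ki_pos - 1] is the just-written range
                         * only if ki_pos has already been advanced */
                        int ret = vfs_fsync_range(iocb->ki_filp,
                                        iocb->ki_pos - count, iocb->ki_pos - 1,
                                        (iocb->ki_flags & IOCB_SYNC) ? 0 : 1);
                        if (ret)
                                return ret;
                }
                return count;
        }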
......@@ -744,15 +744,18 @@ do_setlk(struct file *filp, int cmd, struct file_lock *fl, int is_local)
goto out;
/*
* Revalidate the cache if the server has time stamps granular
* enough to detect subsecond changes. Otherwise, clear the
* cache to prevent missing any changes.
* Invalidate cache to prevent missing any changes. If
* the file is mapped, clear the page cache as well so
* those mappings will be loaded.
*
* This makes locking act as a cache coherency point.
*/
nfs_sync_mapping(filp->f_mapping);
if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ)) {
nfs_zap_caches(inode);
if (mapping_mapped(filp->f_mapping))
nfs_revalidate_mapping(inode, filp->f_mapping);
}
out:
return status;
}
......
......@@ -1285,7 +1285,6 @@ static bool nfs_file_has_buffered_writers(struct nfs_inode *nfsi)
static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr *fattr)
{
struct nfs_inode *nfsi = NFS_I(inode);
unsigned long ret = 0;
if ((fattr->valid & NFS_ATTR_FATTR_PRECHANGE)
......@@ -1315,7 +1314,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
&& (fattr->valid & NFS_ATTR_FATTR_SIZE)
&& i_size_read(inode) == nfs_size_to_loff_t(fattr->pre_size)
&& nfsi->nrequests == 0) {
&& !nfs_have_writebacks(inode)) {
i_size_write(inode, nfs_size_to_loff_t(fattr->size));
ret |= NFS_INO_INVALID_ATTR;
}
......@@ -1823,7 +1822,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
if (new_isize != cur_isize) {
/* Do we perhaps have any outstanding writes, or has
* the file grown beyond our last write? */
if (nfsi->nrequests == 0 || new_isize > cur_isize) {
if (!nfs_have_writebacks(inode) || new_isize > cur_isize) {
i_size_write(inode, new_isize);
if (!have_writers)
invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
......@@ -2012,10 +2011,11 @@ static void init_once(void *foo)
INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
INIT_LIST_HEAD(&nfsi->commit_info.list);
nfsi->nrequests = 0;
nfsi->commit_info.ncommit = 0;
atomic_long_set(&nfsi->nrequests, 0);
atomic_long_set(&nfsi->commit_info.ncommit, 0);
atomic_set(&nfsi->commit_info.rpcs_out, 0);
init_rwsem(&nfsi->rmdir_sem);
mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
}
......
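
A minimal before/after sketch of the nrequests conversion, assuming the atomic_long_t field introduced above; the demo_* names are hypothetical. Turning the counter into an atomic lets writers and readers skip inode->i_lock entirely, which is part of the i_lock split mentioned in the summary:

        static void demo_track_request(struct inode *inode)
        {
                /* before: spin_lock(&inode->i_lock); NFS_I(inode)->nrequests++;
                 *         spin_unlock(&inode->i_lock); */
                atomic_long_inc(&NFS_I(inode)->nrequests);
        }

        static bool demo_have_writebacks(struct inode *inode)
        {
                /* lockless read; no i_lock needed */
                return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
        }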
......@@ -251,7 +251,6 @@ int nfs_iocounter_wait(struct nfs_lock_context *l_ctx);
extern const struct nfs_pageio_ops nfs_pgio_rw_ops;
struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *);
void nfs_pgio_header_free(struct nfs_pgio_header *);
void nfs_pgio_data_destroy(struct nfs_pgio_header *);
int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
int nfs_initiate_pgio(struct rpc_clnt *clnt, struct nfs_pgio_header *hdr,
struct rpc_cred *cred, const struct nfs_rpc_ops *rpc_ops,
......
......@@ -303,6 +303,17 @@ _nfs4_state_protect(struct nfs_client *clp, unsigned long sp4_mode,
struct rpc_cred *newcred = NULL;
rpc_authflavor_t flavor;
if (sp4_mode == NFS_SP4_MACH_CRED_CLEANUP ||
sp4_mode == NFS_SP4_MACH_CRED_PNFS_CLEANUP) {
/* Using machine creds for cleanup operations
* is only relevant if the client credentials
* might expire. So don't bother for
* RPC_AUTH_UNIX. If file was only exported to
* sec=sys, the PUTFH would fail anyway.
*/
if ((*clntp)->cl_auth->au_flavor == RPC_AUTH_UNIX)
return false;
}
if (test_bit(sp4_mode, &clp->cl_sp4_flags)) {
spin_lock(&clp->cl_lock);
if (clp->cl_machine_cred != NULL)
......
......@@ -134,19 +134,14 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
/*
* nfs_page_group_lock - lock the head of the page group
* @req - request in group that is to be locked
* @nonblock - if true don't block waiting for lock
*
* this lock must be held if modifying the page group list
* this lock must be held when traversing or modifying the page
* group list
*
* return 0 on success, < 0 on error: -EDELAY if nonblocking or the
* result from wait_on_bit_lock
*
* NOTE: calling with nonblock=false should always have set the
* lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
* with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
* return 0 on success, < 0 on error
*/
int
nfs_page_group_lock(struct nfs_page *req, bool nonblock)
nfs_page_group_lock(struct nfs_page *req)
{
struct nfs_page *head = req->wb_head;
......@@ -155,35 +150,10 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock)
if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
return 0;
if (!nonblock) {
set_bit(PG_CONTENDED1, &head->wb_flags);
smp_mb__after_atomic();
return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
}
return -EAGAIN;
}
/*
* nfs_page_group_lock_wait - wait for the lock to clear, but don't grab it
* @req - a request in the group
*
* This is a blocking call to wait for the group lock to be cleared.
*/
void
nfs_page_group_lock_wait(struct nfs_page *req)
{
struct nfs_page *head = req->wb_head;
WARN_ON_ONCE(head != head->wb_head);
if (!test_bit(PG_HEADLOCK, &head->wb_flags))
return;
set_bit(PG_CONTENDED1, &head->wb_flags);
smp_mb__after_atomic();
wait_on_bit(&head->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
TASK_UNINTERRUPTIBLE);
}
/*
......@@ -246,7 +216,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
{
bool ret;
nfs_page_group_lock(req, false);
nfs_page_group_lock(req);
ret = nfs_page_group_sync_on_bit_locked(req, bit);
nfs_page_group_unlock(req);
......@@ -288,9 +258,7 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
inode = page_file_mapping(req->wb_page)->host;
set_bit(PG_INODE_REF, &req->wb_flags);
kref_get(&req->wb_kref);
spin_lock(&inode->i_lock);
NFS_I(inode)->nrequests++;
spin_unlock(&inode->i_lock);
atomic_long_inc(&NFS_I(inode)->nrequests);
}
}
}
......@@ -306,14 +274,11 @@ static void
nfs_page_group_destroy(struct kref *kref)
{
struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
struct nfs_page *head = req->wb_head;
struct nfs_page *tmp, *next;
/* subrequests must release the ref on the head request */
if (req->wb_head != req)
nfs_release_request(req->wb_head);
if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
return;
goto out;
tmp = req;
do {
......@@ -324,6 +289,10 @@ nfs_page_group_destroy(struct kref *kref)
nfs_free_request(tmp);
tmp = next;
} while (tmp != req);
out:
/* subrequests must release the ref on the head request */
if (head != req)
nfs_release_request(head);
}
/**
......@@ -465,6 +434,7 @@ void nfs_release_request(struct nfs_page *req)
{
kref_put(&req->wb_kref, nfs_page_group_destroy);
}
EXPORT_SYMBOL_GPL(nfs_release_request);
/**
* nfs_wait_on_request - Wait for a request to complete.
......@@ -483,6 +453,7 @@ nfs_wait_on_request(struct nfs_page *req)
return wait_on_bit_io(&req->wb_flags, PG_BUSY,
TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL_GPL(nfs_wait_on_request);
/*
* nfs_generic_pg_test - determine if requests can be coalesced
......@@ -530,16 +501,6 @@ struct nfs_pgio_header *nfs_pgio_header_alloc(const struct nfs_rw_ops *ops)
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_alloc);
/*
* nfs_pgio_header_free - Free a read or write header
* @hdr: The header to free
*/
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_data_destroy - make @hdr suitable for reuse
*
......@@ -548,14 +509,24 @@ EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
*
* @hdr: A header that has had nfs_generic_pgio called
*/
void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
static void nfs_pgio_data_destroy(struct nfs_pgio_header *hdr)
{
if (hdr->args.context)
put_nfs_open_context(hdr->args.context);
if (hdr->page_array.pagevec != hdr->page_array.page_array)
kfree(hdr->page_array.pagevec);
}
EXPORT_SYMBOL_GPL(nfs_pgio_data_destroy);
/*
* nfs_pgio_header_free - Free a read or write header
* @hdr: The header to free
*/
void nfs_pgio_header_free(struct nfs_pgio_header *hdr)
{
nfs_pgio_data_destroy(hdr);
hdr->rw_ops->rw_free_header(hdr);
}
EXPORT_SYMBOL_GPL(nfs_pgio_header_free);
/**
* nfs_pgio_rpcsetup - Set up arguments for a pageio call
......@@ -669,7 +640,6 @@ EXPORT_SYMBOL_GPL(nfs_initiate_pgio);
static void nfs_pgio_error(struct nfs_pgio_header *hdr)
{
set_bit(NFS_IOHDR_REDO, &hdr->flags);
nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
......@@ -680,7 +650,6 @@ static void nfs_pgio_error(struct nfs_pgio_header *hdr)
static void nfs_pgio_release(void *calldata)
{
struct nfs_pgio_header *hdr = calldata;
nfs_pgio_data_destroy(hdr);
hdr->completion_ops->completion(hdr);
}
......@@ -711,12 +680,8 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
int io_flags,
gfp_t gfp_flags)
int io_flags)
{
struct nfs_pgio_mirror *new;
int i;
desc->pg_moreio = 0;
desc->pg_inode = inode;
desc->pg_ops = pg_ops;
......@@ -732,23 +697,10 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
desc->pg_mirror_count = 1;
desc->pg_mirror_idx = 0;
if (pg_ops->pg_get_mirror_count) {
/* until we have a request, we don't have an lseg and no
* idea how many mirrors there will be */
new = kcalloc(NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX,
sizeof(struct nfs_pgio_mirror), gfp_flags);
desc->pg_mirrors_dynamic = new;
desc->pg_mirrors = new;
for (i = 0; i < NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX; i++)
nfs_pageio_mirror_init(&desc->pg_mirrors[i], bsize);
} else {
desc->pg_mirrors_dynamic = NULL;
desc->pg_mirrors = desc->pg_mirrors_static;
nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
desc->pg_mirrors_dynamic = NULL;
desc->pg_mirrors = desc->pg_mirrors_static;
nfs_pageio_mirror_init(&desc->pg_mirrors[0], bsize);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init);
/**
* nfs_pgio_result - Basic pageio error handling
......@@ -865,32 +817,52 @@ static int nfs_generic_pg_pgios(struct nfs_pageio_descriptor *desc)
return ret;
}
static struct nfs_pgio_mirror *
nfs_pageio_alloc_mirrors(struct nfs_pageio_descriptor *desc,
unsigned int mirror_count)
{
struct nfs_pgio_mirror *ret;
unsigned int i;
kfree(desc->pg_mirrors_dynamic);
desc->pg_mirrors_dynamic = NULL;
if (mirror_count == 1)
return desc->pg_mirrors_static;
ret = kmalloc_array(mirror_count, sizeof(*ret), GFP_NOFS);
if (ret != NULL) {
for (i = 0; i < mirror_count; i++)
nfs_pageio_mirror_init(&ret[i], desc->pg_bsize);
desc->pg_mirrors_dynamic = ret;
}
return ret;
}
/*
* nfs_pageio_setup_mirroring - determine if mirroring is to be used
* by calling the pg_get_mirror_count op
*/
static int nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
static void nfs_pageio_setup_mirroring(struct nfs_pageio_descriptor *pgio,
struct nfs_page *req)
{
int mirror_count = 1;
if (!pgio->pg_ops->pg_get_mirror_count)
return 0;
unsigned int mirror_count = 1;
mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
if (pgio->pg_error < 0)
return pgio->pg_error;
if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX)
return -EINVAL;
if (pgio->pg_ops->pg_get_mirror_count)
mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);
if (mirror_count == pgio->pg_mirror_count || pgio->pg_error < 0)
return;
if (WARN_ON_ONCE(!pgio->pg_mirrors_dynamic))
return -EINVAL;
if (!mirror_count || mirror_count > NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX) {
pgio->pg_error = -EINVAL;
return;
}
pgio->pg_mirrors = nfs_pageio_alloc_mirrors(pgio, mirror_count);
if (pgio->pg_mirrors == NULL) {
pgio->pg_error = -ENOMEM;
pgio->pg_mirrors = pgio->pg_mirrors_static;
mirror_count = 1;
}
pgio->pg_mirror_count = mirror_count;
return 0;
}
/*
......@@ -1036,7 +1008,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
unsigned int bytes_left = 0;
unsigned int offset, pgbase;
nfs_page_group_lock(req, false);
nfs_page_group_lock(req);
subreq = req;
bytes_left = subreq->wb_bytes;
......@@ -1058,7 +1030,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
if (mirror->pg_recoalesce)
return 0;
/* retry add_request for this subreq */
nfs_page_group_lock(req, false);
nfs_page_group_lock(req);
continue;
}
......@@ -1155,7 +1127,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
for (midx = 0; midx < desc->pg_mirror_count; midx++) {
if (midx) {
nfs_page_group_lock(req, false);
nfs_page_group_lock(req);
/* find the last request */
for (lastreq = req->wb_head;
......
......@@ -529,47 +529,6 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);
static void pnfs_free_lseg_async_work(struct work_struct *work)
{
struct pnfs_layout_segment *lseg;
struct pnfs_layout_hdr *lo;
lseg = container_of(work, struct pnfs_layout_segment, pls_work);
lo = lseg->pls_layout;
pnfs_free_lseg(lseg);
pnfs_put_layout_hdr(lo);
}
static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
schedule_work(&lseg->pls_work);
}
void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
if (!lseg)
return;
assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);
dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
atomic_read(&lseg->pls_refcount),
test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
if (atomic_dec_and_test(&lseg->pls_refcount)) {
struct pnfs_layout_hdr *lo = lseg->pls_layout;
if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
return;
pnfs_layout_remove_lseg(lo, lseg);
if (!pnfs_cache_lseg_for_layoutreturn(lo, lseg)) {
pnfs_get_layout_hdr(lo);
pnfs_free_lseg_async(lseg);
}
}
}
/*
* is l2 fully contained in l1?
* start1 end1
......@@ -2274,7 +2233,6 @@ pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_write_mds(desc);
mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
......@@ -2398,7 +2356,6 @@ pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
nfs_pageio_reset_read_mds(desc);
mirror->pg_recoalesce = 1;
}
nfs_pgio_data_destroy(hdr);
hdr->release(hdr);
}
......
......@@ -67,7 +67,6 @@ struct pnfs_layout_segment {
u32 pls_seq;
unsigned long pls_flags;
struct pnfs_layout_hdr *pls_layout;
struct work_struct pls_work;
};
enum pnfs_try_status {
......@@ -230,7 +229,6 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp, bool sync);
/* pnfs.c */
void pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo);
void pnfs_put_lseg(struct pnfs_layout_segment *lseg);
void pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg);
void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, struct nfs_fsinfo *);
void unset_pnfs_layoutdriver(struct nfs_server *);
......
......@@ -83,33 +83,10 @@ pnfs_generic_clear_request_commit(struct nfs_page *req,
}
out:
nfs_request_remove_commit_list(req, cinfo);
pnfs_put_lseg_locked(freeme);
pnfs_put_lseg(freeme);
}
EXPORT_SYMBOL_GPL(pnfs_generic_clear_request_commit);
static int
pnfs_generic_transfer_commit_list(struct list_head *src, struct list_head *dst,
struct nfs_commit_info *cinfo, int max)
{
struct nfs_page *req, *tmp;
int ret = 0;
list_for_each_entry_safe(req, tmp, src, wb_list) {
if (!nfs_lock_request(req))
continue;
kref_get(&req->wb_kref);
if (cond_resched_lock(&cinfo->inode->i_lock))
list_safe_reset_next(req, tmp, wb_list);
nfs_request_remove_commit_list(req, cinfo);
clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
nfs_list_add_request(req, dst);
ret++;
if ((ret == max) && !cinfo->dreq)
break;
}
return ret;
}
static int
pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
struct nfs_commit_info *cinfo,
......@@ -119,15 +96,15 @@ pnfs_generic_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
struct list_head *dst = &bucket->committing;
int ret;
lockdep_assert_held(&cinfo->inode->i_lock);
ret = pnfs_generic_transfer_commit_list(src, dst, cinfo, max);
lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
ret = nfs_scan_commit_list(src, dst, cinfo, max);
if (ret) {
cinfo->ds->nwritten -= ret;
cinfo->ds->ncommitting += ret;
if (bucket->clseg == NULL)
bucket->clseg = pnfs_get_lseg(bucket->wlseg);
if (list_empty(src)) {
pnfs_put_lseg_locked(bucket->wlseg);
pnfs_put_lseg(bucket->wlseg);
bucket->wlseg = NULL;
}
}
......@@ -142,7 +119,7 @@ int pnfs_generic_scan_commit_lists(struct nfs_commit_info *cinfo,
{
int i, rv = 0, cnt;
lockdep_assert_held(&cinfo->inode->i_lock);
lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
cnt = pnfs_generic_scan_ds_commit_list(&cinfo->ds->buckets[i],
cinfo, max);
......@@ -162,11 +139,10 @@ void pnfs_generic_recover_commit_reqs(struct list_head *dst,
int nwritten;
int i;
lockdep_assert_held(&cinfo->inode->i_lock);
lockdep_assert_held(&NFS_I(cinfo->inode)->commit_mutex);
restart:
for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
nwritten = pnfs_generic_transfer_commit_list(&b->written,
dst, cinfo, 0);
nwritten = nfs_scan_commit_list(&b->written, dst, cinfo, 0);
if (!nwritten)
continue;
cinfo->ds->nwritten -= nwritten;
......@@ -953,12 +929,12 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
struct list_head *list;
struct pnfs_commit_bucket *buckets;
spin_lock(&cinfo->inode->i_lock);
mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
buckets = cinfo->ds->buckets;
list = &buckets[ds_commit_idx].written;
if (list_empty(list)) {
if (!pnfs_is_valid_lseg(lseg)) {
spin_unlock(&cinfo->inode->i_lock);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
cinfo->completion_ops->resched_write(cinfo, req);
return;
}
......@@ -975,7 +951,7 @@ pnfs_layout_mark_request_commit(struct nfs_page *req,
cinfo->ds->nwritten++;
nfs_request_add_commit_list_locked(req, list, cinfo);
spin_unlock(&cinfo->inode->i_lock);
mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(pnfs_layout_mark_request_commit);
......
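
A sketch of the commit-list locking after this conversion, assuming the commit_mutex added earlier in this series; demo_scan_commit() is hypothetical. Because a mutex, unlike the old inode->i_lock spinlock, allows sleeping, the cond_resched_lock() dance in the deleted pnfs_generic_transfer_commit_list() is no longer needed and plain nfs_scan_commit_list() can serve both the MDS and pNFS paths:

        static int demo_scan_commit(struct nfs_commit_info *cinfo,
                                    struct list_head *src, struct list_head *dst,
                                    int max)
        {
                int ret;

                mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
                ret = nfs_scan_commit_list(src, dst, cinfo, max);
                mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
                return ret;
        }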
......@@ -68,7 +68,7 @@ void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
server->rsize, 0, GFP_KERNEL);
server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);
......
......@@ -1691,8 +1691,8 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
rpc_authflavor_t *server_authlist, unsigned int count)
{
rpc_authflavor_t flavor = RPC_AUTH_MAXFLAVOR;
bool found_auth_null = false;
unsigned int i;
int use_auth_null = false;
/*
* If the sec= mount option is used, the specified flavor or AUTH_NULL
......@@ -1701,6 +1701,10 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
* AUTH_NULL has a special meaning when it's in the server list - it
* means that the server will ignore the rpc creds, so any flavor
* can be used but still use the sec= that was specified.
*
* Note also that the MNT procedure in MNTv1 does not return a list
* of supported security flavors. In this case, nfs_mount() fabricates
* a security flavor list containing just AUTH_NULL.
*/
for (i = 0; i < count; i++) {
flavor = server_authlist[i];
......@@ -1709,11 +1713,11 @@ static int nfs_verify_authflavors(struct nfs_parsed_mount_data *args,
goto out;
if (flavor == RPC_AUTH_NULL)
use_auth_null = true;
found_auth_null = true;
}
if (use_auth_null) {
flavor = RPC_AUTH_NULL;
if (found_auth_null) {
flavor = args->auth_info.flavors[0];
goto out;
}
......
......@@ -49,7 +49,6 @@
struct nfs_access_entry {
struct rb_node rb_node;
struct list_head lru;
unsigned long jiffies;
struct rpc_cred * cred;
__u32 mask;
struct rcu_head rcu_head;
......@@ -154,7 +153,7 @@ struct nfs_inode {
*/
__be32 cookieverf[2];
unsigned long nrequests;
atomic_long_t nrequests;
struct nfs_mds_commit_info commit_info;
/* Open contexts for shared mmap writes */
......@@ -163,6 +162,7 @@ struct nfs_inode {
/* Readers: in-flight sillydelete RPC calls */
/* Writers: rmdir */
struct rw_semaphore rmdir_sem;
struct mutex commit_mutex;
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
......@@ -510,7 +510,7 @@ extern void nfs_commit_free(struct nfs_commit_data *data);
static inline int
nfs_have_writebacks(struct inode *inode)
{
return NFS_I(inode)->nrequests != 0;
return atomic_long_read(&NFS_I(inode)->nrequests) != 0;
}
/*
......
......@@ -125,8 +125,7 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
int how,
gfp_t gfp_flags);
int how);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
......@@ -139,8 +138,7 @@ extern size_t nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
extern int nfs_wait_on_request(struct nfs_page *);
extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern int nfs_page_group_lock(struct nfs_page *, bool);
extern void nfs_page_group_lock_wait(struct nfs_page *);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
......
......@@ -1476,7 +1476,7 @@ struct nfs_pgio_header {
struct nfs_mds_commit_info {
atomic_t rpcs_out;
unsigned long ncommit;
atomic_long_t ncommit;
struct list_head list;
};
......
......@@ -139,6 +139,8 @@ struct rpc_task_setup {
#define RPC_TASK_RUNNING 0
#define RPC_TASK_QUEUED 1
#define RPC_TASK_ACTIVE 2
#define RPC_TASK_MSG_RECV 3
#define RPC_TASK_MSG_RECV_WAIT 4
#define RPC_IS_RUNNING(t) test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t) set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
......
......@@ -239,6 +239,19 @@ extern unsigned int xdr_read_pages(struct xdr_stream *xdr, unsigned int len);
extern void xdr_enter_page(struct xdr_stream *xdr, unsigned int len);
extern int xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len, int (*actor)(struct scatterlist *, void *), void *data);
/**
* xdr_stream_remaining - Return the number of bytes remaining in the stream
* @xdr: pointer to struct xdr_stream
*
* Return value:
* Number of bytes remaining in @xdr before xdr->end
*/
static inline size_t
xdr_stream_remaining(const struct xdr_stream *xdr)
{
return xdr->nwords << 2;
}
ssize_t xdr_stream_decode_string_dup(struct xdr_stream *xdr, char **str,
size_t maxlen, gfp_t gfp_flags);
/**
......
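
A usage sketch for the new helper, assuming an xdr_stream already initialized over a received buffer; demo_peek_xid() is hypothetical, but the shape mirrors the backchannel receive path later in this series (peek at the XID, then ask how much payload is left):

        static int demo_peek_xid(struct xdr_stream *xdr, __be32 *xid, size_t *remaining)
        {
                __be32 *p;

                p = xdr_inline_decode(xdr, sizeof(*p));  /* consume the XID word */
                if (unlikely(!p))
                        return -EIO;
                *xid = *p;
                *remaining = xdr_stream_remaining(xdr);  /* bytes left before xdr->end */
                return 0;
        }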
......@@ -174,7 +174,7 @@ enum xprt_transports {
struct rpc_xprt {
struct kref kref; /* Reference count */
struct rpc_xprt_ops * ops; /* transport methods */
const struct rpc_xprt_ops *ops; /* transport methods */
const struct rpc_timeout *timeout; /* timeout parms */
struct sockaddr_storage addr; /* server address */
......@@ -232,6 +232,7 @@ struct rpc_xprt {
*/
spinlock_t transport_lock; /* lock transport info */
spinlock_t reserve_lock; /* lock slot table */
spinlock_t recv_lock; /* lock receive list */
u32 xid; /* Next XID value to use */
struct rpc_task * snd_task; /* Task blocked in send */
struct svc_xprt *bc_xprt; /* NFSv4.1 backchannel */
......@@ -372,6 +373,8 @@ void xprt_write_space(struct rpc_xprt *xprt);
void xprt_adjust_cwnd(struct rpc_xprt *xprt, struct rpc_task *task, int result);
struct rpc_rqst * xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid);
void xprt_complete_rqst(struct rpc_task *task, int copied);
void xprt_pin_rqst(struct rpc_rqst *req);
void xprt_unpin_rqst(struct rpc_rqst *req);
void xprt_release_rqst_cong(struct rpc_task *task);
void xprt_disconnect_done(struct rpc_xprt *xprt);
void xprt_force_disconnect(struct rpc_xprt *xprt);
......
......@@ -171,10 +171,10 @@ int xprt_setup_bc(struct rpc_xprt *xprt, unsigned int min_reqs)
/*
* Add the temporary list to the backchannel preallocation list
*/
spin_lock_bh(&xprt->bc_pa_lock);
spin_lock(&xprt->bc_pa_lock);
list_splice(&tmp_list, &xprt->bc_pa_list);
xprt_inc_alloc_count(xprt, min_reqs);
spin_unlock_bh(&xprt->bc_pa_lock);
spin_unlock(&xprt->bc_pa_lock);
dprintk("RPC: setup backchannel transport done\n");
return 0;
......
......@@ -1903,6 +1903,14 @@ call_connect_status(struct rpc_task *task)
task->tk_status = 0;
switch (status) {
case -ECONNREFUSED:
/* A positive refusal suggests a rebind is needed. */
if (RPC_IS_SOFTCONN(task))
break;
if (clnt->cl_autobind) {
rpc_force_rebind(clnt);
task->tk_action = call_bind;
return;
}
case -ECONNRESET:
case -ECONNABORTED:
case -ENETUNREACH:
......@@ -2139,10 +2147,6 @@ call_status(struct rpc_task *task)
rpc_delay(task, 3*HZ);
case -ETIMEDOUT:
task->tk_action = call_timeout;
if (!(task->tk_flags & RPC_TASK_NO_RETRANS_TIMEOUT)
&& task->tk_client->cl_discrtry)
xprt_conditional_disconnect(req->rq_xprt,
req->rq_connect_cookie);
break;
case -ECONNREFUSED:
case -ECONNRESET:
......
......@@ -1013,7 +1013,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
if (!bc_xprt)
return -EAGAIN;
spin_lock_bh(&bc_xprt->transport_lock);
spin_lock(&bc_xprt->recv_lock);
req = xprt_lookup_rqst(bc_xprt, xid);
if (!req)
goto unlock_notfound;
......@@ -1031,7 +1031,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
memcpy(dst->iov_base, src->iov_base, src->iov_len);
xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
rqstp->rq_arg.len = 0;
spin_unlock_bh(&bc_xprt->transport_lock);
spin_unlock(&bc_xprt->recv_lock);
return 0;
unlock_notfound:
printk(KERN_NOTICE
......@@ -1040,7 +1040,7 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
__func__, ntohl(calldir),
bc_xprt, ntohl(xid));
unlock_eagain:
spin_unlock_bh(&bc_xprt->transport_lock);
spin_unlock(&bc_xprt->recv_lock);
return -EAGAIN;
}
......
......@@ -844,6 +844,50 @@ struct rpc_rqst *xprt_lookup_rqst(struct rpc_xprt *xprt, __be32 xid)
}
EXPORT_SYMBOL_GPL(xprt_lookup_rqst);
/**
* xprt_pin_rqst - Pin a request on the transport receive list
* @req: Request to pin
*
* Caller must ensure this is atomic with the call to xprt_lookup_rqst()
* so should be holding the xprt transport lock.
*/
void xprt_pin_rqst(struct rpc_rqst *req)
{
set_bit(RPC_TASK_MSG_RECV, &req->rq_task->tk_runstate);
}
EXPORT_SYMBOL_GPL(xprt_pin_rqst);
/**
* xprt_unpin_rqst - Unpin a request on the transport receive list
* @req: Request to unpin
*
* Caller should be holding the xprt transport lock.
*/
void xprt_unpin_rqst(struct rpc_rqst *req)
{
struct rpc_task *task = req->rq_task;
clear_bit(RPC_TASK_MSG_RECV, &task->tk_runstate);
if (test_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate))
wake_up_bit(&task->tk_runstate, RPC_TASK_MSG_RECV);
}
EXPORT_SYMBOL_GPL(xprt_unpin_rqst);
static void xprt_wait_on_pinned_rqst(struct rpc_rqst *req)
__must_hold(&req->rq_xprt->recv_lock)
{
struct rpc_task *task = req->rq_task;
if (task && test_bit(RPC_TASK_MSG_RECV, &task->tk_runstate)) {
spin_unlock(&req->rq_xprt->recv_lock);
set_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
wait_on_bit(&task->tk_runstate, RPC_TASK_MSG_RECV,
TASK_UNINTERRUPTIBLE);
clear_bit(RPC_TASK_MSG_RECV_WAIT, &task->tk_runstate);
spin_lock(&req->rq_xprt->recv_lock);
}
}
static void xprt_update_rtt(struct rpc_task *task)
{
struct rpc_rqst *req = task->tk_rqstp;
......@@ -966,13 +1010,13 @@ void xprt_transmit(struct rpc_task *task)
/*
* Add to the list only if we're expecting a reply
*/
spin_lock_bh(&xprt->transport_lock);
/* Update the softirq receive buffer */
memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
sizeof(req->rq_private_buf));
/* Add request to the receive list */
spin_lock(&xprt->recv_lock);
list_add_tail(&req->rq_list, &xprt->recv);
spin_unlock_bh(&xprt->transport_lock);
spin_unlock(&xprt->recv_lock);
xprt_reset_majortimeo(req);
/* Turn off autodisconnect */
del_singleshot_timer_sync(&xprt->timer);
......@@ -1287,12 +1331,16 @@ void xprt_release(struct rpc_task *task)
task->tk_ops->rpc_count_stats(task, task->tk_calldata);
else if (task->tk_client)
rpc_count_iostats(task, task->tk_client->cl_metrics);
spin_lock(&xprt->recv_lock);
if (!list_empty(&req->rq_list)) {
list_del(&req->rq_list);
xprt_wait_on_pinned_rqst(req);
}
spin_unlock(&xprt->recv_lock);
spin_lock_bh(&xprt->transport_lock);
xprt->ops->release_xprt(xprt, task);
if (xprt->ops->release_request)
xprt->ops->release_request(task);
if (!list_empty(&req->rq_list))
list_del(&req->rq_list);
xprt->last_used = jiffies;
xprt_schedule_autodisconnect(xprt);
spin_unlock_bh(&xprt->transport_lock);
......@@ -1318,6 +1366,7 @@ static void xprt_init(struct rpc_xprt *xprt, struct net *net)
spin_lock_init(&xprt->transport_lock);
spin_lock_init(&xprt->reserve_lock);
spin_lock_init(&xprt->recv_lock);
INIT_LIST_HEAD(&xprt->free);
INIT_LIST_HEAD(&xprt->recv);
......
......@@ -49,6 +49,7 @@ static int rpcrdma_bc_setup_rqst(struct rpcrdma_xprt *r_xprt,
if (IS_ERR(rb))
goto out_fail;
req->rl_rdmabuf = rb;
xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
size = r_xprt->rx_data.inline_rsize;
rb = rpcrdma_alloc_regbuf(size, DMA_TO_DEVICE, GFP_KERNEL);
......@@ -202,20 +203,24 @@ size_t xprt_rdma_bc_maxpayload(struct rpc_xprt *xprt)
*/
int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
{
struct rpc_xprt *xprt = rqst->rq_xprt;
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(xprt);
struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
struct rpcrdma_msg *headerp;
headerp = rdmab_to_msg(req->rl_rdmabuf);
headerp->rm_xid = rqst->rq_xid;
headerp->rm_vers = rpcrdma_version;
headerp->rm_credit =
cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
headerp->rm_type = rdma_msg;
headerp->rm_body.rm_chunks[0] = xdr_zero;
headerp->rm_body.rm_chunks[1] = xdr_zero;
headerp->rm_body.rm_chunks[2] = xdr_zero;
__be32 *p;
rpcrdma_set_xdrlen(&req->rl_hdrbuf, 0);
xdr_init_encode(&req->rl_stream, &req->rl_hdrbuf,
req->rl_rdmabuf->rg_base);
p = xdr_reserve_space(&req->rl_stream, 28);
if (unlikely(!p))
return -EIO;
*p++ = rqst->rq_xid;
*p++ = rpcrdma_version;
*p++ = cpu_to_be32(r_xprt->rx_buf.rb_bc_srv_max_requests);
*p++ = rdma_msg;
*p++ = xdr_zero;
*p++ = xdr_zero;
*p = xdr_zero;
if (!rpcrdma_prepare_send_sges(&r_xprt->rx_ia, req, RPCRDMA_HDRLEN_MIN,
&rqst->rq_snd_buf, rpcrdma_noch))
......@@ -271,9 +276,6 @@ void xprt_rdma_bc_free_rqst(struct rpc_rqst *rqst)
* @xprt: transport receiving the call
* @rep: receive buffer containing the call
*
* Called in the RPC reply handler, which runs in a tasklet.
* Be quick about it.
*
* Operational assumptions:
* o Backchannel credits are ignored, just as the NFS server
* forechannel currently does
......@@ -284,7 +286,6 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
struct rpcrdma_rep *rep)
{
struct rpc_xprt *xprt = &r_xprt->rx_xprt;
struct rpcrdma_msg *headerp;
struct svc_serv *bc_serv;
struct rpcrdma_req *req;
struct rpc_rqst *rqst;
......@@ -292,24 +293,15 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
size_t size;
__be32 *p;
headerp = rdmab_to_msg(rep->rr_rdmabuf);
p = xdr_inline_decode(&rep->rr_stream, 0);
size = xdr_stream_remaining(&rep->rr_stream);
#ifdef RPCRDMA_BACKCHANNEL_DEBUG
pr_info("RPC: %s: callback XID %08x, length=%u\n",
__func__, be32_to_cpu(headerp->rm_xid), rep->rr_len);
pr_info("RPC: %s: %*ph\n", __func__, rep->rr_len, headerp);
__func__, be32_to_cpup(p), size);
pr_info("RPC: %s: %*ph\n", __func__, size, p);
#endif
/* Sanity check:
* Need at least enough bytes for RPC/RDMA header, as code
* here references the header fields by array offset. Also,
* backward calls are always inline, so ensure there
* are some bytes beyond the RPC/RDMA header.
*/
if (rep->rr_len < RPCRDMA_HDRLEN_MIN + 24)
goto out_short;
p = (__be32 *)((unsigned char *)headerp + RPCRDMA_HDRLEN_MIN);
size = rep->rr_len - RPCRDMA_HDRLEN_MIN;
/* Grab a free bc rqst */
spin_lock(&xprt->bc_pa_lock);
if (list_empty(&xprt->bc_pa_list)) {
......@@ -325,7 +317,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
/* Prepare rqst */
rqst->rq_reply_bytes_recvd = 0;
rqst->rq_bytes_sent = 0;
rqst->rq_xid = headerp->rm_xid;
rqst->rq_xid = *p;
rqst->rq_private_buf.len = size;
set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
......@@ -337,9 +329,9 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
buf->len = size;
/* The receive buffer has to be hooked to the rpcrdma_req
* so that it can be reposted after the server is done
* parsing it but just before sending the backward
* direction reply.
* so that it is not released while the req is pointing
* to its buffer, and so that it can be reposted after
* the Upper Layer is done decoding it.
*/
req = rpcr_to_rdmar(rqst);
dprintk("RPC: %s: attaching rep %p to req %p\n",
......@@ -367,13 +359,4 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
* when the connection is re-established.
*/
return;
out_short:
pr_warn("RPC/RDMA short backward direction call\n");
if (rpcrdma_ep_post_recv(&r_xprt->rx_ia, rep))
xprt_disconnect_done(xprt);
else
pr_warn("RPC: %s: reposting rep %p\n",
__func__, rep);
}
......@@ -177,7 +177,7 @@ fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
/* Use the ib_map_phys_fmr() verb to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
static int
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, struct rpcrdma_mw **out)
{
......@@ -188,7 +188,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mw = rpcrdma_get_mw(r_xprt);
if (!mw)
return -ENOBUFS;
return ERR_PTR(-ENOBUFS);
pageoff = offset_in_page(seg1->mr_offset);
seg1->mr_offset -= pageoff; /* start of page */
......@@ -232,13 +232,13 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mw->mw_offset = dma_pages[0] + pageoff;
*out = mw;
return mw->mw_nents;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
mw->mw_sg, i);
rpcrdma_put_mw(r_xprt, mw);
return -EIO;
return ERR_PTR(-EIO);
out_maperr:
pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
......@@ -247,7 +247,7 @@ fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
mw->mw_sg, mw->mw_nents, mw->mw_dir);
rpcrdma_put_mw(r_xprt, mw);
return -EIO;
return ERR_PTR(-EIO);
}
/* Invalidate all memory regions that were registered for "req".
......
......@@ -344,7 +344,7 @@ frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
/* Post a REG_MR Work Request to register a memory region
* for remote access via RDMA READ or RDMA WRITE.
*/
static int
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
int nsegs, bool writing, struct rpcrdma_mw **out)
{
......@@ -364,7 +364,7 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
rpcrdma_defer_mr_recovery(mw);
mw = rpcrdma_get_mw(r_xprt);
if (!mw)
return -ENOBUFS;
return ERR_PTR(-ENOBUFS);
} while (mw->frmr.fr_state != FRMR_IS_INVALID);
frmr = &mw->frmr;
frmr->fr_state = FRMR_IS_VALID;
......@@ -429,25 +429,25 @@ frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
mw->mw_offset = mr->iova;
*out = mw;
return mw->mw_nents;
return seg;
out_dmamap_err:
pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
mw->mw_sg, i);
frmr->fr_state = FRMR_IS_INVALID;
rpcrdma_put_mw(r_xprt, mw);
return -EIO;
return ERR_PTR(-EIO);
out_mapmr_err:
pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
frmr->fr_mr, n, mw->mw_nents);
rpcrdma_defer_mr_recovery(mw);
return -EIO;
return ERR_PTR(-EIO);
out_senderr:
pr_err("rpcrdma: FRMR registration ib_post_send returned %i\n", rc);
rpcrdma_defer_mr_recovery(mw);
return -ENOTCONN;
return ERR_PTR(-ENOTCONN);
}
/* Invalidate all memory regions that were registered for "req".
......
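
A caller-side sketch of the new ro_map() convention, assuming the ERR_PTR-returning signature above; demo_map_chunk() is hypothetical. Returning the next unmapped segment instead of a count lets the marshaling loop resume directly from the return value:

        static int demo_map_chunk(struct rpcrdma_xprt *r_xprt,
                                  struct rpcrdma_mr_seg *seg, int nsegs,
                                  bool writing)
        {
                struct rpcrdma_mw *mw;

                seg = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing, &mw);
                if (IS_ERR(seg))
                        return PTR_ERR(seg);    /* -EIO, -ENOBUFS, -ENOTCONN, ... */
                /* ... continue marshaling from seg; record mw for later invalidation ... */
                return 0;
        }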
......@@ -52,7 +52,7 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
if (src->iov_len < 24)
goto out_shortreply;
spin_lock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
req = xprt_lookup_rqst(xprt, xid);
if (!req)
goto out_notfound;
......@@ -69,17 +69,20 @@ int svc_rdma_handle_bc_reply(struct rpc_xprt *xprt, __be32 *rdma_resp,
else if (credits > r_xprt->rx_buf.rb_bc_max_requests)
credits = r_xprt->rx_buf.rb_bc_max_requests;
spin_lock_bh(&xprt->transport_lock);
cwnd = xprt->cwnd;
xprt->cwnd = credits << RPC_CWNDSHIFT;
if (xprt->cwnd > cwnd)
xprt_release_rqst_cong(req->rq_task);
spin_unlock_bh(&xprt->transport_lock);
ret = 0;
xprt_complete_rqst(req->rq_task, rcvbuf->len);
rcvbuf->len = 0;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
spin_unlock(&xprt->recv_lock);
out:
return ret;
......@@ -266,7 +269,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
module_put(THIS_MODULE);
}
static struct rpc_xprt_ops xprt_rdma_bc_procs = {
static const struct rpc_xprt_ops xprt_rdma_bc_procs = {
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong,
.alloc_slot = xprt_alloc_slot,
......
......@@ -149,7 +149,7 @@ static struct ctl_table sunrpc_table[] = {
#endif
static struct rpc_xprt_ops xprt_rdma_procs; /*forward reference */
static const struct rpc_xprt_ops xprt_rdma_procs;
static void
xprt_rdma_format_addresses4(struct rpc_xprt *xprt, struct sockaddr *sap)
......@@ -559,6 +559,7 @@ rpcrdma_get_rdmabuf(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
r_xprt->rx_stats.hardway_register_count += size;
req->rl_rdmabuf = rb;
xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
return true;
}
......@@ -684,7 +685,6 @@ xprt_rdma_free(struct rpc_task *task)
dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);
rpcrdma_remove_req(&r_xprt->rx_buf, req);
if (!list_empty(&req->rl_registered))
ia->ri_ops->ro_unmap_safe(r_xprt, req, !RPC_IS_ASYNC(task));
rpcrdma_unmap_sges(ia, req);
......@@ -730,7 +730,7 @@ xprt_rdma_send_request(struct rpc_task *task)
if (unlikely(!list_empty(&req->rl_registered)))
r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req, false);
rc = rpcrdma_marshal_req(rqst);
rc = rpcrdma_marshal_req(r_xprt, rqst);
if (rc < 0)
goto failed_marshal;
......@@ -811,7 +811,7 @@ xprt_rdma_disable_swap(struct rpc_xprt *xprt)
* Plumbing for rpc transport switch and kernel module
*/
static struct rpc_xprt_ops xprt_rdma_procs = {
static const struct rpc_xprt_ops xprt_rdma_procs = {
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong, /* sunrpc/xprt.c */
.alloc_slot = xprt_alloc_slot,
......
......@@ -139,14 +139,11 @@ rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
__be32 *p = rep->rr_rdmabuf->rg_base;
u32 credits;
if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
return;
credits = be32_to_cpu(rmsgp->rm_credit);
credits = be32_to_cpup(p + 2);
if (credits == 0)
credits = 1; /* don't deadlock */
else if (credits > buffer->rb_max_requests)
......@@ -173,21 +170,19 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
goto out_fail;
/* status == SUCCESS means all fields in wc are trustworthy */
if (wc->opcode != IB_WC_RECV)
return;
dprintk("RPC: %s: rep %p opcode 'recv', length %u: success\n",
__func__, rep, wc->byte_len);
rep->rr_len = wc->byte_len;
rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
rep->rr_wc_flags = wc->wc_flags;
rep->rr_inv_rkey = wc->ex.invalidate_rkey;
ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
rdmab_addr(rep->rr_rdmabuf),
rep->rr_len, DMA_FROM_DEVICE);
wc->byte_len, DMA_FROM_DEVICE);
rpcrdma_update_granted_credits(rep);
if (wc->byte_len >= RPCRDMA_HDRLEN_ERR)
rpcrdma_update_granted_credits(rep);
out_schedule:
queue_work(rpcrdma_receive_wq, &rep->rr_work);
......@@ -198,7 +193,7 @@ rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
ib_wc_status_msg(wc->status),
wc->status, wc->vendor_err);
rep->rr_len = RPCRDMA_BAD_LEN;
rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
goto out_schedule;
}
......@@ -974,6 +969,8 @@ rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
rc = PTR_ERR(rep->rr_rdmabuf);
goto out_free;
}
xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
rdmab_length(rep->rr_rdmabuf));
rep->rr_cqe.done = rpcrdma_wc_receive;
rep->rr_rxprt = r_xprt;
......@@ -1004,7 +1001,6 @@ rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
spin_lock_init(&buf->rb_recovery_lock);
INIT_LIST_HEAD(&buf->rb_mws);
INIT_LIST_HEAD(&buf->rb_all);
INIT_LIST_HEAD(&buf->rb_pending);
INIT_LIST_HEAD(&buf->rb_stale_mrs);
INIT_DELAYED_WORK(&buf->rb_refresh_worker,
rpcrdma_mr_refresh_worker);
......
......@@ -218,18 +218,17 @@ enum {
struct rpcrdma_rep {
struct ib_cqe rr_cqe;
unsigned int rr_len;
int rr_wc_flags;
u32 rr_inv_rkey;
struct rpcrdma_regbuf *rr_rdmabuf;
struct rpcrdma_xprt *rr_rxprt;
struct work_struct rr_work;
struct xdr_buf rr_hdrbuf;
struct xdr_stream rr_stream;
struct list_head rr_list;
struct ib_recv_wr rr_recv_wr;
struct rpcrdma_regbuf *rr_rdmabuf;
};
#define RPCRDMA_BAD_LEN (~0U)
/*
* struct rpcrdma_mw - external memory region metadata
*
......@@ -341,11 +340,12 @@ enum {
struct rpcrdma_buffer;
struct rpcrdma_req {
struct list_head rl_list;
__be32 rl_xid;
unsigned int rl_mapped_sges;
unsigned int rl_connect_cookie;
struct rpcrdma_buffer *rl_buffer;
struct rpcrdma_rep *rl_reply;
struct xdr_stream rl_stream;
struct xdr_buf rl_hdrbuf;
struct ib_send_wr rl_send_wr;
struct ib_sge rl_send_sge[RPCRDMA_MAX_SEND_SGES];
struct rpcrdma_regbuf *rl_rdmabuf; /* xprt header */
......@@ -403,7 +403,6 @@ struct rpcrdma_buffer {
int rb_send_count, rb_recv_count;
struct list_head rb_send_bufs;
struct list_head rb_recv_bufs;
struct list_head rb_pending;
u32 rb_max_requests;
atomic_t rb_credits; /* most recent credit grant */
......@@ -440,24 +439,27 @@ struct rpcrdma_create_data_internal {
* Statistics for RPCRDMA
*/
struct rpcrdma_stats {
/* accessed when sending a call */
unsigned long read_chunk_count;
unsigned long write_chunk_count;
unsigned long reply_chunk_count;
unsigned long long total_rdma_request;
unsigned long long total_rdma_reply;
/* rarely accessed error counters */
unsigned long long pullup_copy_count;
unsigned long long fixup_copy_count;
unsigned long hardway_register_count;
unsigned long failed_marshal_count;
unsigned long bad_reply_count;
unsigned long nomsg_call_count;
unsigned long bcall_count;
unsigned long mrs_recovered;
unsigned long mrs_orphaned;
unsigned long mrs_allocated;
/* accessed when receiving a reply */
unsigned long long total_rdma_reply;
unsigned long long fixup_copy_count;
unsigned long local_inv_needed;
unsigned long nomsg_call_count;
unsigned long bcall_count;
};
/*
......@@ -465,7 +467,8 @@ struct rpcrdma_stats {
*/
struct rpcrdma_xprt;
struct rpcrdma_memreg_ops {
int (*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *
(*ro_map)(struct rpcrdma_xprt *,
struct rpcrdma_mr_seg *, int, bool,
struct rpcrdma_mw **);
void (*ro_unmap_sync)(struct rpcrdma_xprt *,
......@@ -552,34 +555,6 @@ void rpcrdma_destroy_req(struct rpcrdma_req *);
int rpcrdma_buffer_create(struct rpcrdma_xprt *);
void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
static inline void
rpcrdma_insert_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
spin_lock(&buffers->rb_lock);
if (list_empty(&req->rl_list))
list_add_tail(&req->rl_list, &buffers->rb_pending);
spin_unlock(&buffers->rb_lock);
}
static inline struct rpcrdma_req *
rpcrdma_lookup_req_locked(struct rpcrdma_buffer *buffers, __be32 xid)
{
struct rpcrdma_req *pos;
list_for_each_entry(pos, &buffers->rb_pending, rl_list)
if (pos->rl_xid == xid)
return pos;
return NULL;
}
static inline void
rpcrdma_remove_req(struct rpcrdma_buffer *buffers, struct rpcrdma_req *req)
{
spin_lock(&buffers->rb_lock);
list_del(&req->rl_list);
spin_unlock(&buffers->rb_lock);
}
struct rpcrdma_mw *rpcrdma_get_mw(struct rpcrdma_xprt *);
void rpcrdma_put_mw(struct rpcrdma_xprt *, struct rpcrdma_mw *);
struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
......@@ -638,10 +613,16 @@ enum rpcrdma_chunktype {
bool rpcrdma_prepare_send_sges(struct rpcrdma_ia *, struct rpcrdma_req *,
u32, struct xdr_buf *, enum rpcrdma_chunktype);
void rpcrdma_unmap_sges(struct rpcrdma_ia *, struct rpcrdma_req *);
int rpcrdma_marshal_req(struct rpc_rqst *);
int rpcrdma_marshal_req(struct rpcrdma_xprt *r_xprt, struct rpc_rqst *rqst);
void rpcrdma_set_max_header_sizes(struct rpcrdma_xprt *);
void rpcrdma_reply_handler(struct work_struct *work);
static inline void rpcrdma_set_xdrlen(struct xdr_buf *xdr, size_t len)
{
xdr->head[0].iov_len = len;
xdr->len = len;
}
/* RPC/RDMA module init - xprtrdma/transport.c
*/
extern unsigned int xprt_rdma_max_inline_read;
......
......@@ -969,10 +969,12 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
return;
/* Look up and lock the request corresponding to the given XID */
spin_lock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
rovr = xprt_lookup_rqst(xprt, *xp);
if (!rovr)
goto out_unlock;
xprt_pin_rqst(rovr);
spin_unlock(&xprt->recv_lock);
task = rovr->rq_task;
copied = rovr->rq_private_buf.buflen;
......@@ -981,13 +983,16 @@ static void xs_local_data_read_skb(struct rpc_xprt *xprt,
if (xs_local_copy_to_xdr(&rovr->rq_private_buf, skb)) {
dprintk("RPC: sk_buff copy failed\n");
goto out_unlock;
spin_lock(&xprt->recv_lock);
goto out_unpin;
}
spin_lock(&xprt->recv_lock);
xprt_complete_rqst(task, copied);
out_unpin:
xprt_unpin_rqst(rovr);
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
spin_unlock(&xprt->recv_lock);
}
static void xs_local_data_receive(struct sock_xprt *transport)
......@@ -1050,10 +1055,12 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
return;
/* Look up and lock the request corresponding to the given XID */
spin_lock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
rovr = xprt_lookup_rqst(xprt, *xp);
if (!rovr)
goto out_unlock;
xprt_pin_rqst(rovr);
spin_unlock(&xprt->recv_lock);
task = rovr->rq_task;
if ((copied = rovr->rq_private_buf.buflen) > repsize)
......@@ -1062,16 +1069,21 @@ static void xs_udp_data_read_skb(struct rpc_xprt *xprt,
/* Suck it into the iovec, verify checksum if not done by hw. */
if (csum_partial_copy_to_xdr(&rovr->rq_private_buf, skb)) {
__UDPX_INC_STATS(sk, UDP_MIB_INERRORS);
goto out_unlock;
spin_lock(&xprt->recv_lock);
goto out_unpin;
}
__UDPX_INC_STATS(sk, UDP_MIB_INDATAGRAMS);
spin_lock_bh(&xprt->transport_lock);
xprt_adjust_cwnd(xprt, task, copied);
spin_unlock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
xprt_complete_rqst(task, copied);
out_unpin:
xprt_unpin_rqst(rovr);
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
spin_unlock(&xprt->recv_lock);
}
static void xs_udp_data_receive(struct sock_xprt *transport)
......@@ -1277,25 +1289,12 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
}
len = desc->count;
if (len > transport->tcp_reclen - transport->tcp_offset) {
struct xdr_skb_reader my_desc;
len = transport->tcp_reclen - transport->tcp_offset;
memcpy(&my_desc, desc, sizeof(my_desc));
my_desc.count = len;
r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
&my_desc, xdr_skb_read_bits);
desc->count -= r;
desc->offset += r;
} else
r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
if (len > transport->tcp_reclen - transport->tcp_offset)
desc->count = transport->tcp_reclen - transport->tcp_offset;
r = xdr_partial_copy_from_skb(rcvbuf, transport->tcp_copied,
desc, xdr_skb_read_bits);
if (r > 0) {
transport->tcp_copied += r;
transport->tcp_offset += r;
}
if (r != len) {
if (desc->count) {
/* Error when copying to the receive buffer,
* usually because we weren't able to allocate
* additional buffer pages. All we can do now
......@@ -1315,6 +1314,10 @@ static inline void xs_tcp_read_common(struct rpc_xprt *xprt,
return;
}
transport->tcp_copied += r;
transport->tcp_offset += r;
desc->count = len - r;
dprintk("RPC: XID %08x read %zd bytes\n",
ntohl(transport->tcp_xid), r);
dprintk("RPC: xprt = %p, tcp_copied = %lu, tcp_offset = %u, "
......@@ -1343,21 +1346,24 @@ static inline int xs_tcp_read_reply(struct rpc_xprt *xprt,
dprintk("RPC: read reply XID %08x\n", ntohl(transport->tcp_xid));
/* Find and lock the request corresponding to this xid */
spin_lock_bh(&xprt->transport_lock);
spin_lock(&xprt->recv_lock);
req = xprt_lookup_rqst(xprt, transport->tcp_xid);
if (!req) {
dprintk("RPC: XID %08x request not found!\n",
ntohl(transport->tcp_xid));
spin_unlock_bh(&xprt->transport_lock);
spin_unlock(&xprt->recv_lock);
return -1;
}
xprt_pin_rqst(req);
spin_unlock(&xprt->recv_lock);
xs_tcp_read_common(xprt, desc, req);
spin_lock(&xprt->recv_lock);
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
xprt_complete_rqst(req->rq_task, transport->tcp_copied);
spin_unlock_bh(&xprt->transport_lock);
xprt_unpin_rqst(req);
spin_unlock(&xprt->recv_lock);
return 0;
}
......@@ -1376,11 +1382,9 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt,
container_of(xprt, struct sock_xprt, xprt);
struct rpc_rqst *req;
/* Look up and lock the request corresponding to the given XID */
spin_lock_bh(&xprt->transport_lock);
/* Look up the request corresponding to the given XID */
req = xprt_lookup_bc_request(xprt, transport->tcp_xid);
if (req == NULL) {
spin_unlock_bh(&xprt->transport_lock);
printk(KERN_WARNING "Callback slot table overflowed\n");
xprt_force_disconnect(xprt);
return -1;
......@@ -1391,7 +1395,6 @@ static int xs_tcp_read_callback(struct rpc_xprt *xprt,
if (!(transport->tcp_flags & TCP_RCV_COPY_DATA))
xprt_complete_bc_request(req, transport->tcp_copied);
spin_unlock_bh(&xprt->transport_lock);
return 0;
}
......@@ -1516,6 +1519,7 @@ static void xs_tcp_data_receive(struct sock_xprt *transport)
.arg.data = xprt,
};
unsigned long total = 0;
int loop;
int read = 0;
mutex_lock(&transport->recv_mutex);
......@@ -1524,20 +1528,20 @@ static void xs_tcp_data_receive(struct sock_xprt *transport)
goto out;
/* We use rd_desc to pass struct xprt to xs_tcp_data_recv */
for (;;) {
for (loop = 0; loop < 64; loop++) {
lock_sock(sk);
read = tcp_read_sock(sk, &rd_desc, xs_tcp_data_recv);
if (read <= 0) {
clear_bit(XPRT_SOCK_DATA_READY, &transport->sock_state);
release_sock(sk);
if (!test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
break;
} else {
release_sock(sk);
total += read;
break;
}
release_sock(sk);
total += read;
rd_desc.count = 65536;
}
if (test_bit(XPRT_SOCK_DATA_READY, &transport->sock_state))
queue_work(xprtiod_workqueue, &transport->recv_worker);
out:
mutex_unlock(&transport->recv_mutex);
trace_xs_tcp_data_ready(xprt, read, total);
......@@ -2724,7 +2728,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
module_put(THIS_MODULE);
}
static struct rpc_xprt_ops xs_local_ops = {
static const struct rpc_xprt_ops xs_local_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
.alloc_slot = xprt_alloc_slot,
......@@ -2742,7 +2746,7 @@ static struct rpc_xprt_ops xs_local_ops = {
.disable_swap = xs_disable_swap,
};
static struct rpc_xprt_ops xs_udp_ops = {
static const struct rpc_xprt_ops xs_udp_ops = {
.set_buffer_size = xs_udp_set_buffer_size,
.reserve_xprt = xprt_reserve_xprt_cong,
.release_xprt = xprt_release_xprt_cong,
......@@ -2764,7 +2768,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
.inject_disconnect = xs_inject_disconnect,
};
static struct rpc_xprt_ops xs_tcp_ops = {
static const struct rpc_xprt_ops xs_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xs_tcp_release_xprt,
.alloc_slot = xprt_lock_and_alloc_slot,
......@@ -2795,7 +2799,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
* The rpc_xprt_ops for the server backchannel
*/
static struct rpc_xprt_ops bc_tcp_ops = {
static const struct rpc_xprt_ops bc_tcp_ops = {
.reserve_xprt = xprt_reserve_xprt,
.release_xprt = xprt_release_xprt,
.alloc_slot = xprt_alloc_slot,
......
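
The rpc_xprt_ops const-ification follows one mechanical pattern throughout: each ops table becomes const (so it can live in read-only memory) and the ops pointer in struct rpc_xprt is changed to match, as in the xprt.h hunk earlier. A minimal sketch with a hypothetical table:

        static const struct rpc_xprt_ops demo_ops = {
                .reserve_xprt   = xprt_reserve_xprt,
                .release_xprt   = xprt_release_xprt,
                .alloc_slot     = xprt_alloc_slot,
        };
        /* struct rpc_xprt now declares: const struct rpc_xprt_ops *ops; */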