Commit 43e95988 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Limit data payload size for ALLPHYSICAL

When the client uses physical memory registration, each page in the
payload gets its own array entry in the RPC/RDMA header's chunk list.

Therefore, don't advertise a maximum payload size that would require
more array entries than can fit in the RPC buffer where RPC/RDMA
headers are built.
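
As a rough worked example (the figures below are illustrative assumptions,
not values taken from this patch): with a 1024-byte inline buffer, a
28-byte minimum RPC/RDMA header, and 16-byte chunk list segments,
(1024 - 28) / 16 = 62 entries fit, so the advertised maximum payload
would be 62 pages, or about 248 KiB with 4 KiB pages.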

BugLink: https://bugzilla.linux-nfs.org/show_bug.cgi?id=248
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Tested-by: Steve Wise <swise@opengridcomputing.com>
Tested-by: Shirley Ma <shirley.ma@oracle.com>
Tested-by: Devesh Sharma <devesh.sharma@emulex.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 73806c88
@@ -296,7 +296,6 @@ xprt_setup_rdma(struct xprt_create *args)
 
 	xprt->resvport = 0;		/* privileged port not needed */
 	xprt->tsh_size = 0;		/* RPC-RDMA handles framing */
-	xprt->max_payload = RPCRDMA_MAX_DATA_SEGS * PAGE_SIZE;
 	xprt->ops = &xprt_rdma_procs;
 
 	/*
@@ -382,6 +381,9 @@ xprt_setup_rdma(struct xprt_create *args)
 	new_ep->rep_xprt = xprt;
 
 	xprt_rdma_format_addresses(xprt);
+	xprt->max_payload = rpcrdma_max_payload(new_xprt);
+	dprintk("RPC:       %s: transport data payload maximum: %zu bytes\n",
+		__func__, xprt->max_payload);
 
 	if (!try_module_get(THIS_MODULE))
 		goto out4;
@@ -1825,3 +1825,44 @@ rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
 			rc);
 	return rc;
 }
+
+/* Physical mapping means one Read/Write list entry per-page.
+ * All list entries must fit within an inline buffer
+ *
+ * NB: The server must return a Write list for NFS READ,
+ * which has the same constraint. Factor in the inline
+ * rsize as well.
+ */
+static size_t
+rpcrdma_physical_max_payload(struct rpcrdma_xprt *r_xprt)
+{
+	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
+	unsigned int inline_size, pages;
+
+	inline_size = min_t(unsigned int,
+			    cdata->inline_wsize, cdata->inline_rsize);
+	inline_size -= RPCRDMA_HDRLEN_MIN;
+	pages = inline_size / sizeof(struct rpcrdma_segment);
+	return pages << PAGE_SHIFT;
+}
+
+static size_t
+rpcrdma_mr_max_payload(struct rpcrdma_xprt *r_xprt)
+{
+	return RPCRDMA_MAX_DATA_SEGS << PAGE_SHIFT;
+}
+
+size_t
+rpcrdma_max_payload(struct rpcrdma_xprt *r_xprt)
+{
+	size_t result;
+
+	switch (r_xprt->rx_ia.ri_memreg_strategy) {
+	case RPCRDMA_ALLPHYSICAL:
+		result = rpcrdma_physical_max_payload(r_xprt);
+		break;
+	default:
+		result = rpcrdma_mr_max_payload(r_xprt);
+	}
+	return result;
+}
@@ -348,6 +348,7 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *);
  * RPC/RDMA protocol calls - xprtrdma/rpc_rdma.c
  */
 int rpcrdma_marshal_req(struct rpc_rqst *);
+size_t rpcrdma_max_payload(struct rpcrdma_xprt *);
 
 /* Temporary NFS request map cache. Created in svc_rdma.c */
 extern struct kmem_cache *svc_rdma_map_cachep;
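For illustration, a minimal stand-alone sketch of the arithmetic that the new
rpcrdma_physical_max_payload() performs. The inline buffer size, header size,
and segment size below are assumed example values, not constants from this
patch.

#include <stdio.h>

/* Sketch: how many page-sized Read/Write list entries fit in the inline
 * buffer once the minimum RPC/RDMA header is subtracted, and what data
 * payload that allows. All numeric values are assumptions for the demo.
 */
int main(void)
{
	unsigned int inline_size = 1024;	/* min(inline_wsize, inline_rsize), assumed */
	unsigned int hdrlen_min  = 28;		/* minimum RPC/RDMA header size, assumed */
	unsigned int seg_size    = 16;		/* size of one chunk list segment, assumed */
	unsigned int page_shift  = 12;		/* 4 KiB pages */
	unsigned int pages;

	pages = (inline_size - hdrlen_min) / seg_size;
	printf("chunk list entries: %u, max payload: %u bytes\n",
	       pages, pages << page_shift);
	return 0;
}

With these assumed values the sketch prints 62 entries and a 253952-byte
(about 248 KiB) maximum payload, matching the worked example in the commit
message above.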