Commit ff053dbb authored by Trond Myklebust

SUNRPC: Move the call to xprt_send_pagedata() out of xprt_sock_sendmsg()

The client and server have different requirements for their memory
allocation, so move the allocation of the send buffer out of the socket
send code that is common to both.
Reported-by: NeilBrown <neilb@suse.de>
Fixes: b2648015 ("SUNRPC: Make the rpciod and xprtiod slab allocation modes consistent")
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent b056fa07
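
The hunks below all follow the same pattern: the xdr_alloc_bvec() call moves out of the shared send path around xprt_sock_sendmsg() and into its callers, so the server side can allocate with GFP_KERNEL while the client side uses rpc_task_gfp_mask(). As a rough illustration of that caller-side pattern only (send_one_xdr_buf() is a hypothetical helper, not part of the patch, and the include list is an assumption), mirroring the datagram-style callers such as svc_udp_sendto() and bc_sendto():

/*
 * Sketch only -- not from the patch. The caller allocates the bio_vec
 * array for the xdr_buf's page data with a GFP mask chosen for its own
 * context, then calls the common xprt_sock_sendmsg() helper, which no
 * longer allocates anything itself.
 *
 * Assumed includes; in-tree callers get the xprt_sock_sendmsg()
 * prototype via the SUNRPC-internal headers.
 */
#include <linux/net.h>		/* struct socket */
#include <linux/socket.h>	/* struct msghdr */
#include <linux/sunrpc/xdr.h>	/* xdr_alloc_bvec(), xdr_free_bvec() */

static int send_one_xdr_buf(struct socket *sock, struct msghdr *msg,
			    struct xdr_buf *xdr, gfp_t gfp)
{
	unsigned int sent = 0;
	int err;

	/* Server callers pass GFP_KERNEL; client callers pass
	 * rpc_task_gfp_mask(). */
	err = xdr_alloc_bvec(xdr, gfp);
	if (err < 0)
		return err;

	err = xprt_sock_sendmsg(sock, msg, xdr, 0, 0, &sent);
	xdr_free_bvec(xdr);
	return err < 0 ? err : sent;
}
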
@@ -221,12 +221,6 @@ static int xprt_send_kvec(struct socket *sock, struct msghdr *msg,
 static int xprt_send_pagedata(struct socket *sock, struct msghdr *msg,
			      struct xdr_buf *xdr, size_t base)
 {
-	int err;
-
-	err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
-	if (err < 0)
-		return err;
-
 	iov_iter_bvec(&msg->msg_iter, WRITE, xdr->bvec, xdr_buf_pagecount(xdr),
		      xdr->page_len + xdr->page_base);
 	return xprt_sendmsg(sock, msg, base + xdr->page_base);
@@ -579,15 +579,18 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
 	if (svc_xprt_is_dead(xprt))
		goto out_notconn;

+	err = xdr_alloc_bvec(xdr, GFP_KERNEL);
+	if (err < 0)
+		goto out_unlock;
+
 	err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-	xdr_free_bvec(xdr);
 	if (err == -ECONNREFUSED) {
		/* ICMP error on earlier request. */
		err = xprt_sock_sendmsg(svsk->sk_sock, &msg, xdr, 0, 0, &sent);
-		xdr_free_bvec(xdr);
 	}
+	xdr_free_bvec(xdr);
 	trace_svcsock_udp_send(xprt, err);
-
+out_unlock:
 	mutex_unlock(&xprt->xpt_mutex);
 	if (err < 0)
		return err;
@@ -825,9 +825,14 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
 static int
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
+	gfp_t gfp = rpc_task_gfp_mask();
+	int ret;
+
+	ret = xdr_alloc_bvec(&req->rq_snd_buf, gfp);
+	if (ret < 0)
+		return ret;
 	xdr_free_bvec(&req->rq_rcv_buf);
-	return xdr_alloc_bvec(
-		&req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
+	return xdr_alloc_bvec(&req->rq_rcv_buf, gfp);
 }

 /*
@@ -956,6 +961,9 @@ static int xs_udp_send_request(struct rpc_rqst *req)
 	if (!xprt_request_get_cong(xprt, req))
		return -EBADSLT;

+	status = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+	if (status < 0)
+		return status;
 	req->rq_xtime = ktime_get();

 	status = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, 0, &sent);
@@ -2554,6 +2562,9 @@ static int bc_sendto(struct rpc_rqst *req)
 	int err;

 	req->rq_xtime = ktime_get();
+	err = xdr_alloc_bvec(xdr, rpc_task_gfp_mask());
+	if (err < 0)
+		return err;
 	err = xprt_sock_sendmsg(transport->sock, &msg, xdr, 0, marker, &sent);
 	xdr_free_bvec(xdr);
 	if (err < 0 || sent != (xdr->len + sizeof(marker)))