Commit eb07d5a4 authored by NeilBrown, committed by Trond Myklebust

SUNRPC: handle malloc failure in ->request_prepare

If ->request_prepare() detects an error, it sets ->rq_task->tk_status.
This is easy for callers to ignore.
The only caller is xprt_request_enqueue_receive() and it does ignore the
error, as does call_encode() which calls it.  This can result in a
request being queued to receive a reply without an allocated receive buffer.

So instead of setting rq_task->tk_status, return an error and store it in
->tk_status only in call_encode().

The call to xprt_request_enqueue_receive() is now earlier in
call_encode(), where the error can still be handled.
Signed-off-by: NeilBrown <neilb@suse.de>
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parent b243874f
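
For illustration, a minimal user-space sketch of the calling convention this
patch moves to (trimmed struct layouts; alloc_recv_buffer() is a hypothetical
stand-in for xdr_alloc_bvec(), and only the error flow mirrors the kernel
code). Under the old scheme the transport recorded an allocation failure in
tk_status as a side effect that the enqueue path never checked; under the new
scheme the failure is returned up the chain, so a request is never queued
without a receive buffer, and call_encode() alone stores the result in
tk_status.

/* Minimal sketch, not kernel code: trimmed structs, hypothetical
 * alloc_recv_buffer() standing in for xdr_alloc_bvec(). */
#include <errno.h>
#include <stdio.h>

struct rpc_rqst;
struct rpc_task { int tk_status; struct rpc_rqst *tk_rqstp; };
struct rpc_rqst { struct rpc_task *rq_task; };

int alloc_recv_buffer(struct rpc_rqst *req)
{
	(void)req;
	return -ENOMEM;		/* pretend the allocation failed */
}

/* New scheme: ->prepare_request() returns the error ... */
int prepare_request(struct rpc_rqst *req)
{
	return alloc_recv_buffer(req);
}

/* ... so the enqueue path can refuse to queue a request that
 * has no receive buffer, instead of queuing it regardless. */
int enqueue_receive(struct rpc_task *task)
{
	int ret = prepare_request(task->tk_rqstp);
	if (ret)
		return ret;
	/* ... add the request to the receive queue ... */
	return 0;
}

int main(void)
{
	struct rpc_task task = { 0, NULL };
	struct rpc_rqst req = { &task };
	task.tk_rqstp = &req;

	/* As in the patched call_encode(): the caller, not the
	 * transport, stores the error in tk_status. */
	task.tk_status = enqueue_receive(&task);
	printf("tk_status = %d\n", task.tk_status);	/* -ENOMEM = -12 */
	return 0;
}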
include/linux/sunrpc/xprt.h
@@ -144,7 +144,7 @@ struct rpc_xprt_ops {
 	unsigned short	(*get_srcport)(struct rpc_xprt *xprt);
 	int		(*buf_alloc)(struct rpc_task *task);
 	void		(*buf_free)(struct rpc_task *task);
-	void		(*prepare_request)(struct rpc_rqst *req);
+	int		(*prepare_request)(struct rpc_rqst *req);
 	int		(*send_request)(struct rpc_rqst *req);
 	void		(*wait_for_reply_request)(struct rpc_task *task);
 	void		(*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -357,10 +357,9 @@ int			xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 void			xprt_free_slot(struct rpc_xprt *xprt,
 					struct rpc_rqst *req);
-void			xprt_request_prepare(struct rpc_rqst *req);
 bool			xprt_prepare_transmit(struct rpc_task *task);
 void			xprt_request_enqueue_transmit(struct rpc_task *task);
-void			xprt_request_enqueue_receive(struct rpc_task *task);
+int			xprt_request_enqueue_receive(struct rpc_task *task);
 void			xprt_request_wait_receive(struct rpc_task *task);
 void			xprt_request_dequeue_xprt(struct rpc_task *task);
 bool			xprt_request_need_retransmit(struct rpc_task *task);
net/sunrpc/clnt.c
@@ -1858,6 +1858,9 @@ call_encode(struct rpc_task *task)
 	xprt_request_dequeue_xprt(task);
 	/* Encode here so that rpcsec_gss can use correct sequence number. */
 	rpc_xdr_encode(task);
+	/* Add task to reply queue before transmission to avoid races */
+	if (task->tk_status == 0 && rpc_reply_expected(task))
+		task->tk_status = xprt_request_enqueue_receive(task);
 	/* Did the encode result in an error condition? */
 	if (task->tk_status != 0) {
 		/* Was the error nonfatal? */
@@ -1881,9 +1884,6 @@ call_encode(struct rpc_task *task)
 		return;
 	}
 
-	/* Add task to reply queue before transmission to avoid races */
-	if (rpc_reply_expected(task))
-		xprt_request_enqueue_receive(task);
 	xprt_request_enqueue_transmit(task);
 out:
 	task->tk_action = call_transmit;
net/sunrpc/xprt.c
@@ -69,10 +69,11 @@
 /*
  * Local functions
  */
 static void	xprt_init(struct rpc_xprt *xprt, struct net *net);
 static __be32	xprt_alloc_xid(struct rpc_xprt *xprt);
 static void	xprt_destroy(struct rpc_xprt *xprt);
 static void	xprt_request_init(struct rpc_task *task);
+static int	xprt_request_prepare(struct rpc_rqst *req);
 
 static DEFINE_SPINLOCK(xprt_list_lock);
 static LIST_HEAD(xprt_list);
@@ -1143,16 +1144,19 @@ xprt_request_need_enqueue_receive(struct rpc_task *task, struct rpc_rqst *req)
  * @task: RPC task
  *
  */
-void
+int
 xprt_request_enqueue_receive(struct rpc_task *task)
 {
 	struct rpc_rqst *req = task->tk_rqstp;
 	struct rpc_xprt *xprt = req->rq_xprt;
+	int ret;
 
 	if (!xprt_request_need_enqueue_receive(task, req))
-		return;
+		return 0;
 
-	xprt_request_prepare(task->tk_rqstp);
+	ret = xprt_request_prepare(task->tk_rqstp);
+	if (ret)
+		return ret;
 	spin_lock(&xprt->queue_lock);
 
 	/* Update the softirq receive buffer */
@@ -1166,6 +1170,7 @@ xprt_request_enqueue_receive(struct rpc_task *task)
 
 	/* Turn off autodisconnect */
 	del_singleshot_timer_sync(&xprt->timer);
+	return 0;
 }
 
 /**
@@ -1452,14 +1457,16 @@ xprt_request_dequeue_xprt(struct rpc_task *task)
  *
  * Calls into the transport layer to do whatever is needed to prepare
  * the request for transmission or receive.
+ * Returns error, or zero.
  */
-void
+static int
 xprt_request_prepare(struct rpc_rqst *req)
 {
 	struct rpc_xprt *xprt = req->rq_xprt;
 
 	if (xprt->ops->prepare_request)
-		xprt->ops->prepare_request(req);
+		return xprt->ops->prepare_request(req);
+	return 0;
 }
 
 /**
net/sunrpc/xprtsock.c
@@ -822,11 +822,11 @@ static int xs_stream_nospace(struct rpc_rqst *req, bool vm_wait)
 	return ret;
 }
 
-static void
+static int
 xs_stream_prepare_request(struct rpc_rqst *req)
 {
 	xdr_free_bvec(&req->rq_rcv_buf);
-	req->rq_task->tk_status = xdr_alloc_bvec(
+	return xdr_alloc_bvec(
 		&req->rq_rcv_buf, GFP_KERNEL | __GFP_NORETRY | __GFP_NOWARN);
 }