Commit 948f072a authored by NeilBrown, committed by Chuck Lever

SUNRPC: always free ctxt when freeing deferred request

Since the ->xprt_ctxt pointer was added to svc_deferred_req, it has not
been sufficient to use kfree() to free a deferred request.  We may need
to free the ctxt as well.
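
As a rough illustration of the ownership problem (a minimal user-space sketch, not kernel code: the *_sketch types, free_deferred_sketch() and free() are stand-ins for the real structures, for the new free_deferred() helper and for kfree(); only the ownership logic mirrors the patch), freeing the dreq must first hand its saved ctxt back to the transport:

    #include <stdlib.h>

    struct svc_xprt_sketch;                 /* stand-in for struct svc_xprt */

    struct svc_xprt_ops_sketch {
        /* stand-in for ->xpo_release_ctxt(): frees a ctxt taken from
         * either rqstp->rq_xprt_ctxt or dr->xprt_ctxt */
        void (*xpo_release_ctxt)(struct svc_xprt_sketch *xprt, void *ctxt);
    };

    struct svc_xprt_sketch {
        const struct svc_xprt_ops_sketch *xpt_ops;
    };

    struct svc_deferred_req_sketch {
        void *xprt_ctxt;                    /* may still own a transport context */
    };

    /* mirrors the new helper: release the ctxt first, then free the dreq */
    static void free_deferred_sketch(struct svc_xprt_sketch *xprt,
                                     struct svc_deferred_req_sketch *dr)
    {
        if (!dr)
            return;
        xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
        free(dr);                           /* kfree() in the kernel */
    }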

As freeing the ctxt is all that ->xpo_release_rqst() does, we repurpose
it to explicitly do that even when the ctxt is not stored in an rqst.
So we now have ->xpo_release_ctxt() which is given an xprt and a ctxt,
which may have been taken either from an rqst or from a dreq.  The
caller is now responsible for clearing that pointer after the call to
->xpo_release_ctxt().
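
The contract can be sketched as follows (again a hedged user-space sketch: xprt_sketch, udp_like_release_ctxt() and release_and_clear() are invented illustrations, not kernel symbols). The hook frees whatever the opaque ctxt means for that transport, and the caller clears its own pointer once the call returns:

    #include <stdlib.h>

    struct xprt_sketch;                     /* stand-in for struct svc_xprt */

    /* shape of the new hook: the transport plus an opaque ctxt pointer */
    typedef void (*release_ctxt_fn)(struct xprt_sketch *xprt, void *ctxt);

    /* a UDP-like implementation: the ctxt is just a buffer to discard
     * (consume_skb() on an sk_buff in the real svc_udp_release_ctxt()) */
    static void udp_like_release_ctxt(struct xprt_sketch *xprt, void *ctxt)
    {
        (void)xprt;
        free(ctxt);                         /* safe no-op when ctxt is NULL */
    }

    /* caller-side convention: release, then clear the slot you released from */
    static void release_and_clear(struct xprt_sketch *xprt,
                                  release_ctxt_fn release, void **ctxt_slot)
    {
        release(xprt, *ctxt_slot);
        *ctxt_slot = NULL;                  /* the caller, not the hook, clears it */
    }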

We also clear dr->xprt_ctxt when the ctxt is moved into a new rqst when
revisiting a deferred request.  This ensures there is only one pointer
to the ctxt, so the risk of double freeing in future is reduced.  The
new code in svc_xprt_release(), which releases both the ctxt and any
rq_deferred, depends on this.
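
The single-owner handoff amounts to a two-line move (hedged sketch; dreq_sketch and rqst_sketch model only the two pointer fields involved, and revisit_handoff() is an invented name):

    #include <stddef.h>

    struct dreq_sketch { void *xprt_ctxt; };     /* stand-in for svc_deferred_req */
    struct rqst_sketch { void *rq_xprt_ctxt; };  /* stand-in for svc_rqst */

    /* mirrors the svc_deferred_recv() change: transfer ownership and
     * leave no second pointer behind */
    static void revisit_handoff(struct rqst_sketch *rqstp, struct dreq_sketch *dr)
    {
        rqstp->rq_xprt_ctxt = dr->xprt_ctxt;     /* the rqst now owns the ctxt */
        dr->xprt_ctxt = NULL;                    /* the dreq no longer does */
    }

With the dreq's pointer cleared, svc_xprt_release() can release rq_xprt_ctxt and then free_deferred() the rq_deferred without either path freeing the same ctxt twice.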

Fixes: 773f91b2 ("SUNRPC: Fix NFSD's request deferral on RDMA transports")
Signed-off-by: NeilBrown <neilb@suse.de>
Reviewed-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
parent eb8d3a2c
@@ -176,7 +176,7 @@ extern struct svc_rdma_recv_ctxt *
 extern void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
                                    struct svc_rdma_recv_ctxt *ctxt);
 extern void svc_rdma_flush_recv_queues(struct svcxprt_rdma *rdma);
-extern void svc_rdma_release_rqst(struct svc_rqst *rqstp);
+extern void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *ctxt);
 extern int svc_rdma_recvfrom(struct svc_rqst *);
 
 /* svc_rdma_rw.c */
@@ -23,7 +23,7 @@ struct svc_xprt_ops {
        int     (*xpo_sendto)(struct svc_rqst *);
        int     (*xpo_result_payload)(struct svc_rqst *, unsigned int,
                                      unsigned int);
-       void    (*xpo_release_rqst)(struct svc_rqst *);
+       void    (*xpo_release_ctxt)(struct svc_xprt *xprt, void *ctxt);
        void    (*xpo_detach)(struct svc_xprt *);
        void    (*xpo_free)(struct svc_xprt *);
        void    (*xpo_kill_temp_xprt)(struct svc_xprt *);
@@ -532,13 +532,23 @@ void svc_reserve(struct svc_rqst *rqstp, int space)
 }
 EXPORT_SYMBOL_GPL(svc_reserve);
 
+static void free_deferred(struct svc_xprt *xprt, struct svc_deferred_req *dr)
+{
+       if (!dr)
+               return;
+
+       xprt->xpt_ops->xpo_release_ctxt(xprt, dr->xprt_ctxt);
+       kfree(dr);
+}
+
 static void svc_xprt_release(struct svc_rqst *rqstp)
 {
        struct svc_xprt *xprt = rqstp->rq_xprt;
 
-       xprt->xpt_ops->xpo_release_rqst(rqstp);
+       xprt->xpt_ops->xpo_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
-       kfree(rqstp->rq_deferred);
+       free_deferred(xprt, rqstp->rq_deferred);
        rqstp->rq_deferred = NULL;
 
        svc_rqst_release_pages(rqstp);
@@ -1054,7 +1064,7 @@ static void svc_delete_xprt(struct svc_xprt *xprt)
        spin_unlock_bh(&serv->sv_lock);
 
        while ((dr = svc_deferred_dequeue(xprt)) != NULL)
-               kfree(dr);
+               free_deferred(xprt, dr);
 
        call_xpt_users(xprt);
        svc_xprt_put(xprt);
@@ -1176,8 +1186,8 @@ static void svc_revisit(struct cache_deferred_req *dreq, int too_many)
        if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) {
                spin_unlock(&xprt->xpt_lock);
                trace_svc_defer_drop(dr);
+               free_deferred(xprt, dr);
                svc_xprt_put(xprt);
-               kfree(dr);
                return;
        }
        dr->xprt = NULL;
@@ -1222,14 +1232,13 @@ static struct cache_deferred_req *svc_defer(struct cache_req *req)
                dr->addrlen = rqstp->rq_addrlen;
                dr->daddr = rqstp->rq_daddr;
                dr->argslen = rqstp->rq_arg.len >> 2;
-               dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
 
                /* back up head to the start of the buffer and copy */
                skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len;
                memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip,
                       dr->argslen << 2);
        }
-       WARN_ON_ONCE(rqstp->rq_xprt_ctxt != dr->xprt_ctxt);
+       dr->xprt_ctxt = rqstp->rq_xprt_ctxt;
        rqstp->rq_xprt_ctxt = NULL;
        trace_svc_defer(rqstp);
        svc_xprt_get(rqstp->rq_xprt);
@@ -1263,6 +1272,8 @@ static noinline int svc_deferred_recv(struct svc_rqst *rqstp)
        rqstp->rq_daddr = dr->daddr;
        rqstp->rq_respages = rqstp->rq_pages;
        rqstp->rq_xprt_ctxt = dr->xprt_ctxt;
+       dr->xprt_ctxt = NULL;
+
        svc_xprt_received(rqstp->rq_xprt);
        return dr->argslen << 2;
 }
@@ -121,27 +121,27 @@ static void svc_reclassify_socket(struct socket *sock)
 #endif
 
 /**
- * svc_tcp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_tcp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_tcp_release_rqst(struct svc_rqst *rqstp)
+static void svc_tcp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
 }
 
 /**
- * svc_udp_release_rqst - Release transport-related resources
- * @rqstp: request structure with resources to be released
+ * svc_udp_release_ctxt - Release transport-related resources
+ * @xprt: the transport which owned the context
+ * @ctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  */
-static void svc_udp_release_rqst(struct svc_rqst *rqstp)
+static void svc_udp_release_ctxt(struct svc_xprt *xprt, void *ctxt)
 {
-       struct sk_buff *skb = rqstp->rq_xprt_ctxt;
+       struct sk_buff *skb = ctxt;
 
-       if (skb) {
-               rqstp->rq_xprt_ctxt = NULL;
+       if (skb)
                consume_skb(skb);
-       }
 }
 
 union svc_pktinfo_u {
@@ -696,7 +696,8 @@ static int svc_udp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_udp_release_rqst(rqstp);
+       svc_udp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        svc_set_cmsg_data(rqstp, cmh);
@@ -768,7 +769,7 @@ static const struct svc_xprt_ops svc_udp_ops = {
        .xpo_recvfrom = svc_udp_recvfrom,
        .xpo_sendto = svc_udp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_udp_release_rqst,
+       .xpo_release_ctxt = svc_udp_release_ctxt,
        .xpo_detach = svc_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_udp_has_wspace,
@@ -1301,7 +1302,8 @@ static int svc_tcp_sendto(struct svc_rqst *rqstp)
        unsigned int sent;
        int err;
 
-       svc_tcp_release_rqst(rqstp);
+       svc_tcp_release_ctxt(xprt, rqstp->rq_xprt_ctxt);
+       rqstp->rq_xprt_ctxt = NULL;
 
        atomic_inc(&svsk->sk_sendqlen);
        mutex_lock(&xprt->xpt_mutex);
@@ -1346,7 +1348,7 @@ static const struct svc_xprt_ops svc_tcp_ops = {
        .xpo_recvfrom = svc_tcp_recvfrom,
        .xpo_sendto = svc_tcp_sendto,
        .xpo_result_payload = svc_sock_result_payload,
-       .xpo_release_rqst = svc_tcp_release_rqst,
+       .xpo_release_ctxt = svc_tcp_release_ctxt,
        .xpo_detach = svc_tcp_sock_detach,
        .xpo_free = svc_sock_free,
        .xpo_has_wspace = svc_tcp_has_wspace,
@@ -239,21 +239,20 @@ void svc_rdma_recv_ctxt_put(struct svcxprt_rdma *rdma,
 }
 
 /**
- * svc_rdma_release_rqst - Release transport-specific per-rqst resources
- * @rqstp: svc_rqst being released
+ * svc_rdma_release_ctxt - Release transport-specific per-rqst resources
+ * @xprt: the transport which owned the context
+ * @vctxt: the context from rqstp->rq_xprt_ctxt or dr->xprt_ctxt
  *
  * Ensure that the recv_ctxt is released whether or not a Reply
 * was sent. For example, the client could close the connection,
 * or svc_process could drop an RPC, before the Reply is sent.
 */
-void svc_rdma_release_rqst(struct svc_rqst *rqstp)
+void svc_rdma_release_ctxt(struct svc_xprt *xprt, void *vctxt)
 {
-       struct svc_rdma_recv_ctxt *ctxt = rqstp->rq_xprt_ctxt;
-       struct svc_xprt *xprt = rqstp->rq_xprt;
+       struct svc_rdma_recv_ctxt *ctxt = vctxt;
        struct svcxprt_rdma *rdma =
                container_of(xprt, struct svcxprt_rdma, sc_xprt);
 
-       rqstp->rq_xprt_ctxt = NULL;
        if (ctxt)
                svc_rdma_recv_ctxt_put(rdma, ctxt);
 }
@@ -80,7 +80,7 @@ static const struct svc_xprt_ops svc_rdma_ops = {
        .xpo_recvfrom = svc_rdma_recvfrom,
        .xpo_sendto = svc_rdma_sendto,
        .xpo_result_payload = svc_rdma_result_payload,
-       .xpo_release_rqst = svc_rdma_release_rqst,
+       .xpo_release_ctxt = svc_rdma_release_ctxt,
        .xpo_detach = svc_rdma_detach,
        .xpo_free = svc_rdma_free,
        .xpo_has_wspace = svc_rdma_has_wspace,