Commit 3435c74a authored by Chuck Lever, committed by Anna Schumaker

SUNRPC: Generalize the RPC buffer release API

xprtrdma needs to allocate the Call and Reply buffers separately.
In truth, the reliance on a single buffer for the pair of XDR
buffers is a transport implementation detail.

Instead of passing just the rq_buffer into the buf_free method, pass
the task structure and let buf_free take care of freeing both
XDR buffers at once.
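
For illustration, a transport's buf_free implementation can now derive
the buffer from the task itself. A minimal sketch under the new
signature (the name example_buf_free is hypothetical; the body mirrors
the bc_free hunk below):

	static void example_buf_free(struct rpc_task *task)
	{
		void *buffer = task->tk_rqstp->rq_buffer;
		struct rpc_buffer *buf;

		/* Recover the enclosing allocation and release it;
		 * the caller has already verified that rq_buffer
		 * is non-NULL (see xprt_release below). */
		buf = container_of(buffer, struct rpc_buffer, data);
		free_page((unsigned long)buf);
	}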

There's a micro-optimization here. In the common case, both
xprt_release and the transport's buf_free method were checking if
rq_buffer was NULL. Now the check is done only once per RPC.
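
Under the new signature that single check sits at the call site in
xprt_release(), as in the xprt.c hunk below, so each buf_free
implementation may assume rq_buffer is non-NULL:

	if (req->rq_buffer)
		xprt->ops->buf_free(task);
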
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 5fe6eaa1
@@ -240,7 +240,7 @@ struct rpc_task *rpc_wake_up_first(struct rpc_wait_queue *,
 void rpc_wake_up_status(struct rpc_wait_queue *, int);
 void rpc_delay(struct rpc_task *, unsigned long);
 int rpc_malloc(struct rpc_task *);
-void rpc_free(void *);
+void rpc_free(struct rpc_task *);
 int rpciod_up(void);
 void rpciod_down(void);
 int __rpc_wait_for_completion_task(struct rpc_task *task, wait_bit_action_f *);
@@ -128,7 +128,7 @@ struct rpc_xprt_ops {
	void (*set_port)(struct rpc_xprt *xprt, unsigned short port);
	void (*connect)(struct rpc_xprt *xprt, struct rpc_task *task);
	int (*buf_alloc)(struct rpc_task *task);
-	void (*buf_free)(void *buffer);
+	void (*buf_free)(struct rpc_task *task);
	int (*send_request)(struct rpc_task *task);
	void (*set_retrans_timeout)(struct rpc_task *task);
	void (*timer)(struct rpc_xprt *xprt, struct rpc_task *task);
@@ -896,18 +896,16 @@ int rpc_malloc(struct rpc_task *task)
 EXPORT_SYMBOL_GPL(rpc_malloc);

 /**
- * rpc_free - free buffer allocated via rpc_malloc
- * @buffer: buffer to free
+ * rpc_free - free RPC buffer resources allocated via rpc_malloc
+ * @task: RPC task
  *
  */
-void rpc_free(void *buffer)
+void rpc_free(struct rpc_task *task)
 {
+	void *buffer = task->tk_rqstp->rq_buffer;
	size_t size;
	struct rpc_buffer *buf;

-	if (!buffer)
-		return;
-
	buf = container_of(buffer, struct rpc_buffer, data);
	size = buf->len;
@@ -1295,7 +1295,7 @@ void xprt_release(struct rpc_task *task)
	xprt_schedule_autodisconnect(xprt);
	spin_unlock_bh(&xprt->transport_lock);
	if (req->rq_buffer)
-		xprt->ops->buf_free(req->rq_buffer);
+		xprt->ops->buf_free(task);
	xprt_inject_disconnect(xprt);
	if (req->rq_cred != NULL)
		put_rpccred(req->rq_cred);
@@ -186,7 +186,7 @@ xprt_rdma_bc_allocate(struct rpc_task *task)
 }

 static void
-xprt_rdma_bc_free(void *buffer)
+xprt_rdma_bc_free(struct rpc_task *task)
 {
	/* No-op: ctxt and page have already been freed. */
 }
@@ -523,7 +523,6 @@ xprt_rdma_allocate(struct rpc_task *task)
 out:
	dprintk("RPC: %s: size %zd, request 0x%p\n", __func__, size, req);
	req->rl_connect_cookie = 0; /* our reserved value */
-	req->rl_task = task;
	rqst->rq_buffer = req->rl_sendbuf->rg_base;
	return 0;
@@ -571,31 +570,26 @@ xprt_rdma_allocate(struct rpc_task *task)
	return -ENOMEM;
 }

-/*
- * This function returns all RDMA resources to the pool.
+/**
+ * xprt_rdma_free - release resources allocated by xprt_rdma_allocate
+ * @task: RPC task
+ *
+ * Caller guarantees rqst->rq_buffer is non-NULL.
  */
 static void
-xprt_rdma_free(void *buffer)
+xprt_rdma_free(struct rpc_task *task)
 {
-	struct rpcrdma_req *req;
-	struct rpcrdma_xprt *r_xprt;
-	struct rpcrdma_regbuf *rb;
-
-	if (buffer == NULL)
-		return;
-
-	rb = container_of(buffer, struct rpcrdma_regbuf, rg_base[0]);
-	req = rb->rg_owner;
+	struct rpc_rqst *rqst = task->tk_rqstp;
+	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
+	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);

	if (req->rl_backchannel)
		return;

-	r_xprt = container_of(req->rl_buffer, struct rpcrdma_xprt, rx_buf);
-
	dprintk("RPC: %s: called on 0x%p\n", __func__, req->rl_reply);

	r_xprt->rx_ia.ri_ops->ro_unmap_safe(r_xprt, req,
-					    !RPC_IS_ASYNC(req->rl_task));
+					    !RPC_IS_ASYNC(task));

	rpcrdma_buffer_put(req);
 }
@@ -283,7 +283,6 @@ struct rpcrdma_req {
	struct list_head rl_free;
	unsigned int rl_niovs;
	unsigned int rl_connect_cookie;
-	struct rpc_task *rl_task;
	struct rpcrdma_buffer *rl_buffer;
	struct rpcrdma_rep *rl_reply; /* holder for reply buffer */
	struct ib_sge rl_send_iov[RPCRDMA_MAX_IOVS];
@@ -2560,13 +2560,11 @@ static int bc_malloc(struct rpc_task *task)
 /*
  * Free the space allocated in the bc_alloc routine
  */
-static void bc_free(void *buffer)
+static void bc_free(struct rpc_task *task)
 {
+	void *buffer = task->tk_rqstp->rq_buffer;
	struct rpc_buffer *buf;

-	if (!buffer)
-		return;
-
	buf = container_of(buffer, struct rpc_buffer, data);
	free_page((unsigned long)buf);
 }