Commit 9a5c63e9 authored by Chuck Lever, committed by Anna Schumaker

xprtrdma: Refactor management of mw_list field

Clean up some duplicate code.
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
parent 0a90487b
...@@ -310,10 +310,7 @@ fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, ...@@ -310,10 +310,7 @@ fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpcrdma_mw *mw; struct rpcrdma_mw *mw;
while (!list_empty(&req->rl_registered)) { while (!list_empty(&req->rl_registered)) {
mw = list_first_entry(&req->rl_registered, mw = rpcrdma_pop_mw(&req->rl_registered);
struct rpcrdma_mw, mw_list);
list_del_init(&mw->mw_list);
if (sync) if (sync)
fmr_op_recover_mr(mw); fmr_op_recover_mr(mw);
else else
......
...@@ -466,8 +466,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -466,8 +466,8 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
struct ib_send_wr *first, **prev, *last, *bad_wr; struct ib_send_wr *first, **prev, *last, *bad_wr;
struct rpcrdma_rep *rep = req->rl_reply; struct rpcrdma_rep *rep = req->rl_reply;
struct rpcrdma_ia *ia = &r_xprt->rx_ia; struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct rpcrdma_mw *mw, *tmp;
struct rpcrdma_frmr *f; struct rpcrdma_frmr *f;
struct rpcrdma_mw *mw;
int count, rc; int count, rc;
dprintk("RPC: %s: req %p\n", __func__, req); dprintk("RPC: %s: req %p\n", __func__, req);
...@@ -534,10 +534,10 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req) ...@@ -534,10 +534,10 @@ frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
* them to the free MW list. * them to the free MW list.
*/ */
unmap: unmap:
list_for_each_entry_safe(mw, tmp, &req->rl_registered, mw_list) { while (!list_empty(&req->rl_registered)) {
mw = rpcrdma_pop_mw(&req->rl_registered);
dprintk("RPC: %s: DMA unmapping frmr %p\n", dprintk("RPC: %s: DMA unmapping frmr %p\n",
__func__, &mw->frmr); __func__, &mw->frmr);
list_del_init(&mw->mw_list);
ib_dma_unmap_sg(ia->ri_device, ib_dma_unmap_sg(ia->ri_device,
mw->mw_sg, mw->mw_nents, mw->mw_dir); mw->mw_sg, mw->mw_nents, mw->mw_dir);
rpcrdma_put_mw(r_xprt, mw); rpcrdma_put_mw(r_xprt, mw);
...@@ -571,10 +571,7 @@ frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, ...@@ -571,10 +571,7 @@ frwr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
struct rpcrdma_mw *mw; struct rpcrdma_mw *mw;
while (!list_empty(&req->rl_registered)) { while (!list_empty(&req->rl_registered)) {
mw = list_first_entry(&req->rl_registered, mw = rpcrdma_pop_mw(&req->rl_registered);
struct rpcrdma_mw, mw_list);
list_del_init(&mw->mw_list);
if (sync) if (sync)
frwr_op_recover_mr(mw); frwr_op_recover_mr(mw);
else else
......
...@@ -322,7 +322,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt, ...@@ -322,7 +322,7 @@ rpcrdma_encode_read_list(struct rpcrdma_xprt *r_xprt,
false, &mw); false, &mw);
if (n < 0) if (n < 0)
return ERR_PTR(n); return ERR_PTR(n);
list_add(&mw->mw_list, &req->rl_registered); rpcrdma_push_mw(mw, &req->rl_registered);
*iptr++ = xdr_one; /* item present */ *iptr++ = xdr_one; /* item present */
...@@ -390,7 +390,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req, ...@@ -390,7 +390,7 @@ rpcrdma_encode_write_list(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
true, &mw); true, &mw);
if (n < 0) if (n < 0)
return ERR_PTR(n); return ERR_PTR(n);
list_add(&mw->mw_list, &req->rl_registered); rpcrdma_push_mw(mw, &req->rl_registered);
iptr = xdr_encode_rdma_segment(iptr, mw); iptr = xdr_encode_rdma_segment(iptr, mw);
...@@ -455,7 +455,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt, ...@@ -455,7 +455,7 @@ rpcrdma_encode_reply_chunk(struct rpcrdma_xprt *r_xprt,
true, &mw); true, &mw);
if (n < 0) if (n < 0)
return ERR_PTR(n); return ERR_PTR(n);
list_add(&mw->mw_list, &req->rl_registered); rpcrdma_push_mw(mw, &req->rl_registered);
iptr = xdr_encode_rdma_segment(iptr, mw); iptr = xdr_encode_rdma_segment(iptr, mw);
......
...@@ -776,9 +776,7 @@ rpcrdma_mr_recovery_worker(struct work_struct *work) ...@@ -776,9 +776,7 @@ rpcrdma_mr_recovery_worker(struct work_struct *work)
spin_lock(&buf->rb_recovery_lock); spin_lock(&buf->rb_recovery_lock);
while (!list_empty(&buf->rb_stale_mrs)) { while (!list_empty(&buf->rb_stale_mrs)) {
mw = list_first_entry(&buf->rb_stale_mrs, mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
struct rpcrdma_mw, mw_list);
list_del_init(&mw->mw_list);
spin_unlock(&buf->rb_recovery_lock); spin_unlock(&buf->rb_recovery_lock);
dprintk("RPC: %s: recovering MR %p\n", __func__, mw); dprintk("RPC: %s: recovering MR %p\n", __func__, mw);
...@@ -796,7 +794,7 @@ rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw) ...@@ -796,7 +794,7 @@ rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
spin_lock(&buf->rb_recovery_lock); spin_lock(&buf->rb_recovery_lock);
list_add(&mw->mw_list, &buf->rb_stale_mrs); rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
spin_unlock(&buf->rb_recovery_lock); spin_unlock(&buf->rb_recovery_lock);
schedule_delayed_work(&buf->rb_recovery_worker, 0); schedule_delayed_work(&buf->rb_recovery_worker, 0);
...@@ -1072,11 +1070,8 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt) ...@@ -1072,11 +1070,8 @@ rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
struct rpcrdma_mw *mw = NULL; struct rpcrdma_mw *mw = NULL;
spin_lock(&buf->rb_mwlock); spin_lock(&buf->rb_mwlock);
if (!list_empty(&buf->rb_mws)) { if (!list_empty(&buf->rb_mws))
mw = list_first_entry(&buf->rb_mws, mw = rpcrdma_pop_mw(&buf->rb_mws);
struct rpcrdma_mw, mw_list);
list_del_init(&mw->mw_list);
}
spin_unlock(&buf->rb_mwlock); spin_unlock(&buf->rb_mwlock);
if (!mw) if (!mw)
...@@ -1099,7 +1094,7 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw) ...@@ -1099,7 +1094,7 @@ rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
struct rpcrdma_buffer *buf = &r_xprt->rx_buf; struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
spin_lock(&buf->rb_mwlock); spin_lock(&buf->rb_mwlock);
list_add_tail(&mw->mw_list, &buf->rb_mws); rpcrdma_push_mw(mw, &buf->rb_mws);
spin_unlock(&buf->rb_mwlock); spin_unlock(&buf->rb_mwlock);
} }
......
...@@ -354,6 +354,22 @@ rpcr_to_rdmar(struct rpc_rqst *rqst) ...@@ -354,6 +354,22 @@ rpcr_to_rdmar(struct rpc_rqst *rqst)
return rqst->rq_xprtdata; return rqst->rq_xprtdata;
} }
/*
 * rpcrdma_push_mw - append an MW to a list
 * @mw: the rpcrdma_mw to enqueue
 * @list: list to append @mw to
 *
 * Links @mw onto the tail of @list via its mw_list field.
 * NOTE(review): provides no locking of its own — callers appear to
 * serialize access (e.g. under rb_mwlock/rb_recovery_lock); confirm
 * at each call site.
 */
static inline void
rpcrdma_push_mw(struct rpcrdma_mw *mw, struct list_head *list)
{
	list_add_tail(&mw->mw_list, list);
}
/*
 * rpcrdma_pop_mw - detach and return the first MW on a list
 * @list: list to pop from
 *
 * Removes the first rpcrdma_mw from @list and returns it.
 * @list must not be empty: list_first_entry() on an empty list is
 * invalid, and every visible caller guards with list_empty() first.
 * Locking, if required, is the caller's responsibility.
 */
static inline struct rpcrdma_mw *
rpcrdma_pop_mw(struct list_head *list)
{
	struct rpcrdma_mw *mw = list_first_entry(list,
						 struct rpcrdma_mw, mw_list);

	list_del(&mw->mw_list);
	return mw;
}
/* /*
* struct rpcrdma_buffer -- holds list/queue of pre-registered memory for * struct rpcrdma_buffer -- holds list/queue of pre-registered memory for
* inline requests/replies, and client/server credits. * inline requests/replies, and client/server credits.
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment