Commit e6ab9143 authored by Tom Tucker

svcrdma: Move the DMA unmap logic to the CQ handler

Separate DMA unmap from context destruction and perform DMA unmapping
in the SQ/RQ CQ reap functions. This is necessary to support software
based RDMA implementations that actually copy the data in their
ib_dma_unmap callback functions and architectures that don't have
cache-coherent I/O buses.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent f820c57e
...@@ -150,6 +150,18 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt) ...@@ -150,6 +150,18 @@ struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *xprt)
return ctxt; return ctxt;
} }
/*
 * DMA-unmap every mapped SGE attached to this op context.
 *
 * Walks ctxt->sge[] and unmaps each entry against the transport's
 * device; the walk stops at ctxt->count or at the first SGE whose
 * length is zero, whichever comes first.  Called from the CQ reap
 * paths so that software RDMA implementations (and non-coherent
 * I/O architectures) see the unmap before the context is reused.
 */
static void svc_rdma_unmap_dma(struct svc_rdma_op_ctxt *ctxt)
{
	struct svcxprt_rdma *rdma_xprt = ctxt->xprt;
	int sge_no = 0;

	while (sge_no < ctxt->count) {
		if (!ctxt->sge[sge_no].length)
			break;
		ib_dma_unmap_single(rdma_xprt->sc_cm_id->device,
				    ctxt->sge[sge_no].addr,
				    ctxt->sge[sge_no].length,
				    ctxt->direction);
		sge_no++;
	}
}
void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
{ {
struct svcxprt_rdma *xprt; struct svcxprt_rdma *xprt;
...@@ -161,12 +173,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages) ...@@ -161,12 +173,6 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
for (i = 0; i < ctxt->count; i++) for (i = 0; i < ctxt->count; i++)
put_page(ctxt->pages[i]); put_page(ctxt->pages[i]);
for (i = 0; i < ctxt->count; i++)
ib_dma_unmap_single(xprt->sc_cm_id->device,
ctxt->sge[i].addr,
ctxt->sge[i].length,
ctxt->direction);
spin_lock_bh(&xprt->sc_ctxt_lock); spin_lock_bh(&xprt->sc_ctxt_lock);
list_add(&ctxt->free_list, &xprt->sc_ctxt_free); list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
spin_unlock_bh(&xprt->sc_ctxt_lock); spin_unlock_bh(&xprt->sc_ctxt_lock);
...@@ -328,6 +334,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt) ...@@ -328,6 +334,7 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
ctxt->wc_status = wc.status; ctxt->wc_status = wc.status;
ctxt->byte_len = wc.byte_len; ctxt->byte_len = wc.byte_len;
svc_rdma_unmap_dma(ctxt);
if (wc.status != IB_WC_SUCCESS) { if (wc.status != IB_WC_SUCCESS) {
/* Close the transport */ /* Close the transport */
dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt); dprintk("svcrdma: transport closing putting ctxt %p\n", ctxt);
...@@ -377,6 +384,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt) ...@@ -377,6 +384,7 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id; ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
xprt = ctxt->xprt; xprt = ctxt->xprt;
svc_rdma_unmap_dma(ctxt);
if (wc.status != IB_WC_SUCCESS) if (wc.status != IB_WC_SUCCESS)
/* Close the transport */ /* Close the transport */
set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags); set_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment