Commit f820c57e authored by Tom Tucker

svcrdma: Use reply and chunk map for RDMA_READ processing

Modify the RDMA_READ processing to use the reply and chunk list mapping data
types. Also add a special purpose 'hdr_count' field in the context to hold
the header page count instead of overloading the SGE length field and
corrupting the DMA map length.
Signed-off-by: Tom Tucker <tom@opengridcomputing.com>
parent 34d16e42
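
For context, the diff below replaces the ad-hoc ib_sge/chunk_sge scratch arrays with the svc_rdma_req_map type introduced by the parent commit (34d16e42). The following is only a minimal user-space sketch of that type as I read it, with a simplified kvec stand-in; the field layout is an assumption for illustration, not a copy of the kernel header:

/*
 * Sketch only: approximate model of the reply/chunk map type that
 * rdma_rcl_to_sge() receives as rpl_map and chl_map in this patch.
 * RPCSVC_MAXPAGES and struct kvec are simplified stand-ins.
 */
#include <stddef.h>

#define RPCSVC_MAXPAGES 64		/* arbitrary value for this sketch */

struct kvec {				/* stand-in for the kernel kvec */
	void *iov_base;
	size_t iov_len;
};

struct svc_rdma_chunk_sge {
	int start;			/* first sge index for this chunk */
	int count;			/* number of sges in this chunk */
};

struct svc_rdma_req_map {
	int count;
	union {				/* one map is used as sge[], the other as ch[] */
		struct kvec sge[RPCSVC_MAXPAGES];
		struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
	};
};

The reply map (rpl_map) records, per SGE, where in the receive pages the data lands; the chunk map (chl_map) records, per read chunk, which slice of the reply map belongs to it.
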
@@ -72,6 +72,7 @@ extern atomic_t rdma_stat_sq_prod;
  */
 struct svc_rdma_op_ctxt {
 	struct svc_rdma_op_ctxt *read_hdr;
+	int hdr_count;
 	struct list_head free_list;
 	struct xdr_buf arg;
 	struct list_head dto_q;
@@ -112,11 +112,6 @@ static void rdma_build_arg_xdr(struct svc_rqst *rqstp,
 	rqstp->rq_arg.tail[0].iov_len = 0;
 }
 
-struct chunk_sge {
-	int start;		/* sge no for this chunk */
-	int count;		/* sge count for this chunk */
-};
-
 /* Encode a read-chunk-list as an array of IB SGE
  *
  * Assumptions:
@@ -134,8 +129,8 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 			   struct svc_rqst *rqstp,
 			   struct svc_rdma_op_ctxt *head,
 			   struct rpcrdma_msg *rmsgp,
-			   struct ib_sge *sge,
-			   struct chunk_sge *ch_sge_ary,
+			   struct svc_rdma_req_map *rpl_map,
+			   struct svc_rdma_req_map *chl_map,
 			   int ch_count,
 			   int byte_count)
 {
@@ -156,22 +151,18 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 	head->arg.head[0] = rqstp->rq_arg.head[0];
 	head->arg.tail[0] = rqstp->rq_arg.tail[0];
 	head->arg.pages = &head->pages[head->count];
-	head->sge[0].length = head->count; /* save count of hdr pages */
+	head->hdr_count = head->count; /* save count of hdr pages */
 	head->arg.page_base = 0;
 	head->arg.page_len = ch_bytes;
 	head->arg.len = rqstp->rq_arg.len + ch_bytes;
 	head->arg.buflen = rqstp->rq_arg.buflen + ch_bytes;
 	head->count++;
-	ch_sge_ary[0].start = 0;
+	chl_map->ch[0].start = 0;
 	while (byte_count) {
+		rpl_map->sge[sge_no].iov_base =
+			page_address(rqstp->rq_arg.pages[page_no]) + page_off;
 		sge_bytes = min_t(int, PAGE_SIZE-page_off, ch_bytes);
-		sge[sge_no].addr =
-			ib_dma_map_page(xprt->sc_cm_id->device,
-					rqstp->rq_arg.pages[page_no],
-					page_off, sge_bytes,
-					DMA_FROM_DEVICE);
-		sge[sge_no].length = sge_bytes;
-		sge[sge_no].lkey = xprt->sc_phys_mr->lkey;
+		rpl_map->sge[sge_no].iov_len = sge_bytes;
 		/*
 		 * Don't bump head->count here because the same page
 		 * may be used by multiple SGE.
@@ -187,11 +178,11 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 		 * SGE, move to the next SGE
 		 */
 		if (ch_bytes == 0) {
-			ch_sge_ary[ch_no].count =
-				sge_no - ch_sge_ary[ch_no].start;
+			chl_map->ch[ch_no].count =
+				sge_no - chl_map->ch[ch_no].start;
 			ch_no++;
 			ch++;
-			ch_sge_ary[ch_no].start = sge_no;
+			chl_map->ch[ch_no].start = sge_no;
 			ch_bytes = ch->rc_target.rs_length;
 			/* If bytes remaining account for next chunk */
 			if (byte_count) {
@@ -220,18 +211,24 @@ static int rdma_rcl_to_sge(struct svcxprt_rdma *xprt,
 	return sge_no;
 }
 
-static void rdma_set_ctxt_sge(struct svc_rdma_op_ctxt *ctxt,
-			      struct ib_sge *sge,
+static void rdma_set_ctxt_sge(struct svcxprt_rdma *xprt,
+			      struct svc_rdma_op_ctxt *ctxt,
+			      struct kvec *vec,
 			      u64 *sgl_offset,
 			      int count)
 {
 	int i;
 
 	ctxt->count = count;
+	ctxt->direction = DMA_FROM_DEVICE;
 	for (i = 0; i < count; i++) {
-		ctxt->sge[i].addr = sge[i].addr;
-		ctxt->sge[i].length = sge[i].length;
-		*sgl_offset = *sgl_offset + sge[i].length;
+		ctxt->sge[i].addr =
+			ib_dma_map_single(xprt->sc_cm_id->device,
					  vec[i].iov_base, vec[i].iov_len,
					  DMA_FROM_DEVICE);
+		ctxt->sge[i].length = vec[i].iov_len;
+		ctxt->sge[i].lkey = xprt->sc_phys_mr->lkey;
+		*sgl_offset = *sgl_offset + vec[i].iov_len;
 	}
 }
@@ -282,34 +279,29 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	struct ib_send_wr read_wr;
 	int err = 0;
 	int ch_no;
-	struct ib_sge *sge;
 	int ch_count;
 	int byte_count;
 	int sge_count;
 	u64 sgl_offset;
 	struct rpcrdma_read_chunk *ch;
 	struct svc_rdma_op_ctxt *ctxt = NULL;
-	struct svc_rdma_op_ctxt *tmp_sge_ctxt;
-	struct svc_rdma_op_ctxt *tmp_ch_ctxt;
-	struct chunk_sge *ch_sge_ary;
+	struct svc_rdma_req_map *rpl_map;
+	struct svc_rdma_req_map *chl_map;
 
 	/* If no read list is present, return 0 */
 	ch = svc_rdma_get_read_chunk(rmsgp);
 	if (!ch)
 		return 0;
 
-	/* Allocate temporary contexts to keep SGE */
-	BUG_ON(sizeof(struct ib_sge) < sizeof(struct chunk_sge));
-	tmp_sge_ctxt = svc_rdma_get_context(xprt);
-	sge = tmp_sge_ctxt->sge;
-	tmp_ch_ctxt = svc_rdma_get_context(xprt);
-	ch_sge_ary = (struct chunk_sge *)tmp_ch_ctxt->sge;
+	/* Allocate temporary reply and chunk maps */
+	rpl_map = svc_rdma_get_req_map();
+	chl_map = svc_rdma_get_req_map();
 
 	svc_rdma_rcl_chunk_counts(ch, &ch_count, &byte_count);
 	if (ch_count > RPCSVC_MAXPAGES)
 		return -EINVAL;
 	sge_count = rdma_rcl_to_sge(xprt, rqstp, hdr_ctxt, rmsgp,
-				    sge, ch_sge_ary,
+				    rpl_map, chl_map,
 				    ch_count, byte_count);
 	sgl_offset = 0;
 	ch_no = 0;
@@ -331,14 +323,15 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 		read_wr.wr.rdma.remote_addr =
 			get_unaligned(&(ch->rc_target.rs_offset)) +
 			sgl_offset;
-		read_wr.sg_list = &sge[ch_sge_ary[ch_no].start];
+		read_wr.sg_list = ctxt->sge;
 		read_wr.num_sge =
-			rdma_read_max_sge(xprt, ch_sge_ary[ch_no].count);
-		rdma_set_ctxt_sge(ctxt, &sge[ch_sge_ary[ch_no].start],
+			rdma_read_max_sge(xprt, chl_map->ch[ch_no].count);
+		rdma_set_ctxt_sge(xprt, ctxt,
+				  &rpl_map->sge[chl_map->ch[ch_no].start],
 				  &sgl_offset,
 				  read_wr.num_sge);
 		if (((ch+1)->rc_discrim == 0) &&
-		    (read_wr.num_sge == ch_sge_ary[ch_no].count)) {
+		    (read_wr.num_sge == chl_map->ch[ch_no].count)) {
 			/*
 			 * Mark the last RDMA_READ with a bit to
 			 * indicate all RPC data has been fetched from
@@ -358,9 +351,9 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 		}
 		atomic_inc(&rdma_stat_read);
 
-		if (read_wr.num_sge < ch_sge_ary[ch_no].count) {
-			ch_sge_ary[ch_no].count -= read_wr.num_sge;
-			ch_sge_ary[ch_no].start += read_wr.num_sge;
+		if (read_wr.num_sge < chl_map->ch[ch_no].count) {
+			chl_map->ch[ch_no].count -= read_wr.num_sge;
+			chl_map->ch[ch_no].start += read_wr.num_sge;
 			goto next_sge;
 		}
 		sgl_offset = 0;
@@ -368,8 +361,8 @@ static int rdma_read_xdr(struct svcxprt_rdma *xprt,
 	}
  out:
-	svc_rdma_put_context(tmp_sge_ctxt, 0);
-	svc_rdma_put_context(tmp_ch_ctxt, 0);
+	svc_rdma_put_req_map(rpl_map);
+	svc_rdma_put_req_map(chl_map);
 
 	/* Detach arg pages. svc_recv will replenish them */
 	for (ch_no = 0; &rqstp->rq_pages[ch_no] < rqstp->rq_respages; ch_no++)
@@ -399,7 +392,7 @@ static int rdma_read_complete(struct svc_rqst *rqstp,
 		rqstp->rq_pages[page_no] = head->pages[page_no];
 	}
 	/* Point rq_arg.pages past header */
-	rqstp->rq_arg.pages = &rqstp->rq_pages[head->sge[0].length];
+	rqstp->rq_arg.pages = &rqstp->rq_pages[head->hdr_count];
 	rqstp->rq_arg.page_len = head->arg.page_len;
 	rqstp->rq_arg.page_base = head->arg.page_base;
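
To make the start/count bookkeeping in rdma_read_xdr() concrete, here is a hedged user-space sketch of how a chunk map entry selects a slice of the reply map for each RDMA_READ, including the partial case where the per-READ SGE count is capped (as rdma_read_max_sge() does in the patch). All names, sizes, and the max_sge limit are illustrative stand-ins, not kernel code:

/*
 * Illustration only: models how chl_map->ch[ch_no] (start, count) slices
 * rpl_map->sge[] into per-READ SGE ranges. Compiles and runs stand-alone.
 */
#include <stdio.h>
#include <stddef.h>

struct kvec { void *iov_base; size_t iov_len; };
struct chunk_sge { int start; int count; };	/* sge range for one chunk */

int main(void)
{
	struct kvec sge[] = {
		{ 0, 4096 }, { 0, 4096 }, { 0, 1024 },	/* chunk 0: 3 SGEs */
		{ 0, 4096 }, { 0, 2048 },		/* chunk 1: 2 SGEs */
	};
	struct chunk_sge ch[] = { { 0, 3 }, { 3, 2 } };
	int max_sge = 2;	/* assumed device limit per RDMA_READ */
	int ch_no = 0;

	while (ch_no < 2) {
		/* issue one READ covering at most max_sge SGEs of this chunk */
		int num_sge = ch[ch_no].count < max_sge ? ch[ch_no].count : max_sge;
		int i;

		printf("chunk %d: READ of %d sge(s) starting at sge %d\n",
		       ch_no, num_sge, ch[ch_no].start);
		for (i = 0; i < num_sge; i++)
			printf("  sge[%d] len=%zu\n", ch[ch_no].start + i,
			       sge[ch[ch_no].start + i].iov_len);

		if (num_sge < ch[ch_no].count) {
			/* partial read: advance within the same chunk,
			 * mirroring the "goto next_sge" path in the patch */
			ch[ch_no].count -= num_sge;
			ch[ch_no].start += num_sge;
			continue;
		}
		ch_no++;	/* chunk fully issued; move to the next one */
	}
	return 0;
}
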