Commit edffb84c authored by Trond Myklebust

Merge tag 'nfs-rdma-for-5.11-1' of git://git.linux-nfs.org/projects/anna/linux-nfs into linux-next

NFSoRDMA Client updates for Linux 5.11

Cleanups and improvements:
  - Remove use of raw kernel memory addresses in tracepoints
  - Replace dprintk() call sites in ERR_CHUNK path
  - Trace unmap sync calls
  - Optimize MR DMA-unmapping
Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
parents 5c3485bb 7a03aeb6
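
For readers skimming the diff, the tracepoint changes below share one pattern: instead of recording raw kernel memory addresses, completion-bearing objects now carry a struct rpc_rdma_cid (a CQ id plus a per-queue completion id) that is assigned once at setup time and handed to the completion tracepoints, while MR and reply tracepoints switch to task/client ids and peer address strings. A condensed sketch of the completion-ID part, pulled together from the FRWR changes in this diff (not a standalone, buildable unit):

	/* Assign stable IDs once, when the FRWR is set up */
	static void frwr_cid_init(struct rpcrdma_ep *ep,
				  struct rpcrdma_frwr *frwr)
	{
		struct rpc_rdma_cid *cid = &frwr->fr_cid;

		cid->ci_queue_id = ep->re_attr.send_cq->res.id;
		cid->ci_completion_id = frwr->fr_mr->res.id;
	}

	/* Completion handlers then trace IDs rather than pointers */
	trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);

Send and Receive completions get the same treatment via sc_cid and rr_cid, and MR DMA-unmapping is streamlined by caching the ib_device in mr->mr_device so unmapping no longer has to dereference the transport.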
@@ -60,7 +60,7 @@ DECLARE_EVENT_CLASS(rpcrdma_completion_class,
 				), \
 				TP_ARGS(wc, cid))
 
-DECLARE_EVENT_CLASS(xprtrdma_reply_event,
+DECLARE_EVENT_CLASS(xprtrdma_reply_class,
 	TP_PROTO(
 		const struct rpcrdma_rep *rep
 	),
@@ -68,29 +68,30 @@ DECLARE_EVENT_CLASS(xprtrdma_reply_event,
 	TP_ARGS(rep),
 
 	TP_STRUCT__entry(
-		__field(const void *, rep)
-		__field(const void *, r_xprt)
 		__field(u32, xid)
 		__field(u32, version)
 		__field(u32, proc)
+		__string(addr, rpcrdma_addrstr(rep->rr_rxprt))
+		__string(port, rpcrdma_portstr(rep->rr_rxprt))
 	),
 
 	TP_fast_assign(
-		__entry->rep = rep;
-		__entry->r_xprt = rep->rr_rxprt;
 		__entry->xid = be32_to_cpu(rep->rr_xid);
 		__entry->version = be32_to_cpu(rep->rr_vers);
 		__entry->proc = be32_to_cpu(rep->rr_proc);
+		__assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
+		__assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
 	),
 
-	TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
-		__entry->r_xprt, __entry->xid, __entry->rep,
-		__entry->version, __entry->proc
+	TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
+		__get_str(addr), __get_str(port),
+		__entry->xid, __entry->version, __entry->proc
 	)
 );
 
 #define DEFINE_REPLY_EVENT(name) \
-		DEFINE_EVENT(xprtrdma_reply_event, name, \
+		DEFINE_EVENT(xprtrdma_reply_class, \
+				xprtrdma_reply_##name##_err, \
 				TP_PROTO( \
 					const struct rpcrdma_rep *rep \
 				), \
@@ -261,54 +262,67 @@ DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
 				), \
 				TP_ARGS(task, mr, nsegs))
 
-DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
+TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
+TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
+TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
+TRACE_DEFINE_ENUM(DMA_NONE);
+
+#define xprtrdma_show_direction(x) \
+		__print_symbolic(x, \
+				{ DMA_BIDIRECTIONAL, "BIDIR" }, \
+				{ DMA_TO_DEVICE, "TO_DEVICE" }, \
+				{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
+				{ DMA_NONE, "NONE" })
+
+DECLARE_EVENT_CLASS(xprtrdma_mr_class,
 	TP_PROTO(
-		const struct ib_wc *wc,
-		const struct rpcrdma_frwr *frwr
+		const struct rpcrdma_mr *mr
 	),
 
-	TP_ARGS(wc, frwr),
+	TP_ARGS(mr),
 
 	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
 		__field(u32, mr_id)
-		__field(unsigned int, status)
-		__field(unsigned int, vendor_err)
+		__field(int, nents)
+		__field(u32, handle)
+		__field(u32, length)
+		__field(u64, offset)
+		__field(u32, dir)
 	),
 
 	TP_fast_assign(
-		__entry->mr_id = frwr->fr_mr->res.id;
-		__entry->status = wc->status;
-		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
+		const struct rpcrdma_req *req = mr->mr_req;
+		const struct rpc_task *task = req->rl_slot.rq_task;
+
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
+		__entry->mr_id = mr->frwr.fr_mr->res.id;
+		__entry->nents = mr->mr_nents;
+		__entry->handle = mr->mr_handle;
+		__entry->length = mr->mr_length;
+		__entry->offset = mr->mr_offset;
+		__entry->dir = mr->mr_dir;
 	),
 
-	TP_printk(
-		"mr.id=%u: %s (%u/0x%x)",
-		__entry->mr_id, rdma_show_wc_status(__entry->status),
-		__entry->status, __entry->vendor_err
+	TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
+		__entry->task_id, __entry->client_id,
+		__entry->mr_id, __entry->nents, __entry->length,
+		(unsigned long long)__entry->offset, __entry->handle,
+		xprtrdma_show_direction(__entry->dir)
 	)
 );
 
-#define DEFINE_FRWR_DONE_EVENT(name) \
-		DEFINE_EVENT(xprtrdma_frwr_done, name, \
+#define DEFINE_MR_EVENT(name) \
+		DEFINE_EVENT(xprtrdma_mr_class, \
+				xprtrdma_mr_##name, \
 				TP_PROTO( \
-					const struct ib_wc *wc, \
-					const struct rpcrdma_frwr *frwr \
+					const struct rpcrdma_mr *mr \
 				), \
-				TP_ARGS(wc, frwr))
-
-TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
-TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
-TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
-TRACE_DEFINE_ENUM(DMA_NONE);
-
-#define xprtrdma_show_direction(x) \
-		__print_symbolic(x, \
-				{ DMA_BIDIRECTIONAL, "BIDIR" }, \
-				{ DMA_TO_DEVICE, "TO_DEVICE" }, \
-				{ DMA_FROM_DEVICE, "FROM_DEVICE" }, \
-				{ DMA_NONE, "NONE" })
-
-DECLARE_EVENT_CLASS(xprtrdma_mr,
+				TP_ARGS(mr))
+
+DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
 	TP_PROTO(
 		const struct rpcrdma_mr *mr
 	),
@@ -340,45 +354,47 @@ DECLARE_EVENT_CLASS(xprtrdma_mr,
 	)
 );
 
-#define DEFINE_MR_EVENT(name) \
-		DEFINE_EVENT(xprtrdma_mr, xprtrdma_mr_##name, \
+#define DEFINE_ANON_MR_EVENT(name) \
+		DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
+				xprtrdma_mr_##name, \
 				TP_PROTO( \
 					const struct rpcrdma_mr *mr \
 				), \
 				TP_ARGS(mr))
 
-DECLARE_EVENT_CLASS(xprtrdma_cb_event,
+DECLARE_EVENT_CLASS(xprtrdma_callback_class,
 	TP_PROTO(
+		const struct rpcrdma_xprt *r_xprt,
 		const struct rpc_rqst *rqst
 	),
 
-	TP_ARGS(rqst),
+	TP_ARGS(r_xprt, rqst),
 
 	TP_STRUCT__entry(
-		__field(const void *, rqst)
-		__field(const void *, rep)
-		__field(const void *, req)
 		__field(u32, xid)
+		__string(addr, rpcrdma_addrstr(r_xprt))
+		__string(port, rpcrdma_portstr(r_xprt))
 	),
 
 	TP_fast_assign(
-		__entry->rqst = rqst;
-		__entry->req = rpcr_to_rdmar(rqst);
-		__entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
 		__entry->xid = be32_to_cpu(rqst->rq_xid);
+		__assign_str(addr, rpcrdma_addrstr(r_xprt));
+		__assign_str(port, rpcrdma_portstr(r_xprt));
 	),
 
-	TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
-		__entry->xid, __entry->rqst, __entry->req, __entry->rep
+	TP_printk("peer=[%s]:%s xid=0x%08x",
+		__get_str(addr), __get_str(port), __entry->xid
 	)
 );
 
-#define DEFINE_CB_EVENT(name) \
-		DEFINE_EVENT(xprtrdma_cb_event, name, \
+#define DEFINE_CALLBACK_EVENT(name) \
+		DEFINE_EVENT(xprtrdma_callback_class, \
+				xprtrdma_cb_##name, \
 				TP_PROTO( \
+					const struct rpcrdma_xprt *r_xprt, \
 					const struct rpc_rqst *rqst \
 				), \
-				TP_ARGS(rqst))
+				TP_ARGS(r_xprt, rqst))
 
 /**
  ** Connection events
@@ -549,61 +565,33 @@ TRACE_EVENT(xprtrdma_createmrs,
 	)
 );
 
-TRACE_EVENT(xprtrdma_mr_get,
+TRACE_EVENT(xprtrdma_nomrs_err,
 	TP_PROTO(
+		const struct rpcrdma_xprt *r_xprt,
 		const struct rpcrdma_req *req
 	),
 
-	TP_ARGS(req),
+	TP_ARGS(r_xprt, req),
 
 	TP_STRUCT__entry(
-		__field(const void *, req)
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
-		__field(u32, xid)
-	),
-
-	TP_fast_assign(
-		const struct rpc_rqst *rqst = &req->rl_slot;
-
-		__entry->req = req;
-		__entry->task_id = rqst->rq_task->tk_pid;
-		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
-		__entry->xid = be32_to_cpu(rqst->rq_xid);
-	),
-
-	TP_printk("task:%u@%u xid=0x%08x req=%p",
-		__entry->task_id, __entry->client_id, __entry->xid,
-		__entry->req
-	)
-);
-
-TRACE_EVENT(xprtrdma_nomrs,
-	TP_PROTO(
-		const struct rpcrdma_req *req
-	),
-
-	TP_ARGS(req),
-
-	TP_STRUCT__entry(
-		__field(const void *, req)
-		__field(unsigned int, task_id)
-		__field(unsigned int, client_id)
-		__field(u32, xid)
+		__string(addr, rpcrdma_addrstr(r_xprt))
+		__string(port, rpcrdma_portstr(r_xprt))
 	),
 
 	TP_fast_assign(
 		const struct rpc_rqst *rqst = &req->rl_slot;
 
-		__entry->req = req;
 		__entry->task_id = rqst->rq_task->tk_pid;
 		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
-		__entry->xid = be32_to_cpu(rqst->rq_xid);
+		__assign_str(addr, rpcrdma_addrstr(r_xprt));
+		__assign_str(port, rpcrdma_portstr(r_xprt));
 	),
 
-	TP_printk("task:%u@%u xid=0x%08x req=%p",
-		__entry->task_id, __entry->client_id, __entry->xid,
-		__entry->req
+	TP_printk("peer=[%s]:%s task:%u@%u",
+		__get_str(addr), __get_str(port),
+		__entry->task_id, __entry->client_id
 	)
 );
@@ -735,8 +723,8 @@ TRACE_EVENT(xprtrdma_post_send,
 	TP_ARGS(req),
 
 	TP_STRUCT__entry(
-		__field(const void *, req)
-		__field(const void *, sc)
+		__field(u32, cq_id)
+		__field(int, completion_id)
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
 		__field(int, num_sge)
@@ -745,20 +733,21 @@ TRACE_EVENT(xprtrdma_post_send,
 
 	TP_fast_assign(
 		const struct rpc_rqst *rqst = &req->rl_slot;
+		const struct rpcrdma_sendctx *sc = req->rl_sendctx;
 
+		__entry->cq_id = sc->sc_cid.ci_queue_id;
+		__entry->completion_id = sc->sc_cid.ci_completion_id;
 		__entry->task_id = rqst->rq_task->tk_pid;
 		__entry->client_id = rqst->rq_task->tk_client ?
 				     rqst->rq_task->tk_client->cl_clid : -1;
-		__entry->req = req;
-		__entry->sc = req->rl_sendctx;
 		__entry->num_sge = req->rl_wr.num_sge;
 		__entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
 	),
 
-	TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %s",
+	TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
 		__entry->task_id, __entry->client_id,
-		__entry->req, __entry->sc, __entry->num_sge,
-		(__entry->num_sge == 1 ? "" : "s"),
+		__entry->cq_id, __entry->completion_id,
+		__entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
 		(__entry->signaled ? "signaled" : "")
 	)
 );
@@ -771,15 +760,17 @@ TRACE_EVENT(xprtrdma_post_recv,
 	TP_ARGS(rep),
 
 	TP_STRUCT__entry(
-		__field(const void *, rep)
+		__field(u32, cq_id)
+		__field(int, completion_id)
 	),
 
 	TP_fast_assign(
-		__entry->rep = rep;
+		__entry->cq_id = rep->rr_cid.ci_queue_id;
+		__entry->completion_id = rep->rr_cid.ci_completion_id;
 	),
 
-	TP_printk("rep=%p",
-		__entry->rep
+	TP_printk("cq.id=%d cid=%d",
+		__entry->cq_id, __entry->completion_id
 	)
 );
@@ -816,7 +807,7 @@ TRACE_EVENT(xprtrdma_post_recvs,
 	)
 );
 
-TRACE_EVENT(xprtrdma_post_linv,
+TRACE_EVENT(xprtrdma_post_linv_err,
 	TP_PROTO(
 		const struct rpcrdma_req *req,
 		int status
@@ -825,19 +816,21 @@ TRACE_EVENT(xprtrdma_post_linv,
 	TP_ARGS(req, status),
 
 	TP_STRUCT__entry(
-		__field(const void *, req)
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
 		__field(int, status)
-		__field(u32, xid)
 	),
 
 	TP_fast_assign(
-		__entry->req = req;
+		const struct rpc_task *task = req->rl_slot.rq_task;
+
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
 		__entry->status = status;
-		__entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
 	),
 
-	TP_printk("req=%p xid=0x%08x status=%d",
-		__entry->req, __entry->xid, __entry->status
+	TP_printk("task:%u@%u status=%d",
+		__entry->task_id, __entry->client_id, __entry->status
 	)
 );
@@ -845,75 +838,12 @@ TRACE_EVENT(xprtrdma_post_linv,
 ** Completion events
 **/
 
-TRACE_EVENT(xprtrdma_wc_send,
-	TP_PROTO(
-		const struct rpcrdma_sendctx *sc,
-		const struct ib_wc *wc
-	),
-
-	TP_ARGS(sc, wc),
-
-	TP_STRUCT__entry(
-		__field(const void *, req)
-		__field(const void *, sc)
-		__field(unsigned int, unmap_count)
-		__field(unsigned int, status)
-		__field(unsigned int, vendor_err)
-	),
-
-	TP_fast_assign(
-		__entry->req = sc->sc_req;
-		__entry->sc = sc;
-		__entry->unmap_count = sc->sc_unmap_count;
-		__entry->status = wc->status;
-		__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
-	),
-
-	TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
-		__entry->req, __entry->sc, __entry->unmap_count,
-		rdma_show_wc_status(__entry->status),
-		__entry->status, __entry->vendor_err
-	)
-);
-
-TRACE_EVENT(xprtrdma_wc_receive,
-	TP_PROTO(
-		const struct ib_wc *wc
-	),
-
-	TP_ARGS(wc),
-
-	TP_STRUCT__entry(
-		__field(const void *, rep)
-		__field(u32, byte_len)
-		__field(unsigned int, status)
-		__field(u32, vendor_err)
-	),
-
-	TP_fast_assign(
-		__entry->rep = container_of(wc->wr_cqe, struct rpcrdma_rep,
-					    rr_cqe);
-		__entry->status = wc->status;
-		if (wc->status) {
-			__entry->byte_len = 0;
-			__entry->vendor_err = wc->vendor_err;
-		} else {
-			__entry->byte_len = wc->byte_len;
-			__entry->vendor_err = 0;
-		}
-	),
-
-	TP_printk("rep=%p %u bytes: %s (%u/0x%x)",
-		__entry->rep, __entry->byte_len,
-		rdma_show_wc_status(__entry->status),
-		__entry->status, __entry->vendor_err
-	)
-);
-
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
-DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_done);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
+DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
 
 TRACE_EVENT(xprtrdma_frwr_alloc,
 	TP_PROTO(
@@ -1036,9 +966,9 @@ TRACE_EVENT(xprtrdma_frwr_maperr,
 
 DEFINE_MR_EVENT(localinv);
 DEFINE_MR_EVENT(map);
-DEFINE_MR_EVENT(unmap);
-DEFINE_MR_EVENT(reminv);
-DEFINE_MR_EVENT(recycle);
+
+DEFINE_ANON_MR_EVENT(unmap);
+DEFINE_ANON_MR_EVENT(recycle);
 
 TRACE_EVENT(xprtrdma_dma_maperr,
 	TP_PROTO(
@@ -1066,17 +996,14 @@ TRACE_EVENT(xprtrdma_reply,
 	TP_PROTO(
 		const struct rpc_task *task,
 		const struct rpcrdma_rep *rep,
-		const struct rpcrdma_req *req,
 		unsigned int credits
 	),
 
-	TP_ARGS(task, rep, req, credits),
+	TP_ARGS(task, rep, credits),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
-		__field(const void *, rep)
-		__field(const void *, req)
 		__field(u32, xid)
 		__field(unsigned int, credits)
 	),
@@ -1084,49 +1011,102 @@ TRACE_EVENT(xprtrdma_reply,
 
 	TP_fast_assign(
 		__entry->task_id = task->tk_pid;
 		__entry->client_id = task->tk_client->cl_clid;
-		__entry->rep = rep;
-		__entry->req = req;
 		__entry->xid = be32_to_cpu(rep->rr_xid);
 		__entry->credits = credits;
 	),
 
-	TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
+	TP_printk("task:%u@%u xid=0x%08x credits=%u",
 		__entry->task_id, __entry->client_id, __entry->xid,
-		__entry->credits, __entry->rep, __entry->req
+		__entry->credits
 	)
 );
 
-TRACE_EVENT(xprtrdma_defer_cmp,
+DEFINE_REPLY_EVENT(vers);
+DEFINE_REPLY_EVENT(rqst);
+DEFINE_REPLY_EVENT(short);
+DEFINE_REPLY_EVENT(hdr);
+
+TRACE_EVENT(xprtrdma_err_vers,
 	TP_PROTO(
-		const struct rpcrdma_rep *rep
+		const struct rpc_rqst *rqst,
+		__be32 *min,
+		__be32 *max
 	),
 
-	TP_ARGS(rep),
+	TP_ARGS(rqst, min, max),
 
 	TP_STRUCT__entry(
 		__field(unsigned int, task_id)
 		__field(unsigned int, client_id)
-		__field(const void *, rep)
 		__field(u32, xid)
+		__field(u32, min)
+		__field(u32, max)
 	),
 
 	TP_fast_assign(
-		__entry->task_id = rep->rr_rqst->rq_task->tk_pid;
-		__entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
-		__entry->rep = rep;
-		__entry->xid = be32_to_cpu(rep->rr_xid);
+		__entry->task_id = rqst->rq_task->tk_pid;
+		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
+		__entry->xid = be32_to_cpu(rqst->rq_xid);
+		__entry->min = be32_to_cpup(min);
+		__entry->max = be32_to_cpup(max);
 	),
 
-	TP_printk("task:%u@%u xid=0x%08x rep=%p",
+	TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
 		__entry->task_id, __entry->client_id, __entry->xid,
-		__entry->rep
+		__entry->min, __entry->max
 	)
 );
 
-DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
-DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
-DEFINE_REPLY_EVENT(xprtrdma_reply_short);
-DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
+TRACE_EVENT(xprtrdma_err_chunk,
+	TP_PROTO(
+		const struct rpc_rqst *rqst
+	),
+
+	TP_ARGS(rqst),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(u32, xid)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = rqst->rq_task->tk_pid;
+		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
+		__entry->xid = be32_to_cpu(rqst->rq_xid);
+	),
+
+	TP_printk("task:%u@%u xid=0x%08x",
+		__entry->task_id, __entry->client_id, __entry->xid
+	)
+);
+
+TRACE_EVENT(xprtrdma_err_unrecognized,
+	TP_PROTO(
+		const struct rpc_rqst *rqst,
+		__be32 *procedure
+	),
+
+	TP_ARGS(rqst, procedure),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+		__field(u32, xid)
+		__field(u32, procedure)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = rqst->rq_task->tk_pid;
+		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
+		__entry->procedure = be32_to_cpup(procedure);
+	),
+
+	TP_printk("task:%u@%u xid=0x%08x procedure=%u",
+		__entry->task_id, __entry->client_id, __entry->xid,
+		__entry->procedure
+	)
+);
 
 TRACE_EVENT(xprtrdma_fixup,
 	TP_PROTO(
@@ -1187,6 +1167,28 @@ TRACE_EVENT(xprtrdma_decode_seg,
 	)
 );
 
+TRACE_EVENT(xprtrdma_mrs_zap,
+	TP_PROTO(
+		const struct rpc_task *task
+	),
+
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, task_id)
+		__field(unsigned int, client_id)
+	),
+
+	TP_fast_assign(
+		__entry->task_id = task->tk_pid;
+		__entry->client_id = task->tk_client->cl_clid;
+	),
+
+	TP_printk("task:%u@%u",
+		__entry->task_id, __entry->client_id
+	)
+);
+
 /**
  ** Callback events
  **/
@@ -1219,36 +1221,8 @@ TRACE_EVENT(xprtrdma_cb_setup,
 	)
 );
 
-DEFINE_CB_EVENT(xprtrdma_cb_call);
-DEFINE_CB_EVENT(xprtrdma_cb_reply);
-
-TRACE_EVENT(xprtrdma_leaked_rep,
-	TP_PROTO(
-		const struct rpc_rqst *rqst,
-		const struct rpcrdma_rep *rep
-	),
-
-	TP_ARGS(rqst, rep),
-
-	TP_STRUCT__entry(
-		__field(unsigned int, task_id)
-		__field(unsigned int, client_id)
-		__field(u32, xid)
-		__field(const void *, rep)
-	),
-
-	TP_fast_assign(
-		__entry->task_id = rqst->rq_task->tk_pid;
-		__entry->client_id = rqst->rq_task->tk_client->cl_clid;
-		__entry->xid = be32_to_cpu(rqst->rq_xid);
-		__entry->rep = rep;
-	),
-
-	TP_printk("task:%u@%u xid=0x%08x rep=%p",
-		__entry->task_id, __entry->client_id, __entry->xid,
-		__entry->rep
-	)
-);
+DEFINE_CALLBACK_EVENT(call);
+DEFINE_CALLBACK_EVENT(reply);
 
 /**
  ** Server-side RPC/RDMA events
......
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Copyright (c) 2015 Oracle. All rights reserved.
+ * Copyright (c) 2015-2020, Oracle and/or its affiliates.
  *
  * Support for backward direction RPCs on RPC/RDMA.
  */
@@ -82,7 +82,7 @@ static int rpcrdma_bc_marshal_reply(struct rpc_rqst *rqst)
 			      &rqst->rq_snd_buf, rpcrdma_noch_pullup))
 		return -EIO;
 
-	trace_xprtrdma_cb_reply(rqst);
+	trace_xprtrdma_cb_reply(r_xprt, rqst);
 	return 0;
 }
 
@@ -260,7 +260,7 @@ void rpcrdma_bc_receive_call(struct rpcrdma_xprt *r_xprt,
 	 */
 	req = rpcr_to_rdmar(rqst);
 	req->rl_reply = rep;
-	trace_xprtrdma_cb_call(rqst);
+	trace_xprtrdma_cb_call(r_xprt, rqst);
 
 	/* Queue rqst for ULP's callback service */
 	bc_serv = xprt->bc_serv;
......
@@ -65,18 +65,23 @@ void frwr_release_mr(struct rpcrdma_mr *mr)
 	kfree(mr);
 }
 
+static void frwr_mr_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
+{
+	if (mr->mr_device) {
+		trace_xprtrdma_mr_unmap(mr);
+		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
+				mr->mr_dir);
+		mr->mr_device = NULL;
+	}
+}
+
 static void frwr_mr_recycle(struct rpcrdma_mr *mr)
 {
 	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
 
 	trace_xprtrdma_mr_recycle(mr);
-	if (mr->mr_dir != DMA_NONE) {
-		trace_xprtrdma_mr_unmap(mr);
-		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
-				mr->mr_sg, mr->mr_nents, mr->mr_dir);
-		mr->mr_dir = DMA_NONE;
-	}
+	frwr_mr_unmap(r_xprt, mr);
 
 	spin_lock(&r_xprt->rx_buf.rb_lock);
 	list_del(&mr->mr_all);
@@ -86,6 +91,16 @@ static void frwr_mr_recycle(struct rpcrdma_mr *mr)
 	frwr_release_mr(mr);
 }
 
+static void frwr_mr_put(struct rpcrdma_mr *mr)
+{
+	frwr_mr_unmap(mr->mr_xprt, mr);
+
+	/* The MR is returned to the req's MR free list instead
+	 * of to the xprt's MR free list. No spinlock is needed.
+	 */
+	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
+}
+
 /* frwr_reset - Place MRs back on the free list
  * @req: request to reset
  *
@@ -101,7 +116,7 @@ void frwr_reset(struct rpcrdma_req *req)
 	struct rpcrdma_mr *mr;
 
 	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
-		rpcrdma_mr_put(mr);
+		frwr_mr_put(mr);
 }
 
 /**
@@ -130,7 +145,7 @@ int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
 
 	mr->mr_xprt = r_xprt;
 	mr->frwr.fr_mr = frmr;
-	mr->mr_dir = DMA_NONE;
+	mr->mr_device = NULL;
 	INIT_LIST_HEAD(&mr->mr_list);
 	init_completion(&mr->frwr.fr_linv_done);
 
@@ -315,6 +330,7 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 				  mr->mr_dir);
 	if (!dma_nents)
 		goto out_dmamap_err;
+	mr->mr_device = ep->re_id->device;
 
 	ibmr = mr->frwr.fr_mr;
 	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
@@ -341,7 +357,6 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
 	return seg;
 
 out_dmamap_err:
-	mr->mr_dir = DMA_NONE;
 	trace_xprtrdma_frwr_sgerr(mr, i);
 	return ERR_PTR(-EIO);
 
@@ -363,12 +378,21 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
 			container_of(cqe, struct rpcrdma_frwr, fr_cqe);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_fastreg(wc, frwr);
+	trace_xprtrdma_wc_fastreg(wc, &frwr->fr_cid);
 	/* The MR will get recycled when the associated req is retransmitted */
 
 	rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
 
+static void frwr_cid_init(struct rpcrdma_ep *ep,
+			  struct rpcrdma_frwr *frwr)
+{
+	struct rpc_rdma_cid *cid = &frwr->fr_cid;
+
+	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
+	cid->ci_completion_id = frwr->fr_mr->res.id;
+}
+
 /**
  * frwr_send - post Send WRs containing the RPC Call message
  * @r_xprt: controlling transport instance
@@ -385,6 +409,7 @@ static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
  */
 int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
+	struct rpcrdma_ep *ep = r_xprt->rx_ep;
 	struct ib_send_wr *post_wr;
 	struct rpcrdma_mr *mr;
 
@@ -395,6 +420,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 		frwr = &mr->frwr;
 		frwr->fr_cqe.done = frwr_wc_fastreg;
+		frwr_cid_init(ep, frwr);
 		frwr->fr_regwr.wr.next = post_wr;
 		frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
 		frwr->fr_regwr.wr.num_sge = 0;
@@ -404,7 +430,7 @@ int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 		post_wr = &frwr->fr_regwr.wr;
 	}
 
-	return ib_post_send(r_xprt->rx_ep->re_id->qp, post_wr, NULL);
+	return ib_post_send(ep->re_id->qp, post_wr, NULL);
 }
 
 /**
@@ -420,18 +446,17 @@ void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
 	list_for_each_entry(mr, mrs, mr_list)
 		if (mr->mr_handle == rep->rr_inv_rkey) {
 			list_del_init(&mr->mr_list);
-			trace_xprtrdma_mr_reminv(mr);
-			rpcrdma_mr_put(mr);
+			frwr_mr_put(mr);
 			break;	/* only one invalidated MR per RPC */
 		}
 }
 
-static void __frwr_release_mr(struct ib_wc *wc, struct rpcrdma_mr *mr)
+static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
 {
 	if (wc->status != IB_WC_SUCCESS)
 		frwr_mr_recycle(mr);
 	else
-		rpcrdma_mr_put(mr);
+		frwr_mr_put(mr);
 }
 
 /**
@@ -448,8 +473,8 @@ static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
 	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_li(wc, frwr);
-	__frwr_release_mr(wc, mr);
+	trace_xprtrdma_wc_li(wc, &frwr->fr_cid);
+	frwr_mr_done(wc, mr);
 
 	rpcrdma_flush_disconnect(cq->cq_context, wc);
 }
@@ -469,8 +494,8 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 	struct rpcrdma_mr *mr = container_of(frwr, struct rpcrdma_mr, frwr);
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_li_wake(wc, frwr);
-	__frwr_release_mr(wc, mr);
+	trace_xprtrdma_wc_li_wake(wc, &frwr->fr_cid);
+	frwr_mr_done(wc, mr);
 	complete(&frwr->fr_linv_done);
 
 	rpcrdma_flush_disconnect(cq->cq_context, wc);
@@ -490,6 +515,7 @@ static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
 void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
 	struct ib_send_wr *first, **prev, *last;
+	struct rpcrdma_ep *ep = r_xprt->rx_ep;
 	const struct ib_send_wr *bad_wr;
 	struct rpcrdma_frwr *frwr;
 	struct rpcrdma_mr *mr;
@@ -509,6 +535,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 		frwr = &mr->frwr;
 		frwr->fr_cqe.done = frwr_wc_localinv;
+		frwr_cid_init(ep, frwr);
 		last = &frwr->fr_invwr;
 		last->next = NULL;
 		last->wr_cqe = &frwr->fr_cqe;
@@ -534,7 +561,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * unless re_id->qp is a valid pointer.
 	 */
 	bad_wr = NULL;
-	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
+	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
 
 	/* The final LOCAL_INV WR in the chain is supposed to
 	 * do the wake. If it was never posted, the wake will
@@ -547,7 +574,7 @@ void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
 	 */
-	trace_xprtrdma_post_linv(req, rc);
+	trace_xprtrdma_post_linv_err(req, rc);
 	while (bad_wr) {
 		frwr = container_of(bad_wr, struct rpcrdma_frwr,
 				    fr_invwr);
@@ -574,10 +601,10 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct rpcrdma_rep *rep = mr->mr_req->rl_reply;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_li_done(wc, frwr);
-	__frwr_release_mr(wc, mr);
+	trace_xprtrdma_wc_li_done(wc, &frwr->fr_cid);
+	frwr_mr_done(wc, mr);
 
-	/* Ensure @rep is generated before __frwr_release_mr */
+	/* Ensure @rep is generated before frwr_mr_done */
 	smp_rmb();
 	rpcrdma_complete_rqst(rep);
 
@@ -597,6 +624,7 @@ static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
 void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 {
 	struct ib_send_wr *first, *last, **prev;
+	struct rpcrdma_ep *ep = r_xprt->rx_ep;
 	const struct ib_send_wr *bad_wr;
 	struct rpcrdma_frwr *frwr;
 	struct rpcrdma_mr *mr;
@@ -614,6 +642,7 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 
 		frwr = &mr->frwr;
 		frwr->fr_cqe.done = frwr_wc_localinv;
+		frwr_cid_init(ep, frwr);
 		last = &frwr->fr_invwr;
 		last->next = NULL;
 		last->wr_cqe = &frwr->fr_cqe;
@@ -639,13 +668,13 @@ void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
 	 * unless re_id->qp is a valid pointer.
 	 */
 	bad_wr = NULL;
-	rc = ib_post_send(r_xprt->rx_ep->re_id->qp, first, &bad_wr);
+	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);
 	if (!rc)
 		return;
 
 	/* Recycle MRs in the LOCAL_INV chain that did not get posted.
 	 */
-	trace_xprtrdma_post_linv(req, rc);
+	trace_xprtrdma_post_linv_err(req, rc);
 	while (bad_wr) {
 		frwr = container_of(bad_wr, struct rpcrdma_frwr, fr_invwr);
 		mr = container_of(frwr, struct rpcrdma_mr, frwr);
......
 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
 /*
- * Copyright (c) 2014-2017 Oracle. All rights reserved.
+ * Copyright (c) 2014-2020, Oracle and/or its affiliates.
  * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
@@ -331,7 +331,6 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
 		*mr = rpcrdma_mr_get(r_xprt);
 		if (!*mr)
 			goto out_getmr_err;
-		trace_xprtrdma_mr_get(req);
 		(*mr)->mr_req = req;
 	}
 
@@ -339,7 +338,7 @@ static struct rpcrdma_mr_seg *rpcrdma_mr_prepare(struct rpcrdma_xprt *r_xprt,
 	return frwr_map(r_xprt, seg, nsegs, writing, req->rl_slot.rq_xid, *mr);
 
 out_getmr_err:
-	trace_xprtrdma_nomrs(req);
+	trace_xprtrdma_nomrs_err(r_xprt, req);
 	xprt_wait_for_buffer_space(&r_xprt->rx_xprt);
 	rpcrdma_mrs_refresh(r_xprt);
 	return ERR_PTR(-EAGAIN);
@@ -1344,20 +1343,13 @@ rpcrdma_decode_error(struct rpcrdma_xprt *r_xprt, struct rpcrdma_rep *rep,
 		p = xdr_inline_decode(xdr, 2 * sizeof(*p));
 		if (!p)
 			break;
-		dprintk("RPC: %s: server reports "
-			"version error (%u-%u), xid %08x\n", __func__,
-			be32_to_cpup(p), be32_to_cpu(*(p + 1)),
-			be32_to_cpu(rep->rr_xid));
+		trace_xprtrdma_err_vers(rqst, p, p + 1);
 		break;
 	case err_chunk:
-		dprintk("RPC: %s: server reports "
-			"header decoding error, xid %08x\n", __func__,
-			be32_to_cpu(rep->rr_xid));
+		trace_xprtrdma_err_chunk(rqst);
 		break;
 	default:
-		dprintk("RPC: %s: server reports "
-			"unrecognized error %d, xid %08x\n", __func__,
-			be32_to_cpup(p), be32_to_cpu(rep->rr_xid));
+		trace_xprtrdma_err_unrecognized(rqst, p);
 	}
 
 	return -EIO;
@@ -1398,7 +1390,7 @@ void rpcrdma_complete_rqst(struct rpcrdma_rep *rep)
 	return;
 
 out_badheader:
-	trace_xprtrdma_reply_hdr(rep);
+	trace_xprtrdma_reply_hdr_err(rep);
 	r_xprt->rx_stats.bad_reply_count++;
 	rqst->rq_task->tk_status = status;
 	status = 0;
@@ -1472,14 +1464,12 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	rpcrdma_post_recvs(r_xprt, false);
 
 	req = rpcr_to_rdmar(rqst);
-	if (req->rl_reply) {
-		trace_xprtrdma_leaked_rep(rqst, req->rl_reply);
+	if (unlikely(req->rl_reply))
 		rpcrdma_recv_buffer_put(req->rl_reply);
-	}
 	req->rl_reply = rep;
 	rep->rr_rqst = rqst;
 
-	trace_xprtrdma_reply(rqst->rq_task, rep, req, credits);
+	trace_xprtrdma_reply(rqst->rq_task, rep, credits);
 
 	if (rep->rr_wc_flags & IB_WC_WITH_INVALIDATE)
 		frwr_reminv(rep, &req->rl_registered);
@@ -1491,16 +1481,16 @@ void rpcrdma_reply_handler(struct rpcrdma_rep *rep)
 	return;
 
 out_badversion:
-	trace_xprtrdma_reply_vers(rep);
+	trace_xprtrdma_reply_vers_err(rep);
 	goto out;
 
 out_norqst:
 	spin_unlock(&xprt->queue_lock);
-	trace_xprtrdma_reply_rqst(rep);
+	trace_xprtrdma_reply_rqst_err(rep);
 	goto out;
 
 out_shortreply:
-	trace_xprtrdma_reply_short(rep);
+	trace_xprtrdma_reply_short_err(rep);
 
 out:
 	rpcrdma_recv_buffer_put(rep);
......
@@ -599,11 +599,12 @@ static void
 xprt_rdma_free(struct rpc_task *task)
 {
 	struct rpc_rqst *rqst = task->tk_rqstp;
-	struct rpcrdma_xprt *r_xprt = rpcx_to_rdmax(rqst->rq_xprt);
 	struct rpcrdma_req *req = rpcr_to_rdmar(rqst);
 
-	if (!list_empty(&req->rl_registered))
-		frwr_unmap_sync(r_xprt, req);
+	if (unlikely(!list_empty(&req->rl_registered))) {
+		trace_xprtrdma_mrs_zap(task);
+		frwr_unmap_sync(rpcx_to_rdmax(rqst->rq_xprt), req);
+	}
 
 	/* XXX: If the RPC is completing because of a signal and
 	 * not because a reply was received, we ought to ensure
......
@@ -167,7 +167,7 @@ static void rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
 	struct rpcrdma_xprt *r_xprt = cq->cq_context;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_send(sc, wc);
+	trace_xprtrdma_wc_send(wc, &sc->sc_cid);
 	rpcrdma_sendctx_put_locked(r_xprt, sc);
 	rpcrdma_flush_disconnect(r_xprt, wc);
 }
@@ -186,7 +186,7 @@ static void rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
 	struct rpcrdma_xprt *r_xprt = cq->cq_context;
 
 	/* WARNING: Only wr_cqe and status are reliable at this point */
-	trace_xprtrdma_wc_receive(wc);
+	trace_xprtrdma_wc_receive(wc, &rep->rr_cid);
 	--r_xprt->rx_ep->re_receive_count;
 	if (wc->status != IB_WC_SUCCESS)
 		goto out_flushed;
@@ -643,6 +643,9 @@ static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ep *ep)
 		return NULL;
 
 	sc->sc_cqe.done = rpcrdma_wc_send;
+	sc->sc_cid.ci_queue_id = ep->re_attr.send_cq->res.id;
+	sc->sc_cid.ci_completion_id =
+		atomic_inc_return(&ep->re_completion_ids);
 	return sc;
 }
@@ -972,6 +975,9 @@ struct rpcrdma_rep *rpcrdma_rep_create(struct rpcrdma_xprt *r_xprt,
 	if (!rpcrdma_regbuf_dma_map(r_xprt, rep->rr_rdmabuf))
 		goto out_free_regbuf;
 
+	rep->rr_cid.ci_completion_id =
+		atomic_inc_return(&r_xprt->rx_ep->re_completion_ids);
+
 	xdr_buf_init(&rep->rr_hdrbuf, rdmab_data(rep->rr_rdmabuf),
 		     rdmab_length(rep->rr_rdmabuf));
 	rep->rr_cqe.done = rpcrdma_wc_receive;
@@ -1178,25 +1184,6 @@ rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
 	return mr;
 }
 
-/**
- * rpcrdma_mr_put - DMA unmap an MR and release it
- * @mr: MR to release
- *
- */
-void rpcrdma_mr_put(struct rpcrdma_mr *mr)
-{
-	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
-
-	if (mr->mr_dir != DMA_NONE) {
-		trace_xprtrdma_mr_unmap(mr);
-		ib_dma_unmap_sg(r_xprt->rx_ep->re_id->device,
-				mr->mr_sg, mr->mr_nents, mr->mr_dir);
-		mr->mr_dir = DMA_NONE;
-	}
-
-	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
-}
-
 /**
  * rpcrdma_buffer_get - Get a request buffer
  * @buffers: Buffer pool from which to obtain a buffer
@@ -1411,6 +1398,7 @@ void rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
 		if (!rep)
 			break;
 
+		rep->rr_cid.ci_queue_id = ep->re_attr.recv_cq->res.id;
 		trace_xprtrdma_post_recv(rep);
 		rep->rr_recv_wr.next = wr;
 		wr = &rep->rr_recv_wr;
......
@@ -53,6 +53,7 @@
 
 #include <rdma/ib_verbs.h>		/* RDMA verbs api */
 
 #include <linux/sunrpc/clnt.h>		/* rpc_xprt */
+#include <linux/sunrpc/rpc_rdma_cid.h>	/* completion IDs */
 #include <linux/sunrpc/rpc_rdma.h>	/* RPC/RDMA protocol */
 #include <linux/sunrpc/xprtrdma.h>	/* xprt parameters */
 
@@ -93,6 +94,8 @@ struct rpcrdma_ep {
 	unsigned int		re_max_requests; /* depends on device */
 	unsigned int		re_inline_send;	/* negotiated */
 	unsigned int		re_inline_recv;	/* negotiated */
+
+	atomic_t		re_completion_ids;
 };
 
 /* Pre-allocate extra Work Requests for handling backward receives
@@ -180,6 +183,8 @@ enum {
 struct rpcrdma_rep {
 	struct ib_cqe		rr_cqe;
+	struct rpc_rdma_cid	rr_cid;
+
 	__be32			rr_xid;
 	__be32			rr_vers;
 	__be32			rr_proc;
@@ -211,6 +216,7 @@ enum {
 struct rpcrdma_req;
 struct rpcrdma_sendctx {
 	struct ib_cqe		sc_cqe;
+	struct rpc_rdma_cid	sc_cid;
 	struct rpcrdma_req	*sc_req;
 	unsigned int		sc_unmap_count;
 	struct ib_sge		sc_sges[];
@@ -225,6 +231,7 @@ struct rpcrdma_sendctx {
 struct rpcrdma_frwr {
 	struct ib_mr		*fr_mr;
 	struct ib_cqe		fr_cqe;
+	struct rpc_rdma_cid	fr_cid;
 	struct completion	fr_linv_done;
 	union {
 		struct ib_reg_wr	fr_regwr;
@@ -236,6 +243,7 @@ struct rpcrdma_req;
 struct rpcrdma_mr {
 	struct list_head	mr_list;
 	struct rpcrdma_req	*mr_req;
+	struct ib_device	*mr_device;
 	struct scatterlist	*mr_sg;
 	int			mr_nents;
 	enum dma_data_direction	mr_dir;
@@ -466,7 +474,6 @@ void rpcrdma_buffer_destroy(struct rpcrdma_buffer *);
 struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_xprt *r_xprt);
 
 struct rpcrdma_mr *rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt);
-void rpcrdma_mr_put(struct rpcrdma_mr *mr);
 void rpcrdma_mrs_refresh(struct rpcrdma_xprt *r_xprt);
 
 struct rpcrdma_req *rpcrdma_buffer_get(struct rpcrdma_buffer *);
......