Commit fe7539b3 authored by Chuck Lever, committed by Kamal Mostafa

xprtrdma: Re-arm after missed events

commit 7b3d770c upstream.

ib_req_notify_cq(IB_CQ_REPORT_MISSED_EVENTS) returns a positive
value if WCs were added to a CQ after the last completion upcall
but before the CQ has been re-armed.
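For reference, a minimal sketch of the three outcomes a caller re-arming with
missed-event reporting has to distinguish (rc and cq as in the handlers below):

	rc = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc > 0) {
		/* WCs were added after the last upcall; the CQ is not
		 * guaranteed to be armed, so the caller must poll again. */
	} else if (rc == 0) {
		/* CQ is armed; the next WC will generate another upcall. */
	} else {
		/* rc < 0: provider error. */
	}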

Commit 7f23f6f6 ("xprtrmda: Reduce lock contention in
completion handlers") assumed that when ib_req_notify_cq() returned
a positive RC, the CQ had also been successfully re-armed, making
it safe to return control to the provider without losing any
completion signals. That is an invalid assumption.
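Concretely, the logic being removed (the '-' lines in the hunks below) treated
a positive return as terminal:

	rc = ib_req_notify_cq(cq,
			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
	if (rc == 0)
		return;			/* armed: a new WC triggers an upcall */
	...
	rpcrdma_sendcq_poll(cq, ep);	/* rc > 0: one final poll, then return */

A WC that arrives after that final poll sits in a CQ that may never have been
re-armed: no upcall fires, and the completion signal is lost.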

Change both completion handlers to continue polling while
ib_req_notify_cq() returns a positive value.
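The resulting handler shape, taken from the '+' lines of the patch:

	do {
		rpcrdma_sendcq_poll(cq, ep);
	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
				  IB_CQ_REPORT_MISSED_EVENTS) > 0);

The loop exits only once ib_req_notify_cq() returns 0 (CQ armed, nothing
missed) or a negative error, leaving no window in which a WC can land
unobserved.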

Fixes: 7f23f6f6 ("xprtrmda: Reduce lock contention in ...")
Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
Reviewed-by: Sagi Grimberg <sagig@mellanox.com>
Reviewed-by: Devesh Sharma <devesh.sharma@avagotech.com>
Tested-By: Devesh Sharma <devesh.sharma@avagotech.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
Signed-off-by: Kamal Mostafa <kamal@canonical.com>
parent 6b27cd74
@@ -178,38 +178,17 @@ rpcrdma_sendcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 	return 0;
 }
 
-/*
- * Handle send, fast_reg_mr, and local_inv completions.
- *
- * Send events are typically suppressed and thus do not result
- * in an upcall. Occasionally one is signaled, however. This
- * prevents the provider's completion queue from wrapping and
- * losing a completion.
+/* Handle provider send completion upcalls.
  */
 static void
 rpcrdma_sendcq_upcall(struct ib_cq *cq, void *cq_context)
 {
 	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
-	int rc;
 
-	rc = rpcrdma_sendcq_poll(cq, ep);
-	if (rc) {
-		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rc = ib_req_notify_cq(cq,
-			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-	if (rc == 0)
-		return;
-	if (rc < 0) {
-		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rpcrdma_sendcq_poll(cq, ep);
+	do {
+		rpcrdma_sendcq_poll(cq, ep);
+	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
+				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 }
 
 static void
@@ -273,42 +252,17 @@ rpcrdma_recvcq_poll(struct ib_cq *cq, struct rpcrdma_ep *ep)
 	return rc;
 }
 
-/*
- * Handle receive completions.
- *
- * It is reentrant but processes single events in order to maintain
- * ordering of receives to keep server credits.
- *
- * It is the responsibility of the scheduled tasklet to return
- * recv buffers to the pool. NOTE: this affects synchronization of
- * connection shutdown. That is, the structures required for
- * the completion of the reply handler must remain intact until
- * all memory has been reclaimed.
+/* Handle provider receive completion upcalls.
  */
 static void
 rpcrdma_recvcq_upcall(struct ib_cq *cq, void *cq_context)
 {
 	struct rpcrdma_ep *ep = (struct rpcrdma_ep *)cq_context;
-	int rc;
 
-	rc = rpcrdma_recvcq_poll(cq, ep);
-	if (rc) {
-		dprintk("RPC:       %s: ib_poll_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rc = ib_req_notify_cq(cq,
-			IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS);
-	if (rc == 0)
-		return;
-	if (rc < 0) {
-		dprintk("RPC:       %s: ib_req_notify_cq failed: %i\n",
-			__func__, rc);
-		return;
-	}
-
-	rpcrdma_recvcq_poll(cq, ep);
+	do {
+		rpcrdma_recvcq_poll(cq, ep);
+	} while (ib_req_notify_cq(cq, IB_CQ_NEXT_COMP |
+				  IB_CQ_REPORT_MISSED_EVENTS) > 0);
 }
 
 static void