Commit 2b74c878 authored by Kaike Wan, committed by Jason Gunthorpe

IB/hfi1: Unreserve a flushed OPFN request

When an OPFN request is flushed, the request is completed without
unreserving itself from the send queue. Subsequently, when a new
request is post sent, the following warning will be triggered:

WARNING: CPU: 4 PID: 8130 at rdmavt/qp.c:1761 rvt_post_send+0x72a/0x880 [rdmavt]
Call Trace:
[<ffffffffbbb61e41>] dump_stack+0x19/0x1b
[<ffffffffbb497688>] __warn+0xd8/0x100
[<ffffffffbb4977cd>] warn_slowpath_null+0x1d/0x20
[<ffffffffc01c941a>] rvt_post_send+0x72a/0x880 [rdmavt]
[<ffffffffbb4dcabe>] ? account_entity_dequeue+0xae/0xd0
[<ffffffffbb61d645>] ? __kmalloc+0x55/0x230
[<ffffffffc04e1a4c>] ib_uverbs_post_send+0x37c/0x5d0 [ib_uverbs]
[<ffffffffc04e5e36>] ? rdma_lookup_put_uobject+0x26/0x60 [ib_uverbs]
[<ffffffffc04dbce6>] ib_uverbs_write+0x286/0x460 [ib_uverbs]
[<ffffffffbb6f9457>] ? security_file_permission+0x27/0xa0
[<ffffffffbb641650>] vfs_write+0xc0/0x1f0
[<ffffffffbb64246f>] SyS_write+0x7f/0xf0
[<ffffffffbbb74ddb>] system_call_fastpath+0x22/0x27

This patch fixes the problem by moving rvt_qp_wqe_unreserve() into
rvt_qp_complete_swqe() to simplify the code and make it less
error-prone.

Fixes: ca95f802 ("IB/hfi1: Unreserve a reserved request when it is completed")
Link: https://lore.kernel.org/r/20190715164528.74174.31364.stgit@awfm-01.aw.intel.com
Cc: <stable@vger.kernel.org>
Reviewed-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Kaike Wan <kaike.wan@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent cd48a820
@@ -1835,7 +1835,6 @@ void hfi1_rc_send_complete(struct rvt_qp *qp, struct hfi1_opa_header *opah)
 		    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)
 			break;
 		trdma_clean_swqe(qp, wqe);
-		rvt_qp_wqe_unreserve(qp, wqe);
 		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
 		rvt_qp_complete_swqe(qp,
 				     wqe,
@@ -1882,7 +1881,6 @@ struct rvt_swqe *do_rc_completion(struct rvt_qp *qp,
 	if (cmp_psn(wqe->lpsn, qp->s_sending_psn) < 0 ||
 	    cmp_psn(qp->s_sending_psn, qp->s_sending_hpsn) > 0) {
 		trdma_clean_swqe(qp, wqe);
-		rvt_qp_wqe_unreserve(qp, wqe);
 		trace_hfi1_qp_send_completion(qp, wqe, qp->s_last);
 		rvt_qp_complete_swqe(qp,
 				     wqe,
......
@@ -608,7 +608,7 @@ static inline void rvt_qp_wqe_reserve(
 /**
  * rvt_qp_wqe_unreserve - clean reserved operation
  * @qp - the rvt qp
- * @wqe - the send wqe
+ * @flags - send wqe flags
  *
  * This decrements the reserve use count.
  *
@@ -620,11 +620,9 @@ static inline void rvt_qp_wqe_reserve(
  * the compiler does not juggle the order of the s_last
  * ring index and the decrementing of s_reserved_used.
  */
-static inline void rvt_qp_wqe_unreserve(
-	struct rvt_qp *qp,
-	struct rvt_swqe *wqe)
+static inline void rvt_qp_wqe_unreserve(struct rvt_qp *qp, int flags)
 {
-	if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
+	if (unlikely(flags & RVT_SEND_RESERVE_USED)) {
 		atomic_dec(&qp->s_reserved_used);
 		/* insure no compiler re-order up to s_last change */
 		smp_mb__after_atomic();
@@ -853,6 +851,7 @@ rvt_qp_complete_swqe(struct rvt_qp *qp,
 	u32 byte_len, last;
 	int flags = wqe->wr.send_flags;
 
+	rvt_qp_wqe_unreserve(qp, flags);
 	rvt_put_qp_swqe(qp, wqe);
 	need_completion =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment