Commit 2aee309d authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1: Fix deadlock with txreq allocation slow path

A failure in the get_txreq() inline results in a
slow-path retry using __get_txreq().

__get_txreq() attempts to acquire the qp's s_lock, which
is already held by all callers, so the retry deadlocks on
a lock it can never take.

Fix by deleting the s_lock acquisition and release in
__get_txreq() and adding sparse __must_hold() annotations
to future-proof the code.

Cc: Stable <stable@vger.kernel.org> # 4.6+
Reviewed-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 34d351f8
@@ -92,11 +92,10 @@ void hfi1_put_txreq(struct verbs_txreq *tx)
 
 struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
                                 struct rvt_qp *qp)
+        __must_hold(&qp->s_lock)
 {
         struct verbs_txreq *tx = ERR_PTR(-EBUSY);
-        unsigned long flags;
 
-        spin_lock_irqsave(&qp->s_lock, flags);
         write_seqlock(&dev->iowait_lock);
         if (ib_rvt_state_ops[qp->state] & RVT_PROCESS_RECV_OK) {
                 struct hfi1_qp_priv *priv;
@@ -116,7 +115,6 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
         }
 out:
         write_sequnlock(&dev->iowait_lock);
-        spin_unlock_irqrestore(&qp->s_lock, flags);
         return tx;
 }
 
@@ -73,6 +73,7 @@ struct verbs_txreq *__get_txreq(struct hfi1_ibdev *dev,
 
 static inline struct verbs_txreq *get_txreq(struct hfi1_ibdev *dev,
                                             struct rvt_qp *qp)
+        __must_hold(&qp->s_lock)
 {
         struct verbs_txreq *tx;
         struct hfi1_qp_priv *priv = qp->priv;
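
For context, here is a minimal, self-contained sketch of the pattern this commit addresses; it is not the hfi1 sources, and the names dev_lock, free_list, slow_path_get() and fast_path_get() are made up for illustration. A fast-path caller already holds a non-recursive spinlock when it falls back to a slow-path helper, so the helper must not take the lock again; a __must_hold() annotation records that precondition for sparse. The conditional definition of __must_hold() mirrors how the kernel defines it for sparse (__CHECKER__) builds.

```c
#include <pthread.h>

/*
 * Sparse defines __CHECKER__; the context() attribute tells it the
 * function is entered and exited with the given lock held (this is how
 * the kernel defines __must_hold()).  Under a normal compiler the macro
 * expands to nothing.
 */
#ifdef __CHECKER__
# define __must_hold(x) __attribute__((context(x, 1, 1)))
#else
# define __must_hold(x)
#endif

static pthread_spinlock_t dev_lock;     /* hypothetical stand-in for the qp s_lock */
static char slab[64];                   /* hypothetical slow-path resource */
static void *free_list = slab;

/*
 * Slow-path helper: every caller already holds dev_lock, so taking it
 * again here would self-deadlock on a non-recursive spinlock.  The
 * annotation records that precondition; in the kernel, where the lock
 * primitives carry __acquires()/__releases() annotations, sparse can
 * then flag a re-acquisition inside a function like this.
 */
static void *slow_path_get(void)
        __must_hold(&dev_lock)
{
        void *item = free_list;         /* work done under the caller's lock */

        free_list = NULL;
        return item;
}

/* Fast-path caller: takes the lock and holds it across the slow path. */
static void *fast_path_get(void)
{
        void *item;

        pthread_spin_lock(&dev_lock);
        item = slow_path_get();         /* lock already held; helper must not relock */
        pthread_spin_unlock(&dev_lock);
        return item;
}

int main(void)
{
        pthread_spin_init(&dev_lock, PTHREAD_PROCESS_PRIVATE);
        return fast_path_get() ? 0 : 1;
}
```

Built with a normal compiler (e.g. gcc -pthread) the annotation vanishes; the point, as in the diff above, is that lock ownership is the caller's responsibility, and the annotation keeps that contract visible and checkable rather than relying on the helper to lock and unlock for itself.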