Commit 3fb755b3 authored by Somnath Kotur, committed by Doug Ledford

RDMA/bnxt_re: Add HW workaround for avoiding stall for UD QPs

HW stalls out after 0x800000 WQEs are posted for UD QPs.
To work around this problem, the driver sends a modify_qp cmd
to the HW at around the halfway mark (0x400000) so that the FW
can modify the QP context in the HW accordingly and prevent the
stall. This workaround is needed for UD, QP1 and Raw Ethertype
packets. Added a counter to keep track of WQEs posted during post_send.
Signed-off-by: Somnath Kotur <somnath.kotur@broadcom.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 1c980b01
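
Before the diff itself, a minimal standalone sketch of the counter scheme the patch implements. This is an illustration only: toy_qp, post_send() and modify_qp_to_rts() are made-up stand-ins for the driver's struct bnxt_re_qp, bnxt_re_post_send() and bnxt_re_modify_qp(); the threshold constant mirrors BNXT_RE_UD_QP_HW_STALL.

#include <stdio.h>

#define UD_QP_HW_STALL 0x400000 /* half of the 0x800000 HW limit */

struct toy_qp {
	int is_ud;                    /* UD, GSI (QP1) or Raw Ethertype QP */
	unsigned long long wqe_cnt;   /* mirrors qplib_qp.wqe_cnt */
};

static void modify_qp_to_rts(struct toy_qp *qp)
{
	/* stand-in for bnxt_re_modify_qp(..., IB_QPS_RTS, ...) */
	printf("modify_qp issued at wqe_cnt=0x%llx\n", qp->wqe_cnt);
}

static void post_send(struct toy_qp *qp)
{
	qp->wqe_cnt++; /* the patch increments this in bnxt_qplib_post_send() */
	if (qp->is_ud && qp->wqe_cnt == UD_QP_HW_STALL) {
		modify_qp_to_rts(qp);
		qp->wqe_cnt = 0; /* restart the window */
	}
}

int main(void)
{
	struct toy_qp qp = { .is_ud = 1 };
	unsigned long long i;

	/* fires twice: after 0x400000 and again after 0x800000 total posts */
	for (i = 0; i < 0x900000ULL; i++)
		post_send(&qp);
	return 0;
}

Resetting the counter at the 0x400000 halfway mark keeps each window well clear of the 0x800000 point at which the HW would stall.
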
@@ -56,6 +56,8 @@
 #define BNXT_RE_MAX_SRQC_COUNT		(64 * 1024)
 #define BNXT_RE_MAX_CQ_COUNT		(64 * 1024)
 
+#define BNXT_RE_UD_QP_HW_STALL		0x400000
+
 struct bnxt_re_work {
 	struct work_struct	work;
 	unsigned long		event;

@@ -2075,6 +2075,22 @@ static int bnxt_re_copy_wr_payload(struct bnxt_re_dev *rdev,
 	return payload_sz;
 }
 
+static void bnxt_ud_qp_hw_stall_workaround(struct bnxt_re_qp *qp)
+{
+	if ((qp->ib_qp.qp_type == IB_QPT_UD ||
+	     qp->ib_qp.qp_type == IB_QPT_GSI ||
+	     qp->ib_qp.qp_type == IB_QPT_RAW_ETHERTYPE) &&
+	    qp->qplib_qp.wqe_cnt == BNXT_RE_UD_QP_HW_STALL) {
+		int qp_attr_mask;
+		struct ib_qp_attr qp_attr;
+
+		qp_attr_mask = IB_QP_STATE;
+		qp_attr.qp_state = IB_QPS_RTS;
+		bnxt_re_modify_qp(&qp->ib_qp, &qp_attr, qp_attr_mask, NULL);
+		qp->qplib_qp.wqe_cnt = 0;
+	}
+}
+
 static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
 				       struct bnxt_re_qp *qp,
 				       struct ib_send_wr *wr)

@@ -2120,6 +2136,7 @@ static int bnxt_re_post_send_shadow_qp(struct bnxt_re_dev *rdev,
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+	bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 	return rc;
 }

@@ -2216,6 +2233,7 @@ int bnxt_re_post_send(struct ib_qp *ib_qp, struct ib_send_wr *wr,
 		wr = wr->next;
 	}
 	bnxt_qplib_post_send_db(&qp->qplib_qp);
+	bnxt_ud_qp_hw_stall_workaround(qp);
 	spin_unlock_irqrestore(&qp->sq_lock, flags);
 	return rc;
 }

@@ -1298,6 +1298,9 @@ int bnxt_qplib_post_send(struct bnxt_qplib_qp *qp,
 	}
 
 	sq->hwq.prod++;
+
+	qp->wqe_cnt++;
+
 done:
 	return rc;
 }

@@ -250,6 +250,7 @@ struct bnxt_qplib_qp {
 	u8			timeout;
 	u8			retry_cnt;
 	u8			rnr_retry;
+	u64			wqe_cnt;
 	u32			min_rnr_timer;
 	u32			max_rd_atomic;
 	u32			max_dest_rd_atomic;