Commit c50866e2 authored by Devesh Sharma, committed by Jason Gunthorpe

bnxt_re: fix the regression due to changes in alloc_pbl

While adding the use of the for_each_sg_dma_page iterator to Broadcom's rdma
driver, a regression was introduced in the __alloc_pbl path. The change
left bnxt_re in a DOA state in the for-next branch.

Fix the regression to avoid the host crash when a user space object is
created: skip the previously unconditional access to hwq.pg_arr when the
hwq is initialized for user space objects.
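
As a hedged illustration of the guard (a minimal userspace model, not the driver code itself): the struct layouts, the clean_qp()/destroy_qp() names, and the is_kernel flag below are hypothetical stand-ins; only rdma_is_kernel_res() and hwq.pg_arr correspond to the real driver.

    #include <stdbool.h>
    #include <stdio.h>

    struct hwq {
            void **pg_arr;  /* NULL for user-space objects after the alloc_pbl change */
            int pg_count;
    };

    struct qp_res {
            struct hwq hwq;
            bool is_kernel; /* stand-in for rdma_is_kernel_res() */
    };

    /* Models bnxt_qplib_clean_qp(): walks hwq.pg_arr unconditionally. */
    static void clean_qp(struct qp_res *qp)
    {
            for (int i = 0; i < qp->hwq.pg_count; i++)
                    printf("scrubbing page %d at %p\n", i, qp->hwq.pg_arr[i]);
    }

    /* Models the fixed destroy path: only kernel objects own a populated
     * kernel-virtual page array, so only they are scrubbed. */
    static void destroy_qp(struct qp_res *qp)
    {
            if (qp->is_kernel)
                    clean_qp(qp); /* a user object would dereference a NULL pg_arr */
    }

    int main(void)
    {
            struct qp_res user_qp = {
                    .hwq = { .pg_arr = NULL, .pg_count = 4 },
                    .is_kernel = false,
            };
            destroy_qp(&user_qp); /* safe: the guard skips the NULL array */
            return 0;
    }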

Fixes: 161ebe24 ("RDMA/bnxt_re: Use for_each_sg_dma_page iterator on umem SGL")
Reported-by: Gal Pressman <galpress@amazon.com>
Signed-off-by: Selvin Xavier <selvin.xavier@broadcom.com>
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent 2612d723
@@ -793,8 +793,8 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 {
 	struct bnxt_re_qp *qp = container_of(ib_qp, struct bnxt_re_qp, ib_qp);
 	struct bnxt_re_dev *rdev = qp->rdev;
-	int rc;
 	unsigned int flags;
+	int rc;
 
 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -803,9 +803,12 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp)
 		return rc;
 	}
 
-	flags = bnxt_re_lock_cqs(qp);
-	bnxt_qplib_clean_qp(&qp->qplib_qp);
-	bnxt_re_unlock_cqs(qp, flags);
+	if (rdma_is_kernel_res(&qp->ib_qp.res)) {
+		flags = bnxt_re_lock_cqs(qp);
+		bnxt_qplib_clean_qp(&qp->qplib_qp);
+		bnxt_re_unlock_cqs(qp, flags);
+	}
+
 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &qp->qplib_qp);
 
 	if (ib_qp->qp_type == IB_QPT_GSI && rdev->qp1_sqp) {
@@ -862,18 +862,18 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 {
 	struct bnxt_qplib_rcfw *rcfw = res->rcfw;
-	struct sq_send *hw_sq_send_hdr, **hw_sq_send_ptr;
-	struct cmdq_create_qp req;
-	struct creq_create_qp_resp resp;
-	struct bnxt_qplib_pbl *pbl;
-	struct sq_psn_search **psn_search_ptr;
 	unsigned long int psn_search, poff = 0;
+	struct sq_psn_search **psn_search_ptr;
 	struct bnxt_qplib_q *sq = &qp->sq;
 	struct bnxt_qplib_q *rq = &qp->rq;
 	int i, rc, req_size, psn_sz = 0;
+	struct sq_send **hw_sq_send_ptr;
+	struct creq_create_qp_resp resp;
 	struct bnxt_qplib_hwq *xrrq;
 	u16 cmd_flags = 0, max_ssge;
-	u32 sw_prod, qp_flags = 0;
+	struct cmdq_create_qp req;
+	struct bnxt_qplib_pbl *pbl;
+	u32 qp_flags = 0;
 	u16 max_rsge;
 
 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -948,14 +948,6 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 			  CMDQ_CREATE_QP_SQ_PG_SIZE_PG_1G :
 			  CMDQ_CREATE_QP_SQ_PG_SIZE_PG_4K);
 
-	/* initialize all SQ WQEs to LOCAL_INVALID (sq prep for hw fetch) */
-	hw_sq_send_ptr = (struct sq_send **)sq->hwq.pbl_ptr;
-	for (sw_prod = 0; sw_prod < sq->hwq.max_elements; sw_prod++) {
-		hw_sq_send_hdr = &hw_sq_send_ptr[get_sqe_pg(sw_prod)]
-						[get_sqe_idx(sw_prod)];
-		hw_sq_send_hdr->wqe_type = SQ_BASE_WQE_TYPE_LOCAL_INVALID;
-	}
-
 	if (qp->scq)
 		req.scq_cid = cpu_to_le32(qp->scq->id);
@@ -119,11 +119,8 @@ static int __alloc_pbl(struct pci_dev *pdev, struct bnxt_qplib_pbl *pbl,
 		for_each_sg_dma_page (sghead, &sg_iter, pages, 0) {
 			pbl->pg_map_arr[i] = sg_page_iter_dma_address(&sg_iter);
 			pbl->pg_arr[i] = NULL;
-			if (!pbl->pg_arr[i])
-				goto fail;
-
-			i++;
 			pbl->pg_count++;
+			i++;
 		}
 	}
 
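For reference, a hedged userspace model of the __alloc_pbl hunk above (the struct and function names are hypothetical; only the loop shape mirrors the driver): for umem-backed user memory only DMA addresses are recorded, so pg_arr[i] is NULL by design, and the removed "if (!pbl->pg_arr[i]) goto fail" bailed out on the very first page.

    #include <stdio.h>

    #define NPAGES 4

    /* Hypothetical stand-in for the driver's page-buffer-list bookkeeping. */
    struct pbl {
            void *pg_arr[NPAGES];             /* kernel VAs; NULL for user memory */
            unsigned long pg_map_arr[NPAGES]; /* DMA addresses from the SG iterator */
            int pg_count;
    };

    /* Mirrors the fixed loop body: record the DMA address, leave pg_arr NULL. */
    static int alloc_pbl_user(struct pbl *pbl, const unsigned long *dma, int n)
    {
            int i = 0;

            for (int p = 0; p < n; p++) {
                    pbl->pg_map_arr[i] = dma[p];
                    pbl->pg_arr[i] = NULL; /* user pages have no kernel mapping */
                    /* Old code: if (!pbl->pg_arr[i]) goto fail; -- with
                     * pg_arr[i] just set to NULL, this failed immediately. */
                    pbl->pg_count++;
                    i++;
            }
            return 0;
    }

    int main(void)
    {
            struct pbl pbl = { {0}, {0}, 0 };
            unsigned long dma[NPAGES] = { 0x1000, 0x2000, 0x3000, 0x4000 };

            if (alloc_pbl_user(&pbl, dma, NPAGES) == 0)
                    printf("mapped %d pages\n", pbl.pg_count);
            return 0;
    }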