Commit 45e86b33 authored by Naresh Gottumukkala, committed by Roland Dreier

RDMA/ocrdma: Cache recv DB until QP moved to RTR

1) In post recv, don't ring the RQ doorbell while the QP is still in the
   INIT state. Cache the doorbell rings and replay them in a single write
   once the QP is moved to RTR (sketched below, after the commit header).
2) Add max_rdma_sge to dev->attr and report it as max_sge_rd in
   ocrdma_query_device().
3) Code cleanup in the alloc_pd path.
Signed-off-by: Naresh Gottumukkala <bgottumukkala@emulex.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 7b9b1a59
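
The core of the change is a deferred-doorbell pattern: while the QP is still
in INIT, each posted receive only increments a software counter, and the
INIT-to-RTR transition replays the accumulated count with one MMIO write.
Below is a minimal, self-contained sketch of that pattern; the types and
names (fake_qp, ring_rq_db, flush_rq_db) are simplified stand-ins, not the
actual ocrdma structures.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-ins for the driver's QP state and doorbell register. */
enum qp_state { QPS_INIT, QPS_RTR };

struct fake_qp {
	enum qp_state state;
	uint16_t db_cache;	/* receives posted while still in INIT */
	uint32_t db_reg;	/* stand-in for the MMIO doorbell register */
};

/* Post-recv path: ring the doorbell now, or remember it for later. */
static void ring_rq_db(struct fake_qp *qp)
{
	if (qp->state != QPS_INIT)
		qp->db_reg = 1;		/* ring a single doorbell */
	else
		qp->db_cache++;		/* defer until the QP reaches RTR */
}

/* Modify-QP path: on the transition to RTR, replay all cached rings. */
static void flush_rq_db(struct fake_qp *qp)
{
	if (qp->db_cache) {
		qp->db_reg = qp->db_cache;
		qp->db_cache = 0;
	}
}

int main(void)
{
	struct fake_qp qp = { .state = QPS_INIT };
	int i;

	for (i = 0; i < 5; i++)
		ring_rq_db(&qp);	/* five recvs posted before RTR */

	qp.state = QPS_RTR;
	flush_rq_db(&qp);		/* one write covers all five */
	printf("doorbell value after flush: %u\n", (unsigned)qp.db_reg);
	return 0;
}

The driver's real equivalents are ocrdma_ring_rq_db() and
ocrdma_flush_rq_db() in the hunks below.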
@@ -60,6 +60,7 @@ struct ocrdma_dev_attr {
 	int max_send_sge;
 	int max_recv_sge;
 	int max_srq_sge;
+	int max_rdma_sge;
 	int max_mr;
 	u64 max_mr_size;
 	u32 max_num_mr_pbl;
@@ -287,6 +288,7 @@ struct ocrdma_qp {
 	u32 qkey;
 	bool dpp_enabled;
 	u8 *ird_q_va;
+	u16 db_cache;
 };

 struct ocrdma_hw_mr {
@@ -992,6 +992,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
 	attr->max_srq_sge = (rsp->max_srq_rqe_sge &
 			      OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
+	attr->max_rdma_sge = (rsp->max_write_send_sge &
+			      OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK) >>
+	    OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT;
 	attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
 			       OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
 	    OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
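The new max_rdma_sge attribute is unpacked from a packed 32-bit word of the
mailbox query response with a mask and a shift, exactly like the neighboring
fields. A generic illustration of that unpacking; the mask and shift values
here are hypothetical, while the real OCRDMA_MBX_QUERY_CFG_* constants come
from the driver's headers:

#include <stdint.h>

/* Hypothetical field layout: the SGE count occupies bits 16..23. */
#define EXAMPLE_MAX_SGE_MASK  0x00ff0000u
#define EXAMPLE_MAX_SGE_SHIFT 16

static inline uint32_t unpack_max_sge(uint32_t word)
{
	/* Mask off the field, then shift it down to bit 0. */
	return (word & EXAMPLE_MAX_SGE_MASK) >> EXAMPLE_MAX_SGE_SHIFT;
}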
@@ -1306,7 +1306,7 @@ struct ocrdma_reg_nsmr_cont {
 	u32 last;
 	struct ocrdma_pa pbl[MAX_OCRDMA_NSMR_PBL];
-} __packed;
+};

 struct ocrdma_pbe {
 	u32 pa_hi;
@@ -84,7 +84,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
 	    IB_DEVICE_SYS_IMAGE_GUID |
 	    IB_DEVICE_LOCAL_DMA_LKEY;
 	attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
-	attr->max_sge_rd = 0;
+	attr->max_sge_rd = dev->attr.max_rdma_sge;
 	attr->max_cq = dev->attr.max_cq;
 	attr->max_cqe = dev->attr.max_cqe;
 	attr->max_mr = dev->attr.max_mr;
@@ -327,7 +327,7 @@ int ocrdma_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
 	return status;
 }

-static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
+static int ocrdma_copy_pd_uresp(struct ocrdma_dev *dev, struct ocrdma_pd *pd,
 				struct ib_ucontext *ib_ctx,
 				struct ib_udata *udata)
 {
@@ -337,7 +337,6 @@ static int ocrdma_copy_pd_uresp(struct ocrdma_pd *pd,
 	u32 db_page_size;
 	struct ocrdma_alloc_pd_uresp rsp;
 	struct ocrdma_ucontext *uctx = get_ocrdma_ucontext(ib_ctx);
-	struct ocrdma_dev *dev = get_ocrdma_dev(pd->ibpd.device);

 	memset(&rsp, 0, sizeof(rsp));
 	rsp.id = pd->id;
@@ -400,14 +399,15 @@ struct ib_pd *ocrdma_alloc_pd(struct ib_device *ibdev,
 	}

 	if (udata && context) {
-		status = ocrdma_copy_pd_uresp(pd, context, udata);
+		status = ocrdma_copy_pd_uresp(dev, pd, context, udata);
 		if (status)
 			goto err;
 	}
 	return &pd->ibpd;

 err:
-	ocrdma_dealloc_pd(&pd->ibpd);
+	status = ocrdma_mbx_dealloc_pd(dev, pd);
+	kfree(pd);
 	return ERR_PTR(status);
 }
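The alloc_pd cleanup does two things: dev, already known to the caller, is
passed down instead of being re-derived with get_ocrdma_dev(), and the error
path now undoes only the two steps this function performed (the firmware
mailbox allocation and the kzalloc) instead of going through the full
dealloc verb. A generic sketch of that unwind shape; every name here
(my_dev, my_obj, my_mbx_alloc, my_alloc) is hypothetical, not driver API:

#include <linux/slab.h>
#include <linux/err.h>

struct my_dev;			/* hypothetical device handle */
struct my_obj { int id; };	/* hypothetical resource */

int my_mbx_alloc(struct my_dev *dev, struct my_obj *obj);

/* Allocate, and on failure release exactly what was acquired here,
 * in reverse order. */
static struct my_obj *my_alloc(struct my_dev *dev)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	int status;

	if (!obj)
		return ERR_PTR(-ENOMEM);
	status = my_mbx_alloc(dev, obj);	/* firmware-side step */
	if (status)
		goto err;
	return obj;
err:
	kfree(obj);				/* undo the kzalloc only */
	return ERR_PTR(status);
}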
@@ -1090,6 +1090,17 @@ struct ib_qp *ocrdma_create_qp(struct ib_pd *ibpd,
 	return ERR_PTR(status);
 }

+static void ocrdma_flush_rq_db(struct ocrdma_qp *qp)
+{
+	if (qp->db_cache) {
+		u32 val = qp->rq.dbid | (qp->db_cache <<
+				ocrdma_get_num_posted_shift(qp));
+		iowrite32(val, qp->rq_db);
+		qp->db_cache = 0;
+	}
+}
+
 int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 		      int attr_mask)
 {
@@ -1108,6 +1119,9 @@ int _ocrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	if (status < 0)
 		return status;
 	status = ocrdma_mbx_modify_qp(dev, qp, attr, attr_mask, old_qps);
+	if (!status && attr_mask & IB_QP_STATE && attr->qp_state == IB_QPS_RTR)
+		ocrdma_flush_rq_db(qp);
+
 	return status;
 }
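The flush hooks into the modify-QP path: when a consumer transitions the QP
to RTR, any receives it posted while the QP was still in INIT get their
doorbell rings replayed in one write. A hedged sketch of that consumer-side
sequence using the kernel verbs API; the function name is hypothetical, and
the address-vector, PSN, and most attr_mask bits are elided for brevity:

#include <rdma/ib_verbs.h>

/* Sketch of a kernel verbs consumer: receives posted while the QP is
 * still in INIT have their doorbell rings cached by ocrdma; the
 * modify-to-RTR below makes _ocrdma_modify_qp() call
 * ocrdma_flush_rq_db() and replay them in a single iowrite32(). */
static int post_then_move_to_rtr(struct ib_qp *qp, struct ib_sge *sge)
{
	struct ib_recv_wr wr = { .wr_id = 1, .sg_list = sge, .num_sge = 1 };
	struct ib_recv_wr *bad_wr;
	struct ib_qp_attr attr = { .qp_state = IB_QPS_RTR };
	int ret;

	ret = ib_post_recv(qp, &wr, &bad_wr);	/* doorbell cached, not rung */
	if (ret)
		return ret;

	/* ... fill in attr.ah_attr, attr.dest_qp_num, attr.rq_psn, etc. ... */
	return ib_modify_qp(qp, &attr, IB_QP_STATE /* | IB_QP_AV | ... */);
}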
@@ -1822,7 +1836,10 @@ static void ocrdma_ring_rq_db(struct ocrdma_qp *qp)
 {
 	u32 val = qp->rq.dbid | (1 << ocrdma_get_num_posted_shift(qp));

-	iowrite32(val, qp->rq_db);
+	if (qp->state != OCRDMA_QPS_INIT)
+		iowrite32(val, qp->rq_db);
+	else
+		qp->db_cache++;
 }

 static void ocrdma_build_rqe(struct ocrdma_hdr_wqe *rqe, struct ib_recv_wr *wr,