Commit fddcbbb0 authored by Devesh Sharma, committed by Jason Gunthorpe

RDMA/bnxt_re: Simplify obtaining queue entry from hw ring

Restructure the data path and control path queue management code to
simplify how a queue element is extracted from the hardware ring.

Introduce a new function that returns a pointer to the next ring entry
based on the current cons/prod index in the hardware queue.
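
For reference, the lookup the new helper performs (see bnxt_qplib_get_qe()
in the last hunk of this diff) is just a divide and a modulo over the
per-page entry count. A minimal standalone sketch of the same computation,
using hypothetical names in place of the driver's types:

        #include <stdint.h>
        #include <stddef.h>

        /* Hypothetical stand-in for struct bnxt_qplib_hwq; only the fields
         * used by the element lookup are modeled here.
         */
        struct demo_ring {
                void **pages;           /* page-base array, like hwq->pbl_ptr */
                uint16_t element_size;  /* bytes per queue entry */
                uint16_t qe_ppg;        /* queue entries per page */
        };

        /* Same arithmetic as bnxt_qplib_get_qe(): split a flat ring index
         * into (page number, slot within page) and return the entry address.
         */
        static inline void *demo_get_qe(struct demo_ring *r, uint32_t indx)
        {
                uint32_t pg_num = indx / r->qe_ppg;
                uint32_t pg_idx = indx % r->qe_ppg;

                return (uint8_t *)r->pages[pg_num] +
                       (size_t)r->element_size * pg_idx;
        }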

Further, the queue entry size was hardcoded wherever a queue size is
calculated; replace these hardcoded values with an inline function, which
will be easier to extend going forward.
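
As an illustration taken from the ib_verbs.c hunks below, the size of the
user-mapped SQ moves from a fixed macro to the per-queue field that the new
inline helpers fill in:

        /* before: entry size hardcoded via a macro */
        bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);

        /* after: wqe_size was set earlier from bnxt_re_get_swqe_size() */
        bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);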

The code that initializes the PSN search areas has also been restructured,
and a couple of helper functions have been added there.

Link: https://lore.kernel.org/r/1585851136-2316-4-git-send-email-devesh.sharma@broadcom.com
Signed-off-by: Devesh Sharma <devesh.sharma@broadcom.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent c78671a4
@@ -856,7 +856,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
 		return -EFAULT;
-	bytes = (qplib_qp->sq.max_wqe * BNXT_QPLIB_MAX_SQE_ENTRY_SIZE);
+	bytes = (qplib_qp->sq.max_wqe * qplib_qp->sq.wqe_size);
 	/* Consider mapping PSN search memory only for RC QPs. */
 	if (qplib_qp->type == CMDQ_CREATE_QP_TYPE_RC) {
 		psn_sz = bnxt_qplib_is_chip_gen_p5(rdev->chip_ctx) ?
@@ -879,7 +879,7 @@ static int bnxt_re_init_user_qp(struct bnxt_re_dev *rdev, struct bnxt_re_pd *pd,
 	qplib_qp->qp_handle = ureq.qp_handle;

 	if (!qp->qplib_qp.srq) {
-		bytes = (qplib_qp->rq.max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+		bytes = (qplib_qp->rq.max_wqe * qplib_qp->rq.wqe_size);
 		bytes = PAGE_ALIGN(bytes);
 		umem = ib_umem_get(&rdev->ibdev, ureq.qprva, bytes,
 				   IB_ACCESS_LOCAL_WRITE);
@@ -976,6 +976,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
 	qp->qplib_qp.sig_type = true;
 	/* Shadow QP SQ depth should be same as QP1 RQ depth */
+	qp->qplib_qp.sq.wqe_size = bnxt_re_get_swqe_size();
 	qp->qplib_qp.sq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.sq.max_sge = 2;
 	/* Q full delta can be 1 since it is internal QP */
@@ -986,6 +987,7 @@ static struct bnxt_re_qp *bnxt_re_create_shadow_qp
 	qp->qplib_qp.scq = qp1_qp->scq;
 	qp->qplib_qp.rcq = qp1_qp->rcq;
+	qp->qplib_qp.rq.wqe_size = bnxt_re_get_rwqe_size();
 	qp->qplib_qp.rq.max_wqe = qp1_qp->rq.max_wqe;
 	qp->qplib_qp.rq.max_sge = qp1_qp->rq.max_sge;
 	/* Q full delta can be 1 since it is internal QP */
@@ -1021,10 +1023,12 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
 	struct bnxt_qplib_dev_attr *dev_attr;
 	struct bnxt_qplib_qp *qplqp;
 	struct bnxt_re_dev *rdev;
+	struct bnxt_qplib_q *rq;
 	int entries;

 	rdev = qp->rdev;
 	qplqp = &qp->qplib_qp;
+	rq = &qplqp->rq;
 	dev_attr = &rdev->dev_attr;

 	if (init_attr->srq) {
@@ -1036,23 +1040,21 @@ static int bnxt_re_init_rq_attr(struct bnxt_re_qp *qp,
 			return -EINVAL;
 		}
 		qplqp->srq = &srq->qplib_srq;
-		qplqp->rq.max_wqe = 0;
+		rq->max_wqe = 0;
 	} else {
+		rq->wqe_size = bnxt_re_get_rwqe_size();
 		/* Allocate 1 more than what's provided so posting max doesn't
 		 * mean empty.
 		 */
 		entries = roundup_pow_of_two(init_attr->cap.max_recv_wr + 1);
-		qplqp->rq.max_wqe = min_t(u32, entries,
-					  dev_attr->max_qp_wqes + 1);
-
-		qplqp->rq.q_full_delta = qplqp->rq.max_wqe -
-					 init_attr->cap.max_recv_wr;
-		qplqp->rq.max_sge = init_attr->cap.max_recv_sge;
-		if (qplqp->rq.max_sge > dev_attr->max_qp_sges)
-			qplqp->rq.max_sge = dev_attr->max_qp_sges;
+		rq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes + 1);
+		rq->q_full_delta = rq->max_wqe - init_attr->cap.max_recv_wr;
+		rq->max_sge = init_attr->cap.max_recv_sge;
+		if (rq->max_sge > dev_attr->max_qp_sges)
+			rq->max_sge = dev_attr->max_qp_sges;
 	}
-	qplqp->rq.sg_info.pgsize = PAGE_SIZE;
-	qplqp->rq.sg_info.pgshft = PAGE_SHIFT;
+	rq->sg_info.pgsize = PAGE_SIZE;
+	rq->sg_info.pgshft = PAGE_SHIFT;

 	return 0;
 }
@@ -1080,15 +1082,18 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
 	struct bnxt_qplib_dev_attr *dev_attr;
 	struct bnxt_qplib_qp *qplqp;
 	struct bnxt_re_dev *rdev;
+	struct bnxt_qplib_q *sq;
 	int entries;

 	rdev = qp->rdev;
 	qplqp = &qp->qplib_qp;
+	sq = &qplqp->sq;
 	dev_attr = &rdev->dev_attr;

-	qplqp->sq.max_sge = init_attr->cap.max_send_sge;
-	if (qplqp->sq.max_sge > dev_attr->max_qp_sges)
-		qplqp->sq.max_sge = dev_attr->max_qp_sges;
+	sq->wqe_size = bnxt_re_get_swqe_size();
+	sq->max_sge = init_attr->cap.max_send_sge;
+	if (sq->max_sge > dev_attr->max_qp_sges)
+		sq->max_sge = dev_attr->max_qp_sges;
 	/*
 	 * Change the SQ depth if user has requested minimum using
 	 * configfs. Only supported for kernel consumers
@@ -1096,9 +1101,9 @@ static void bnxt_re_init_sq_attr(struct bnxt_re_qp *qp,
 	entries = init_attr->cap.max_send_wr;
 	/* Allocate 128 + 1 more than what's provided */
 	entries = roundup_pow_of_two(entries + BNXT_QPLIB_RESERVED_QP_WRS + 1);
-	qplqp->sq.max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
-				  BNXT_QPLIB_RESERVED_QP_WRS + 1);
-	qplqp->sq.q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
+	sq->max_wqe = min_t(u32, entries, dev_attr->max_qp_wqes +
+			    BNXT_QPLIB_RESERVED_QP_WRS + 1);
+	sq->q_full_delta = BNXT_QPLIB_RESERVED_QP_WRS + 1;
 	/*
 	 * Reserving one slot for Phantom WQE. Application can
 	 * post one extra entry in this case. But allowing this to avoid
@@ -1511,7 +1516,7 @@ static int bnxt_re_init_user_srq(struct bnxt_re_dev *rdev,
 	if (ib_copy_from_udata(&ureq, udata, sizeof(ureq)))
 		return -EFAULT;
-	bytes = (qplib_srq->max_wqe * BNXT_QPLIB_MAX_RQE_ENTRY_SIZE);
+	bytes = (qplib_srq->max_wqe * qplib_srq->wqe_size);
 	bytes = PAGE_ALIGN(bytes);
 	umem = ib_umem_get(&rdev->ibdev, ureq.srqva, bytes,
 			   IB_ACCESS_LOCAL_WRITE);
@@ -1534,15 +1539,20 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 		       struct ib_srq_init_attr *srq_init_attr,
 		       struct ib_udata *udata)
 {
-	struct ib_pd *ib_pd = ib_srq->pd;
-	struct bnxt_re_pd *pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
-	struct bnxt_re_dev *rdev = pd->rdev;
-	struct bnxt_qplib_dev_attr *dev_attr = &rdev->dev_attr;
-	struct bnxt_re_srq *srq =
-		container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+	struct bnxt_qplib_dev_attr *dev_attr;
 	struct bnxt_qplib_nq *nq = NULL;
+	struct bnxt_re_dev *rdev;
+	struct bnxt_re_srq *srq;
+	struct bnxt_re_pd *pd;
+	struct ib_pd *ib_pd;
 	int rc, entries;

+	ib_pd = ib_srq->pd;
+	pd = container_of(ib_pd, struct bnxt_re_pd, ib_pd);
+	rdev = pd->rdev;
+	dev_attr = &rdev->dev_attr;
+	srq = container_of(ib_srq, struct bnxt_re_srq, ib_srq);
+
 	if (srq_init_attr->attr.max_wr >= dev_attr->max_srq_wqes) {
 		ibdev_err(&rdev->ibdev, "Create CQ failed - max exceeded");
 		rc = -EINVAL;
@@ -1563,8 +1573,9 @@ int bnxt_re_create_srq(struct ib_srq *ib_srq,
 	entries = roundup_pow_of_two(srq_init_attr->attr.max_wr + 1);
 	if (entries > dev_attr->max_srq_wqes + 1)
 		entries = dev_attr->max_srq_wqes + 1;
-
 	srq->qplib_srq.max_wqe = entries;
+
+	srq->qplib_srq.wqe_size = bnxt_re_get_rwqe_size();
 	srq->qplib_srq.max_sge = srq_init_attr->attr.max_sge;
 	srq->qplib_srq.threshold = srq_init_attr->attr.srq_limit;
 	srq->srq_limit = srq_init_attr->attr.srq_limit;
......
@@ -142,6 +142,16 @@ struct bnxt_re_ucontext {
 	spinlock_t sh_lock;	/* protect shpg */
 };

+static inline u16 bnxt_re_get_swqe_size(void)
+{
+	return sizeof(struct sq_send);
+}
+
+static inline u16 bnxt_re_get_rwqe_size(void)
+{
+	return sizeof(struct rq_wqe);
+}
+
 int bnxt_re_query_device(struct ib_device *ibdev,
 			 struct ib_device_attr *ib_attr,
 			 struct ib_udata *udata);
......
[One file's diff is collapsed and not shown here.]
@@ -45,6 +45,7 @@ struct bnxt_qplib_srq {
 	struct bnxt_qplib_db_info dbinfo;
 	u64 srq_handle;
 	u32 id;
+	u16 wqe_size;
 	u32 max_wqe;
 	u32 max_sge;
 	u32 threshold;
@@ -65,38 +66,7 @@ struct bnxt_qplib_sge {
 	u32 size;
 };

-#define BNXT_QPLIB_MAX_SQE_ENTRY_SIZE	sizeof(struct sq_send)
-#define SQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_MAX_SQE_ENTRY_SIZE)
-#define SQE_MAX_IDX_PER_PG	(SQE_CNT_PER_PG - 1)
-
-static inline u32 get_sqe_pg(u32 val)
-{
-	return ((val & ~SQE_MAX_IDX_PER_PG) / SQE_CNT_PER_PG);
-}
-
-static inline u32 get_sqe_idx(u32 val)
-{
-	return (val & SQE_MAX_IDX_PER_PG);
-}
-
-#define BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE	sizeof(struct sq_psn_search)
-#define PSNE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_MAX_PSNE_ENTRY_SIZE)
-#define PSNE_MAX_IDX_PER_PG	(PSNE_CNT_PER_PG - 1)
-
-static inline u32 get_psne_pg(u32 val)
-{
-	return ((val & ~PSNE_MAX_IDX_PER_PG) / PSNE_CNT_PER_PG);
-}
-
-static inline u32 get_psne_idx(u32 val)
-{
-	return (val & PSNE_MAX_IDX_PER_PG);
-}
-
 #define BNXT_QPLIB_QP_MAX_SGL	6

 struct bnxt_qplib_swq {
 	u64 wr_id;
 	int next_idx;
@@ -226,19 +196,13 @@ struct bnxt_qplib_swqe {
 	};
 };

-#define BNXT_QPLIB_MAX_RQE_ENTRY_SIZE	sizeof(struct rq_wqe)
-
-#define RQE_CNT_PER_PG		(PAGE_SIZE / BNXT_QPLIB_MAX_RQE_ENTRY_SIZE)
-#define RQE_MAX_IDX_PER_PG	(RQE_CNT_PER_PG - 1)
-#define RQE_PG(x)		(((x) & ~RQE_MAX_IDX_PER_PG) / RQE_CNT_PER_PG)
-#define RQE_IDX(x)		((x) & RQE_MAX_IDX_PER_PG)
-
 struct bnxt_qplib_q {
 	struct bnxt_qplib_hwq hwq;
 	struct bnxt_qplib_swq *swq;
 	struct bnxt_qplib_db_info dbinfo;
 	struct bnxt_qplib_sg_info sg_info;
 	u32 max_wqe;
+	u16 wqe_size;
 	u16 q_full_delta;
 	u16 max_sge;
 	u32 psn;
......
@@ -89,10 +89,9 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 			  struct creq_base *resp, void *sb, u8 is_block)
 {
 	struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;
-	struct bnxt_qplib_cmdqe *cmdqe, **hwq_ptr;
 	struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
 	struct bnxt_qplib_crsqe *crsqe;
-	u32 cmdq_depth = rcfw->cmdq_depth;
+	struct bnxt_qplib_cmdqe *cmdqe;
 	u32 sw_prod, cmdq_prod;
 	struct pci_dev *pdev;
 	unsigned long flags;
@@ -163,13 +162,11 @@ static int __send_message(struct bnxt_qplib_rcfw *rcfw, struct cmdq_base *req,
 			BNXT_QPLIB_CMDQE_UNITS;
 	}

-	hwq_ptr = (struct bnxt_qplib_cmdqe **)hwq->pbl_ptr;
 	preq = (u8 *)req;
 	do {
 		/* Locate the next cmdq slot */
 		sw_prod = HWQ_CMP(hwq->prod, hwq);
-		cmdqe = &hwq_ptr[get_cmdq_pg(sw_prod, cmdq_depth)]
-				[get_cmdq_idx(sw_prod, cmdq_depth)];
+		cmdqe = bnxt_qplib_get_qe(hwq, sw_prod, NULL);
 		if (!cmdqe) {
 			dev_err(&pdev->dev,
 				"RCFW request failed with no cmdqe!\n");
@@ -378,7 +375,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
 	struct bnxt_qplib_creq_ctx *creq = &rcfw->creq;
 	u32 type, budget = CREQ_ENTRY_POLL_BUDGET;
 	struct bnxt_qplib_hwq *hwq = &creq->hwq;
-	struct creq_base *creqe, **hwq_ptr;
+	struct creq_base *creqe;
 	u32 sw_cons, raw_cons;
 	unsigned long flags;
@@ -387,8 +384,7 @@ static void bnxt_qplib_service_creq(unsigned long data)
 	raw_cons = hwq->cons;
 	while (budget > 0) {
 		sw_cons = HWQ_CMP(raw_cons, hwq);
-		hwq_ptr = (struct creq_base **)hwq->pbl_ptr;
-		creqe = &hwq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)];
+		creqe = bnxt_qplib_get_qe(hwq, sw_cons, NULL);
 		if (!CREQ_CMP_VALID(creqe, raw_cons, hwq->max_elements))
 			break;
 		/* The valid test of the entry must be done first before
@@ -434,7 +430,6 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 {
 	struct bnxt_qplib_rcfw *rcfw = dev_instance;
 	struct bnxt_qplib_creq_ctx *creq;
-	struct creq_base **creq_ptr;
 	struct bnxt_qplib_hwq *hwq;
 	u32 sw_cons;
@@ -442,8 +437,7 @@ static irqreturn_t bnxt_qplib_creq_irq(int irq, void *dev_instance)
 	hwq = &creq->hwq;
 	/* Prefetch the CREQ element */
 	sw_cons = HWQ_CMP(hwq->cons, hwq);
-	creq_ptr = (struct creq_base **)creq->hwq.pbl_ptr;
-	prefetch(&creq_ptr[get_creq_pg(sw_cons)][get_creq_idx(sw_cons)]);
+	prefetch(bnxt_qplib_get_qe(hwq, sw_cons, NULL));

 	tasklet_schedule(&creq->creq_tasklet);
......
@@ -87,12 +87,6 @@ static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
 	return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
 }

-static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
-{
-	return (bnxt_qplib_cmdqe_page_size(depth) /
-		BNXT_QPLIB_CMDQE_UNITS);
-}
-
 /* Set the cmd_size to a factor of CMDQE unit */
 static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
 {
@@ -100,30 +94,12 @@ static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
 		BNXT_QPLIB_CMDQE_UNITS;
 }

-#define MAX_CMDQ_IDX(depth)	((depth) - 1)
-
-static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
-{
-	return (bnxt_qplib_cmdqe_cnt_per_pg(depth) - 1);
-}
-
 #define RCFW_MAX_COOKIE_VALUE		0x7FFF
 #define RCFW_CMD_IS_BLOCKING		0x8000
 #define RCFW_BLOCKED_CMD_WAIT_COUNT	0x4E20
 #define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL

-static inline u32 get_cmdq_pg(u32 val, u32 depth)
-{
-	return (val & ~(bnxt_qplib_max_cmdq_idx_per_pg(depth))) /
-		(bnxt_qplib_cmdqe_cnt_per_pg(depth));
-}
-
-static inline u32 get_cmdq_idx(u32 val, u32 depth)
-{
-	return val & (bnxt_qplib_max_cmdq_idx_per_pg(depth));
-}
-
 /* Crsq buf is 1024-Byte */
 struct bnxt_qplib_crsbe {
 	u8 data[1024];
@@ -133,23 +109,6 @@ struct bnxt_qplib_crsbe {
 /* Allocate 1 per QP for async error notification for now */
 #define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
 #define BNXT_QPLIB_CREQE_UNITS		16	/* 16-Bytes per prod unit */
-#define BNXT_QPLIB_CREQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)
-
-#define MAX_CREQ_IDX			(BNXT_QPLIB_CREQE_MAX_CNT - 1)
-#define MAX_CREQ_IDX_PER_PG		(BNXT_QPLIB_CREQE_CNT_PER_PG - 1)
-
-static inline u32 get_creq_pg(u32 val)
-{
-	return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
-}
-
-static inline u32 get_creq_idx(u32 val)
-{
-	return val & MAX_CREQ_IDX_PER_PG;
-}
-
-#define BNXT_QPLIB_CREQE_PER_PG	(PAGE_SIZE / sizeof(struct creq_base))
-
 #define CREQ_CMP_VALID(hdr, raw_cons, cp_bit)			\
 	(!!((hdr)->v & CREQ_BASE_V) ==				\
 	   !((raw_cons) & (cp_bit)))
......
@@ -347,6 +347,7 @@ int bnxt_qplib_alloc_init_hwq(struct bnxt_qplib_hwq *hwq,
 	hwq->depth = hwq_attr->depth;
 	hwq->max_elements = depth;
 	hwq->element_size = stride;
+	hwq->qe_ppg = pg_size / stride;
 	/* For direct access to the elements */
 	lvl = hwq->level;
 	if (hwq_attr->sginfo->nopte && hwq->level)
......
@@ -135,6 +135,7 @@ struct bnxt_qplib_hwq {
 	u32 max_elements;
 	u32 depth;
 	u16 element_size;	/* Size of each entry */
+	u16 qe_ppg;		/* queue entry per page */
 	u32 prod;		/* raw */
 	u32 cons;		/* raw */
@@ -304,6 +305,18 @@ static inline u8 bnxt_qplib_base_pg_size(struct bnxt_qplib_hwq *hwq)
 	return pg_size;
 }

+static inline void *bnxt_qplib_get_qe(struct bnxt_qplib_hwq *hwq,
+				      u32 indx, u64 *pg)
+{
+	u32 pg_num, pg_idx;
+
+	pg_num = (indx / hwq->qe_ppg);
+	pg_idx = (indx % hwq->qe_ppg);
+	if (pg)
+		*pg = (u64)&hwq->pbl_ptr[pg_num];
+	return (void *)(hwq->pbl_ptr[pg_num] + hwq->element_size * pg_idx);
+}
+
 #define to_bnxt_qplib(ptr, type, member)	\
 		container_of(ptr, type, member)
......