Commit 6332dee8 authored by Adit Ranadive, committed by Doug Ledford

RDMA/vmw_pvrdma: Cleanup unused variables

Removed the unused nreq and redundant index variables.
Moved the hardcoded number of async event and CQ notification ring pages into a macro (PVRDMA_NUM_RING_PAGES).
Reported-by: Yuval Shaia <yuval.shaia@oracle.com>
Signed-off-by: Adit Ranadive <aditr@vmware.com>
Reviewed-by: Aditya Sarwade <asarwade@vmware.com>
Tested-by: Andrew Boyer <andrew.boyer@dell.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent cb886455
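
The core of the change in pvrdma_post_send()/pvrdma_post_recv() is that pvrdma_idx_ring_has_space() already reports which slot the producer should write next (tail), so the separately tracked index, together with its wrap-around handling, duplicated state the ring helpers maintain anyway. Below is a minimal standalone sketch of that pattern, not the driver code: the ring is a plain non-atomic model and the names ring_has_space(), ring_inc(), post_all() and struct req are invented for illustration only.

/*
 * Standalone sketch (not the pvrdma driver code): the "has space" check on
 * a producer ring already yields the slot to write, so the posting loop
 * needs no index bookkeeping of its own.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_SLOTS 8U

struct ring {
	unsigned int prod_tail;	/* producer index, wraps at 2 * RING_SLOTS */
	unsigned int cons_head;	/* consumer index, wraps at 2 * RING_SLOTS */
	int slot[RING_SLOTS];
};

/* True if a free slot exists; on success *tail is the slot to write. */
static bool ring_has_space(const struct ring *r, unsigned int *tail)
{
	unsigned int used = (r->prod_tail - r->cons_head) % (2U * RING_SLOTS);

	if (used >= RING_SLOTS)
		return false;
	*tail = r->prod_tail % RING_SLOTS;
	return true;
}

/* Publish one entry; the 2 * RING_SLOTS modulus disambiguates full/empty. */
static void ring_inc(struct ring *r)
{
	r->prod_tail = (r->prod_tail + 1) % (2U * RING_SLOTS);
}

struct req {
	int payload;
	struct req *next;
};

/* Post a chain of requests, mirroring the new "while (wr)" loop shape. */
static int post_all(struct ring *r, struct req *wr)
{
	while (wr) {
		unsigned int tail = 0;

		if (!ring_has_space(r, &tail))
			return -1;		/* ring full */

		r->slot[tail] = wr->payload;	/* write the "WQE" */
		ring_inc(r);			/* then publish it */
		wr = wr->next;
	}
	return 0;
}

int main(void)
{
	struct ring r = { 0 };
	struct req b = { .payload = 2, .next = NULL };
	struct req a = { .payload = 1, .next = &b };

	printf("post_all: %d, slot[0]=%d slot[1]=%d\n",
	       post_all(&r, &a), r.slot[0], r.slot[1]);
	return 0;
}

With the write slot supplied by the space check, the old loop's index++/wrap-to-zero bookkeeping serves no purpose, which is exactly what the patch deletes; advancing the work-request pointer at the bottom of the while loop replaces the for loop's nreq/wr = wr->next increment clause.
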
@@ -69,6 +69,8 @@
  */
 #define PCI_DEVICE_ID_VMWARE_PVRDMA	0x0820
 
+#define PVRDMA_NUM_RING_PAGES		4
+
 struct pvrdma_dev;
 
 struct pvrdma_page_dir {
...
@@ -858,7 +858,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev->dsr->resp_slot_dma = (u64)slot_dma;
 
 	/* Async event ring */
-	dev->dsr->async_ring_pages.num_pages = 4;
+	dev->dsr->async_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
 	ret = pvrdma_page_dir_init(dev, &dev->async_pdir,
 				   dev->dsr->async_ring_pages.num_pages, true);
 	if (ret)
@@ -867,7 +867,7 @@ static int pvrdma_pci_probe(struct pci_dev *pdev,
 	dev->dsr->async_ring_pages.pdir_dma = dev->async_pdir.dir_dma;
 
 	/* CQ notification ring */
-	dev->dsr->cq_ring_pages.num_pages = 4;
+	dev->dsr->cq_ring_pages.num_pages = PVRDMA_NUM_RING_PAGES;
 	ret = pvrdma_page_dir_init(dev, &dev->cq_pdir,
 				   dev->dsr->cq_ring_pages.num_pages, true);
 	if (ret)
...
@@ -554,13 +554,13 @@ int pvrdma_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 	return ret;
 }
 
-static inline void *get_sq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_sq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->sq.offset + n * qp->sq.wqe_size);
 }
 
-static inline void *get_rq_wqe(struct pvrdma_qp *qp, int n)
+static inline void *get_rq_wqe(struct pvrdma_qp *qp, unsigned int n)
 {
 	return pvrdma_page_dir_get_ptr(&qp->pdir,
 				       qp->rq.offset + n * qp->rq.wqe_size);
@@ -598,9 +598,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 	unsigned long flags;
 	struct pvrdma_sq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int i, index;
-	int nreq;
-	int ret;
+	int i, ret;
 
 	/*
 	 * In states lower than RTS, we can fail immediately. In other states,
@@ -613,9 +611,8 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 
 	spin_lock_irqsave(&qp->sq.lock, flags);
 
-	index = pvrdma_idx(&qp->sq.ring->prod_tail, qp->sq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(!pvrdma_idx_ring_has_space(
 				qp->sq.ring, qp->sq.wqe_cnt, &tail))) {
@@ -680,7 +677,7 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 			}
 		}
 
-		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_sq_wqe_hdr *)get_sq_wqe(qp, tail);
 		memset(wqe_hdr, 0, sizeof(*wqe_hdr));
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
@@ -771,12 +768,11 @@ int pvrdma_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->sq.wqe_cnt))
-			index = 0;
 		/* Update shared sq ring */
 		pvrdma_idx_ring_inc(&qp->sq.ring->prod_tail,
 				    qp->sq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	ret = 0;
@@ -806,7 +802,6 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 	struct pvrdma_qp *qp = to_vqp(ibqp);
 	struct pvrdma_rq_wqe_hdr *wqe_hdr;
 	struct pvrdma_sge *sge;
-	int index, nreq;
 	int ret = 0;
 	int i;
 
@@ -821,9 +816,8 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 
 	spin_lock_irqsave(&qp->rq.lock, flags);
 
-	index = pvrdma_idx(&qp->rq.ring->prod_tail, qp->rq.wqe_cnt);
-	for (nreq = 0; wr; nreq++, wr = wr->next) {
-		unsigned int tail;
+	while (wr) {
+		unsigned int tail = 0;
 
 		if (unlikely(wr->num_sge > qp->rq.max_sg ||
 			     wr->num_sge < 0)) {
@@ -843,7 +837,7 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			goto out;
 		}
 
-		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, index);
+		wqe_hdr = (struct pvrdma_rq_wqe_hdr *)get_rq_wqe(qp, tail);
 		wqe_hdr->wr_id = wr->wr_id;
 		wqe_hdr->num_sge = wr->num_sge;
 		wqe_hdr->total_len = 0;
@@ -859,12 +853,11 @@ int pvrdma_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 		/* Make sure wqe is written before index update */
 		smp_wmb();
 
-		index++;
-		if (unlikely(index >= qp->rq.wqe_cnt))
-			index = 0;
 		/* Update shared rq ring */
 		pvrdma_idx_ring_inc(&qp->rq.ring->prod_tail,
 				    qp->rq.wqe_cnt);
+
+		wr = wr->next;
 	}
 
 	spin_unlock_irqrestore(&qp->rq.lock, flags);
...