Commit ec6adad0 authored by Yixian Liu, committed by Jason Gunthorpe

RDMA/hns: Delete unnecessary variable max_post

There is no need to define max_post in hns_roce_wq, as it does the same thing
as wqe_cnt.

Link: https://lore.kernel.org/r/1572952082-6681-2-git-send-email-liweihang@hisilicon.com
Signed-off-by: Yixian Liu <liuyixian@huawei.com>
Signed-off-by: Weihang Li <liweihang@hisilicon.com>
Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
parent ffa2fd13
@@ -424,7 +424,6 @@ struct hns_roce_wq {
 	u64		*wrid;     /* Work request ID */
 	spinlock_t	lock;
 	int		wqe_cnt;  /* WQE num */
-	u32		max_post;
 	int		max_gs;
 	int		offset;
 	int		wqe_shift; /* WQE size */
@@ -318,7 +318,7 @@ static int hns_roce_set_rq_size(struct hns_roce_dev *hr_dev,
 					       * hr_qp->rq.max_gs);
 	}

-	cap->max_recv_wr = hr_qp->rq.max_post = hr_qp->rq.wqe_cnt;
+	cap->max_recv_wr = hr_qp->rq.wqe_cnt;
 	cap->max_recv_sge = hr_qp->rq.max_gs;

 	return 0;
@@ -610,7 +610,7 @@ static int hns_roce_set_kernel_sq_size(struct hns_roce_dev *hr_dev,
 	hr_qp->buff_size = size;

 	/* Get wr and sge number which send */
-	cap->max_send_wr = hr_qp->sq.max_post = hr_qp->sq.wqe_cnt;
+	cap->max_send_wr = hr_qp->sq.wqe_cnt;
 	cap->max_send_sge = hr_qp->sq.max_gs;

 	/* We don't support inline sends for kernel QPs (yet) */
@@ -1291,7 +1291,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 	u32 cur;

 	cur = hr_wq->head - hr_wq->tail;
-	if (likely(cur + nreq < hr_wq->max_post))
+	if (likely(cur + nreq < hr_wq->wqe_cnt))
 		return false;

 	hr_cq = to_hr_cq(ib_cq);
@@ -1299,7 +1299,7 @@ bool hns_roce_wq_overflow(struct hns_roce_wq *hr_wq, int nreq,
 	cur = hr_wq->head - hr_wq->tail;
 	spin_unlock(&hr_cq->lock);

-	return cur + nreq >= hr_wq->max_post;
+	return cur + nreq >= hr_wq->wqe_cnt;
 }

 int hns_roce_init_qp_table(struct hns_roce_dev *hr_dev)
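
For context, the following is a minimal standalone sketch (not the driver source; struct and function names such as wq_sketch and wq_overflow are illustrative only) of the free-running head/tail fullness check that hns_roce_wq_overflow() performs. The number of outstanding WQEs is head - tail, so the ring capacity wqe_cnt is the only bound the check needs; a separate max_post field carries no extra information.

/*
 * Standalone sketch of the overflow check; compiles and runs on its own.
 * Names here are illustrative, not taken from the driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct wq_sketch {
	unsigned int head;    /* bumped on every posted WQE */
	unsigned int tail;    /* bumped on every completed WQE */
	unsigned int wqe_cnt; /* ring capacity */
};

/* True if posting nreq more WQEs would overflow the ring. */
static bool wq_overflow(const struct wq_sketch *wq, unsigned int nreq)
{
	unsigned int cur = wq->head - wq->tail; /* outstanding WQEs; unsigned wrap-around is fine */

	return cur + nreq >= wq->wqe_cnt;
}

int main(void)
{
	struct wq_sketch wq = { .head = 0, .tail = 0, .wqe_cnt = 8 };
	unsigned int i;

	for (i = 0; i < 10; i++) {
		if (wq_overflow(&wq, 1)) {
			printf("post %u: ring full (outstanding=%u, wqe_cnt=%u)\n",
			       i, wq.head - wq.tail, wq.wqe_cnt);
			break;
		}
		wq.head++;
	}
	return 0;
}

With wqe_cnt = 8 the sketch refuses the eighth post, mirroring the check in the patched hns_roce_wq_overflow(), which rejects a post once cur + nreq reaches the ring size.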