Commit c462a024 authored by Wenpeng Liang, committed by Jason Gunthorpe

RDMA/hns: Encapsulate flushing CQE as a function

The process of flushing CQEs can be encapsulated into a function, which
reduces duplicate code.

Link: https://lore.kernel.org/r/1624011020-16992-9-git-send-email-liweihang@huawei.com
Signed-off-by: Wenpeng Liang <liangwenpeng@huawei.com>
Signed-off-by: Weihang Li <liweihang@huawei.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
parent a33958ca
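
Before the diff itself, a condensed illustration of the refactor may help: every error path used to open-code the "set the flush flag once, then queue the flush work" sequence, and after this commit they all call a single helper. The following is a hedged, simplified userspace model of that pattern, not driver code: struct qp_model, schedule_flush() and flush_cqe_model() are illustrative stand-ins, C11 atomic_flag replaces the kernel's test_and_set_bit(), and a printf() stands in for init_flush_work().

/*
 * Simplified model (assumption: userspace C11, not the hns driver) of the
 * pattern this commit consolidates: mark the QP for flushing exactly once,
 * then defer the real work.
 */
#include <stdatomic.h>
#include <stdio.h>

struct qp_model {
        int state;                 /* stands in for qp->state               */
        atomic_flag flush_pending; /* stands in for the HNS_ROCE_FLUSH_FLAG */
};

/* Stand-in for init_flush_work(): in the driver this queues deferred work. */
static void schedule_flush(struct qp_model *qp)
{
        printf("flush work queued once for qp %p\n", (void *)qp);
}

/* The consolidated helper: callers no longer duplicate the flag-and-defer. */
static void flush_cqe_model(struct qp_model *qp)
{
        if (!atomic_flag_test_and_set(&qp->flush_pending))
                schedule_flush(qp);
}

int main(void)
{
        struct qp_model qp = { .state = 0, .flush_pending = ATOMIC_FLAG_INIT };

        flush_cqe_model(&qp); /* first error path: schedules the flush      */
        flush_cqe_model(&qp); /* later error paths: flag already set, no-op */
        return 0;
}
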
@@ -1252,6 +1252,7 @@ void hns_roce_free_db(struct hns_roce_dev *hr_dev, struct hns_roce_db *db);
 void hns_roce_cq_completion(struct hns_roce_dev *hr_dev, u32 cqn);
 void hns_roce_cq_event(struct hns_roce_dev *hr_dev, u32 cqn, int event_type);
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp);
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type);
 void hns_roce_srq_event(struct hns_roce_dev *hr_dev, u32 srqn, int event_type);
 u8 hns_get_gid_index(struct hns_roce_dev *hr_dev, u32 port, int gid_index);
...
@@ -624,18 +624,8 @@ static inline int set_rc_wqe(struct hns_roce_qp *qp,
 static inline void update_sq_db(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_qp *qp)
 {
-        /*
-         * Hip08 hardware cannot flush the WQEs in SQ if the QP state
-         * gets into errored mode. Hence, as a workaround to this
-         * hardware limitation, driver needs to assist in flushing. But
-         * the flushing operation uses mailbox to convey the QP state to
-         * the hardware and which can sleep due to the mutex protection
-         * around the mailbox calls. Hence, use the deferred flush for
-         * now.
-         */
         if (unlikely(qp->state == IB_QPS_ERR)) {
-                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                        init_flush_work(hr_dev, qp);
+                flush_cqe(hr_dev, qp);
         } else {
                 struct hns_roce_v2_db sq_db = {};
@@ -651,18 +641,8 @@ static inline void update_sq_db(struct hns_roce_dev *hr_dev,
 static inline void update_rq_db(struct hns_roce_dev *hr_dev,
                                 struct hns_roce_qp *qp)
 {
-        /*
-         * Hip08 hardware cannot flush the WQEs in RQ if the QP state
-         * gets into errored mode. Hence, as a workaround to this
-         * hardware limitation, driver needs to assist in flushing. But
-         * the flushing operation uses mailbox to convey the QP state to
-         * the hardware and which can sleep due to the mutex protection
-         * around the mailbox calls. Hence, use the deferred flush for
-         * now.
-         */
         if (unlikely(qp->state == IB_QPS_ERR)) {
-                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                        init_flush_work(hr_dev, qp);
+                flush_cqe(hr_dev, qp);
         } else {
                 if (likely(qp->en_flags & HNS_ROCE_QP_CAP_RQ_RECORD_DB)) {
                         *qp->rdb.db_record =
@@ -3553,17 +3533,7 @@ static void get_cqe_status(struct hns_roce_dev *hr_dev, struct hns_roce_qp *qp,
         if (cqe_status == HNS_ROCE_CQE_V2_GENERAL_ERR)
                 return;
 
-        /*
-         * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state gets
-         * into errored mode. Hence, as a workaround to this hardware
-         * limitation, driver needs to assist in flushing. But the flushing
-         * operation uses mailbox to convey the QP state to the hardware and
-         * which can sleep due to the mutex protection around the mailbox calls.
-         * Hence, use the deferred flush for now. Once wc error detected, the
-         * flushing operation is needed.
-         */
-        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                init_flush_work(hr_dev, qp);
+        flush_cqe(hr_dev, qp);
 }
 
 static int get_cur_qp(struct hns_roce_cq *hr_cq, struct hns_roce_v2_cqe *cqe,
...
@@ -79,6 +79,21 @@ void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)
         queue_work(hr_dev->irq_workq, &flush_work->work);
 }
 
+void flush_cqe(struct hns_roce_dev *dev, struct hns_roce_qp *qp)
+{
+        /*
+         * Hip08 hardware cannot flush the WQEs in SQ/RQ if the QP state
+         * gets into errored mode. Hence, as a workaround to this
+         * hardware limitation, driver needs to assist in flushing. But
+         * the flushing operation uses mailbox to convey the QP state to
+         * the hardware and which can sleep due to the mutex protection
+         * around the mailbox calls. Hence, use the deferred flush for
+         * now.
+         */
+        if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
+                init_flush_work(dev, qp);
+}
+
 void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
 {
         struct device *dev = hr_dev->dev;
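
The rationale in the comment above (the mailbox command that conveys the QP state can sleep, so the error path only marks the QP and defers the actual flush) can also be modeled outside the kernel. Below is a small hedged sketch, assuming a userspace C11/pthreads environment: flush_worker() plays the role of the queued flush work, the sleep() merely models the blocking mailbox call, and none of these names exist in the driver.

/*
 * Assumed userspace model of the deferred flush: the error path must not
 * sleep, so it only sets a flag and hands the blocking step to a worker,
 * mirroring init_flush_work() queueing onto the driver's workqueue.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_flag flush_pending = ATOMIC_FLAG_INIT;

/* Plays the role of the queued flush work item. */
static void *flush_worker(void *arg)
{
        (void)arg;
        sleep(1); /* models the mailbox command, which may block */
        printf("QP state conveyed to hardware; stale WQEs flushed\n");
        return NULL;
}

/* Error path: may run in a context that cannot sleep, so defer the flush. */
static void on_wc_error(pthread_t *worker)
{
        if (!atomic_flag_test_and_set(&flush_pending))
                pthread_create(worker, NULL, flush_worker, NULL);
}

int main(void)
{
        pthread_t worker;

        on_wc_error(&worker); /* first error schedules the worker      */
        on_wc_error(&worker); /* repeat errors see the flag and return */
        pthread_join(worker, NULL);
        return 0;
}
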
@@ -102,8 +117,8 @@ void hns_roce_qp_event(struct hns_roce_dev *hr_dev, u32 qpn, int event_type)
             event_type == HNS_ROCE_EVENT_TYPE_XRCD_VIOLATION ||
             event_type == HNS_ROCE_EVENT_TYPE_INVALID_XRCETH)) {
                 qp->state = IB_QPS_ERR;
-                if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &qp->flush_flag))
-                        init_flush_work(hr_dev, qp);
+                flush_cqe(hr_dev, qp);
         }
 
         qp->event(qp, (enum hns_roce_event)event_type);
...