Commit ae1c6148 authored by Lijun Ou's avatar Lijun Ou Committed by Jason Gunthorpe

RDMA/hns: Unify format of prints

Use ibdev_err/dbg/warn() instead of dev_err/dbg/warn(), and modify some
prints into the format of "failed to do something, ret = n".

Link: https://lore.kernel.org/r/1584674622-52773-2-git-send-email-liweihang@huawei.com
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: default avatarWeihang Li <liweihang@huawei.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@mellanox.com>
parent 26e28deb
...@@ -266,21 +266,24 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr, ...@@ -266,21 +266,24 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, const struct ib_send_wr *wr,
static int check_send_valid(struct hns_roce_dev *hr_dev, static int check_send_valid(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
{ {
struct ib_device *ibdev = &hr_dev->ib_dev;
struct ib_qp *ibqp = &hr_qp->ibqp; struct ib_qp *ibqp = &hr_qp->ibqp;
struct device *dev = hr_dev->dev;
if (unlikely(ibqp->qp_type != IB_QPT_RC && if (unlikely(ibqp->qp_type != IB_QPT_RC &&
ibqp->qp_type != IB_QPT_GSI && ibqp->qp_type != IB_QPT_GSI &&
ibqp->qp_type != IB_QPT_UD)) { ibqp->qp_type != IB_QPT_UD)) {
dev_err(dev, "Not supported QP(0x%x)type!\n", ibqp->qp_type); ibdev_err(ibdev, "Not supported QP(0x%x)type!\n",
ibqp->qp_type);
return -EOPNOTSUPP; return -EOPNOTSUPP;
} else if (unlikely(hr_qp->state == IB_QPS_RESET || } else if (unlikely(hr_qp->state == IB_QPS_RESET ||
hr_qp->state == IB_QPS_INIT || hr_qp->state == IB_QPS_INIT ||
hr_qp->state == IB_QPS_RTR)) { hr_qp->state == IB_QPS_RTR)) {
dev_err(dev, "Post WQE fail, QP state %d!\n", hr_qp->state); ibdev_err(ibdev, "failed to post WQE, QP state %d!\n",
hr_qp->state);
return -EINVAL; return -EINVAL;
} else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) { } else if (unlikely(hr_dev->state >= HNS_ROCE_DEVICE_STATE_RST_DOWN)) {
dev_err(dev, "Post WQE fail, dev state %d!\n", hr_dev->state); ibdev_err(ibdev, "failed to post WQE, dev state %d!\n",
hr_dev->state);
return -EIO; return -EIO;
} }
...@@ -625,9 +628,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -625,9 +628,9 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_v2_wqe_data_seg *dseg; struct hns_roce_v2_wqe_data_seg *dseg;
struct hns_roce_rinl_sge *sge_list; struct hns_roce_rinl_sge *sge_list;
struct device *dev = hr_dev->dev;
unsigned long flags; unsigned long flags;
void *wqe = NULL; void *wqe = NULL;
u32 wqe_idx; u32 wqe_idx;
...@@ -655,7 +658,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, ...@@ -655,7 +658,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp,
wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1); wqe_idx = (hr_qp->rq.head + nreq) & (hr_qp->rq.wqe_cnt - 1);
if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) { if (unlikely(wr->num_sge > hr_qp->rq.max_gs)) {
dev_err(dev, "rq:num_sge=%d > qp->sq.max_gs=%d\n", ibdev_err(ibdev, "rq:num_sge=%d >= qp->sq.max_gs=%d\n",
wr->num_sge, hr_qp->rq.max_gs); wr->num_sge, hr_qp->rq.max_gs);
ret = -EINVAL; ret = -EINVAL;
*bad_wr = wr; *bad_wr = wr;
...@@ -2440,7 +2443,9 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port, ...@@ -2440,7 +2443,9 @@ static int hns_roce_v2_set_gid(struct hns_roce_dev *hr_dev, u8 port,
ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type); ret = hns_roce_config_sgid_table(hr_dev, gid_index, gid, sgid_type);
if (ret) if (ret)
dev_err(hr_dev->dev, "Configure sgid table failed(%d)!\n", ret); ibdev_err(&hr_dev->ib_dev,
"failed to configure sgid table, ret = %d!\n",
ret);
return ret; return ret;
} }
...@@ -3022,8 +3027,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ...@@ -3022,8 +3027,9 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) { if (!*cur_qp || (qpn & HNS_ROCE_V2_CQE_QPN_MASK) != (*cur_qp)->qpn) {
hr_qp = __hns_roce_qp_lookup(hr_dev, qpn); hr_qp = __hns_roce_qp_lookup(hr_dev, qpn);
if (unlikely(!hr_qp)) { if (unlikely(!hr_qp)) {
dev_err(hr_dev->dev, "CQ %06lx with entry for unknown QPN %06x\n", ibdev_err(&hr_dev->ib_dev,
hr_cq->cqn, (qpn & HNS_ROCE_V2_CQE_QPN_MASK)); "CQ %06lx with entry for unknown QPN %06x\n",
hr_cq->cqn, qpn & HNS_ROCE_V2_CQE_QPN_MASK);
return -EINVAL; return -EINVAL;
} }
*cur_qp = hr_qp; *cur_qp = hr_qp;
...@@ -3125,7 +3131,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq, ...@@ -3125,7 +3131,7 @@ static int hns_roce_v2_poll_one(struct hns_roce_cq *hr_cq,
*/ */
if (wc->status != IB_WC_SUCCESS && if (wc->status != IB_WC_SUCCESS &&
wc->status != IB_WC_WR_FLUSH_ERR) { wc->status != IB_WC_WR_FLUSH_ERR) {
dev_err(hr_dev->dev, "error cqe status is: 0x%x\n", ibdev_err(&hr_dev->ib_dev, "error cqe status is: 0x%x\n",
status & HNS_ROCE_V2_CQE_STATUS_MASK); status & HNS_ROCE_V2_CQE_STATUS_MASK);
if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) if (!test_and_set_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag))
...@@ -3974,20 +3980,21 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev, ...@@ -3974,20 +3980,21 @@ static bool check_wqe_rq_mtt_count(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, int mtt_cnt, struct hns_roce_qp *hr_qp, int mtt_cnt,
u32 page_size) u32 page_size)
{ {
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
if (hr_qp->rq.wqe_cnt < 1) if (hr_qp->rq.wqe_cnt < 1)
return true; return true;
if (mtt_cnt < 1) { if (mtt_cnt < 1) {
dev_err(dev, "qp(0x%lx) rqwqe buf ba find failed\n", ibdev_err(ibdev, "failed to find RQWQE buf ba of QP(0x%lx)\n",
hr_qp->qpn); hr_qp->qpn);
return false; return false;
} }
if (mtt_cnt < MTT_MIN_COUNT && if (mtt_cnt < MTT_MIN_COUNT &&
(hr_qp->rq.offset + page_size) < hr_qp->buff_size) { (hr_qp->rq.offset + page_size) < hr_qp->buff_size) {
dev_err(dev, "qp(0x%lx) next rqwqe buf ba find failed\n", ibdev_err(ibdev,
"failed to find next RQWQE buf ba of QP(0x%lx)\n",
hr_qp->qpn); hr_qp->qpn);
return false; return false;
} }
...@@ -4003,7 +4010,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -4003,7 +4010,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
u64 mtts[MTT_MIN_COUNT] = { 0 }; u64 mtts[MTT_MIN_COUNT] = { 0 };
dma_addr_t dma_handle_3; dma_addr_t dma_handle_3;
dma_addr_t dma_handle_2; dma_addr_t dma_handle_2;
...@@ -4030,7 +4037,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -4030,7 +4037,7 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table, mtts_2 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.irrl_table,
hr_qp->qpn, &dma_handle_2); hr_qp->qpn, &dma_handle_2);
if (!mtts_2) { if (!mtts_2) {
dev_err(dev, "qp irrl_table find failed\n"); ibdev_err(ibdev, "failed to find QP irrl_table\n");
return -EINVAL; return -EINVAL;
} }
...@@ -4038,12 +4045,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp, ...@@ -4038,12 +4045,13 @@ static int modify_qp_init_to_rtr(struct ib_qp *ibqp,
mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table, mtts_3 = hns_roce_table_find(hr_dev, &hr_dev->qp_table.trrl_table,
hr_qp->qpn, &dma_handle_3); hr_qp->qpn, &dma_handle_3);
if (!mtts_3) { if (!mtts_3) {
dev_err(dev, "qp trrl_table find failed\n"); ibdev_err(ibdev, "failed to find QP trrl_table\n");
return -EINVAL; return -EINVAL;
} }
if (attr_mask & IB_QP_ALT_PATH) { if (attr_mask & IB_QP_ALT_PATH) {
dev_err(dev, "INIT2RTR attr_mask (0x%x) error\n", attr_mask); ibdev_err(ibdev, "INIT2RTR attr_mask (0x%x) error\n",
attr_mask);
return -EINVAL; return -EINVAL;
} }
...@@ -4246,7 +4254,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4246,7 +4254,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
{ {
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
u64 sge_cur_blk = 0; u64 sge_cur_blk = 0;
u64 sq_cur_blk = 0; u64 sq_cur_blk = 0;
u32 page_size; u32 page_size;
...@@ -4255,7 +4263,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4255,7 +4263,8 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
/* Search qp buf's mtts */ /* Search qp buf's mtts */
count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL); count = hns_roce_mtr_find(hr_dev, &hr_qp->mtr, 0, &sq_cur_blk, 1, NULL);
if (count < 1) { if (count < 1) {
dev_err(dev, "qp(0x%lx) buf pa find failed\n", hr_qp->qpn); ibdev_err(ibdev, "failed to find buf pa of QP(0x%lx)\n",
hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -4265,7 +4274,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4265,7 +4274,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
hr_qp->sge.offset / page_size, hr_qp->sge.offset / page_size,
&sge_cur_blk, 1, NULL); &sge_cur_blk, 1, NULL);
if (count < 1) { if (count < 1) {
dev_err(dev, "qp(0x%lx) sge pa find failed\n", ibdev_err(ibdev, "failed to find sge pa of QP(0x%lx)\n",
hr_qp->qpn); hr_qp->qpn);
return -EINVAL; return -EINVAL;
} }
...@@ -4274,7 +4283,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp, ...@@ -4274,7 +4283,7 @@ static int modify_qp_rtr_to_rts(struct ib_qp *ibqp,
/* Not support alternate path and path migration */ /* Not support alternate path and path migration */
if ((attr_mask & IB_QP_ALT_PATH) || if ((attr_mask & IB_QP_ALT_PATH) ||
(attr_mask & IB_QP_PATH_MIG_STATE)) { (attr_mask & IB_QP_PATH_MIG_STATE)) {
dev_err(dev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask); ibdev_err(ibdev, "RTR2RTS attr_mask (0x%x)error\n", attr_mask);
return -EINVAL; return -EINVAL;
} }
...@@ -4392,6 +4401,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4392,6 +4401,7 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr); const struct ib_global_route *grh = rdma_ah_read_grh(&attr->ah_attr);
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct ib_device *ibdev = &hr_dev->ib_dev;
const struct ib_gid_attr *gid_attr = NULL; const struct ib_gid_attr *gid_attr = NULL;
int is_roce_protocol; int is_roce_protocol;
u16 vlan_id = 0xffff; u16 vlan_id = 0xffff;
...@@ -4433,13 +4443,13 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp, ...@@ -4433,13 +4443,13 @@ static int hns_roce_v2_set_path(struct ib_qp *ibqp,
V2_QPC_BYTE_24_VLAN_ID_S, 0); V2_QPC_BYTE_24_VLAN_ID_S, 0);
if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) { if (grh->sgid_index >= hr_dev->caps.gid_table_len[hr_port]) {
dev_err(hr_dev->dev, "sgid_index(%u) too large. max is %d\n", ibdev_err(ibdev, "sgid_index(%u) too large. max is %d\n",
grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]); grh->sgid_index, hr_dev->caps.gid_table_len[hr_port]);
return -EINVAL; return -EINVAL;
} }
if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) { if (attr->ah_attr.type != RDMA_AH_ATTR_TYPE_ROCE) {
dev_err(hr_dev->dev, "ah attr is not RDMA roce type\n"); ibdev_err(ibdev, "ah attr is not RDMA roce type\n");
return -EINVAL; return -EINVAL;
} }
...@@ -4517,7 +4527,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp, ...@@ -4517,7 +4527,7 @@ static int hns_roce_v2_set_abs_fields(struct ib_qp *ibqp,
/* Nothing */ /* Nothing */
; ;
} else { } else {
dev_err(hr_dev->dev, "Illegal state for QP!\n"); ibdev_err(&hr_dev->ib_dev, "Illegal state for QP!\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -4552,7 +4562,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp, ...@@ -4552,7 +4562,7 @@ static int hns_roce_v2_set_opt_fields(struct ib_qp *ibqp,
V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S, V2_QPC_BYTE_28_AT_M, V2_QPC_BYTE_28_AT_S,
0); 0);
} else { } else {
dev_warn(hr_dev->dev, ibdev_warn(&hr_dev->ib_dev,
"Local ACK timeout shall be 0 to 30.\n"); "Local ACK timeout shall be 0 to 30.\n");
} }
} }
...@@ -4721,7 +4731,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4721,7 +4731,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
struct hns_roce_v2_qp_context ctx[2]; struct hns_roce_v2_qp_context ctx[2];
struct hns_roce_v2_qp_context *context = ctx; struct hns_roce_v2_qp_context *context = ctx;
struct hns_roce_v2_qp_context *qpc_mask = ctx + 1; struct hns_roce_v2_qp_context *qpc_mask = ctx + 1;
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
unsigned long sq_flag = 0; unsigned long sq_flag = 0;
unsigned long rq_flag = 0; unsigned long rq_flag = 0;
int ret; int ret;
...@@ -4785,7 +4795,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp, ...@@ -4785,7 +4795,7 @@ static int hns_roce_v2_modify_qp(struct ib_qp *ibqp,
/* SW pass context to HW */ /* SW pass context to HW */
ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp); ret = hns_roce_v2_qp_modify(hr_dev, ctx, hr_qp);
if (ret) { if (ret) {
dev_err(dev, "hns_roce_qp_modify failed(%d)\n", ret); ibdev_err(ibdev, "failed to modify QP, ret = %d\n", ret);
goto out; goto out;
} }
...@@ -4842,10 +4852,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev, ...@@ -4842,10 +4852,8 @@ static int hns_roce_v2_query_qpc(struct hns_roce_dev *hr_dev,
ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0, ret = hns_roce_cmd_mbox(hr_dev, 0, mailbox->dma, hr_qp->qpn, 0,
HNS_ROCE_CMD_QUERY_QPC, HNS_ROCE_CMD_QUERY_QPC,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) { if (ret)
dev_err(hr_dev->dev, "QUERY QP cmd process error\n");
goto out; goto out;
}
memcpy(hr_context, mailbox->buf, sizeof(*hr_context)); memcpy(hr_context, mailbox->buf, sizeof(*hr_context));
...@@ -4861,7 +4869,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4861,7 +4869,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device); struct hns_roce_dev *hr_dev = to_hr_dev(ibqp->device);
struct hns_roce_qp *hr_qp = to_hr_qp(ibqp); struct hns_roce_qp *hr_qp = to_hr_qp(ibqp);
struct hns_roce_v2_qp_context context = {}; struct hns_roce_v2_qp_context context = {};
struct device *dev = hr_dev->dev; struct ib_device *ibdev = &hr_dev->ib_dev;
int tmp_qp_state; int tmp_qp_state;
int state; int state;
int ret; int ret;
...@@ -4879,7 +4887,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4879,7 +4887,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context); ret = hns_roce_v2_query_qpc(hr_dev, hr_qp, &context);
if (ret) { if (ret) {
dev_err(dev, "query qpc error\n"); ibdev_err(ibdev, "failed to query QPC, ret = %d\n", ret);
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -4888,7 +4896,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, ...@@ -4888,7 +4896,7 @@ static int hns_roce_v2_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr,
V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S); V2_QPC_BYTE_60_QP_ST_M, V2_QPC_BYTE_60_QP_ST_S);
tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state); tmp_qp_state = to_ib_qp_st((enum hns_roce_v2_qp_state)state);
if (tmp_qp_state == -1) { if (tmp_qp_state == -1) {
dev_err(dev, "Illegal ib_qp_state\n"); ibdev_err(ibdev, "Illegal ib_qp_state\n");
ret = -EINVAL; ret = -EINVAL;
goto out; goto out;
} }
...@@ -4986,8 +4994,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ...@@ -4986,8 +4994,8 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp, struct hns_roce_qp *hr_qp,
struct ib_udata *udata) struct ib_udata *udata)
{ {
struct hns_roce_cq *send_cq, *recv_cq;
struct ib_device *ibdev = &hr_dev->ib_dev; struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_cq *send_cq, *recv_cq;
unsigned long flags; unsigned long flags;
int ret = 0; int ret = 0;
...@@ -4996,7 +5004,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev, ...@@ -4996,7 +5004,9 @@ static int hns_roce_v2_destroy_qp_common(struct hns_roce_dev *hr_dev,
ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0, ret = hns_roce_v2_modify_qp(&hr_qp->ibqp, NULL, 0,
hr_qp->state, IB_QPS_RESET); hr_qp->state, IB_QPS_RESET);
if (ret) if (ret)
ibdev_err(ibdev, "modify QP to Reset failed.\n"); ibdev_err(ibdev,
"failed to modify QP to RST, ret = %d\n",
ret);
} }
send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL; send_cq = hr_qp->ibqp.send_cq ? to_hr_cq(hr_qp->ibqp.send_cq) : NULL;
...@@ -5033,7 +5043,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ...@@ -5033,7 +5043,8 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata); ret = hns_roce_v2_destroy_qp_common(hr_dev, hr_qp, udata);
if (ret) if (ret)
ibdev_err(&hr_dev->ib_dev, "Destroy qp 0x%06lx failed(%d)\n", ibdev_err(&hr_dev->ib_dev,
"failed to destroy QP 0x%06lx, ret = %d\n",
hr_qp->qpn, ret); hr_qp->qpn, ret);
hns_roce_qp_destroy(hr_dev, hr_qp, udata); hns_roce_qp_destroy(hr_dev, hr_qp, udata);
...@@ -5044,6 +5055,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata) ...@@ -5044,6 +5055,7 @@ static int hns_roce_v2_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
struct hns_roce_qp *hr_qp) struct hns_roce_qp *hr_qp)
{ {
struct ib_device *ibdev = &hr_dev->ib_dev;
struct hns_roce_sccc_clr_done *resp; struct hns_roce_sccc_clr_done *resp;
struct hns_roce_sccc_clr *clr; struct hns_roce_sccc_clr *clr;
struct hns_roce_cmq_desc desc; struct hns_roce_cmq_desc desc;
...@@ -5055,7 +5067,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, ...@@ -5055,7 +5067,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false); hns_roce_cmq_setup_basic_desc(&desc, HNS_ROCE_OPC_RESET_SCCC, false);
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Reset SCC ctx failed(%d)\n", ret); ibdev_err(ibdev, "failed to reset SCC ctx, ret = %d\n", ret);
goto out; goto out;
} }
...@@ -5065,7 +5077,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, ...@@ -5065,7 +5077,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
clr->qpn = cpu_to_le32(hr_qp->qpn); clr->qpn = cpu_to_le32(hr_qp->qpn);
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Clear SCC ctx failed(%d)\n", ret); ibdev_err(ibdev, "failed to clear SCC ctx, ret = %d\n", ret);
goto out; goto out;
} }
...@@ -5076,7 +5088,8 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, ...@@ -5076,7 +5088,8 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
HNS_ROCE_OPC_QUERY_SCCC, true); HNS_ROCE_OPC_QUERY_SCCC, true);
ret = hns_roce_cmq_send(hr_dev, &desc, 1); ret = hns_roce_cmq_send(hr_dev, &desc, 1);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "Query clr cmq failed(%d)\n", ret); ibdev_err(ibdev, "failed to query clr cmq, ret = %d\n",
ret);
goto out; goto out;
} }
...@@ -5086,7 +5099,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev, ...@@ -5086,7 +5099,7 @@ static int hns_roce_v2_qp_flow_control_init(struct hns_roce_dev *hr_dev,
msleep(20); msleep(20);
} }
dev_err(hr_dev->dev, "Query SCC clr done flag overtime.\n"); ibdev_err(ibdev, "Query SCC clr done flag overtime.\n");
ret = -ETIMEDOUT; ret = -ETIMEDOUT;
out: out:
...@@ -5130,7 +5143,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period) ...@@ -5130,7 +5143,9 @@ static int hns_roce_v2_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) if (ret)
dev_err(hr_dev->dev, "MODIFY CQ Failed to cmd mailbox.\n"); ibdev_err(&hr_dev->ib_dev,
"failed to process cmd when modifying CQ, ret = %d\n",
ret);
return ret; return ret;
} }
...@@ -5139,54 +5154,54 @@ static void hns_roce_irq_work_handle(struct work_struct *work) ...@@ -5139,54 +5154,54 @@ static void hns_roce_irq_work_handle(struct work_struct *work)
{ {
struct hns_roce_work *irq_work = struct hns_roce_work *irq_work =
container_of(work, struct hns_roce_work, work); container_of(work, struct hns_roce_work, work);
struct device *dev = irq_work->hr_dev->dev; struct ib_device *ibdev = &irq_work->hr_dev->ib_dev;
u32 qpn = irq_work->qpn; u32 qpn = irq_work->qpn;
u32 cqn = irq_work->cqn; u32 cqn = irq_work->cqn;
switch (irq_work->event_type) { switch (irq_work->event_type) {
case HNS_ROCE_EVENT_TYPE_PATH_MIG: case HNS_ROCE_EVENT_TYPE_PATH_MIG:
dev_info(dev, "Path migrated succeeded.\n"); ibdev_info(ibdev, "Path migrated succeeded.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED: case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
dev_warn(dev, "Path migration failed.\n"); ibdev_warn(ibdev, "Path migration failed.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_COMM_EST: case HNS_ROCE_EVENT_TYPE_COMM_EST:
break; break;
case HNS_ROCE_EVENT_TYPE_SQ_DRAINED: case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
dev_warn(dev, "Send queue drained.\n"); ibdev_warn(ibdev, "Send queue drained.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR: case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
dev_err(dev, "Local work queue 0x%x catas error, sub_type:%d\n", ibdev_err(ibdev, "Local work queue 0x%x catast error, sub_event type is: %d\n",
qpn, irq_work->sub_type); qpn, irq_work->sub_type);
break; break;
case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR: case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
dev_err(dev, "Invalid request local work queue 0x%x error.\n", ibdev_err(ibdev, "Invalid request local work queue 0x%x error.\n",
qpn); qpn);
break; break;
case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
dev_err(dev, "Local access violation work queue 0x%x error, sub_type:%d\n", ibdev_err(ibdev, "Local access violation work queue 0x%x error, sub_event type is: %d\n",
qpn, irq_work->sub_type); qpn, irq_work->sub_type);
break; break;
case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
dev_warn(dev, "SRQ limit reach.\n"); ibdev_warn(ibdev, "SRQ limit reach.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH: case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
dev_warn(dev, "SRQ last wqe reach.\n"); ibdev_warn(ibdev, "SRQ last wqe reach.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR: case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
dev_err(dev, "SRQ catas error.\n"); ibdev_err(ibdev, "SRQ catas error.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR: case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
dev_err(dev, "CQ 0x%x access err.\n", cqn); ibdev_err(ibdev, "CQ 0x%x access err.\n", cqn);
break; break;
case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW: case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
dev_warn(dev, "CQ 0x%x overflow\n", cqn); ibdev_warn(ibdev, "CQ 0x%x overflow\n", cqn);
break; break;
case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW: case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
dev_warn(dev, "DB overflow.\n"); ibdev_warn(ibdev, "DB overflow.\n");
break; break;
case HNS_ROCE_EVENT_TYPE_FLR: case HNS_ROCE_EVENT_TYPE_FLR:
dev_warn(dev, "Function level reset.\n"); ibdev_warn(ibdev, "Function level reset.\n");
break; break;
default: default:
break; break;
...@@ -6119,8 +6134,9 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq, ...@@ -6119,8 +6134,9 @@ static int hns_roce_v2_modify_srq(struct ib_srq *ibsrq,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
hns_roce_free_cmd_mailbox(hr_dev, mailbox); hns_roce_free_cmd_mailbox(hr_dev, mailbox);
if (ret) { if (ret) {
dev_err(hr_dev->dev, ibdev_err(&hr_dev->ib_dev,
"MODIFY SRQ Failed to cmd mailbox.\n"); "failed to process cmd when modifying SRQ, ret = %d\n",
ret);
return ret; return ret;
} }
} }
...@@ -6146,7 +6162,9 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr) ...@@ -6146,7 +6162,9 @@ static int hns_roce_v2_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr)
HNS_ROCE_CMD_QUERY_SRQC, HNS_ROCE_CMD_QUERY_SRQC,
HNS_ROCE_CMD_TIMEOUT_MSECS); HNS_ROCE_CMD_TIMEOUT_MSECS);
if (ret) { if (ret) {
dev_err(hr_dev->dev, "QUERY SRQ cmd process error\n"); ibdev_err(&hr_dev->ib_dev,
"failed to process cmd when querying SRQ, ret = %d\n",
ret);
goto out; goto out;
} }
......
...@@ -60,14 +60,12 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev) ...@@ -60,14 +60,12 @@ void hns_roce_cleanup_pd_table(struct hns_roce_dev *hr_dev)
int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
{ {
struct ib_device *ib_dev = ibpd->device; struct ib_device *ib_dev = ibpd->device;
struct hns_roce_dev *hr_dev = to_hr_dev(ib_dev);
struct device *dev = hr_dev->dev;
struct hns_roce_pd *pd = to_hr_pd(ibpd); struct hns_roce_pd *pd = to_hr_pd(ibpd);
int ret; int ret;
ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn); ret = hns_roce_pd_alloc(to_hr_dev(ib_dev), &pd->pdn);
if (ret) { if (ret) {
dev_err(dev, "[alloc_pd]hns_roce_pd_alloc failed!\n"); ibdev_err(ib_dev, "failed to alloc pd, ret = %d\n", ret);
return ret; return ret;
} }
...@@ -76,7 +74,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata) ...@@ -76,7 +74,7 @@ int hns_roce_alloc_pd(struct ib_pd *ibpd, struct ib_udata *udata)
if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) { if (ib_copy_to_udata(udata, &uresp, sizeof(uresp))) {
hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn); hns_roce_pd_free(to_hr_dev(ib_dev), pd->pdn);
dev_err(dev, "[alloc_pd]ib_copy_to_udata failed!\n"); ibdev_err(ib_dev, "failed to copy to udata\n");
return -EFAULT; return -EFAULT;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment