Commit 8b9b8d14 authored by oulijun, committed by Doug Ledford

RDMA/hns: Fix the endian problem for hns

The hip06 and hip08 run on little endian ARM, so revise the
annotations to indicate that the HW uses little endian data in
the various DMA buffers, and add the necessary swaps throughout.

imm_data uses big endian mode. On this platform the
cpu_to_le32/le32_to_cpu swaps are no-ops, which makes the only
substantive change the handling of imm_data, which is now
always swapped.

This also keeps the kernel in step with the userspace hns driver
and resolves the warnings reported by sparse.
Signed-off-by: Lijun Ou <oulijun@huawei.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent e9d1e389
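
For context, here is a minimal userspace sketch of the endian handling the
message describes (illustrative only, not driver code; struct fake_wqe_ctrl
and its field names are invented for this example). On a little endian host
htole32() compiles to a no-op, while htobe32() always swaps bytes, mirroring
the relationship between cpu_to_le32() and the big endian imm_data in the
driver.

/* Illustrative sketch, not part of the driver: the little endian
 * annotations cost nothing on a little endian CPU, while big endian
 * immediate data still gets a real byte swap. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct fake_wqe_ctrl {
	uint32_t flag;     /* device reads this as little endian */
	uint32_t imm_data; /* carried big endian on the wire */
};

int main(void)
{
	struct fake_wqe_ctrl ctrl;
	uint32_t imm = 0x12345678;

	ctrl.flag = htole32(1u << 31); /* no-op on little endian hosts */
	ctrl.imm_data = htobe32(imm);  /* 0x78563412 on a little endian host */

	printf("flag=%#x imm_data(be)=%#x\n",
	       (unsigned)ctrl.flag, (unsigned)ctrl.imm_data);
	return 0;
}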
......@@ -43,15 +43,15 @@
__raw_writel((__force u32)cpu_to_le32(value), (addr))
#define roce_get_field(origin, mask, shift) \
(((origin) & (mask)) >> (shift))
(((le32_to_cpu(origin)) & (mask)) >> (shift))
#define roce_get_bit(origin, shift) \
roce_get_field((origin), (1ul << (shift)), (shift))
#define roce_set_field(origin, mask, shift, val) \
do { \
(origin) &= (~(mask)); \
(origin) |= (((u32)(val) << (shift)) & (mask)); \
(origin) &= ~cpu_to_le32(mask); \
(origin) |= cpu_to_le32(((u32)(val) << (shift)) & (mask)); \
} while (0)
#define roce_set_bit(origin, shift, val) \
......
......@@ -466,7 +466,7 @@ struct hns_roce_qp {
struct ib_qp ibqp;
struct hns_roce_buf hr_buf;
struct hns_roce_wq rq;
__le64 doorbell_qpn;
u32 doorbell_qpn;
__le32 sq_signal_bits;
u32 sq_next_wqe;
int sq_max_wqes_per_wr;
......
......@@ -195,23 +195,47 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
memcpy(&ud_sq_wqe->dgid[0], &ah->av.dgid[0], GID_LEN);
ud_sq_wqe->va0_l = (u32)wr->sg_list[0].addr;
ud_sq_wqe->va0_h = (wr->sg_list[0].addr) >> 32;
ud_sq_wqe->l_key0 = wr->sg_list[0].lkey;
ud_sq_wqe->va1_l = (u32)wr->sg_list[1].addr;
ud_sq_wqe->va1_h = (wr->sg_list[1].addr) >> 32;
ud_sq_wqe->l_key1 = wr->sg_list[1].lkey;
ud_sq_wqe->va0_l =
cpu_to_le32((u32)wr->sg_list[0].addr);
ud_sq_wqe->va0_h =
cpu_to_le32((wr->sg_list[0].addr) >> 32);
ud_sq_wqe->l_key0 =
cpu_to_le32(wr->sg_list[0].lkey);
ud_sq_wqe->va1_l =
cpu_to_le32((u32)wr->sg_list[1].addr);
ud_sq_wqe->va1_h =
cpu_to_le32((wr->sg_list[1].addr) >> 32);
ud_sq_wqe->l_key1 =
cpu_to_le32(wr->sg_list[1].lkey);
ind++;
} else if (ibqp->qp_type == IB_QPT_RC) {
u32 tmp_len = 0;
ctrl = wqe;
memset(ctrl, 0, sizeof(struct hns_roce_wqe_ctrl_seg));
for (i = 0; i < wr->num_sge; i++)
ctrl->msg_length += wr->sg_list[i].length;
tmp_len += wr->sg_list[i].length;
ctrl->msg_length =
cpu_to_le32(le32_to_cpu(ctrl->msg_length) + tmp_len);
ctrl->sgl_pa_h = 0;
ctrl->flag = 0;
ctrl->imm_data = send_ieth(wr);
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
ctrl->imm_data = wr->ex.imm_data;
break;
case IB_WR_SEND_WITH_INV:
ctrl->inv_key =
cpu_to_le32(wr->ex.invalidate_rkey);
break;
default:
ctrl->imm_data = 0;
break;
}
/*Ctrl field, ctrl set type: sig, solic, imm, fence */
/* SO wait for conforming application scenarios */
......@@ -258,7 +282,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
dseg = wqe;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
if (ctrl->msg_length >
if (le32_to_cpu(ctrl->msg_length) >
hr_dev->caps.max_sq_inline) {
ret = -EINVAL;
*bad_wr = wr;
......@@ -273,7 +297,7 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
wr->sg_list[i].length);
wqe += wr->sg_list[i].length;
}
ctrl->flag |= HNS_ROCE_WQE_INLINE;
ctrl->flag |= cpu_to_le32(HNS_ROCE_WQE_INLINE);
} else {
/*sqe num is two */
for (i = 0; i < wr->num_sge; i++)
......@@ -306,8 +330,8 @@ static int hns_roce_v1_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
SQ_DOORBELL_U32_8_QPN_S, qp->doorbell_qpn);
roce_set_bit(sq_db.u32_8, SQ_DOORBELL_HW_SYNC_S, 1);
doorbell[0] = sq_db.u32_4;
doorbell[1] = sq_db.u32_8;
doorbell[0] = le32_to_cpu(sq_db.u32_4);
doorbell[1] = le32_to_cpu(sq_db.u32_8);
hns_roce_write64_k(doorbell, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
......@@ -403,8 +427,8 @@ static int hns_roce_v1_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
roce_set_bit(rq_db.u32_8, RQ_DOORBELL_U32_8_HW_SYNC_S,
1);
doorbell[0] = rq_db.u32_4;
doorbell[1] = rq_db.u32_8;
doorbell[0] = le32_to_cpu(rq_db.u32_4);
doorbell[1] = le32_to_cpu(rq_db.u32_8);
hns_roce_write64_k(doorbell, hr_qp->rq.db_reg_l);
}
......@@ -2261,7 +2285,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
CQE_BYTE_4_WQE_INDEX_M,
CQE_BYTE_4_WQE_INDEX_S)&
((*cur_qp)->sq.wqe_cnt-1));
switch (sq_wqe->flag & HNS_ROCE_WQE_OPCODE_MASK) {
switch (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_OPCODE_MASK) {
case HNS_ROCE_WQE_OPCODE_SEND:
wc->opcode = IB_WC_SEND;
break;
......@@ -2282,7 +2306,7 @@ static int hns_roce_v1_poll_one(struct hns_roce_cq *hr_cq,
wc->status = IB_WC_GENERAL_ERR;
break;
}
wc->wc_flags = (sq_wqe->flag & HNS_ROCE_WQE_IMM ?
wc->wc_flags = (le32_to_cpu(sq_wqe->flag) & HNS_ROCE_WQE_IMM ?
IB_WC_WITH_IMM : 0);
wq = &(*cur_qp)->sq;
......
......@@ -200,14 +200,14 @@
#define HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M GENMASK(4, 0)
struct hns_roce_cq_context {
u32 cqc_byte_4;
u32 cq_bt_l;
u32 cqc_byte_12;
u32 cur_cqe_ba0_l;
u32 cqc_byte_20;
u32 cqe_tptr_addr_l;
u32 cur_cqe_ba1_l;
u32 cqc_byte_32;
__le32 cqc_byte_4;
__le32 cq_bt_l;
__le32 cqc_byte_12;
__le32 cur_cqe_ba0_l;
__le32 cqc_byte_20;
__le32 cqe_tptr_addr_l;
__le32 cur_cqe_ba1_l;
__le32 cqc_byte_32;
};
#define CQ_CONTEXT_CQC_BYTE_4_CQC_STATE_S 0
......@@ -257,17 +257,17 @@ struct hns_roce_cq_context {
(((1UL << 16) - 1) << CQ_CONTEXT_CQC_BYTE_32_CQ_CONS_IDX_S)
struct hns_roce_cqe {
u32 cqe_byte_4;
__le32 cqe_byte_4;
union {
u32 r_key;
u32 immediate_data;
__le32 r_key;
__be32 immediate_data;
};
u32 byte_cnt;
u32 cqe_byte_16;
u32 cqe_byte_20;
u32 s_mac_l;
u32 cqe_byte_28;
u32 reserved;
__le32 byte_cnt;
__le32 cqe_byte_16;
__le32 cqe_byte_20;
__le32 s_mac_l;
__le32 cqe_byte_28;
__le32 reserved;
};
#define CQE_BYTE_4_OWNER_S 7
......@@ -308,22 +308,22 @@ struct hns_roce_cqe {
#define CQ_DB_REQ_NOT (1 << 16)
struct hns_roce_v1_mpt_entry {
u32 mpt_byte_4;
u32 pbl_addr_l;
u32 mpt_byte_12;
u32 virt_addr_l;
u32 virt_addr_h;
u32 length;
u32 mpt_byte_28;
u32 pa0_l;
u32 mpt_byte_36;
u32 mpt_byte_40;
u32 mpt_byte_44;
u32 mpt_byte_48;
u32 pa4_l;
u32 mpt_byte_56;
u32 mpt_byte_60;
u32 mpt_byte_64;
__le32 mpt_byte_4;
__le32 pbl_addr_l;
__le32 mpt_byte_12;
__le32 virt_addr_l;
__le32 virt_addr_h;
__le32 length;
__le32 mpt_byte_28;
__le32 pa0_l;
__le32 mpt_byte_36;
__le32 mpt_byte_40;
__le32 mpt_byte_44;
__le32 mpt_byte_48;
__le32 pa4_l;
__le32 mpt_byte_56;
__le32 mpt_byte_60;
__le32 mpt_byte_64;
};
#define MPT_BYTE_4_KEY_STATE_S 0
......@@ -408,30 +408,32 @@ struct hns_roce_v1_mpt_entry {
(((1UL << 8) - 1) << MPT_BYTE_64_L_KEY_IDX_H_S)
struct hns_roce_wqe_ctrl_seg {
__be32 sgl_pa_h;
__be32 flag;
__le32 sgl_pa_h;
__le32 flag;
union {
__be32 imm_data;
__be32 msg_length;
__le32 inv_key;
};
__le32 msg_length;
};
struct hns_roce_wqe_data_seg {
__be64 addr;
__be32 lkey;
__be32 len;
__le64 addr;
__le32 lkey;
__le32 len;
};
struct hns_roce_wqe_raddr_seg {
__be32 rkey;
__be32 len;/* reserved */
__be64 raddr;
__le32 rkey;
__le32 len;/* reserved */
__le64 raddr;
};
struct hns_roce_rq_wqe_ctrl {
u32 rwqe_byte_4;
u32 rocee_sgl_ba_l;
u32 rwqe_byte_12;
u32 reserved[5];
__le32 rwqe_byte_4;
__le32 rocee_sgl_ba_l;
__le32 rwqe_byte_12;
__le32 reserved[5];
};
#define RQ_WQE_CTRL_RWQE_BYTE_12_RWQE_SGE_NUM_S 16
......@@ -443,31 +445,31 @@ struct hns_roce_rq_wqe_ctrl {
#define GID_LEN 16
struct hns_roce_ud_send_wqe {
u32 dmac_h;
u32 u32_8;
u32 immediate_data;
__le32 dmac_h;
__le32 u32_8;
__le32 immediate_data;
u32 u32_16;
__le32 u32_16;
union {
unsigned char dgid[GID_LEN];
struct {
u32 u32_20;
u32 u32_24;
u32 u32_28;
u32 u32_32;
__le32 u32_20;
__le32 u32_24;
__le32 u32_28;
__le32 u32_32;
};
};
u32 u32_36;
u32 u32_40;
__le32 u32_36;
__le32 u32_40;
u32 va0_l;
u32 va0_h;
u32 l_key0;
__le32 va0_l;
__le32 va0_h;
__le32 l_key0;
u32 va1_l;
u32 va1_h;
u32 l_key1;
__le32 va1_l;
__le32 va1_h;
__le32 l_key1;
};
#define UD_SEND_WQE_U32_4_DMAC_0_S 0
......@@ -535,16 +537,16 @@ struct hns_roce_ud_send_wqe {
(((1UL << 8) - 1) << UD_SEND_WQE_U32_40_TRAFFIC_CLASS_S)
struct hns_roce_sqp_context {
u32 qp1c_bytes_4;
u32 sq_rq_bt_l;
u32 qp1c_bytes_12;
u32 qp1c_bytes_16;
u32 qp1c_bytes_20;
u32 cur_rq_wqe_ba_l;
u32 qp1c_bytes_28;
u32 qp1c_bytes_32;
u32 cur_sq_wqe_ba_l;
u32 qp1c_bytes_40;
__le32 qp1c_bytes_4;
__le32 sq_rq_bt_l;
__le32 qp1c_bytes_12;
__le32 qp1c_bytes_16;
__le32 qp1c_bytes_20;
__le32 cur_rq_wqe_ba_l;
__le32 qp1c_bytes_28;
__le32 qp1c_bytes_32;
__le32 cur_sq_wqe_ba_l;
__le32 qp1c_bytes_40;
};
#define QP1C_BYTES_4_QP_STATE_S 0
......@@ -626,64 +628,64 @@ struct hns_roce_sqp_context {
#define HNS_ROCE_WQE_OPCODE_MASK (15<<16)
struct hns_roce_qp_context {
u32 qpc_bytes_4;
u32 qpc_bytes_8;
u32 qpc_bytes_12;
u32 qpc_bytes_16;
u32 sq_rq_bt_l;
u32 qpc_bytes_24;
u32 irrl_ba_l;
u32 qpc_bytes_32;
u32 qpc_bytes_36;
u32 dmac_l;
u32 qpc_bytes_44;
u32 qpc_bytes_48;
__le32 qpc_bytes_4;
__le32 qpc_bytes_8;
__le32 qpc_bytes_12;
__le32 qpc_bytes_16;
__le32 sq_rq_bt_l;
__le32 qpc_bytes_24;
__le32 irrl_ba_l;
__le32 qpc_bytes_32;
__le32 qpc_bytes_36;
__le32 dmac_l;
__le32 qpc_bytes_44;
__le32 qpc_bytes_48;
u8 dgid[16];
u32 qpc_bytes_68;
u32 cur_rq_wqe_ba_l;
u32 qpc_bytes_76;
u32 rx_rnr_time;
u32 qpc_bytes_84;
u32 qpc_bytes_88;
__le32 qpc_bytes_68;
__le32 cur_rq_wqe_ba_l;
__le32 qpc_bytes_76;
__le32 rx_rnr_time;
__le32 qpc_bytes_84;
__le32 qpc_bytes_88;
union {
u32 rx_sge_len;
u32 dma_length;
__le32 rx_sge_len;
__le32 dma_length;
};
union {
u32 rx_sge_num;
u32 rx_send_pktn;
u32 r_key;
__le32 rx_sge_num;
__le32 rx_send_pktn;
__le32 r_key;
};
u32 va_l;
u32 va_h;
u32 qpc_bytes_108;
u32 qpc_bytes_112;
u32 rx_cur_sq_wqe_ba_l;
u32 qpc_bytes_120;
u32 qpc_bytes_124;
u32 qpc_bytes_128;
u32 qpc_bytes_132;
u32 qpc_bytes_136;
u32 qpc_bytes_140;
u32 qpc_bytes_144;
u32 qpc_bytes_148;
__le32 va_l;
__le32 va_h;
__le32 qpc_bytes_108;
__le32 qpc_bytes_112;
__le32 rx_cur_sq_wqe_ba_l;
__le32 qpc_bytes_120;
__le32 qpc_bytes_124;
__le32 qpc_bytes_128;
__le32 qpc_bytes_132;
__le32 qpc_bytes_136;
__le32 qpc_bytes_140;
__le32 qpc_bytes_144;
__le32 qpc_bytes_148;
union {
u32 rnr_retry;
u32 ack_time;
__le32 rnr_retry;
__le32 ack_time;
};
u32 qpc_bytes_156;
u32 pkt_use_len;
u32 qpc_bytes_164;
u32 qpc_bytes_168;
__le32 qpc_bytes_156;
__le32 pkt_use_len;
__le32 qpc_bytes_164;
__le32 qpc_bytes_168;
union {
u32 sge_use_len;
u32 pa_use_len;
__le32 sge_use_len;
__le32 pa_use_len;
};
u32 qpc_bytes_176;
u32 qpc_bytes_180;
u32 tx_cur_sq_wqe_ba_l;
u32 qpc_bytes_188;
u32 rvd21;
__le32 qpc_bytes_176;
__le32 qpc_bytes_180;
__le32 tx_cur_sq_wqe_ba_l;
__le32 qpc_bytes_188;
__le32 rvd21;
};
#define QP_CONTEXT_QPC_BYTES_4_TRANSPORT_SERVICE_TYPE_S 0
......@@ -996,8 +998,8 @@ struct hns_roce_qp_context {
#define HCR_GO_BIT 15
struct hns_roce_rq_db {
u32 u32_4;
u32 u32_8;
__le32 u32_4;
__le32 u32_8;
};
#define RQ_DOORBELL_U32_4_RQ_HEAD_S 0
......@@ -1013,8 +1015,8 @@ struct hns_roce_rq_db {
#define RQ_DOORBELL_U32_8_HW_SYNC_S 31
struct hns_roce_sq_db {
u32 u32_4;
u32 u32_8;
__le32 u32_4;
__le32 u32_8;
};
#define SQ_DOORBELL_U32_4_SQ_HEAD_S 0
......
......@@ -63,7 +63,8 @@ static int set_rwqe_data_seg(struct ib_qp *ibqp, struct ib_send_wr *wr,
int i;
if (wr->send_flags & IB_SEND_INLINE && wr->num_sge) {
if (rc_sq_wqe->msg_len > hr_dev->caps.max_sq_inline) {
if (le32_to_cpu(rc_sq_wqe->msg_len) >
hr_dev->caps.max_sq_inline) {
*bad_wr = wr;
dev_err(hr_dev->dev, "inline len(1-%d)=%d, illegal",
rc_sq_wqe->msg_len, hr_dev->caps.max_sq_inline);
......@@ -136,6 +137,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
unsigned long flags;
unsigned int ind;
void *wqe = NULL;
u32 tmp_len = 0;
bool loopback;
int ret = 0;
u8 *smac;
......@@ -218,9 +220,20 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
HNS_ROCE_V2_WQE_OP_SEND);
for (i = 0; i < wr->num_sge; i++)
ud_sq_wqe->msg_len += wr->sg_list[i].length;
tmp_len += wr->sg_list[i].length;
ud_sq_wqe->immtdata = send_ieth(wr);
ud_sq_wqe->msg_len =
cpu_to_le32(le32_to_cpu(ud_sq_wqe->msg_len) + tmp_len);
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
ud_sq_wqe->immtdata = wr->ex.imm_data;
break;
default:
ud_sq_wqe->immtdata = 0;
break;
}
/* Set sig attr */
roce_set_bit(ud_sq_wqe->byte_4,
......@@ -254,8 +267,8 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
V2_UD_SEND_WQE_BYTE_24_UDPSPN_M,
V2_UD_SEND_WQE_BYTE_24_UDPSPN_S, 0);
ud_sq_wqe->qkey =
cpu_to_be32(ud_wr(wr)->remote_qkey & 0x80000000) ?
qp->qkey : ud_wr(wr)->remote_qkey;
cpu_to_le32(ud_wr(wr)->remote_qkey & 0x80000000 ?
qp->qkey : ud_wr(wr)->remote_qkey);
roce_set_field(ud_sq_wqe->byte_32,
V2_UD_SEND_WQE_BYTE_32_DQPN_M,
V2_UD_SEND_WQE_BYTE_32_DQPN_S,
......@@ -264,7 +277,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_VLAN_M,
V2_UD_SEND_WQE_BYTE_36_VLAN_S,
ah->av.vlan);
le16_to_cpu(ah->av.vlan));
roce_set_field(ud_sq_wqe->byte_36,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_M,
V2_UD_SEND_WQE_BYTE_36_HOPLIMIT_S,
......@@ -283,7 +296,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_SL_M,
V2_UD_SEND_WQE_BYTE_40_SL_S,
ah->av.sl_tclass_flowlabel >>
le32_to_cpu(ah->av.sl_tclass_flowlabel) >>
HNS_ROCE_SL_SHIFT);
roce_set_field(ud_sq_wqe->byte_40,
V2_UD_SEND_WQE_BYTE_40_PORTN_M,
......@@ -311,9 +324,24 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
rc_sq_wqe = wqe;
memset(rc_sq_wqe, 0, sizeof(*rc_sq_wqe));
for (i = 0; i < wr->num_sge; i++)
rc_sq_wqe->msg_len += wr->sg_list[i].length;
tmp_len += wr->sg_list[i].length;
rc_sq_wqe->msg_len =
cpu_to_le32(le32_to_cpu(rc_sq_wqe->msg_len) + tmp_len);
rc_sq_wqe->inv_key_immtdata = send_ieth(wr);
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
rc_sq_wqe->immtdata = wr->ex.imm_data;
break;
case IB_WR_SEND_WITH_INV:
rc_sq_wqe->inv_key =
cpu_to_le32(wr->ex.invalidate_rkey);
break;
default:
rc_sq_wqe->immtdata = 0;
break;
}
roce_set_bit(rc_sq_wqe->byte_4,
V2_RC_SEND_WQE_BYTE_4_FENCE_S,
......@@ -451,7 +479,7 @@ static int hns_roce_v2_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
roce_set_field(sq_db.parameter, V2_DB_PARAMETER_SL_M,
V2_DB_PARAMETER_SL_S, qp->sl);
hns_roce_write64_k((__be32 *)&sq_db, qp->sq.db_reg_l);
hns_roce_write64_k((__le32 *)&sq_db, qp->sq.db_reg_l);
qp->sq_next_wqe = ind;
qp->next_sge = sge_ind;
......@@ -513,7 +541,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
}
if (i < hr_qp->rq.max_gs) {
dseg[i].lkey = cpu_to_be32(HNS_ROCE_INVALID_LKEY);
dseg[i].lkey = cpu_to_le32(HNS_ROCE_INVALID_LKEY);
dseg[i].addr = 0;
}
......@@ -546,7 +574,7 @@ static int hns_roce_v2_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
roce_set_field(rq_db.parameter, V2_DB_PARAMETER_CONS_IDX_M,
V2_DB_PARAMETER_CONS_IDX_S, hr_qp->rq.head);
hns_roce_write64_k((__be32 *)&rq_db, hr_qp->rq.db_reg_l);
hns_roce_write64_k((__le32 *)&rq_db, hr_qp->rq.db_reg_l);
}
spin_unlock_irqrestore(&hr_qp->rq.lock, flags);
......
......@@ -224,22 +224,22 @@ enum hns_roce_sgid_type {
};
struct hns_roce_v2_cq_context {
u32 byte_4_pg_ceqn;
u32 byte_8_cqn;
u32 cqe_cur_blk_addr;
u32 byte_16_hop_addr;
u32 cqe_nxt_blk_addr;
u32 byte_24_pgsz_addr;
u32 byte_28_cq_pi;
u32 byte_32_cq_ci;
u32 cqe_ba;
u32 byte_40_cqe_ba;
u32 byte_44_db_record;
u32 db_record_addr;
u32 byte_52_cqe_cnt;
u32 byte_56_cqe_period_maxcnt;
u32 cqe_report_timer;
u32 byte_64_se_cqe_idx;
__le32 byte_4_pg_ceqn;
__le32 byte_8_cqn;
__le32 cqe_cur_blk_addr;
__le32 byte_16_hop_addr;
__le32 cqe_nxt_blk_addr;
__le32 byte_24_pgsz_addr;
__le32 byte_28_cq_pi;
__le32 byte_32_cq_ci;
__le32 cqe_ba;
__le32 byte_40_cqe_ba;
__le32 byte_44_db_record;
__le32 db_record_addr;
__le32 byte_52_cqe_cnt;
__le32 byte_56_cqe_period_maxcnt;
__le32 cqe_report_timer;
__le32 byte_64_se_cqe_idx;
};
#define HNS_ROCE_V2_CQ_DEFAULT_BURST_NUM 0x0
#define HNS_ROCE_V2_CQ_DEFAULT_INTERVAL 0x0
......@@ -328,66 +328,66 @@ enum hns_roce_v2_qp_state {
};
struct hns_roce_v2_qp_context {
u32 byte_4_sqpn_tst;
u32 wqe_sge_ba;
u32 byte_12_sq_hop;
u32 byte_16_buf_ba_pg_sz;
u32 byte_20_smac_sgid_idx;
u32 byte_24_mtu_tc;
u32 byte_28_at_fl;
__le32 byte_4_sqpn_tst;
__le32 wqe_sge_ba;
__le32 byte_12_sq_hop;
__le32 byte_16_buf_ba_pg_sz;
__le32 byte_20_smac_sgid_idx;
__le32 byte_24_mtu_tc;
__le32 byte_28_at_fl;
u8 dgid[GID_LEN_V2];
u32 dmac;
u32 byte_52_udpspn_dmac;
u32 byte_56_dqpn_err;
u32 byte_60_qpst_mapid;
u32 qkey_xrcd;
u32 byte_68_rq_db;
u32 rq_db_record_addr;
u32 byte_76_srqn_op_en;
u32 byte_80_rnr_rx_cqn;
u32 byte_84_rq_ci_pi;
u32 rq_cur_blk_addr;
u32 byte_92_srq_info;
u32 byte_96_rx_reqmsn;
u32 rq_nxt_blk_addr;
u32 byte_104_rq_sge;
u32 byte_108_rx_reqepsn;
u32 rq_rnr_timer;
u32 rx_msg_len;
u32 rx_rkey_pkt_info;
u64 rx_va;
u32 byte_132_trrl;
u32 trrl_ba;
u32 byte_140_raq;
u32 byte_144_raq;
u32 byte_148_raq;
u32 byte_152_raq;
u32 byte_156_raq;
u32 byte_160_sq_ci_pi;
u32 sq_cur_blk_addr;
u32 byte_168_irrl_idx;
u32 byte_172_sq_psn;
u32 byte_176_msg_pktn;
u32 sq_cur_sge_blk_addr;
u32 byte_184_irrl_idx;
u32 cur_sge_offset;
u32 byte_192_ext_sge;
u32 byte_196_sq_psn;
u32 byte_200_sq_max;
u32 irrl_ba;
u32 byte_208_irrl;
u32 byte_212_lsn;
u32 sq_timer;
u32 byte_220_retry_psn_msn;
u32 byte_224_retry_msg;
u32 rx_sq_cur_blk_addr;
u32 byte_232_irrl_sge;
u32 irrl_cur_sge_offset;
u32 byte_240_irrl_tail;
u32 byte_244_rnr_rxack;
u32 byte_248_ack_psn;
u32 byte_252_err_txcqn;
u32 byte_256_sqflush_rqcqe;
__le32 dmac;
__le32 byte_52_udpspn_dmac;
__le32 byte_56_dqpn_err;
__le32 byte_60_qpst_mapid;
__le32 qkey_xrcd;
__le32 byte_68_rq_db;
__le32 rq_db_record_addr;
__le32 byte_76_srqn_op_en;
__le32 byte_80_rnr_rx_cqn;
__le32 byte_84_rq_ci_pi;
__le32 rq_cur_blk_addr;
__le32 byte_92_srq_info;
__le32 byte_96_rx_reqmsn;
__le32 rq_nxt_blk_addr;
__le32 byte_104_rq_sge;
__le32 byte_108_rx_reqepsn;
__le32 rq_rnr_timer;
__le32 rx_msg_len;
__le32 rx_rkey_pkt_info;
__le64 rx_va;
__le32 byte_132_trrl;
__le32 trrl_ba;
__le32 byte_140_raq;
__le32 byte_144_raq;
__le32 byte_148_raq;
__le32 byte_152_raq;
__le32 byte_156_raq;
__le32 byte_160_sq_ci_pi;
__le32 sq_cur_blk_addr;
__le32 byte_168_irrl_idx;
__le32 byte_172_sq_psn;
__le32 byte_176_msg_pktn;
__le32 sq_cur_sge_blk_addr;
__le32 byte_184_irrl_idx;
__le32 cur_sge_offset;
__le32 byte_192_ext_sge;
__le32 byte_196_sq_psn;
__le32 byte_200_sq_max;
__le32 irrl_ba;
__le32 byte_208_irrl;
__le32 byte_212_lsn;
__le32 sq_timer;
__le32 byte_220_retry_psn_msn;
__le32 byte_224_retry_msg;
__le32 rx_sq_cur_blk_addr;
__le32 byte_232_irrl_sge;
__le32 irrl_cur_sge_offset;
__le32 byte_240_irrl_tail;
__le32 byte_244_rnr_rxack;
__le32 byte_248_ack_psn;
__le32 byte_252_err_txcqn;
__le32 byte_256_sqflush_rqcqe;
};
#define V2_QPC_BYTE_4_TST_S 0
......@@ -761,17 +761,17 @@ struct hns_roce_v2_qp_context {
#define V2_QPC_BYTE_256_SQ_FLUSH_IDX_M GENMASK(31, 16)
struct hns_roce_v2_cqe {
u32 byte_4;
__le32 byte_4;
union {
__le32 rkey;
__be32 immtdata;
};
u32 byte_12;
u32 byte_16;
u32 byte_cnt;
__le32 byte_12;
__le32 byte_16;
__le32 byte_cnt;
u8 smac[4];
u32 byte_28;
u32 byte_32;
__le32 byte_28;
__le32 byte_32;
};
#define V2_CQE_BYTE_4_OPCODE_S 0
......@@ -901,8 +901,8 @@ struct hns_roce_v2_mpt_entry {
#define V2_DB_PARAMETER_SL_M GENMASK(18, 16)
struct hns_roce_v2_cq_db {
u32 byte_4;
u32 parameter;
__le32 byte_4;
__le32 parameter;
};
#define V2_CQ_DB_BYTE_4_TAG_S 0
......@@ -920,18 +920,18 @@ struct hns_roce_v2_cq_db {
#define V2_CQ_DB_PARAMETER_NOTIFY_S 24
struct hns_roce_v2_ud_send_wqe {
u32 byte_4;
u32 msg_len;
u32 immtdata;
u32 byte_16;
u32 byte_20;
u32 byte_24;
u32 qkey;
u32 byte_32;
u32 byte_36;
u32 byte_40;
u32 dmac;
u32 byte_48;
__le32 byte_4;
__le32 msg_len;
__be32 immtdata;
__le32 byte_16;
__le32 byte_20;
__le32 byte_24;
__le32 qkey;
__le32 byte_32;
__le32 byte_36;
__le32 byte_40;
__le32 dmac;
__le32 byte_48;
u8 dgid[GID_LEN_V2];
};
......@@ -1004,13 +1004,16 @@ struct hns_roce_v2_ud_send_wqe {
#define V2_UD_SEND_WQE_BYTE_48_SMAC_INDX_M GENMASK(31, 24)
struct hns_roce_v2_rc_send_wqe {
u32 byte_4;
u32 msg_len;
u32 inv_key_immtdata;
u32 byte_16;
u32 byte_20;
u32 rkey;
u64 va;
__le32 byte_4;
__le32 msg_len;
union {
__le32 inv_key;
__be32 immtdata;
};
__le32 byte_16;
__le32 byte_20;
__le32 rkey;
__le64 va;
};
#define V2_RC_SEND_WQE_BYTE_4_OPCODE_S 0
......@@ -1038,14 +1041,14 @@ struct hns_roce_v2_rc_send_wqe {
#define V2_RC_SEND_WQE_BYTE_20_MSG_START_SGE_IDX_M GENMASK(23, 0)
struct hns_roce_v2_wqe_data_seg {
__be32 len;
__be32 lkey;
__be64 addr;
__le32 len;
__le32 lkey;
__le64 addr;
};
struct hns_roce_v2_db {
u32 byte_4;
u32 parameter;
__le32 byte_4;
__le32 parameter;
};
struct hns_roce_query_version {
......@@ -1105,12 +1108,12 @@ struct hns_roce_pf_res {
#define PF_RES_DATA_5_PF_EQC_BT_NUM_M GENMASK(25, 16)
struct hns_roce_vf_res_a {
u32 vf_id;
u32 vf_qpc_bt_idx_num;
u32 vf_srqc_bt_idx_num;
u32 vf_cqc_bt_idx_num;
u32 vf_mpt_bt_idx_num;
u32 vf_eqc_bt_idx_num;
__le32 vf_id;
__le32 vf_qpc_bt_idx_num;
__le32 vf_srqc_bt_idx_num;
__le32 vf_cqc_bt_idx_num;
__le32 vf_mpt_bt_idx_num;
__le32 vf_eqc_bt_idx_num;
};
#define VF_RES_A_DATA_1_VF_QPC_BT_IDX_S 0
......@@ -1144,11 +1147,11 @@ struct hns_roce_vf_res_a {
#define VF_RES_A_DATA_5_VF_EQC_NUM_M GENMASK(25, 16)
struct hns_roce_vf_res_b {
u32 rsv0;
u32 vf_smac_idx_num;
u32 vf_sgid_idx_num;
u32 vf_qid_idx_sl_num;
u32 rsv[2];
__le32 rsv0;
__le32 vf_smac_idx_num;
__le32 vf_sgid_idx_num;
__le32 vf_qid_idx_sl_num;
__le32 rsv[2];
};
#define VF_RES_B_DATA_0_VF_ID_S 0
......@@ -1180,11 +1183,11 @@ struct hns_roce_vf_res_b {
#define ROCEE_VF_SGID_CFG4_SGID_TYPE_M GENMASK(1, 0)
struct hns_roce_cfg_bt_attr {
u32 vf_qpc_cfg;
u32 vf_srqc_cfg;
u32 vf_cqc_cfg;
u32 vf_mpt_cfg;
u32 rsv[2];
__le32 vf_qpc_cfg;
__le32 vf_srqc_cfg;
__le32 vf_cqc_cfg;
__le32 vf_mpt_cfg;
__le32 rsv[2];
};
#define CFG_BT_ATTR_DATA_0_VF_QPC_BA_PGSZ_S 0
......@@ -1224,11 +1227,11 @@ struct hns_roce_cfg_bt_attr {
#define CFG_BT_ATTR_DATA_3_VF_MPT_HOPNUM_M GENMASK(9, 8)
struct hns_roce_cmq_desc {
u16 opcode;
u16 flag;
u16 retval;
u16 rsv;
u32 data[6];
__le16 opcode;
__le16 flag;
__le16 retval;
__le16 rsv;
__le32 data[6];
};
#define HNS_ROCE_V2_GO_BIT_TIMEOUT_MSECS 10000
......@@ -1274,18 +1277,18 @@ struct hns_roce_v2_priv {
};
struct hns_roce_eq_context {
u32 byte_4;
u32 byte_8;
u32 byte_12;
u32 eqe_report_timer;
u32 eqe_ba0;
u32 eqe_ba1;
u32 byte_28;
u32 byte_32;
u32 byte_36;
u32 nxt_eqe_ba0;
u32 nxt_eqe_ba1;
u32 rsv[5];
__le32 byte_4;
__le32 byte_8;
__le32 byte_12;
__le32 eqe_report_timer;
__le32 eqe_ba0;
__le32 eqe_ba1;
__le32 byte_28;
__le32 byte_32;
__le32 byte_36;
__le32 nxt_eqe_ba0;
__le32 nxt_eqe_ba1;
__le32 rsv[5];
};
#define HNS_ROCE_AEQ_DEFAULT_BURST_NUM 0x0
......
......@@ -200,7 +200,7 @@ static int hns_roce_query_device(struct ib_device *ib_dev,
memset(props, 0, sizeof(*props));
props->sys_image_guid = hr_dev->sys_image_guid;
props->sys_image_guid = cpu_to_be32(hr_dev->sys_image_guid);
props->max_mr_size = (u64)(~(0ULL));
props->page_size_cap = hr_dev->caps.page_size_cap;
props->vendor_id = hr_dev->vendor_id;
......
......@@ -512,9 +512,9 @@ static int hns_roce_create_qp_common(struct hns_roce_dev *hr_dev,
hr_qp->ibqp.qp_type = init_attr->qp_type;
if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR)
hr_qp->sq_signal_bits = IB_SIGNAL_ALL_WR;
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_ALL_WR);
else
hr_qp->sq_signal_bits = IB_SIGNAL_REQ_WR;
hr_qp->sq_signal_bits = cpu_to_le32(IB_SIGNAL_REQ_WR);
ret = hns_roce_set_rq_size(hr_dev, &init_attr->cap, !!ib_pd->uobject,
!!init_attr->srq, hr_qp);
......@@ -937,20 +937,6 @@ void hns_roce_unlock_cqs(struct hns_roce_cq *send_cq,
}
EXPORT_SYMBOL_GPL(hns_roce_unlock_cqs);
__be32 send_ieth(struct ib_send_wr *wr)
{
switch (wr->opcode) {
case IB_WR_SEND_WITH_IMM:
case IB_WR_RDMA_WRITE_WITH_IMM:
return cpu_to_le32(wr->ex.imm_data);
case IB_WR_SEND_WITH_INV:
return cpu_to_le32(wr->ex.invalidate_rkey);
default:
return 0;
}
}
EXPORT_SYMBOL_GPL(send_ieth);
static void *get_wqe(struct hns_roce_qp *hr_qp, int offset)
{
......