Commit d52c89f1 authored by Michal Kalderon, committed by David S. Miller

qed*: Utilize FW 8.37.2.0

This FW contains several fixes and features.

RDMA
- Several modifications and fixes for Memory Windows
- Drop VLAN and TCP timestamp from MSS calculation in the driver for
  this FW
- Fix SQ completion flow when local ack timeout is infinite
- Modifications to T10-DIF support

ETH
- Fix aRFS for tunneled traffic without inner IP.
- Fix chip configuration which may fail under heavy traffic conditions.
- Support receiving any-VNI in VXLAN and GENEVE RX classification.

iSCSI / FCoE
- Fix iSCSI recovery flow
- Drop VLAN and TCP timestamp from MSS calculation for FW 8.37.2.0

Misc
- Fix several (split) registers that would not read correctly with
  ethtool -d
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Manish Rangankar <manish.rangankar@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 95358a95
@@ -116,6 +116,7 @@ enum rdma_cqe_requester_status_enum {
 	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
 	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
 	RDMA_CQE_REQ_STS_XRC_VOILATION_ERR,
+	RDMA_CQE_REQ_STS_SIG_ERR,
 	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
 };
@@ -152,12 +153,12 @@ struct rdma_rq_sge {
 	struct regpair addr;
 	__le32 length;
 	__le32 flags;
-#define RDMA_RQ_SGE_L_KEY_MASK 0x3FFFFFF
-#define RDMA_RQ_SGE_L_KEY_SHIFT 0
+#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF
+#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0
 #define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
 #define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
-#define RDMA_RQ_SGE_RESERVED0_MASK 0x7
-#define RDMA_RQ_SGE_RESERVED0_SHIFT 29
+#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7
+#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29
 };

 struct rdma_srq_sge {
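The 32-bit lkey no longer fits in a single field of the RQ SGE flags dword: bits 0-25 carry L_KEY_LO, NUM_SGES occupies bits 26-28, and the top key bits go to L_KEY_HI at bits 29-31. A minimal, self-contained sketch of the packing, using a local stand-in for the driver's SET_FIELD() macro (illustration only; how qedr actually distributes a key across the two halves may differ):

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for the qed SET_FIELD() helper */
#define SET_FIELD(val, name, field) \
	((val) = ((val) & ~((uint32_t)(name##_MASK) << (name##_SHIFT))) | \
		 (((uint32_t)(field) & (name##_MASK)) << (name##_SHIFT)))

#define RDMA_RQ_SGE_L_KEY_LO_MASK 0x3FFFFFF
#define RDMA_RQ_SGE_L_KEY_LO_SHIFT 0
#define RDMA_RQ_SGE_NUM_SGES_MASK 0x7
#define RDMA_RQ_SGE_NUM_SGES_SHIFT 26
#define RDMA_RQ_SGE_L_KEY_HI_MASK 0x7
#define RDMA_RQ_SGE_L_KEY_HI_SHIFT 29

int main(void)
{
	uint32_t flags = 0;
	uint32_t lkey = 0x12345678;

	SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 2);
	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, lkey);	    /* key bits 0-25 */
	SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_HI, lkey >> 26); /* key bits 26-28 */
	printf("flags = 0x%08x\n", flags);
	return 0;
}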
@@ -241,18 +242,39 @@ enum rdma_dif_io_direction_flg {
 	MAX_RDMA_DIF_IO_DIRECTION_FLG
 };

-/* RDMA DIF Runt Result Structure */
-struct rdma_dif_runt_result {
-	__le16 guard_tag;
-	__le16 reserved[3];
+struct rdma_dif_params {
+	__le32 base_ref_tag;
+	__le16 app_tag;
+	__le16 app_tag_mask;
+	__le16 runt_crc_value;
+	__le16 flags;
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_SIZE_SHIFT 1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_MASK 0x1
+#define RDMA_DIF_PARAMS_RUNT_VALID_FLG_SHIFT 2
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_MASK 0x1
+#define RDMA_DIF_PARAMS_VALIDATE_APP_TAG_SHIFT 5
+#define RDMA_DIF_PARAMS_CRC_SEED_MASK 0x1
+#define RDMA_DIF_PARAMS_CRC_SEED_SHIFT 6
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_MASK 0x1
+#define RDMA_DIF_PARAMS_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_MASK 0x1
+#define RDMA_DIF_PARAMS_BLOCK_GUARD_TYPE_SHIFT 8
+#define RDMA_DIF_PARAMS_APP_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_APP_ESCAPE_SHIFT 9
+#define RDMA_DIF_PARAMS_REF_ESCAPE_MASK 0x1
+#define RDMA_DIF_PARAMS_REF_ESCAPE_SHIFT 10
+#define RDMA_DIF_PARAMS_RESERVED4_MASK 0x1F
+#define RDMA_DIF_PARAMS_RESERVED4_SHIFT 11
+	__le32 reserved5;
 };

+/* Memory window type enumeration */
+enum rdma_mw_type {
+	RDMA_MW_TYPE_1,
+	RDMA_MW_TYPE_2A,
+	MAX_RDMA_MW_TYPE
+};
+
 struct rdma_sq_atomic_wqe {
 	__le32 reserved1;
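The scattered per-WQE DIF fields are consolidated into this reusable rdma_dif_params block, which the bind WQE now embeds (see the bind WQE hunks below). A hedged sketch of composing its flags word with the same SET_FIELD() stand-in as in the previous sketch; the bit positions are from the struct above, but treating direction value 1 as the receive/validate side is an assumption:

#include <stdint.h>
#include <stdio.h>

#define SET_FIELD(val, name, field) \
	((val) = ((val) & ~((uint32_t)(name##_MASK) << (name##_SHIFT))) | \
		 (((uint32_t)(field) & (name##_MASK)) << (name##_SHIFT)))

#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_MASK 0x1
#define RDMA_DIF_PARAMS_IO_DIRECTION_FLG_SHIFT 0
#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_MASK 0x1
#define RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD_SHIFT 3
#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_MASK 0x1
#define RDMA_DIF_PARAMS_VALIDATE_REF_TAG_SHIFT 4

int main(void)
{
	uint32_t flags = 0;

	SET_FIELD(flags, RDMA_DIF_PARAMS_IO_DIRECTION_FLG, 1);	/* assumed: rx */
	SET_FIELD(flags, RDMA_DIF_PARAMS_VALIDATE_CRC_GUARD, 1);
	SET_FIELD(flags, RDMA_DIF_PARAMS_VALIDATE_REF_TAG, 1);
	printf("dif flags = 0x%04x\n", flags);	/* prints 0x0019 */
	return 0;
}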
@@ -334,17 +356,17 @@ struct rdma_sq_bind_wqe {
 #define RDMA_SQ_BIND_WQE_SE_FLG_SHIFT 3
 #define RDMA_SQ_BIND_WQE_INLINE_FLG_MASK 0x1
 #define RDMA_SQ_BIND_WQE_INLINE_FLG_SHIFT 4
-#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x7
-#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 5
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_MASK 0x1
+#define RDMA_SQ_BIND_WQE_DIF_ON_HOST_FLG_SHIFT 5
+#define RDMA_SQ_BIND_WQE_RESERVED0_MASK 0x3
+#define RDMA_SQ_BIND_WQE_RESERVED0_SHIFT 6
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	u8 bind_ctrl;
 #define RDMA_SQ_BIND_WQE_ZERO_BASED_MASK 0x1
 #define RDMA_SQ_BIND_WQE_ZERO_BASED_SHIFT 0
-#define RDMA_SQ_BIND_WQE_MW_TYPE_MASK 0x1
-#define RDMA_SQ_BIND_WQE_MW_TYPE_SHIFT 1
-#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x3F
-#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 2
+#define RDMA_SQ_BIND_WQE_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_RESERVED1_SHIFT 1
 	u8 access_ctrl;
 #define RDMA_SQ_BIND_WQE_REMOTE_READ_MASK 0x1
 #define RDMA_SQ_BIND_WQE_REMOTE_READ_SHIFT 0
@@ -363,6 +385,7 @@ struct rdma_sq_bind_wqe {
 	__le32 length_lo;
 	__le32 parent_l_key;
 	__le32 reserved4;
+	struct rdma_dif_params dif_params;
 };

 /* First element (16 bytes) of bind wqe */
@@ -392,10 +415,8 @@ struct rdma_sq_bind_wqe_2nd {
 	u8 bind_ctrl;
 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_MASK 0x1
 #define RDMA_SQ_BIND_WQE_2ND_ZERO_BASED_SHIFT 0
-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_MASK 0x1
-#define RDMA_SQ_BIND_WQE_2ND_MW_TYPE_SHIFT 1
-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x3F
-#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 2
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_MASK 0x7F
+#define RDMA_SQ_BIND_WQE_2ND_RESERVED1_SHIFT 1
 	u8 access_ctrl;
 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_MASK 0x1
 #define RDMA_SQ_BIND_WQE_2ND_REMOTE_READ_SHIFT 0
@@ -416,6 +437,11 @@ struct rdma_sq_bind_wqe_2nd {
 	__le32 reserved4;
 };

+/* Third element (16 bytes) of bind wqe */
+struct rdma_sq_bind_wqe_3rd {
+	struct rdma_dif_params dif_params;
+};
+
 /* Structure with only the SQ WQE common
  * fields. Size is of one SQ element (16B)
  */
@@ -486,30 +512,6 @@ struct rdma_sq_fmr_wqe {
 	u8 length_hi;
 	__le32 length_lo;
 	struct regpair pbl_addr;
-	__le32 dif_base_ref_tag;
-	__le16 dif_app_tag;
-	__le16 dif_app_tag_mask;
-	__le16 dif_runt_crc_value;
-	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK 0x1
-#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT 7
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0xFF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
-	__le32 reserved5;
 };

 /* First element (16 bytes) of fmr wqe */
@@ -566,33 +568,6 @@ struct rdma_sq_fmr_wqe_2nd {
 	struct regpair pbl_addr;
 };

-/* Third element (16 bytes) of fmr wqe */
-struct rdma_sq_fmr_wqe_3rd {
-	__le32 dif_base_ref_tag;
-	__le16 dif_app_tag;
-	__le16 dif_app_tag_mask;
-	__le16 dif_runt_crc_value;
-	__le16 dif_flags;
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT 0
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT 1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT 2
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT 3
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT 4
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK 0x1
-#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0xFF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 8
-	__le32 reserved5;
-};
-
 struct rdma_sq_local_inv_wqe {
 	struct regpair reserved;
@@ -637,8 +612,8 @@ struct rdma_sq_rdma_wqe {
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT 5
 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK 0x1
 #define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT 6
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT 7
+#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 7
 	u8 wqe_size;
 	u8 prev_wqe_size;
 	struct regpair remote_va;
@@ -646,13 +621,9 @@ struct rdma_sq_rdma_wqe {
 	u8 dif_flags;
 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_MASK 0x1
 #define RDMA_SQ_RDMA_WQE_DIF_BLOCK_SIZE_SHIFT 0
-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_FIRST_RDMA_IN_IO_FLG_SHIFT 1
-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_MASK 0x1
-#define RDMA_SQ_RDMA_WQE_DIF_LAST_RDMA_IN_IO_FLG_SHIFT 2
-#define RDMA_SQ_RDMA_WQE_RESERVED1_MASK 0x1F
-#define RDMA_SQ_RDMA_WQE_RESERVED1_SHIFT 3
-	u8 reserved2[3];
+#define RDMA_SQ_RDMA_WQE_RESERVED2_MASK 0x7F
+#define RDMA_SQ_RDMA_WQE_RESERVED2_SHIFT 1
+	u8 reserved3[3];
 };

 /* First element (16 bytes) of rdma wqe */
...
@@ -3276,7 +3276,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES,
 				  wr->num_sge);
-			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY,
+			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO,
 				  wr->sg_list[i].lkey);
 			RQ_SGE_SET(rqe, wr->sg_list[i].addr,
@@ -3295,7 +3295,7 @@ int qedr_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *wr,
 			/* First one must include the number
 			 * of SGE in the list
 			 */
-			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY, 0);
+			SET_FIELD(flags, RDMA_RQ_SGE_L_KEY_LO, 0);
 			SET_FIELD(flags, RDMA_RQ_SGE_NUM_SGES, 1);
 			RQ_SGE_SET(rqe, 0, 0, flags);
...
[diff collapsed]
@@ -2792,7 +2792,7 @@ static void qed_hw_info_port_num_bb(struct qed_hwfn *p_hwfn,
 {
 	u32 port_mode;

-	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0);
+	port_mode = qed_rd(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB);
 	if (port_mode < 3) {
 		p_hwfn->cdev->num_ports_in_engine = 1;
...
[diff collapsed]
@@ -360,6 +360,26 @@ void qed_port_unpretend(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 	       *(u32 *)&p_ptt->pxp.pretend);
 }

+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, u8 port_id, u16 fid)
+{
+	u16 control = 0;
+
+	SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id);
+	SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1);
+	SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1);
+
+	if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID))
+		fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID);
+
+	p_ptt->pxp.pretend.control = cpu_to_le16(control);
+	p_ptt->pxp.pretend.fid.concrete_fid.fid = cpu_to_le16(fid);
+
+	REG_WR(p_hwfn,
+	       qed_ptt_config_addr(p_ptt) +
+	       offsetof(struct pxp_ptt_entry, pretend),
+	       *(u32 *)&p_ptt->pxp.pretend);
+}
+
 u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
 {
 	u32 concrete_fid = 0;
...
@@ -244,6 +244,18 @@ void qed_port_pretend(struct qed_hwfn *p_hwfn,
 void qed_port_unpretend(struct qed_hwfn *p_hwfn,
 			struct qed_ptt *p_ptt);

+/**
+ * @brief qed_port_fid_pretend - pretend to another port and another function
+ *        when accessing the ptt window
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param port_id - the port to pretend to
+ * @param fid - fid field of pxp_pretend structure. Can contain either pf / vf.
+ */
+void qed_port_fid_pretend(struct qed_hwfn *p_hwfn,
+			  struct qed_ptt *p_ptt, u8 port_id, u16 fid);
+
 /**
  * @brief qed_vfid_to_concrete - build a concrete FID for a
  *        given VF ID
...
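A sketch of how a caller might use the new helper around a single windowed register access. REG_ADDR is a placeholder, and fully restoring the previous port/function view is assumed to be the caller's job; qed_port_unpretend() is shown undoing the port part, as in the existing API:

/* Hypothetical caller: read a register as seen by `fid` on port 1 */
u32 read_as_fid(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, u16 fid)
{
	u32 val;

	qed_port_fid_pretend(p_hwfn, p_ptt, 1 /* port_id */, fid);
	val = qed_rd(p_hwfn, p_ptt, REG_ADDR);	/* REG_ADDR is a placeholder */
	qed_port_unpretend(p_hwfn, p_ptt);

	return val;
}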
@@ -1245,7 +1245,7 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
 		    bool udp,
 		    bool ipv4, bool ipv6, enum gft_profile_type profile_type)
 {
-	u32 reg_val, cam_line, ram_line_lo, ram_line_hi;
+	u32 reg_val, cam_line, ram_line_lo, ram_line_hi, search_non_ip_as_gft;

 	if (!ipv6 && !ipv4)
 		DP_NOTICE(p_hwfn,
@@ -1314,6 +1314,9 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
 	ram_line_lo = 0;
 	ram_line_hi = 0;

+	/* Search no IP as GFT */
+	search_non_ip_as_gft = 0;
+
 	/* Tunnel type */
 	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_DST_PORT, 1);
 	SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL, 1);
@@ -1337,8 +1340,13 @@ void qed_gft_config(struct qed_hwfn *p_hwfn,
 		SET_FIELD(ram_line_lo, GFT_RAM_LINE_ETHERTYPE, 1);
 	} else if (profile_type == GFT_PROFILE_TYPE_TUNNEL_TYPE) {
 		SET_FIELD(ram_line_lo, GFT_RAM_LINE_TUNNEL_ETHERTYPE, 1);
+
+		/* Allow tunneled traffic without inner IP */
+		search_non_ip_as_gft = 1;
 	}

+	qed_wr(p_hwfn,
+	       p_ptt, PRS_REG_SEARCH_NON_IP_AS_GFT, search_non_ip_as_gft);
 	qed_wr(p_hwfn,
 	       p_ptt,
 	       PRS_REG_GFT_PROFILE_MASK_RAM + RAM_LINE_SIZE * pf_id,
@@ -1509,3 +1517,43 @@ void qed_enable_context_validation(struct qed_hwfn *p_hwfn,
 	ctx_validation = CDU_VALIDATION_DEFAULT_CFG << 8;
 	qed_wr(p_hwfn, p_ptt, CDU_REG_TCFC_CTX_VALID0, ctx_validation);
 }
+
+static u32 qed_get_rdma_assert_ram_addr(struct qed_hwfn *p_hwfn, u8 storm_id)
+{
+	switch (storm_id) {
+	case 0:
+		return TSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       TSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	case 1:
+		return MSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       MSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	case 2:
+		return USEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       USTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	case 3:
+		return XSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       XSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	case 4:
+		return YSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       YSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	case 5:
+		return PSEM_REG_FAST_MEMORY + SEM_FAST_REG_INT_RAM +
+		       PSTORM_RDMA_ASSERT_LEVEL_OFFSET(p_hwfn->rel_pf_id);
+	default:
+		return 0;
+	}
+}
+
+void qed_set_rdma_error_level(struct qed_hwfn *p_hwfn,
+			      struct qed_ptt *p_ptt,
+			      u8 assert_level[NUM_STORMS])
+{
+	u8 storm_id;
+
+	for (storm_id = 0; storm_id < NUM_STORMS; storm_id++) {
+		u32 ram_addr = qed_get_rdma_assert_ram_addr(p_hwfn, storm_id);
+
+		qed_wr(p_hwfn, p_ptt, ram_addr, assert_level[storm_id]);
+	}
+}
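A sketch of a possible caller, assuming a debug path wants the same assert level on every storm; the level value 2 is arbitrary:

static void example_set_rdma_asserts(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt)
{
	u8 levels[NUM_STORMS];

	memset(levels, 2, sizeof(levels));	/* arbitrary debug level */
	qed_set_rdma_error_level(p_hwfn, p_ptt, levels);
}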
@@ -1159,7 +1159,6 @@ int qed_iwarp_connect(void *rdma_cxt,
 	struct qed_iwarp_info *iwarp_info;
 	struct qed_iwarp_ep *ep;
 	u8 mpa_data_size = 0;
-	u8 ts_hdr_size = 0;
 	u32 cid;
 	int rc;

@@ -1218,10 +1217,7 @@ int qed_iwarp_connect(void *rdma_cxt,
 	       iparams->cm_info.private_data,
 	       iparams->cm_info.private_data_len);

-	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
-		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
-
-	ep->mss = iparams->mss - ts_hdr_size;
+	ep->mss = iparams->mss;
 	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);

 	ep->event_cb = iparams->event_cb;
@@ -2337,7 +2333,6 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
 	u8 local_mac_addr[ETH_ALEN];
 	struct qed_iwarp_ep *ep;
 	int tcp_start_offset;
-	u8 ts_hdr_size = 0;
 	u8 ll2_syn_handle;
 	int payload_len;
 	u32 hdr_size;
@@ -2415,11 +2410,7 @@ qed_iwarp_ll2_comp_syn_pkt(void *cxt, struct qed_ll2_comp_rx_data *data)
 	memcpy(&ep->cm_info, &cm_info, sizeof(ep->cm_info));

-	if (p_hwfn->p_rdma_info->iwarp.tcp_flags & QED_IWARP_TS_EN)
-		ts_hdr_size = TIMESTAMP_HEADER_SIZE;
-
-	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60) +
-		   ts_hdr_size;
+	hdr_size = ((cm_info.ip_version == QED_TCP_IPV4) ? 40 : 60);
 	ep->mss = p_hwfn->p_rdma_info->iwarp.max_mtu - hdr_size;
 	ep->mss = min_t(u16, QED_IWARP_MAX_FW_MSS, ep->mss);
...
@@ -586,6 +586,9 @@ qed_sp_update_accept_mode(struct qed_hwfn *p_hwfn,
 		SET_FIELD(state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL,
 			  !!(accept_filter & QED_ACCEPT_BCAST));

+		SET_FIELD(state, ETH_VPORT_RX_MODE_ACCEPT_ANY_VNI,
+			  !!(accept_filter & QED_ACCEPT_ANY_VNI));
+
 		p_ramrod->rx_mode.state = cpu_to_le16(state);
 		DP_VERBOSE(p_hwfn, QED_MSG_SP,
 			   "p_ramrod->rx_mode.state = 0x%x\n", state);
...
@@ -183,6 +183,7 @@ struct qed_filter_accept_flags {
 #define QED_ACCEPT_MCAST_MATCHED 0x08
 #define QED_ACCEPT_MCAST_UNMATCHED 0x10
 #define QED_ACCEPT_BCAST 0x20
+#define QED_ACCEPT_ANY_VNI 0x40
 };

 struct qed_arfs_config_params {
...
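A hedged illustration of the new flag in use; the combination below is an example, not lifted from the driver:

u8 accept_filter = QED_ACCEPT_UCAST_MATCHED | QED_ACCEPT_BCAST |
		   QED_ACCEPT_ANY_VNI;	/* also match tunnels on any VNI */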
@@ -1508,11 +1508,8 @@ qed_rdma_register_tid(void *rdma_cxt,
 	case QED_RDMA_TID_FMR:
 		tid_type = RDMA_TID_FMR;
 		break;
-	case QED_RDMA_TID_MW_TYPE1:
-		tid_type = RDMA_TID_MW_TYPE1;
-		break;
-	case QED_RDMA_TID_MW_TYPE2A:
-		tid_type = RDMA_TID_MW_TYPE2A;
+	case QED_RDMA_TID_MW:
+		tid_type = RDMA_TID_MW;
 		break;
 	default:
 		rc = -EINVAL;
@@ -1544,7 +1541,6 @@ qed_rdma_register_tid(void *rdma_cxt,
 			  RDMA_REGISTER_TID_RAMROD_DATA_DIF_ON_HOST_FLG, 1);
 		DMA_REGPAIR_LE(p_ramrod->dif_error_addr,
 			       params->dif_error_addr);
-		DMA_REGPAIR_LE(p_ramrod->dif_runt_addr, params->dif_runt_addr);
 	}

 	rc = qed_spq_post(p_hwfn, p_ent, &fw_return_code);
...
@@ -178,7 +178,7 @@
 	0x008c80UL
 #define MCP_REG_SCRATCH \
 	0xe20000UL
-#define CNIG_REG_NW_PORT_MODE_BB_B0 \
+#define CNIG_REG_NW_PORT_MODE_BB \
 	0x218200UL
 #define MISCS_REG_CHIP_NUM \
 	0x00976cUL
@@ -1621,6 +1621,7 @@
 #define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
 #define PRS_REG_SEARCH_GFT 0x1f11bcUL
+#define PRS_REG_SEARCH_NON_IP_AS_GFT 0x1f11c0UL
 #define PRS_REG_CM_HDR_GFT 0x1f11c8UL
 #define PRS_REG_GFT_CAM 0x1f1100UL
 #define PRS_REG_GFT_PROFILE_MASK_RAM 0x1f1000UL
...
@@ -681,7 +681,6 @@ static int qed_roce_sp_modify_requester(struct qed_hwfn *p_hwfn,

 static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 					    struct qed_rdma_qp *qp,
-					    u32 *num_invalidated_mw,
 					    u32 *cq_prod)
 {
 	struct roce_destroy_qp_resp_output_params *p_ramrod_res;
@@ -692,8 +691,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 	int rc;

 	DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "icid = %08x\n", qp->icid);
-
-	*num_invalidated_mw = 0;
 	*cq_prod = qp->cq_prod;

 	if (!qp->resp_offloaded) {
@@ -742,7 +739,6 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto err;

-	*num_invalidated_mw = le32_to_cpu(p_ramrod_res->num_invalidated_mw);
 	*cq_prod = le32_to_cpu(p_ramrod_res->cq_prod);
 	qp->cq_prod = *cq_prod;
@@ -764,8 +760,7 @@ static int qed_roce_sp_destroy_qp_responder(struct qed_hwfn *p_hwfn,
 }

 static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
-					    struct qed_rdma_qp *qp,
-					    u32 *num_bound_mw)
+					    struct qed_rdma_qp *qp)
 {
 	struct roce_destroy_qp_req_output_params *p_ramrod_res;
 	struct roce_destroy_qp_req_ramrod_data *p_ramrod;
@@ -807,7 +802,6 @@ static int qed_roce_sp_destroy_qp_requester(struct qed_hwfn *p_hwfn,
 	if (rc)
 		goto err;

-	*num_bound_mw = le32_to_cpu(p_ramrod_res->num_bound_mw);

 	/* Free ORQ - only if ramrod succeeded, in case FW is still using it */
 	dma_free_coherent(&p_hwfn->cdev->pdev->dev,
@@ -968,8 +962,6 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,

 int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 {
-	u32 num_invalidated_mw = 0;
-	u32 num_bound_mw = 0;
 	u32 cq_prod;
 	int rc;
@@ -984,22 +976,14 @@ int qed_roce_destroy_qp(struct qed_hwfn *p_hwfn, struct qed_rdma_qp *qp)
 	if (qp->cur_state != QED_ROCE_QP_STATE_RESET) {
 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn, qp,
-						      &num_invalidated_mw,
 						      &cq_prod);
 		if (rc)
 			return rc;

 		/* Send destroy requester ramrod */
-		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
-						      &num_bound_mw);
+		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
 		if (rc)
 			return rc;
-
-		if (num_invalidated_mw != num_bound_mw) {
-			DP_NOTICE(p_hwfn,
-				  "number of invalidate memory windows is different from bounded ones\n");
-			return -EINVAL;
-		}
 	}

 	return 0;
@@ -1010,7 +994,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		       enum qed_roce_qp_state prev_state,
 		       struct qed_rdma_modify_qp_in_params *params)
 {
-	u32 num_invalidated_mw = 0, num_bound_mw = 0;
 	int rc = 0;

 	/* Perform additional operations according to the current state and the
@@ -1090,7 +1073,6 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		/* Send destroy responder ramrod */
 		rc = qed_roce_sp_destroy_qp_responder(p_hwfn,
 						      qp,
-						      &num_invalidated_mw,
 						      &cq_prod);

 		if (rc)
@@ -1098,14 +1080,7 @@ int qed_roce_modify_qp(struct qed_hwfn *p_hwfn,
 		qp->cq_prod = cq_prod;

-		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp,
-						      &num_bound_mw);
-
-		if (num_invalidated_mw != num_bound_mw) {
-			DP_NOTICE(p_hwfn,
-				  "number of invalidate memory windows is different from bounded ones\n");
-			return -EINVAL;
-		}
+		rc = qed_roce_sp_destroy_qp_requester(p_hwfn, qp);
 	} else {
 		DP_VERBOSE(p_hwfn, QED_MSG_RDMA, "0\n");
 	}
...
@@ -471,14 +471,8 @@ static u16 qedi_calc_mss(u16 pmtu, u8 is_ipv6, u8 tcp_ts_en, u8 vlan_en)
 	else
 		hdrs += IPV4_HDR_LEN;

-	if (vlan_en)
-		hdrs += VLAN_LEN;
-
 	mss = pmtu - hdrs;

-	if (tcp_ts_en)
-		mss -= TCP_OPTION_LEN;
-
 	if (!mss)
 		mss = DEF_MSS;
...
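With this FW the VLAN and TCP-timestamp overheads are accounted for by the firmware, so the driver-side MSS reduces to PMTU minus the fixed TCP/IP headers. A standalone sketch of the resulting calculation; the header sizes and the DEF_MSS fallback value are assumptions:

#include <stdint.h>
#include <stdio.h>

#define TCP_HDR_LEN	20	/* assumed */
#define IPV4_HDR_LEN	20	/* assumed */
#define IPV6_HDR_LEN	40	/* assumed */
#define DEF_MSS		1460	/* assumed fallback */

static uint16_t calc_mss(uint16_t pmtu, int is_ipv6)
{
	uint16_t hdrs = TCP_HDR_LEN + (is_ipv6 ? IPV6_HDR_LEN : IPV4_HDR_LEN);
	uint16_t mss = pmtu - hdrs;

	return mss ? mss : DEF_MSS;
}

int main(void)
{
	printf("ipv4, mtu 1500 -> mss %u\n", calc_mss(1500, 0)); /* 1460 */
	return 0;
}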
@@ -109,8 +109,8 @@
 #define MAX_NUM_LL2_TX_STATS_COUNTERS 48

 #define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 33
-#define FW_REVISION_VERSION 11
+#define FW_MINOR_VERSION 37
+#define FW_REVISION_VERSION 2
 #define FW_ENGINEERING_VERSION 0

 /***********************/
...
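The bundled storm FW thus moves from 8.33.11.0 to 8.37.2.0. As a sketch, a probe path could log the compiled-in version like this (illustrative, not taken from the driver):

pr_info("qed: requires storm FW %d.%d.%d.%d\n",
	FW_MAJOR_VERSION, FW_MINOR_VERSION,
	FW_REVISION_VERSION, FW_ENGINEERING_VERSION);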
@@ -799,8 +799,8 @@ struct e4_mstorm_iscsi_task_ag_ctx {
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_MSTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E4_MSTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_MASK 0x1
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_VALID_SHIFT 6
 #define E4_MSTORM_ISCSI_TASK_AG_CTX_TASK_CLEANUP_FLAG_MASK 0x1
@@ -849,8 +849,8 @@ struct e4_ustorm_iscsi_task_ag_ctx {
 #define E4_USTORM_ISCSI_TASK_AG_CTX_CONNECTION_TYPE_SHIFT 0
 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_MASK 0x1
 #define E4_USTORM_ISCSI_TASK_AG_CTX_EXIST_IN_QM0_SHIFT 4
-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_MASK 0x1
-#define E4_USTORM_ISCSI_TASK_AG_CTX_BIT1_SHIFT 5
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_MASK 0x1
+#define E4_USTORM_ISCSI_TASK_AG_CTX_CONN_CLEAR_SQ_FLAG_SHIFT 5
 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_MASK 0x3
 #define E4_USTORM_ISCSI_TASK_AG_CTX_HQ_SCANNED_CF_SHIFT 6
 	u8 flags1;
...
@@ -65,8 +65,7 @@ enum qed_roce_qp_state {
 enum qed_rdma_tid_type {
 	QED_RDMA_TID_REGISTERED_MR,
 	QED_RDMA_TID_FMR,
-	QED_RDMA_TID_MW_TYPE1,
-	QED_RDMA_TID_MW_TYPE2A
+	QED_RDMA_TID_MW
 };

 struct qed_rdma_events {
@@ -280,7 +279,6 @@ struct qed_rdma_register_tid_in_params {

 	bool dif_enabled;
 	u64 dif_error_addr;
-	u64 dif_runt_addr;
 };

 struct qed_rdma_create_cq_in_params {
...
@@ -43,6 +43,7 @@
 #define ROCE_MAX_QPS (32 * 1024)
 #define ROCE_DCQCN_NP_MAX_QPS (64)
 #define ROCE_DCQCN_RP_MAX_QPS (64)
+#define ROCE_LKEY_MW_DIF_EN_BIT (28)

 /* Affiliated asynchronous events / errors enumeration */
 enum roce_async_events_type {
...
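Judging by the identifier alone, bit 28 of a memory-window lkey flags DIF enablement; a hedged illustration (the semantic is an assumption drawn only from the name):

static bool lkey_mw_dif_enabled(u32 lkey)
{
	return !!(lkey & BIT(ROCE_LKEY_MW_DIF_EN_BIT));
}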