Commit a2e7699e authored by Tomer Tayar, committed by David S. Miller

qed*: Refactoring and rearranging FW API with no functional impact

This patch refactors and reorders the FW API files in preparation for
upgrading the code to support the new FW.

- Make use of the BIT macro in appropriate places (see the short sketch
  after this list).
- Whitespace changes to align values and code blocks.
- Comments are updated (spelling mistakes fixed, unclear comments removed).
- Group together code blocks which are related or deal with similar
  matters.
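For illustration only (editor's sketch, not part of the patch): the BIT() item above simply replaces open-coded single-bit shifts with the kernel's BIT() helper, as in the ATTENTION_SINGLE change to qed_int.h shown further down in this diff. The names below mirror that change; the _OLD suffix is added here only to show both forms side by side, and BIT() itself comes from the kernel headers (historically <linux/bitops.h>).

/* Editor's sketch -- BIT(nr) is the kernel's named form of a one-bit shift. */
#define BIT(nr)				(1UL << (nr))

#define ATTENTION_LENGTH_SHIFT		(4)

/* Before the patch: open-coded shift. */
#define ATTENTION_SINGLE_OLD		(1 << ATTENTION_LENGTH_SHIFT)

/* After the patch: same value, expressed with BIT(). */
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)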
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bbb6189d
@@ -180,12 +180,12 @@ struct rdma_pwm_val32_data {
	__le16 icid;
	u8 agg_flags;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK	0x3
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT	0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK	0x1
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT	2
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK	0x1F
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT	3
	__le32 value;
};
@@ -478,23 +478,23 @@ struct rdma_sq_fmr_wqe {
	__le16 dif_app_tag_mask;
	__le16 dif_runt_crc_value;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_IO_DIRECTION_FLG_SHIFT	0
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_BLOCK_SIZE_SHIFT	1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_RUNT_VALID_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_CRC_GUARD_SHIFT	3
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_REF_TAG_SHIFT	4
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT	5
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK	0x1
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT	6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK	0x1FF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT	7
	__le32 reserved5;
};

/* First element (16 bytes) of fmr wqe */
@@ -558,23 +558,23 @@ struct rdma_sq_fmr_wqe_3rd {
	__le16 dif_app_tag_mask;
	__le16 dif_runt_crc_value;
	__le16 dif_flags;
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_IO_DIRECTION_FLG_SHIFT	0
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_BLOCK_SIZE_SHIFT	1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_RUNT_VALID_FLG_SHIFT	2
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_CRC_GUARD_SHIFT	3
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_REF_TAG_SHIFT	4
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT	5
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK	0x1
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT	6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK	0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT	7
	__le32 reserved5;
};

struct rdma_sq_local_inv_wqe {
@@ -606,20 +606,20 @@ struct rdma_sq_rdma_wqe {
	__le32 xrc_srq;
	u8 req_type;
	u8 flags;
#define RDMA_SQ_RDMA_WQE_COMP_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_COMP_FLG_SHIFT	0
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_RD_FENCE_FLG_SHIFT	1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_INV_FENCE_FLG_SHIFT	2
#define RDMA_SQ_RDMA_WQE_SE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_SE_FLG_SHIFT	3
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT	4
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK	0x1
#define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT	5
#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK	0x3
#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT	6
	u8 wqe_size;
	u8 prev_wqe_size;
	struct regpair remote_va;
...
@@ -779,7 +779,7 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
}

#define PURE_LB_TC 8
#define PKT_LB_TC 9

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
...
@@ -109,8 +109,8 @@ struct src_ent {
	u64 next;
};

#define CDUT_SEG_ALIGNMET 3	/* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)

#define CONN_CXT_SIZE(p_hwfn) \
	ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
@@ -2326,7 +2326,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
		for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
			elem = (union type1_task_context *)elem_start;
			SET_FIELD(elem->roce_ctx.tdif_context.flags1,
				  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
			elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
		}
	}
...
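Editor's sketch, not part of the patch: most of the field renames in this series (e.g. TDIF_TASK_CONTEXT_REFTAGMASK becoming TDIF_TASK_CONTEXT_REF_TAG_MASK just above) only touch the _MASK/_SHIFT define pairs that the qed SET_FIELD()/GET_FIELD() helpers paste together, so the generated code is unchanged. Roughly, assuming the usual token-pasting form of those helpers (the real definitions live in the qed common headers; EXAMPLE_FIELD is a made-up name):

/* Illustrative only; u64 is the kernel's 64-bit type. */
#define EXAMPLE_FIELD_MASK	0xf
#define EXAMPLE_FIELD_SHIFT	4

#define GET_FIELD(value, name) \
	(((value) >> (name ## _SHIFT)) & (name ## _MASK))

#define SET_FIELD(value, name, flag)					\
	do {								\
		(value) &= ~((u64)(name ## _MASK) << (name ## _SHIFT));	\
		(value) |= ((u64)(flag) << (name ## _SHIFT));		\
	} while (0)

/* SET_FIELD(flags, EXAMPLE_FIELD, 0x3) writes bits 7:4 of 'flags';
 * renaming EXAMPLE_FIELD only renames the two defines above.
 */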
@@ -358,20 +358,14 @@ struct phy_defs {
		(arr)[i] = qed_rd(dev, ptt, addr); \
	} while (0)

#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
#endif

/* Extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
#define NUM_EXTRA_DBG_LINES(block_desc) \
	(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
	((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif

#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
@@ -441,23 +435,17 @@ struct phy_defs {

#define FW_IMG_MAIN 1

#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
	(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)

#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
	(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)

#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
	(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
@@ -1089,6 +1077,20 @@ static struct block_defs block_xyld_defs = {
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_prm_defs = {
	"prm",
	{true, true}, false, 0,
@@ -1221,6 +1223,34 @@ static struct block_defs block_cau_defs = {
	true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_umac_defs = {
	"umac",
	{false, true}, false, 0,
@@ -1338,48 +1368,6 @@ static struct block_defs block_avs_wrap_defs = {
	true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
	"misc_aeu", {false, false}, false, 0,
	{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
@@ -5596,10 +5584,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4

/********************************* Macros ************************************/
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)

/***************************** Constant Arrays *******************************/
struct user_dbg_array {
...
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)

/* This function reconfigures the QM pf on the fly.
 * For this purpose we:
 * 1. reconfigure the QM database
 * 2. set new values to runtime array
 * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
 * 4. activate init tool in QM_PF stage
 * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
		     NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
	}

	/* Protocol Configuration */
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
		     (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
	STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
...
@@ -46,75 +46,110 @@
				    0x1000) : 0)
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
					    0x100) - 1 : 0)

#define QM_INVALID_PQ_ID 0xffff

/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1

/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4

/* WFQ constants */

/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000

/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0

/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5

/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)

/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL 43750000

/* RL constants */

/* Period in us */
#define QM_RL_PERIOD 5

/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)

/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) max_t(u32, \
				  (u32)(((rate ? rate : \
					  1000000) * \
					 QM_RL_PERIOD * \
					 101) / (8 * 100)), 1)

/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND 62500000

/* Max PF RL increment value is 0.7 * upper bound */
#define QM_RL_MAX_INC_VAL 43750000

/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1

/* Command Queue constants */

/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150

#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
	(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
		      PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))

#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
	(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
	 (ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
		      PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))

#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
	((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)

/* BTB: blocks constants (block size = 256B) */

/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38

/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10

/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7

/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
#define QM_STOP_CMD_STRUCT_SIZE 2
#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0
#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0
#define QM_STOP_CMD_PAUSE_MASK_MASK -1
#define QM_STOP_CMD_GROUP_ID_OFFSET 1
#define QM_STOP_CMD_GROUP_ID_SHIFT 16
#define QM_STOP_CMD_GROUP_ID_MASK 15
#define QM_STOP_CMD_PQ_TYPE_OFFSET 1
#define QM_STOP_CMD_PQ_TYPE_SHIFT 24
#define QM_STOP_CMD_PQ_TYPE_MASK 1
#define QM_STOP_CMD_MAX_POLL_COUNT 100
#define QM_STOP_CMD_POLL_PERIOD_US 500

/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
	SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
		  cmd ## _ ## field, \
		  value)

/* QM: VOQ macros */
#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) ((port) * \
						   (max_phys_tcs_per_port) + \
@@ -128,6 +163,7 @@
						     max_phy_tcs_pr_port) \
			       : LB_VOQ(port))

/******************** INTERNAL IMPLEMENTATION *********************/

/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
...
@@ -59,10 +59,10 @@ struct qed_pi_info {
};

struct qed_sb_sp_info {
	struct qed_sb_info sb_info;

	/* per protocol index data */
	struct qed_pi_info pi_info_arr[PIS_PER_SB];
};

enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT		(4)
#define ATTENTION_LENGTH(flags)		(((flags) & ATTENTION_LENGTH_MASK) >> \
					 ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE		BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR			(ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT		((2 << ATTENTION_LENGTH_SHIFT) | \
					 ATTENTION_PARITY)
...
@@ -62,22 +62,6 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
struct qed_iscsi_conn {
	struct list_head list_entry;
	bool free_on_delete;
@@ -161,6 +145,22 @@ struct qed_iscsi_conn {
	u8 abortive_dsconnect;
};
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
			enum spq_mode comp_mode,
@@ -276,7 +276,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
	p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
	val = p_params->tx_sws_timer;
	p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
	p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;

	p_hwfn->p_iscsi_info->event_context = event_context;
	p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +304,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
	int rc = 0;
	u32 dval;
	u16 wval;
	u16 *p;
	u8 i;

	/* Get SPQ entry */
	memset(&init_data, 0, sizeof(init_data));
...
@@ -342,56 +342,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
		cpu_to_le16(dif_task_params->application_tag_mask);
	SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
		  dif_task_params->crc_seed ? 1 : 0);
	SET_FIELD(rdif_context->flags0,
		  RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
		  dif_task_params->host_guard_type);
	SET_FIELD(rdif_context->flags0,
		  RDIF_TASK_CONTEXT_PROTECTION_TYPE,
		  dif_task_params->protection_type);
	SET_FIELD(rdif_context->flags0,
		  RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
	SET_FIELD(rdif_context->flags0,
		  RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
		  dif_task_params->keep_ref_tag_const ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
		  (dif_task_params->validate_app_tag &&
		   dif_task_params->dif_on_network) ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_VALIDATE_GUARD,
		  (dif_task_params->validate_guard &&
		   dif_task_params->dif_on_network) ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
		  (dif_task_params->validate_ref_tag &&
		   dif_task_params->dif_on_network) ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_HOST_INTERFACE,
		  dif_task_params->dif_on_host ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
		  dif_task_params->dif_on_network ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_FORWARD_GUARD,
		  dif_task_params->forward_guard ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
		  dif_task_params->forward_app_tag ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
		  dif_task_params->forward_ref_tag ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
		  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
		  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
	SET_FIELD(rdif_context->flags1,
		  RDIF_TASK_CONTEXT_INTERVAL_SIZE,
		  dif_task_params->dif_block_size_log - 9);
	SET_FIELD(rdif_context->state,
		  RDIF_TASK_CONTEXT_REF_TAG_MASK,
		  dif_task_params->ref_tag_mask);
	SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
		  dif_task_params->ignore_app_tag);
}
@@ -399,7 +400,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
	    task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
		tdif_context->app_tag_value =
		    cpu_to_le16(dif_task_params->application_tag);
		tdif_context->partial_crc_value_b =
		    cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
		tdif_context->partial_crc_value_a =
		    cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
@@ -407,59 +408,63 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
			  dif_task_params->crc_seed ? 1 : 0);

		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
			  dif_task_params->tx_dif_conn_err_en ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
			  dif_task_params->forward_guard ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
			  dif_task_params->forward_app_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
			  dif_task_params->forward_ref_tag ? 1 : 0);
		SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
			  dif_task_params->dif_block_size_log - 9);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_HOST_INTERFACE,
			  dif_task_params->dif_on_host ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
			  dif_task_params->dif_on_network ? 1 : 0);
		val = cpu_to_le32(dif_task_params->initial_ref_tag);
		tdif_context->initial_ref_tag = val;
		tdif_context->app_tag_mask =
		    cpu_to_le16(dif_task_params->application_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
			  dif_task_params->host_guard_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_PROTECTION_TYPE,
			  dif_task_params->protection_type);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
			  dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
			  dif_task_params->keep_ref_tag_const ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_GUARD,
			  (dif_task_params->validate_guard &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
			  (dif_task_params->validate_app_tag &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
			  (dif_task_params->validate_ref_tag &&
			   dif_task_params->dif_on_host) ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
			  dif_task_params->forward_app_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
			  dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
		SET_FIELD(tdif_context->flags1,
			  TDIF_TASK_CONTEXT_REF_TAG_MASK,
			  dif_task_params->ref_tag_mask);
		SET_FIELD(tdif_context->flags0,
			  TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
			  dif_task_params->ignore_app_tag ? 1 : 0);
	}
}
...
@@ -29,9 +29,12 @@
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__

#include <linux/qed/rdma_common.h>

/************************/
/* IWARP FW CONSTANTS */
/************************/
@@ -40,14 +43,14 @@
#define IWARP_PASSIVE_MODE 1

#define IWARP_SHARED_QUEUE_PAGE_SIZE (0x8000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_OFFSET (0x4000)
#define IWARP_SHARED_QUEUE_PAGE_RQ_PBL_MAX_SIZE (0x1000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_OFFSET (0x5000)
#define IWARP_SHARED_QUEUE_PAGE_SQ_PBL_MAX_SIZE (0x3000)

#define IWARP_REQ_MAX_INLINE_DATA_SIZE (128)
#define IWARP_REQ_MAX_SINGLE_SQ_WQE_SIZE (176)

#define IWARP_MAX_QPS (64 * 1024)

#endif /* __IWARP_COMMON__ */
@@ -316,16 +316,16 @@ enum qed_int_mode {
};

struct qed_sb_info {
	struct status_block *sb_virt;
	dma_addr_t sb_phys;
	u32 sb_ack; /* Last given ack */
	u16 igu_sb_id;
	void __iomem *igu_addr;
	u8 flags;
#define QED_SB_INFO_INIT	0x1
#define QED_SB_INFO_SETUP	0x2

	struct qed_dev *cdev;
};

enum qed_dev_type {
...
@@ -32,28 +32,29 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__

/************************/
/* RDMA FW CONSTANTS */
/************************/

#define RDMA_RESERVED_LKEY (0)
#define RDMA_RING_PAGE_SIZE (0x1000)

#define RDMA_MAX_SGE_PER_SQ_WQE (4)
#define RDMA_MAX_SGE_PER_RQ_WQE (4)

#define RDMA_MAX_DATA_SIZE_IN_WQE (0x80000000)

#define RDMA_REQ_RD_ATOMIC_ELM_SIZE (0x50)
#define RDMA_RESP_RD_ATOMIC_ELM_SIZE (0x20)

#define RDMA_MAX_CQS (64 * 1024)
#define RDMA_MAX_TIDS (128 * 1024 - 1)
#define RDMA_MAX_PDS (64 * 1024)

#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB

#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
...
@@ -33,13 +33,18 @@
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__

/************************/
/* ROCE FW CONSTANTS */
/************************/

#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)

#define ROCE_MAX_QPS (32 * 1024)
#define ROCE_DCQCN_NP_MAX_QPS (64)
#define ROCE_DCQCN_RP_MAX_QPS (64)

/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
	ROCE_ASYNC_EVENT_NONE = 0,
	ROCE_ASYNC_EVENT_COMM_EST = 1,
...
@@ -33,43 +33,53 @@
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__

/*********************/
/* SCSI CONSTANTS */
/*********************/

#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
#define BDQ_NUM_RESOURCES (4)

#define BDQ_ID_RQ (0)
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)

#define SCSI_NUM_SGES_SLOW_SGL_THR 8

#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)

/* SCSI buffer descriptor */
struct scsi_bd {
	struct regpair address;
	struct regpair opaque;
};

/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
	__le16 external_producer;
	__le16 reserved0[3];
};

/* SCSI SGE entry */
struct scsi_sge {
	struct regpair sge_addr;
	__le32 sge_len;
	__le32 reserved;
};

/* Cached SGEs section */
struct scsi_cached_sges {
	struct scsi_sge sge[4];
};

/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
	__le16 cmdq_cons;
	__le16 reserved0;
	__le32 reserved1;
};

/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
	__le16 num_tasks;
	u8 log_page_size;
@@ -77,6 +87,7 @@ struct scsi_init_func_params {
	u8 reserved2[12];
};

/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
	struct regpair glbl_q_params_addr;
	__le16 rq_buffer_size;
@@ -84,14 +95,14 @@ struct scsi_init_func_queues {
	__le16 cmdq_num_entries;
	u8 bdq_resource_id;
	u8 q_validity;
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_MASK		0x1
#define SCSI_INIT_FUNC_QUEUES_RQ_VALID_SHIFT		0
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_MASK	0x1
#define SCSI_INIT_FUNC_QUEUES_IMM_DATA_VALID_SHIFT	1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_MASK		0x1
#define SCSI_INIT_FUNC_QUEUES_CMD_VALID_SHIFT		2
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_MASK	0x1F
#define SCSI_INIT_FUNC_QUEUES_RESERVED_VALID_SHIFT	3
	u8 num_queues;
	u8 queue_relative_offset;
	u8 cq_sb_pi;
@@ -107,16 +118,19 @@ struct scsi_init_func_queues {
	__le32 reserved1;
};

/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
struct scsi_ram_per_bdq_resource_drv_data {
	struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};

/* SCSI SGL types */
enum scsi_sgl_mode {
	SCSI_TX_SLOW_SGL,
	SCSI_FAST_SGL,
	MAX_SCSI_SGL_MODE
};

/* SCSI SGL parameters */
struct scsi_sgl_params {
	struct regpair sgl_addr;
	__le32 sgl_total_length;
@@ -126,6 +140,7 @@ struct scsi_sgl_params {
	u8 reserved;
};

/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
	__le16 unsolicited_cq_count;
	__le16 cmdq_count;
...
@@ -33,8 +33,13 @@
#ifndef __TCP_COMMON__
#define __TCP_COMMON__

/********************/
/* TCP FW CONSTANTS */
/********************/

#define TCP_INVALID_TIMEOUT_VAL -1

/* OOO opaque data received from LL2 */
struct ooo_opaque {
	__le32 cid;
	u8 drop_isle;
@@ -43,25 +48,29 @@ struct ooo_opaque {
	u8 ooo_isle;
};

/* tcp connect mode enum */
enum tcp_connect_mode {
	TCP_CONNECT_ACTIVE,
	TCP_CONNECT_PASSIVE,
	MAX_TCP_CONNECT_MODE
};

/* tcp function init parameters */
struct tcp_init_params {
	__le32 two_msl_timer;
	__le16 tx_sws_timer;
	u8 max_fin_rt;
	u8 reserved[9];
};

/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
	TCP_IPV4,
	TCP_IPV6,
	MAX_TCP_IP_VERSION
};

/* tcp offload parameters */
struct tcp_offload_params {
	__le16 local_mac_addr_lo;
	__le16 local_mac_addr_mid;
@@ -71,22 +80,22 @@ struct tcp_offload_params {
	__le16 remote_mac_addr_hi;
	__le16 vlan_id;
	u8 flags;
#define TCP_OFFLOAD_PARAMS_TS_EN_MASK		0x1
#define TCP_OFFLOAD_PARAMS_TS_EN_SHIFT		0
#define TCP_OFFLOAD_PARAMS_DA_EN_MASK		0x1
#define TCP_OFFLOAD_PARAMS_DA_EN_SHIFT		1
#define TCP_OFFLOAD_PARAMS_KA_EN_MASK		0x1
#define TCP_OFFLOAD_PARAMS_KA_EN_SHIFT		2
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_NAGLE_EN_SHIFT	3
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_DA_CNT_EN_SHIFT	4
#define TCP_OFFLOAD_PARAMS_FIN_SENT_MASK	0x1
#define TCP_OFFLOAD_PARAMS_FIN_SENT_SHIFT	5
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_MASK	0x1
#define TCP_OFFLOAD_PARAMS_FIN_RECEIVED_SHIFT	6
#define TCP_OFFLOAD_PARAMS_RESERVED0_MASK	0x1
#define TCP_OFFLOAD_PARAMS_RESERVED0_SHIFT	7
	u8 ip_version;
	__le32 remote_ip[4];
	__le32 local_ip[4];
@@ -132,6 +141,7 @@ struct tcp_offload_params {
	__le32 reserved3[2];
};

/* tcp offload parameters */
struct tcp_offload_params_opt2 {
	__le16 local_mac_addr_lo;
	__le16 local_mac_addr_mid;
@@ -141,14 +151,14 @@ struct tcp_offload_params_opt2 {
	__le16 remote_mac_addr_hi;
	__le16 vlan_id;
	u8 flags;
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_OPT2_TS_EN_SHIFT	0
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_OPT2_DA_EN_SHIFT	1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_MASK	0x1
#define TCP_OFFLOAD_PARAMS_OPT2_KA_EN_SHIFT	2
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_MASK	0x1F
#define TCP_OFFLOAD_PARAMS_OPT2_RESERVED0_SHIFT	3
	u8 ip_version;
	__le32 remote_ip[4];
	__le32 local_ip[4];
@@ -166,6 +176,7 @@ struct tcp_offload_params_opt2 {
	__le32 reserved1[22];
};

/* tcp IPv4/IPv6 enum */
enum tcp_seg_placement_event {
	TCP_EVENT_ADD_PEN,
	TCP_EVENT_ADD_NEW_ISLE,
@@ -177,40 +188,41 @@ enum tcp_seg_placement_event {
	MAX_TCP_SEG_PLACEMENT_EVENT
};

/* tcp init parameters */
struct tcp_update_params {
	__le16 flags;
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_SHIFT		0
#define TCP_UPDATE_PARAMS_MSS_CHANGED_MASK			0x1
#define TCP_UPDATE_PARAMS_MSS_CHANGED_SHIFT			1
#define TCP_UPDATE_PARAMS_TTL_CHANGED_MASK			0x1
#define TCP_UPDATE_PARAMS_TTL_CHANGED_SHIFT			2
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_TOS_OR_TC_CHANGED_SHIFT		3
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_KA_TIMEOUT_CHANGED_SHIFT		4
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_KA_INTERVAL_CHANGED_SHIFT		5
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_MAX_RT_TIME_CHANGED_SHIFT		6
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_FLOW_LABEL_CHANGED_SHIFT		7
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_INITIAL_RCV_WND_CHANGED_SHIFT		8
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_MASK		0x1
#define TCP_UPDATE_PARAMS_KA_MAX_PROBE_CNT_CHANGED_SHIFT	9
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_MASK			0x1
#define TCP_UPDATE_PARAMS_KA_EN_CHANGED_SHIFT			10
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_MASK			0x1
#define TCP_UPDATE_PARAMS_NAGLE_EN_CHANGED_SHIFT		11
#define TCP_UPDATE_PARAMS_KA_EN_MASK				0x1
#define TCP_UPDATE_PARAMS_KA_EN_SHIFT				12
#define TCP_UPDATE_PARAMS_NAGLE_EN_MASK				0x1
#define TCP_UPDATE_PARAMS_NAGLE_EN_SHIFT			13
#define TCP_UPDATE_PARAMS_KA_RESTART_MASK			0x1
#define TCP_UPDATE_PARAMS_KA_RESTART_SHIFT			14
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_MASK		0x1
#define TCP_UPDATE_PARAMS_RETRANSMIT_RESTART_SHIFT		15
	__le16 remote_mac_addr_lo;
	__le16 remote_mac_addr_mid;
	__le16 remote_mac_addr_hi;
@@ -226,6 +238,7 @@ struct tcp_update_params {
	u8 reserved1[7];
};

/* toe upload parameters */
struct tcp_upload_params {
	__le32 rcv_next;
	__le32 snd_una;
...