Commit a2e7699e authored by Tomer Tayar, committed by David S. Miller

qed*: Refactoring and rearranging FW API with no functional impact

This patch refactors and reorders the FW API files in preparation for
upgrading the code to support new FW.

- Make use of the BIT macro in appropriate places.
- Whitespace changes to align values and code blocks.
- Comments are updated (spelling mistakes are fixed; unclear comments are removed).
- Group together code blocks which are related or deal with similar matters.
Signed-off-by: Ariel Elior <Ariel.Elior@cavium.com>
Signed-off-by: Michal Kalderon <Michal.Kalderon@cavium.com>
Signed-off-by: Tomer Tayar <Tomer.Tayar@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bbb6189d
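
The first bullet above ("Make use of the BIT macro in appropriate places") boils down to the pattern sketched below. This is an illustrative, standalone C snippet, not code from the driver; BIT() is redefined locally so the snippet compiles on its own, whereas the driver picks it up from the kernel headers, and the flag names are made up.

/* Illustrative only: the (1 << n) -> BIT(n) style change applied
 * throughout this patch, shown with made-up identifiers.
 */
#include <stdio.h>

#ifndef BIT
#define BIT(nr)			(1UL << (nr))
#endif

#define EXAMPLE_FLAG_OLD_STYLE	(1 << 3)	/* before: open-coded shift */
#define EXAMPLE_FLAG_NEW_STYLE	BIT(3)		/* after: same value via BIT() */

int main(void)
{
	printf("old=%#x new=%#lx\n", EXAMPLE_FLAG_OLD_STYLE, EXAMPLE_FLAG_NEW_STYLE);
	return 0;
}
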
......@@ -494,7 +494,7 @@ struct rdma_sq_fmr_wqe {
#define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT 7
__le32 Reserved5;
__le32 reserved5;
};
/* First element (16 bytes) of fmr wqe */
......@@ -574,7 +574,7 @@ struct rdma_sq_fmr_wqe_3rd {
#define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT 6
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK 0x1FF
#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT 7
__le32 Reserved5;
__le32 reserved5;
};
struct rdma_sq_local_inv_wqe {
......
......@@ -110,7 +110,7 @@ struct src_ent {
};
#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
#define CONN_CXT_SIZE(p_hwfn) \
ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
......@@ -2326,7 +2326,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
elem = (union type1_task_context *)elem_start;
SET_FIELD(elem->roce_ctx.tdif_context.flags1,
TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
}
}
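
The rename above (TDIF_TASK_CONTEXT_REFTAGMASK -> TDIF_TASK_CONTEXT_REF_TAG_MASK) works because the driver's SET_FIELD() builds the _MASK/_SHIFT names from the field name, so the matching #define pairs are renamed in the FW API headers as well. Below is a self-contained sketch of that convention, written in the same spirit as the driver's helper rather than copied from it; all identifiers in the snippet are made up.

/* Self-contained sketch of the _MASK/_SHIFT convention behind SET_FIELD()
 * calls like the one above; not the driver's definition.
 */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field occupying bits [6:3] of a flags word */
#define DEMO_INTERVAL_SIZE_MASK		0xf
#define DEMO_INTERVAL_SIZE_SHIFT	3

#define DEMO_SET_FIELD(value, name, val)				\
	((value) = ((value) & ~((name ## _MASK) << (name ## _SHIFT))) |	\
		   (((val) & (name ## _MASK)) << (name ## _SHIFT)))

int main(void)
{
	uint16_t flags1 = 0;

	DEMO_SET_FIELD(flags1, DEMO_INTERVAL_SIZE, 0xf);
	printf("flags1 = %#x\n", flags1);	/* prints 0x78 */
	return 0;
}
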
......
......@@ -358,20 +358,14 @@ struct phy_defs {
(arr)[i] = qed_rd(dev, ptt, addr); \
} while (0)
#ifndef DWORDS_TO_BYTES
#define DWORDS_TO_BYTES(dwords) ((dwords) * BYTES_IN_DWORD)
#endif
#ifndef BYTES_TO_DWORDS
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
#endif
/* extra lines include a signature line + optional latency events line */
#ifndef NUM_DBG_LINES
/* Extra lines include a signature line + optional latency events line */
#define NUM_EXTRA_DBG_LINES(block_desc) \
(1 + ((block_desc)->has_latency_events ? 1 : 0))
#define NUM_DBG_LINES(block_desc) \
((block_desc)->num_of_lines + NUM_EXTRA_DBG_LINES(block_desc))
#endif
#define RAM_LINES_TO_DWORDS(lines) ((lines) * 2)
#define RAM_LINES_TO_BYTES(lines) \
......@@ -441,23 +435,17 @@ struct phy_defs {
#define FW_IMG_MAIN 1
#ifndef REG_FIFO_ELEMENT_DWORDS
#define REG_FIFO_ELEMENT_DWORDS 2
#endif
#define REG_FIFO_DEPTH_ELEMENTS 32
#define REG_FIFO_DEPTH_DWORDS \
(REG_FIFO_ELEMENT_DWORDS * REG_FIFO_DEPTH_ELEMENTS)
#ifndef IGU_FIFO_ELEMENT_DWORDS
#define IGU_FIFO_ELEMENT_DWORDS 4
#endif
#define IGU_FIFO_DEPTH_ELEMENTS 64
#define IGU_FIFO_DEPTH_DWORDS \
(IGU_FIFO_ELEMENT_DWORDS * IGU_FIFO_DEPTH_ELEMENTS)
#ifndef PROTECTION_OVERRIDE_ELEMENT_DWORDS
#define PROTECTION_OVERRIDE_ELEMENT_DWORDS 2
#endif
#define PROTECTION_OVERRIDE_DEPTH_ELEMENTS 20
#define PROTECTION_OVERRIDE_DEPTH_DWORDS \
(PROTECTION_OVERRIDE_DEPTH_ELEMENTS * \
......@@ -1089,6 +1077,20 @@ static struct block_defs block_xyld_defs = {
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 12
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_prm_defs = {
"prm",
{true, true}, false, 0,
......@@ -1221,6 +1223,34 @@ static struct block_defs block_cau_defs = {
true, true, DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, 19
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_umac_defs = {
"umac",
{false, true}, false, 0,
......@@ -1338,48 +1368,6 @@ static struct block_defs block_avs_wrap_defs = {
true, false, DBG_RESET_REG_MISCS_PL_UA, 11
};
static struct block_defs block_rgfs_defs = {
"rgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_rgsrc_defs = {
"rgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgfs_defs = {
"tgfs", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_tgsrc_defs = {
"tgsrc", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ptld_defs = {
"ptld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_ypld_defs = {
"ypld", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
0, 0, 0, 0, 0,
false, false, MAX_DBG_RESET_REGS, 0
};
static struct block_defs block_misc_aeu_defs = {
"misc_aeu", {false, false}, false, 0,
{MAX_DBG_BUS_CLIENTS, MAX_DBG_BUS_CLIENTS},
......@@ -5596,10 +5584,6 @@ struct igu_fifo_addr_data {
#define PROTECTION_OVERRIDE_ELEMENT_ADDR_FACTOR 4
/********************************* Macros ************************************/
#define BYTES_TO_DWORDS(bytes) ((bytes) / BYTES_IN_DWORD)
/***************************** Constant Arrays *******************************/
struct user_dbg_array {
......
......@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
/* This function reconfigures the QM pf on the fly.
* For this purpose we:
* 1. reconfigure the QM database
* 2. set new values to runtime arrat
* 2. set new values to runtime array
* 3. send an sdm_qm_cmd through the rbc interface to stop the QM
* 4. activate init tool in QM_PF stage
* 5. send an sdm_qm_cmd through rbc interface to release the QM
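
As a reading aid only, the five steps in this comment map to the control flow outlined below. Every helper name in the sketch is a placeholder, not a qed function, and the sketch makes no claim about the driver's actual primitives.

/* Placeholder outline of the QM PF reconfiguration steps above;
 * none of these helpers exist in the driver.
 */
#include <stdbool.h>
#include <stdio.h>

static void reconfigure_qm_database(void)   { puts("1. reconfigure the QM database"); }
static void write_runtime_array(void)       { puts("2. set new values in the runtime array"); }
static bool sdm_qm_cmd_over_rbc(bool release)
{
	printf("%c. sdm_qm_cmd over rbc: %s the QM\n",
	       release ? '5' : '3', release ? "release" : "stop");
	return true;			/* assume the command completes */
}
static void run_init_tool_qm_pf(void)       { puts("4. run the init tool, QM_PF stage"); }

int main(void)
{
	reconfigure_qm_database();
	write_runtime_array();
	if (!sdm_qm_cmd_over_rbc(false))	/* stop the QM before re-init */
		return 1;
	run_init_tool_qm_pf();
	return sdm_qm_cmd_over_rbc(true) ? 0 : 1;	/* release the QM */
}
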
......@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
}
/* Protocl Configuration */
/* Protocol Configuration */
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
(p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
......
......@@ -47,50 +47,87 @@
#define QM_PQ_SIZE_256B(pq_size) (pq_size ? DIV_ROUND_UP(pq_size, \
0x100) - 1 : 0)
#define QM_INVALID_PQ_ID 0xffff
/* Feature enable */
#define QM_BYPASS_EN 1
#define QM_BYTE_CRD_EN 1
/* Other PQ constants */
#define QM_OTHER_PQS_PER_PF 4
/* WFQ constants */
/* Upper bound in MB, 10 * burst size of 1ms in 50Gbps */
#define QM_WFQ_UPPER_BOUND 62500000
/* Bit of VOQ in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_VOQ_SHIFT 0
/* Bit of PF in WFQ VP PQ map */
#define QM_WFQ_VP_PQ_PF_SHIFT 5
/* 0x9000 = 4*9*1024 */
#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000)
/* Max WFQ increment value is 0.7 * upper bound */
#define QM_WFQ_MAX_INC_VAL 43750000
/* RL constants */
#define QM_RL_UPPER_BOUND 62500000
#define QM_RL_PERIOD 5 /* in us */
/* Period in us */
#define QM_RL_PERIOD 5
/* Period in 25MHz cycles */
#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD)
#define QM_RL_MAX_INC_VAL 43750000
/* RL increment value - rate is specified in mbps */
#define QM_RL_INC_VAL(rate) max_t(u32, \
(u32)(((rate ? rate : \
1000000) * \
QM_RL_PERIOD * \
101) / (8 * 100)), 1)
/* PF RL Upper bound is set to 10 * burst size of 1ms in 50Gbps */
#define QM_RL_UPPER_BOUND 62500000
/* Max PF RL increment value is 0.7 * upper bound */
#define QM_RL_MAX_INC_VAL 43750000
/* AFullOprtnstcCrdMask constants */
#define QM_OPPOR_LINE_VOQ_DEF 1
#define QM_OPPOR_FW_STOP_DEF 0
#define QM_OPPOR_PQ_EMPTY_DEF 1
/* Command Queue constants */
/* Pure LB CmdQ lines (+spare) */
#define PBF_CMDQ_PURE_LB_LINES 150
#define PBF_CMDQ_LINES_RT_OFFSET(voq) ( \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + voq * \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
#define PBF_CMDQ_LINES_RT_OFFSET(ext_voq) \
(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET - \
PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET))
#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) ( \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \
(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
#define PBF_BTB_GUARANTEED_RT_OFFSET(ext_voq) \
(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + \
(ext_voq) * (PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - \
PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET))
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) ((((pbf_cmd_lines) - \
4) * \
2) | QM_LINE_CRD_REG_SIGN_BIT)
#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \
((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT)
/* BTB: blocks constants (block size = 256B) */
/* 256B blocks in 9700B packet */
#define BTB_JUMBO_PKT_BLOCKS 38
/* Headroom per-port */
#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS
#define BTB_PURE_LB_FACTOR 10
/* Factored (hence really 0.7) */
#define BTB_PURE_LB_RATIO 7
/* QM stop command constants */
#define QM_STOP_PQ_MASK_WIDTH 32
#define QM_STOP_CMD_ADDR 2
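
A quick standalone check of the QM arithmetic in the hunk above: it repeats the shown expressions (minus the kernel-only max_t() wrapper) so the commented relationships -- 0x9000 = 4 * 9 * 1024, the 0.7 * upper-bound maximum, and the ~1% margin in QM_RL_INC_VAL() -- can be verified in isolation. It is not driver code.

/* Standalone arithmetic check for the QM constants above; not driver code. */
#include <stdio.h>

#define QM_RL_PERIOD		5		/* in us */
#define QM_RL_UPPER_BOUND	62500000U	/* 10 * 1 ms burst at 50 Gbps, in bytes */

/* Same expression as QM_RL_INC_VAL(rate) above, without max_t(): rate is in
 * Mbps, the result is bytes per QM_RL_PERIOD with a 1% margin (101 / 100).
 */
static unsigned int rl_inc_val(unsigned int rate_mbps)
{
	unsigned long long rate = rate_mbps ? rate_mbps : 1000000;
	unsigned long long val = (rate * QM_RL_PERIOD * 101) / (8 * 100);

	return val ? (unsigned int)val : 1;
}

int main(void)
{
	printf("4 * 9 * 1024         = %#x\n", 4 * 9 * 1024);	/* 0x9000, the WFQ factor */
	printf("0.7 * upper bound    = %u\n",
	       QM_RL_UPPER_BOUND / 10 * 7);			/* 43750000, the max inc value */
	printf("QM_RL_INC_VAL(25000) = %u bytes/period\n",
	       rl_inc_val(25000));				/* 15781 for a 25 Gbps rate */
	return 0;
}
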
......@@ -108,11 +145,9 @@
#define QM_STOP_CMD_POLL_PERIOD_US 500
/* QM command macros */
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## \
_STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, \
value) SET_FIELD(var[cmd ## _ ## field ## \
_OFFSET], \
#define QM_CMD_STRUCT_SIZE(cmd) cmd ## _STRUCT_SIZE
#define QM_CMD_SET_FIELD(var, cmd, field, value) \
SET_FIELD(var[cmd ## _ ## field ## _OFFSET], \
cmd ## _ ## field, \
value)
/* QM: VOQ macros */
......@@ -128,6 +163,7 @@
max_phy_tcs_pr_port) \
: LB_VOQ(port))
/******************** INTERNAL IMPLEMENTATION *********************/
/* Prepare PF RL enable/disable runtime init values */
static void qed_enable_pf_rl(struct qed_hwfn *p_hwfn, bool pf_rl_en)
{
......
......@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
#define ATTENTION_LENGTH_SHIFT (4)
#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \
ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT)
#define ATTENTION_SINGLE BIT(ATTENTION_LENGTH_SHIFT)
#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY)
#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \
ATTENTION_PARITY)
......
......@@ -62,22 +62,6 @@
#include "qed_sriov.h"
#include "qed_reg_addr.h"
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
struct qed_iscsi_conn {
struct list_head list_entry;
bool free_on_delete;
......@@ -161,6 +145,22 @@ struct qed_iscsi_conn {
u8 abortive_dsconnect;
};
static int
qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
u8 fw_event_code,
u16 echo, union event_ring_data *data, u8 fw_return_code)
{
if (p_hwfn->p_iscsi_info->event_cb) {
struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
return p_iscsi->event_cb(p_iscsi->event_context,
fw_event_code, data);
} else {
DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
return -EINVAL;
}
}
static int
qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
......@@ -276,7 +276,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
val = p_params->tx_sws_timer;
p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
p_hwfn->p_iscsi_info->event_context = event_context;
p_hwfn->p_iscsi_info->event_cb = async_event_cb;
......@@ -304,8 +304,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
int rc = 0;
u32 dval;
u16 wval;
u8 i;
u16 *p;
u8 i;
/* Get SPQ entry */
memset(&init_data, 0, sizeof(init_data));
......
......@@ -342,56 +342,57 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_CRC_SEED,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(rdif_context->flags0, RDIF_TASK_CONTEXT_HOSTGUARDTYPE,
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_PROTECTIONTYPE,
RDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_INITIALREFTAGVALID, 1);
RDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID, 1);
SET_FIELD(rdif_context->flags0,
RDIF_TASK_CONTEXT_KEEPREFTAGCONST,
RDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEAPPTAG,
RDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEGUARD,
RDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_VALIDATEREFTAG,
RDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_network) ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_HOSTINTERFACE,
RDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_NETWORKINTERFACE,
RDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDGUARD,
RDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAG,
RDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAG,
RDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
RDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
RDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(rdif_context->flags1,
RDIF_TASK_CONTEXT_INTERVALSIZE,
RDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(rdif_context->state,
RDIF_TASK_CONTEXT_REFTAGMASK,
RDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNOREAPPTAG,
SET_FIELD(rdif_context->state, RDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag);
}
......@@ -399,7 +400,7 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
task_type == ISCSI_TASK_TYPE_INITIATOR_WRITE) {
tdif_context->app_tag_value =
cpu_to_le16(dif_task_params->application_tag);
tdif_context->partial_crc_valueB =
tdif_context->partial_crc_value_b =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
tdif_context->partial_crc_value_a =
cpu_to_le16(dif_task_params->crc_seed ? 0xffff : 0x0000);
......@@ -407,59 +408,63 @@ void init_rtdif_task_context(struct rdif_task_context *rdif_context,
dif_task_params->crc_seed ? 1 : 0);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_SETERRORWITHEOP,
TDIF_TASK_CONTEXT_SET_ERROR_WITH_EOP,
dif_task_params->tx_dif_conn_err_en ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDGUARD,
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARD_GUARD,
dif_task_params->forward_guard ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDAPPTAG,
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARD_APP_TAG,
dif_task_params->forward_app_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_FORWARDREFTAG,
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARD_REF_TAG,
dif_task_params->forward_ref_tag ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVALSIZE,
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_INTERVAL_SIZE,
dif_task_params->dif_block_size_log - 9);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_HOSTINTERFACE,
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_HOST_INTERFACE,
dif_task_params->dif_on_host ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_NETWORKINTERFACE,
TDIF_TASK_CONTEXT_NETWORK_INTERFACE,
dif_task_params->dif_on_network ? 1 : 0);
val = cpu_to_le32(dif_task_params->initial_ref_tag);
tdif_context->initial_ref_tag = val;
tdif_context->app_tag_mask =
cpu_to_le16(dif_task_params->application_tag_mask);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_HOSTGUARDTYPE,
TDIF_TASK_CONTEXT_HOST_GUARD_TYPE,
dif_task_params->host_guard_type);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_PROTECTIONTYPE,
TDIF_TASK_CONTEXT_PROTECTION_TYPE,
dif_task_params->protection_type);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_INITIALREFTAGVALID,
TDIF_TASK_CONTEXT_INITIAL_REF_TAG_VALID,
dif_task_params->initial_ref_tag_is_valid ? 1 : 0);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_KEEPREFTAGCONST,
TDIF_TASK_CONTEXT_KEEP_REF_TAG_CONST,
dif_task_params->keep_ref_tag_const ? 1 : 0);
SET_FIELD(tdif_context->flags1, TDIF_TASK_CONTEXT_VALIDATEGUARD,
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATE_GUARD,
(dif_task_params->validate_guard &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEAPPTAG,
TDIF_TASK_CONTEXT_VALIDATE_APP_TAG,
(dif_task_params->validate_app_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_VALIDATEREFTAG,
TDIF_TASK_CONTEXT_VALIDATE_REF_TAG,
(dif_task_params->validate_ref_tag &&
dif_task_params->dif_on_host) ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDAPPTAGWITHMASK,
TDIF_TASK_CONTEXT_FORWARD_APP_TAG_WITH_MASK,
dif_task_params->forward_app_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_FORWARDREFTAGWITHMASK,
TDIF_TASK_CONTEXT_FORWARD_REF_TAG_WITH_MASK,
dif_task_params->forward_ref_tag_with_mask ? 1 : 0);
SET_FIELD(tdif_context->flags1,
TDIF_TASK_CONTEXT_REFTAGMASK,
TDIF_TASK_CONTEXT_REF_TAG_MASK,
dif_task_params->ref_tag_mask);
SET_FIELD(tdif_context->flags0,
TDIF_TASK_CONTEXT_IGNOREAPPTAG,
TDIF_TASK_CONTEXT_IGNORE_APP_TAG,
dif_task_params->ignore_app_tag ? 1 : 0);
}
}
......
......@@ -36,6 +36,7 @@
/********************/
/* ETH FW CONSTANTS */
/********************/
#define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 10
......@@ -78,16 +79,16 @@
#define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 12
/* num of MAC/VLAN filters */
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
#define ETH_NUM_VLAN_FILTERS 512
/* approx. multicast constants */
/* Approx. multicast constants */
#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0
#define ETH_MULTICAST_MAC_BINS 256
#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32)
/* ethernet vport update constants */
/* Ethernet vport update constants */
#define ETH_FILTER_RULES_COUNT 10
#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128
#define ETH_RSS_KEY_SIZE_REGS 10
......@@ -123,7 +124,7 @@ struct eth_tx_1st_bd_flags {
#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7
};
/* The parsing information data fo rthe first tx bd of a given packet. */
/* The parsing information data fo rthe first tx bd of a given packet */
struct eth_tx_data_1st_bd {
__le16 vlan;
u8 nbds;
......@@ -137,7 +138,7 @@ struct eth_tx_data_1st_bd {
#define ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT 2
};
/* The parsing information data for the second tx bd of a given packet. */
/* The parsing information data for the second tx bd of a given packet */
struct eth_tx_data_2nd_bd {
__le16 tunn_ip_size;
__le16 bitfields1;
......@@ -168,7 +169,7 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13
};
/* Firmware data for L2-EDPM packet. */
/* Firmware data for L2-EDPM packet */
struct eth_edpm_fw_data {
struct eth_tx_data_1st_bd data_1st_bd;
struct eth_tx_data_2nd_bd data_2nd_bd;
......@@ -179,7 +180,7 @@ struct eth_fast_path_cqe_fw_debug {
__le16 reserved2;
};
/* tunneling parsing flags */
/* Tunneling parsing flags */
struct eth_tunnel_parsing_flags {
u8 flags;
#define ETH_TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3
......@@ -207,7 +208,7 @@ struct eth_pmd_flow_flags {
#define ETH_PMD_FLOW_FLAGS_RESERVED_SHIFT 2
};
/* Regular ETH Rx FP CQE. */
/* Regular ETH Rx FP CQE */
struct eth_fast_path_rx_reg_cqe {
u8 type;
u8 bitfields;
......@@ -231,7 +232,7 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-continue ETH Rx FP CQE. */
/* TPA-continue ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_cont_cqe {
u8 type;
u8 tpa_agg_index;
......@@ -243,7 +244,7 @@ struct eth_fast_path_rx_tpa_cont_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-end ETH Rx FP CQE. */
/* TPA-end ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_end_cqe {
u8 type;
u8 tpa_agg_index;
......@@ -259,7 +260,7 @@ struct eth_fast_path_rx_tpa_end_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
/* TPA-start ETH Rx FP CQE. */
/* TPA-start ETH Rx FP CQE */
struct eth_fast_path_rx_tpa_start_cqe {
u8 type;
u8 bitfields;
......@@ -295,7 +296,7 @@ struct eth_rx_bd {
struct regpair addr;
};
/* regular ETH Rx SP CQE */
/* Regular ETH Rx SP CQE */
struct eth_slow_path_rx_cqe {
u8 type;
u8 ramrod_cmd_id;
......@@ -306,7 +307,7 @@ struct eth_slow_path_rx_cqe {
struct eth_pmd_flow_flags pmd_flags;
};
/* union for all ETH Rx CQE types */
/* Union for all ETH Rx CQE types */
union eth_rx_cqe {
struct eth_fast_path_rx_reg_cqe fast_path_regular;
struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start;
......@@ -366,7 +367,7 @@ struct eth_tx_2nd_bd {
struct eth_tx_data_2nd_bd data;
};
/* The parsing information data for the third tx bd of a given packet. */
/* The parsing information data for the third tx bd of a given packet */
struct eth_tx_data_3rd_bd {
__le16 lso_mss;
__le16 bitfields;
......@@ -389,7 +390,7 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
/* Complementary information for the regular tx bd of a given packet. */
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
__le16 bitfields;
......@@ -448,4 +449,16 @@ struct eth_db_data {
__le16 bd_prod;
};
/* RSS hash type */
enum rss_hash_type {
RSS_HASH_TYPE_DEFAULT = 0,
RSS_HASH_TYPE_IPV4 = 1,
RSS_HASH_TYPE_TCP_IPV4 = 2,
RSS_HASH_TYPE_IPV6 = 3,
RSS_HASH_TYPE_TCP_IPV6 = 4,
RSS_HASH_TYPE_UDP_IPV4 = 5,
RSS_HASH_TYPE_UDP_IPV6 = 6,
MAX_RSS_HASH_TYPE
};
#endif /* __ETH_COMMON__ */
......@@ -29,9 +29,12 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#ifndef __IWARP_COMMON__
#define __IWARP_COMMON__
#include <linux/qed/rdma_common.h>
/************************/
/* IWARP FW CONSTANTS */
/************************/
......
......@@ -32,6 +32,7 @@
#ifndef __RDMA_COMMON__
#define __RDMA_COMMON__
/************************/
/* RDMA FW CONSTANTS */
/************************/
......
......@@ -33,6 +33,10 @@
#ifndef __ROCE_COMMON__
#define __ROCE_COMMON__
/************************/
/* ROCE FW CONSTANTS */
/************************/
#define ROCE_REQ_MAX_INLINE_DATA_SIZE (256)
#define ROCE_REQ_MAX_SINGLE_SQ_WQE_SIZE (288)
......@@ -40,6 +44,7 @@
#define ROCE_DCQCN_NP_MAX_QPS (64)
#define ROCE_DCQCN_RP_MAX_QPS (64)
/* Affiliated asynchronous events / errors enumeration */
enum roce_async_events_type {
ROCE_ASYNC_EVENT_NONE = 0,
ROCE_ASYNC_EVENT_COMM_EST = 1,
......
......@@ -33,6 +33,10 @@
#ifndef __STORAGE_COMMON__
#define __STORAGE_COMMON__
/*********************/
/* SCSI CONSTANTS */
/*********************/
#define NUM_OF_CMDQS_CQS (NUM_OF_GLOBAL_QUEUES / 2)
#define BDQ_NUM_RESOURCES (4)
......@@ -42,34 +46,40 @@
#define SCSI_NUM_SGES_SLOW_SGL_THR 8
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
#define BDQ_MAX_EXTERNAL_RING_SIZE BIT(15)
/* SCSI buffer descriptor */
struct scsi_bd {
struct regpair address;
struct regpair opaque;
};
/* Scsi Drv BDQ struct */
struct scsi_bdq_ram_drv_data {
__le16 external_producer;
__le16 reserved0[3];
};
/* SCSI SGE entry */
struct scsi_sge {
struct regpair sge_addr;
__le32 sge_len;
__le32 reserved;
};
/* Cached SGEs section */
struct scsi_cached_sges {
struct scsi_sge sge[4];
};
/* Scsi Drv CMDQ struct */
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
__le32 reserved1;
};
/* Common SCSI init params passed by driver to FW in function init ramrod */
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
......@@ -77,6 +87,7 @@ struct scsi_init_func_params {
u8 reserved2[12];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
struct scsi_init_func_queues {
struct regpair glbl_q_params_addr;
__le16 rq_buffer_size;
......@@ -107,16 +118,19 @@ struct scsi_init_func_queues {
__le32 reserved1;
};
/* Scsi Drv BDQ Data struct (2 BDQ IDs: 0 - RQ, 1 - Immediate Data) */
struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
/* SCSI SGL types */
enum scsi_sgl_mode {
SCSI_TX_SLOW_SGL,
SCSI_FAST_SGL,
MAX_SCSI_SGL_MODE
};
/* SCSI SGL parameters */
struct scsi_sgl_params {
struct regpair sgl_addr;
__le32 sgl_total_length;
......@@ -126,6 +140,7 @@ struct scsi_sgl_params {
u8 reserved;
};
/* SCSI terminate connection params */
struct scsi_terminate_extra_params {
__le16 unsolicited_cq_count;
__le16 cmdq_count;
......
......@@ -33,8 +33,13 @@
#ifndef __TCP_COMMON__
#define __TCP_COMMON__
/********************/
/* TCP FW CONSTANTS */
/********************/
#define TCP_INVALID_TIMEOUT_VAL -1
/* OOO opaque data received from LL2 */
struct ooo_opaque {
__le32 cid;
u8 drop_isle;
......@@ -43,25 +48,29 @@ struct ooo_opaque {
u8 ooo_isle;
};
/* tcp connect mode enum */
enum tcp_connect_mode {
TCP_CONNECT_ACTIVE,
TCP_CONNECT_PASSIVE,
MAX_TCP_CONNECT_MODE
};
/* tcp function init parameters */
struct tcp_init_params {
__le32 two_msl_timer;
__le16 tx_sws_timer;
u8 maxfinrt;
u8 max_fin_rt;
u8 reserved[9];
};
/* tcp IPv4/IPv6 enum */
enum tcp_ip_version {
TCP_IPV4,
TCP_IPV6,
MAX_TCP_IP_VERSION
};
/* tcp offload parameters */
struct tcp_offload_params {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
......@@ -132,6 +141,7 @@ struct tcp_offload_params {
__le32 reserved3[2];
};
/* tcp offload parameters */
struct tcp_offload_params_opt2 {
__le16 local_mac_addr_lo;
__le16 local_mac_addr_mid;
......@@ -166,6 +176,7 @@ struct tcp_offload_params_opt2 {
__le32 reserved1[22];
};
/* tcp IPv4/IPv6 enum */
enum tcp_seg_placement_event {
TCP_EVENT_ADD_PEN,
TCP_EVENT_ADD_NEW_ISLE,
......@@ -177,6 +188,7 @@ enum tcp_seg_placement_event {
MAX_TCP_SEG_PLACEMENT_EVENT
};
/* tcp init parameters */
struct tcp_update_params {
__le16 flags;
#define TCP_UPDATE_PARAMS_REMOTE_MAC_ADDR_CHANGED_MASK 0x1
......@@ -226,6 +238,7 @@ struct tcp_update_params {
u8 reserved1[7];
};
/* toe upload parameters */
struct tcp_upload_params {
__le32 rcv_next;
__le32 snd_una;
......