Commit 0500a70d authored by Michal Kalderon, committed by David S. Miller

qed: FW 8.42.2.0 HSI changes

This patch contains several HSI changes. The changes are part of
features such as RDMA VF and OVS. The patch also contains a fix to
how the init code determines whether the DMAE is ready to be used
(see the sketch below).
Signed-off-by: Ariel Elior <ariel.elior@marvell.com>
Signed-off-by: Michal Kalderon <michal.kalderon@marvell.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6459d936
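
For context on the DMAE fix described in the commit message: in the qed_init_run() hunk below, b_dmae is no longer taken from the INIT_OP_IF_PHASE command's DMAE_ENABLE field; it now starts as (phase != PHASE_ENGINE) and is switched on during the engine phase once the DMAE_READY_CB init callback has run. What follows is a minimal, self-contained sketch of that decision logic only; the types and helpers (struct init_cmd, run_init_cmds) are simplified stand-ins, not the driver's own definitions.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum init_phase { PHASE_ENGINE, PHASE_PORT, PHASE_PF };
enum init_callback_id { DMAE_READY_CB, OTHER_CB };

/* One init command: either a callback op or some other op (stand-in type). */
struct init_cmd {
	bool is_callback;
	enum init_callback_id callback_id;
};

/* Returns whether DMAE is considered usable once the commands have run. */
static bool run_init_cmds(enum init_phase phase,
			  const struct init_cmd *cmds, size_t n)
{
	/* Outside the engine phase DMAE is already usable; during the
	 * engine phase it only becomes usable after DMAE_READY_CB runs.
	 */
	bool b_dmae = (phase != PHASE_ENGINE);
	size_t i;

	for (i = 0; i < n; i++) {
		if (cmds[i].is_callback &&
		    phase == PHASE_ENGINE &&
		    cmds[i].callback_id == DMAE_READY_CB)
			b_dmae = true;
		/* ... the real loop would issue the init write here,
		 * using DMAE only while b_dmae is true ...
		 */
	}
	return b_dmae;
}

int main(void)
{
	const struct init_cmd engine_cmds[] = {
		{ .is_callback = false, .callback_id = OTHER_CB },
		{ .is_callback = true,  .callback_id = DMAE_READY_CB },
	};

	printf("engine phase after DMAE_READY_CB: %d\n",
	       run_init_cmds(PHASE_ENGINE, engine_cmds, 2));
	printf("port phase with no callbacks:    %d\n",
	       run_init_cmds(PHASE_PORT, NULL, 0));
	return 0;
}

In the driver itself the analogous flag is passed down to the per-command write helpers, so DMAE is used only once it is known to be ready and direct register writes are used before that.
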
......@@ -490,10 +490,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
......@@ -522,7 +522,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
......@@ -533,6 +532,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
if (phase == PHASE_ENGINE &&
cmd->callback.callback_id == DMAE_READY_CB)
b_dmae = true;
break;
}
......
......@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define GRCBASE_MCP 0xe00000
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
......
......@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
......
......@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);
cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
if (unlikely(cqe->ext_bd_len_list[1])) {
if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
"Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
......
......@@ -76,7 +76,6 @@
#define FW_ASSERT_GENERAL_ATTN_IDX 32
#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
......@@ -139,10 +138,10 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
......@@ -229,6 +228,7 @@
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
......@@ -406,6 +406,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
......@@ -436,8 +437,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
......@@ -450,8 +449,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
......@@ -741,6 +738,8 @@ enum protocol_type {
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED1,
PROTOCOLID_RDMA,
PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
......@@ -761,6 +760,10 @@ union rdma_eqe_data {
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
struct tstorm_queue_zone {
__le32 reserved[2];
};
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
......@@ -883,8 +886,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
......
......@@ -38,9 +38,11 @@
/********************/
#define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 10
#define ETH_HSI_VER_MINOR 11
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
/* Maximum number of pinned L2 connections (CIDs) */
#define ETH_PINNED_CONN_MAX_NUM 32
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
......@@ -61,6 +63,7 @@
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
......@@ -75,9 +78,8 @@
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 12
#define ETH_RX_BD_THRESHOLD 16
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
......@@ -96,24 +98,24 @@
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
enum dest_port_mode {
DEST_PORT_PHY,
DEST_PORT_LOOPBACK,
DEST_PORT_PHY_LOOPBACK,
DEST_PORT_DROP,
MAX_DEST_PORT_MODE
enum dst_port_mode {
DST_PORT_PHY,
DST_PORT_LOOPBACK,
DST_PORT_PHY_LOOPBACK,
DST_PORT_DROP,
MAX_DST_PORT_MODE
};
/* Ethernet address type */
......@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
......@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
u8 reserved;
__le16 flow_id;
u8 reserved1[11];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
......@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
__le16 flow_id;
u8 reserved;
__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
......@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
/* The parsing information data for the fourth tx bd of a given packet. */
struct eth_tx_data_4th_bd {
u8 dst_vport_id;
u8 reserved4;
__le16 bitfields;
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The fourth tx bd of a given packet */
struct eth_tx_4th_bd {
struct regpair addr; /* Single continuous buffer */
__le16 nbytes; /* Number of bytes in this BD */
struct eth_tx_data_4th_bd data; /* Parsing information data */
};
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
......@@ -431,6 +458,7 @@ union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
......@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
/* Mstorm Queue Zone */
struct mstorm_eth_queue_zone {
struct eth_rx_prod_data rx_producers;
__le32 reserved[3];
};
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
......