Commit 8e5aa617 authored by David S. Miller

Merge branch 'qed-Utilize-FW-8.42.2.0'

Michal Kalderon says:

====================
qed*: Utilize FW 8.42.2.0

This FW contains several fixes and features; the main ones are listed below.
We have taken into consideration past comments on previously uploaded FW
versions and have split this series into smaller patches to ease review.

- RoCE
	- SRIOV support
	- Fixes in the following flows:
		- latency optimization flow for inline WQEs
		- iWARP OOO packed DDPs flow
		- tx-dif workaround calculations flow
		- XRC-SRQ exceeding cache num

- iSCSI
	- Fixes:
		- iSCSI TCP out-of-order handling
		- iSCSI retransmit flow

- FCoE
	- Fixes:
		- upload + cleanup flows

- Debug
	- Better handling of data extraction during traffic
	- ILT dump -> dumping the host memory used by the chip (see the usage
	  sketch below)
	- MDUMP -> collecting debug data on a system crash and extracting it
	  after reboot
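
For reference, the new ILT dump is exposed through qed_dbg_ilt_size() and
qed_dbg_ilt() (declared in qed_debug.h below). A minimal caller sketch,
assuming the usual size-then-dump pattern; the wrapper function and its
buffer handling are illustrative, not part of this series:

	/* Hypothetical caller: size the dump, allocate, then extract it. */
	static int example_dump_ilt(struct qed_dev *cdev)
	{
		u32 num_dumped_bytes = 0;
		void *buf;
		int size, rc;

		size = qed_dbg_ilt_size(cdev);
		if (size <= 0)
			return -EINVAL;

		buf = vzalloc(size);
		if (!buf)
			return -ENOMEM;

		rc = qed_dbg_ilt(cdev, buf, &num_dumped_bytes);
		if (!rc)
			pr_info("ILT dump: %u bytes\n", num_dumped_bytes);

		vfree(buf);
		return rc;
	}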

Patches prefixed with "FW 8.42.2.0" are required to work with the binary
8.42.2.0 FW, whereas the rest are FW related but do not require the new
binary.

Changes from V2
---------------
- Move the FW version patch to the start of the series to maintain minimal
  compatibility
- Fix some kbuild errors:
	- frame size larger than 1024 (Queue Manager patch - remove a
	  redundant field from a struct)
	- sparse warning on endianness (DMAE patch - wrong use of __le32 for a
	  field used only on the host, should be u32; see the sketch below)
	- static should be used for some functions (debug feature ILT and
	  MDUMP patches)
Reported-by: kbuild test robot <lkp@intel.com>
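
To illustrate the endianness fix pattern (the struct and field names here
are illustrative, not the actual DMAE code): a field that only ever lives
in host memory should be a plain u32, with the conversion to __le32 done
once at the device boundary:

	struct example_dmae_state {
		u32 completion_word;	/* host-only: plain u32, no sparse warning */
	};

	static void example_post(struct example_dmae_state *s, __le32 *dev_dst)
	{
		/* Convert exactly once, where the value is handed to the device. */
		*dev_dst = cpu_to_le32(s->completion_word);
	}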

Changes from V1
---------------
- Remove epoch + kernel version from the device debug dump
- Don't bump the driver version
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3d4864b3 2d22bc83
......@@ -253,7 +253,8 @@ enum qed_resources {
QED_VLAN,
QED_RDMA_CNQ_RAM,
QED_ILT,
QED_LL2_QUEUE,
QED_LL2_RAM_QUEUE,
QED_LL2_CTX_QUEUE,
QED_CMDQS_CQS,
QED_RDMA_STATS_QUEUE,
QED_BDQ,
......@@ -461,6 +462,8 @@ struct qed_fw_data {
const u8 *modes_tree_buf;
union init_op *init_ops;
const u32 *arr_data;
const u32 *fw_overlays;
u32 fw_overlays_len;
u32 init_ops_size;
};
......@@ -531,6 +534,23 @@ struct qed_nvm_image_info {
bool valid;
};
enum qed_hsi_def_type {
QED_HSI_DEF_MAX_NUM_VFS,
QED_HSI_DEF_MAX_NUM_L2_QUEUES,
QED_HSI_DEF_MAX_NUM_PORTS,
QED_HSI_DEF_MAX_SB_PER_PATH,
QED_HSI_DEF_MAX_NUM_PFS,
QED_HSI_DEF_MAX_NUM_VPORTS,
QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
QED_HSI_DEF_MAX_QM_TX_QUEUES,
QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
QED_HSI_DEF_MAX_PBF_CMD_LINES,
QED_HSI_DEF_MAX_BTB_BLOCKS,
QED_NUM_HSI_DEFS
};
#define DRV_MODULE_VERSION \
__stringify(QED_MAJOR_VERSION) "." \
__stringify(QED_MINOR_VERSION) "." \
......@@ -646,6 +666,7 @@ struct qed_hwfn {
struct dbg_tools_data dbg_info;
void *dbg_user_info;
struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
/* PWM region specific data */
u16 wid_count;
......@@ -668,6 +689,7 @@ struct qed_hwfn {
/* Nvm images number and attributes */
struct qed_nvm_image_info nvm_info;
struct phys_mem_desc *fw_overlay_mem;
struct qed_ptt *p_arfs_ptt;
struct qed_simd_fp_handler simd_proto_handler[64];
......@@ -796,8 +818,8 @@ struct qed_dev {
u8 cache_shift;
/* Init */
const struct iro *iro_arr;
#define IRO (p_hwfn->cdev->iro_arr)
const u32 *iro_arr;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
/* HW functions */
u8 num_hwfns;
......@@ -856,6 +878,8 @@ struct qed_dev {
struct qed_cb_ll2_info *ll2;
u8 ll2_mac_address[ETH_ALEN];
#endif
struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
bool disable_ilt_dump;
DECLARE_HASHTABLE(connections, 10);
const struct firmware *firmware;
......@@ -868,16 +892,35 @@ struct qed_dev {
bool iwarp_cmt;
};
#define NUM_OF_VFS(dev) (QED_IS_BB(dev) ? MAX_NUM_VFS_BB \
: MAX_NUM_VFS_K2)
#define NUM_OF_L2_QUEUES(dev) (QED_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \
: MAX_NUM_L2_QUEUES_K2)
#define NUM_OF_PORTS(dev) (QED_IS_BB(dev) ? MAX_NUM_PORTS_BB \
: MAX_NUM_PORTS_K2)
#define NUM_OF_SBS(dev) (QED_IS_BB(dev) ? MAX_SB_PER_PATH_BB \
: MAX_SB_PER_PATH_K2)
#define NUM_OF_ENG_PFS(dev) (QED_IS_BB(dev) ? MAX_NUM_PFS_BB \
: MAX_NUM_PFS_K2)
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_VFS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
/**
* @brief qed_concrete_to_sw_fid - get the sw function id from
......
......@@ -242,4 +242,134 @@ int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
#define QED_CTX_FL_MEM 1
int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
u32 tid, u8 ctx_type, void **task_ctx);
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES PROTOCOLID_COMMON
#define NUM_TASK_TYPES 2
#define NUM_TASK_PF_SEGMENTS 4
#define NUM_TASK_VF_SEGMENTS 1
/* PF per protocol configuration object */
#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS)
struct qed_tid_seg {
u32 count;
u8 type;
bool has_fl_mem;
};
struct qed_conn_type_cfg {
u32 cid_count;
u32 cids_per_vf;
struct qed_tid_seg tid_seg[TASK_SEGMENTS];
};
/* ILT Client configuration:
 * per connection type (protocol) resources (cids, tis, vf cids etc.).
 * 1 block for the connection context (CDUC), and for each task context we
 * need two blocks: one for the regular task context and one for the force
 * load memory.
 */
#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK (0)
#define SRQ_BLK (0)
#define CDUT_SEG_BLK(n) (1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
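/* Worked example of the layout above (for illustration only): with
 * NUM_TASK_PF_SEGMENTS = 4, ILT_CLI_PF_BLOCKS = 1 + 4 * 2 = 9. Block 0 is
 * the connection context (CDUC_BLK), CDUT_SEG_BLK(0..3) are blocks 1..4,
 * and CDUT_FL_SEG_BLK(0..3, PF) are the force load blocks 5..8.
 */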
struct ilt_cfg_pair {
u32 reg;
u32 val;
};
struct qed_ilt_cli_blk {
u32 total_size; /* 0 means not active */
u32 real_size_in_page;
u32 start_line;
u32 dynamic_line_offset;
u32 dynamic_line_cnt;
};
struct qed_ilt_client_cfg {
bool active;
/* ILT boundaries */
struct ilt_cfg_pair first;
struct ilt_cfg_pair last;
struct ilt_cfg_pair p_size;
/* ILT client blocks for PF */
struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
u32 pf_total_lines;
/* ILT client blocks for VFs */
struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
u32 vf_total_lines;
};
struct qed_cid_acquired_map {
u32 start_cid;
u32 max_count;
unsigned long *cid_map;
};
struct qed_src_t2 {
struct phys_mem_desc *dma_mem;
u32 num_pages;
u64 first_free;
u64 last_free;
};
struct qed_cxt_mngr {
/* Per protocol configuration */
struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
/* computed ILT structure */
struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
/* Task type sizes */
u32 task_type_size[NUM_TASK_TYPES];
/* total number of VFs for this hwfn -
* ALL VFs are symmetric in terms of HW resources
*/
u32 vf_count;
u32 first_vf_in_pf;
/* Acquired CIDs */
struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
struct qed_cid_acquired_map
acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
/* ILT shadow table */
struct phys_mem_desc *ilt_shadow;
u32 ilt_shadow_size;
u32 pf_start_line;
/* Mutex for a dynamic ILT allocation */
struct mutex mutex;
/* SRC T2 */
struct qed_src_t2 src_t2;
u32 t2_num_pages;
u64 first_free;
u64 last_free;
/* total number of SRQ's for this hwfn */
u32 srq_count;
/* Maximal number of L2 steering filters */
u32 arfs_count;
u8 task_type_id;
u16 task_ctx_size;
u16 conn_ctx_size;
};
u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
#endif
......@@ -14,11 +14,13 @@ enum qed_dbg_features {
DBG_FEATURE_IGU_FIFO,
DBG_FEATURE_PROTECTION_OVERRIDE,
DBG_FEATURE_FW_ASSERTS,
DBG_FEATURE_ILT,
DBG_FEATURE_NUM
};
/* Forward Declaration */
struct qed_dev;
struct qed_hwfn;
int qed_dbg_grc(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_grc_size(struct qed_dev *cdev);
......@@ -37,6 +39,8 @@ int qed_dbg_protection_override_size(struct qed_dev *cdev);
int qed_dbg_fw_asserts(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_fw_asserts_size(struct qed_dev *cdev);
int qed_dbg_ilt(struct qed_dev *cdev, void *buffer, u32 *num_dumped_bytes);
int qed_dbg_ilt_size(struct qed_dev *cdev);
int qed_dbg_mcp_trace(struct qed_dev *cdev, void *buffer,
u32 *num_dumped_bytes);
int qed_dbg_mcp_trace_size(struct qed_dev *cdev);
......
......@@ -230,30 +230,6 @@ enum qed_dmae_address_type_t {
QED_DMAE_ADDRESS_GRC
};
/* Value of flags: if the QED_DMAE_FLAG_RW_REPL_SRC flag is set, the
 * source is a block of length DMAE_MAX_RW_SIZE and the
 * destination is larger, the source block will be duplicated as
 * many times as required to fill the destination block. This is
 * used mostly to write a zeroed buffer to a destination address
 * using DMA.
 */
#define QED_DMAE_FLAG_RW_REPL_SRC 0x00000001
#define QED_DMAE_FLAG_VF_SRC 0x00000002
#define QED_DMAE_FLAG_VF_DST 0x00000004
#define QED_DMAE_FLAG_COMPLETION_DST 0x00000008
#define QED_DMAE_FLAG_PORT 0x00000010
#define QED_DMAE_FLAG_PF_SRC 0x00000020
#define QED_DMAE_FLAG_PF_DST 0x00000040
struct qed_dmae_params {
u32 flags; /* consists of QED_DMAE_FLAG_* values */
u8 src_vfid;
u8 dst_vfid;
u8 port_id;
u8 src_pfid;
u8 dst_pfid;
};
/**
* @brief qed_dmae_host2grc - copy data from source addr to
* dmae registers using the given ptt
......
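
The QED_DMAE_FLAG_* bits removed above are replaced by bitfields in the
FW HSI's struct qed_dmae_params; callers now build flags with SET_FIELD,
as in the qed_sriov.c changes later in this series. A minimal sketch
(abs_vf_id is a caller-provided VF id):

	struct qed_dmae_params params;

	memset(&params, 0, sizeof(params));
	SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
	params.dst_vfid = abs_vf_id;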
......@@ -167,6 +167,8 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
goto err;
}
p_cxt = cxt_info.p_cxt;
memset(p_cxt, 0, sizeof(*p_cxt));
SET_FIELD(p_cxt->tstorm_ag_context.flags3,
E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
......
......@@ -393,7 +393,7 @@ u32 qed_vfid_to_concrete(struct qed_hwfn *p_hwfn, u8 vfid)
/* DMAE */
#define QED_DMAE_FLAGS_IS_SET(params, flag) \
((params) != NULL && ((params)->flags & QED_DMAE_FLAG_##flag))
((params) != NULL && GET_FIELD((params)->flags, QED_DMAE_PARAMS_##flag))
static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
const u8 is_src_type_grc,
......@@ -408,62 +408,55 @@ static void qed_dmae_opcode(struct qed_hwfn *p_hwfn,
* 0- The source is the PCIe
* 1- The source is the GRC.
*/
opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC
: DMAE_CMD_SRC_MASK_PCIE) <<
DMAE_CMD_SRC_SHIFT;
src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_SRC) ?
p_params->src_pfid : p_hwfn->rel_pf_id;
opcode |= ((src_pfid & DMAE_CMD_SRC_PF_ID_MASK) <<
DMAE_CMD_SRC_PF_ID_SHIFT);
SET_FIELD(opcode, DMAE_CMD_SRC,
(is_src_type_grc ? dmae_cmd_src_grc : dmae_cmd_src_pcie));
src_pfid = QED_DMAE_FLAGS_IS_SET(p_params, SRC_PF_VALID) ?
p_params->src_pfid : p_hwfn->rel_pf_id;
SET_FIELD(opcode, DMAE_CMD_SRC_PF_ID, src_pfid);
/* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */
opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC
: DMAE_CMD_DST_MASK_PCIE) <<
DMAE_CMD_DST_SHIFT;
dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, PF_DST) ?
p_params->dst_pfid : p_hwfn->rel_pf_id;
opcode |= ((dst_pfid & DMAE_CMD_DST_PF_ID_MASK) <<
DMAE_CMD_DST_PF_ID_SHIFT);
SET_FIELD(opcode, DMAE_CMD_DST,
(is_dst_type_grc ? dmae_cmd_dst_grc : dmae_cmd_dst_pcie));
dst_pfid = QED_DMAE_FLAGS_IS_SET(p_params, DST_PF_VALID) ?
p_params->dst_pfid : p_hwfn->rel_pf_id;
SET_FIELD(opcode, DMAE_CMD_DST_PF_ID, dst_pfid);
/* Whether to write a completion word to the completion destination:
* 0-Do not write a completion word
* 1-Write the completion word
*/
opcode |= (DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT);
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_COMP_WORD_EN, 1);
SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
if (QED_DMAE_FLAGS_IS_SET(p_params, COMPLETION_DST))
opcode |= (1 << DMAE_CMD_COMP_FUNC_SHIFT);
SET_FIELD(opcode, DMAE_CMD_COMP_FUNC, 1);
opcode |= (DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT);
/* swapping mode 3 - big endian */
SET_FIELD(opcode, DMAE_CMD_ENDIANITY_MODE, DMAE_CMD_ENDIANITY);
port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT)) ?
p_params->port_id : p_hwfn->port_id;
opcode |= (port_id << DMAE_CMD_PORT_ID_SHIFT);
port_id = (QED_DMAE_FLAGS_IS_SET(p_params, PORT_VALID)) ?
p_params->port_id : p_hwfn->port_id;
SET_FIELD(opcode, DMAE_CMD_PORT_ID, port_id);
/* reset source address in next go */
opcode |= (DMAE_CMD_SRC_ADDR_RESET_MASK <<
DMAE_CMD_SRC_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_SRC_ADDR_RESET, 1);
/* reset dest address in next go */
opcode |= (DMAE_CMD_DST_ADDR_RESET_MASK <<
DMAE_CMD_DST_ADDR_RESET_SHIFT);
SET_FIELD(opcode, DMAE_CMD_DST_ADDR_RESET, 1);
/* SRC/DST VFID: all 1's - pf, otherwise VF id */
if (QED_DMAE_FLAGS_IS_SET(p_params, VF_SRC)) {
opcode |= 1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT;
opcode_b |= p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT;
if (QED_DMAE_FLAGS_IS_SET(p_params, SRC_VF_VALID)) {
SET_FIELD(opcode, DMAE_CMD_SRC_VF_ID_VALID, 1);
SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, p_params->src_vfid);
} else {
opcode_b |= DMAE_CMD_SRC_VF_ID_MASK <<
DMAE_CMD_SRC_VF_ID_SHIFT;
SET_FIELD(opcode_b, DMAE_CMD_SRC_VF_ID, 0xFF);
}
if (QED_DMAE_FLAGS_IS_SET(p_params, VF_DST)) {
opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT;
opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT;
if (QED_DMAE_FLAGS_IS_SET(p_params, DST_VF_VALID)) {
SET_FIELD(opcode, DMAE_CMD_DST_VF_ID_VALID, 1);
SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, p_params->dst_vfid);
} else {
opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT;
SET_FIELD(opcode_b, DMAE_CMD_DST_VF_ID, 0xFF);
}
p_hwfn->dmae_info.p_dmae_cmd->opcode = cpu_to_le32(opcode);
......
......@@ -54,15 +54,15 @@ static u32 pxp_global_win[] = {
0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */
0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */
0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */
0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */
0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */
0,
0,
0,
0x1d02, /* win 6: addr=0x1d02000, size=4096 bytes */
0x1d80, /* win 7: addr=0x1d80000, size=4096 bytes */
0x1d81, /* win 8: addr=0x1d81000, size=4096 bytes */
0x1d82, /* win 9: addr=0x1d82000, size=4096 bytes */
0x1e00, /* win 10: addr=0x1e00000, size=4096 bytes */
0x1e01, /* win 11: addr=0x1e01000, size=4096 bytes */
0x1e80, /* win 12: addr=0x1e80000, size=4096 bytes */
0x1f00, /* win 13: addr=0x1f00000, size=4096 bytes */
0x1c08, /* win 14: addr=0x1c08000, size=4096 bytes */
0,
0,
0,
......@@ -74,15 +74,6 @@ void qed_init_iro_array(struct qed_dev *cdev)
cdev->iro_arr = iro_arr;
}
/* Runtime configuration helpers */
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn)
{
int i;
for (i = 0; i < RUNTIME_ARRAY_SIZE; i++)
p_hwfn->rt_data.b_valid[i] = false;
}
void qed_init_store_rt_reg(struct qed_hwfn *p_hwfn, u32 rt_offset, u32 val)
{
p_hwfn->rt_data.init_val[rt_offset] = val;
......@@ -106,7 +97,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
{
u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset];
bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset];
u16 i, segment;
u16 i, j, segment;
int rc = 0;
/* Since not all RT entries are initialized, go over the RT and
......@@ -121,6 +112,7 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
*/
if (!b_must_dmae) {
qed_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]);
p_valid[i] = false;
continue;
}
......@@ -135,6 +127,10 @@ static int qed_init_rt(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
/* invalidate after writing */
for (j = i; j < i + segment; j++)
p_valid[j] = false;
/* Jump over the entire segment, including invalid entry */
i += segment;
}
......@@ -215,7 +211,7 @@ static int qed_init_fill_dmae(struct qed_hwfn *p_hwfn,
* 3. p_hwfb->temp_data,
* 4. fill_count
*/
params.flags = QED_DMAE_FLAG_RW_REPL_SRC;
SET_FIELD(params.flags, QED_DMAE_PARAMS_RW_REPL_SRC, 0x1);
return qed_dmae_host2grc(p_hwfn, p_ptt,
(uintptr_t)(&zero_buffer[0]),
addr, fill_count, &params);
......@@ -490,10 +486,10 @@ static u32 qed_init_cmd_phase(struct qed_hwfn *p_hwfn,
int qed_init_run(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int phase, int phase_id, int modes)
{
bool b_dmae = (phase != PHASE_ENGINE);
struct qed_dev *cdev = p_hwfn->cdev;
u32 cmd_num, num_init_ops;
union init_op *init_ops;
bool b_dmae = false;
int rc = 0;
num_init_ops = cdev->fw_data->init_ops_size;
......@@ -522,7 +518,6 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_IF_PHASE:
cmd_num += qed_init_cmd_phase(p_hwfn, &cmd->if_phase,
phase, phase_id);
b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE);
break;
case INIT_OP_DELAY:
/* qed_init_run is always invoked from
......@@ -533,6 +528,9 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
case INIT_OP_CALLBACK:
rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
if (phase == PHASE_ENGINE &&
cmd->callback.callback_id == DMAE_READY_CB)
b_dmae = true;
break;
}
......@@ -587,5 +585,10 @@ int qed_init_fw_data(struct qed_dev *cdev, const u8 *data)
len = buf_hdr[BIN_BUF_INIT_CMD].length;
fw->init_ops_size = len / sizeof(struct init_raw_op);
offset = buf_hdr[BIN_BUF_INIT_OVERLAYS].offset;
fw->fw_overlays = (u32 *)(data + offset);
len = buf_hdr[BIN_BUF_INIT_OVERLAYS].length;
fw->fw_overlays_len = len;
return 0;
}
......@@ -80,14 +80,6 @@ int qed_init_alloc(struct qed_hwfn *p_hwfn);
*/
void qed_init_free(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_clear_rt_data - Clears the runtime init array.
*
*
* @param p_hwfn
*/
void qed_init_clear_rt_data(struct qed_hwfn *p_hwfn);
/**
* @brief qed_init_store_rt_reg - Store a configuration value in the RT array.
*
......
......@@ -204,17 +204,14 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
return -EINVAL;
}
SET_FIELD(p_init->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, ISCSI_SLOW_PATH_LAYER_CODE);
p_init->hdr.op_code = ISCSI_RAMROD_CMD_ID_INIT_FUNC;
val = p_params->half_way_close_timeout;
p_init->half_way_close_timeout = cpu_to_le16(val);
p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->ll2_rx_queue_id =
p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] +
p_params->ll2_ooo_queue_id;
p_init->func_params.log_page_size = p_params->log_page_size;
val = p_params->num_tasks;
......@@ -331,12 +328,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
p_conn->physical_q1 = cpu_to_le16(physical_q);
p_ramrod->iscsi.physical_q1 = cpu_to_le16(physical_q);
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_OFFLOAD_CONN;
SET_FIELD(p_ramrod->hdr.flags, ISCSI_SLOW_PATH_HDR_LAYER_CODE,
p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
DMA_REGPAIR_LE(p_ramrod->iscsi.sq_pbl_addr, p_conn->sq_pbl_addr);
......@@ -492,12 +484,8 @@ static int qed_sp_iscsi_conn_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_UPDATE_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->flags = p_conn->update_flag;
p_ramrod->max_seq_size = cpu_to_le32(p_conn->max_seq_size);
dval = p_conn->max_recv_pdu_length;
......@@ -537,12 +525,8 @@ qed_sp_iscsi_mac_update(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_mac_update;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_MAC_UPDATE;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
ucval = p_conn->remote_mac[1];
((u8 *)(&p_ramrod->remote_mac_addr_hi))[0] = ucval;
ucval = p_conn->remote_mac[0];
......@@ -583,12 +567,8 @@ static int qed_sp_iscsi_conn_terminate(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.iscsi_conn_terminate;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_TERMINATION_CONN;
SET_FIELD(p_ramrod->hdr.flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
p_ramrod->conn_id = cpu_to_le16(p_conn->conn_id);
p_ramrod->fw_cid = cpu_to_le32(p_conn->icid);
p_ramrod->abortive = p_conn->abortive_dsconnect;
DMA_REGPAIR_LE(p_ramrod->query_params_addr,
......@@ -603,7 +583,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_slow_path_hdr *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = -EINVAL;
......@@ -621,11 +600,6 @@ static int qed_sp_iscsi_conn_clear_sq(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_empty;
p_ramrod->op_code = ISCSI_RAMROD_CMD_ID_CLEAR_SQ;
SET_FIELD(p_ramrod->flags,
ISCSI_SLOW_PATH_HDR_LAYER_CODE, p_conn->layer_code);
return qed_spq_post(p_hwfn, p_ent, NULL);
}
......@@ -633,7 +607,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
enum spq_mode comp_mode,
struct qed_spq_comp_cb *p_comp_addr)
{
struct iscsi_spe_func_dstry *p_ramrod = NULL;
struct qed_spq_entry *p_ent = NULL;
struct qed_sp_init_data init_data;
int rc = 0;
......@@ -651,9 +624,6 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
p_ramrod = &p_ent->ramrod.iscsi_destroy;
p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;
rc = qed_spq_post(p_hwfn, p_ent, NULL);
qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
......
......@@ -137,8 +137,8 @@ qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
struct iwarp_init_func_ramrod_data *p_ramrod)
{
p_ramrod->iwarp.ll2_ooo_q_index =
RESC_START(p_hwfn, QED_LL2_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
RESC_START(p_hwfn, QED_LL2_RAM_QUEUE) +
p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
......@@ -2651,6 +2651,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
memset(&data, 0, sizeof(data));
data.input.conn_type = QED_LL2_TYPE_IWARP;
/* SYN will use ctx based queues */
data.input.rx_conn_type = QED_LL2_RX_TYPE_CTX;
data.input.mtu = params->max_mtu;
data.input.rx_num_desc = QED_IWARP_LL2_SYN_RX_SIZE;
data.input.tx_num_desc = QED_IWARP_LL2_SYN_TX_SIZE;
......@@ -2683,6 +2685,8 @@ qed_iwarp_ll2_start(struct qed_hwfn *p_hwfn,
/* Start OOO connection */
data.input.conn_type = QED_LL2_TYPE_OOO;
/* OOO/unaligned will use legacy ll2 queues (ram based) */
data.input.rx_conn_type = QED_LL2_RX_TYPE_LEGACY;
data.input.mtu = params->max_mtu;
n_ooo_bufs = (QED_IWARP_MAX_OOO * rcv_wnd_size) /
......
......@@ -962,7 +962,7 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
return rc;
p_ramrod = &p_ent->ramrod.core_rx_queue_start;
memset(p_ramrod, 0, sizeof(*p_ramrod));
p_ramrod->sb_id = cpu_to_le16(qed_int_get_sp_sb_id(p_hwfn));
p_ramrod->sb_index = p_rx->rx_sb_index;
p_ramrod->complete_event_flg = 1;
......@@ -996,6 +996,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
p_ramrod->action_on_error.error_type = action_on_error;
p_ramrod->gsi_offload_flag = p_ll2_conn->input.gsi_enable;
p_ramrod->zero_prod_flg = 1;
return qed_spq_post(p_hwfn, p_ent, NULL);
}
......@@ -1317,6 +1319,25 @@ qed_ll2_set_cbs(struct qed_ll2_info *p_ll2_info, const struct qed_ll2_cbs *cbs)
return 0;
}
static void _qed_ll2_calc_allowed_conns(struct qed_hwfn *p_hwfn,
struct qed_ll2_acquire_data *data,
u8 *start_idx, u8 *last_idx)
{
/* LL2 queue handles will be split as follows:
 * First will be the legacy queues, and then the ctx based queues.
 */
if (data->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
*start_idx = QED_LL2_LEGACY_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF;
} else {
/* QED_LL2_RX_TYPE_CTX */
*start_idx = QED_LL2_CTX_CONN_BASE_PF;
*last_idx = *start_idx +
QED_MAX_NUM_OF_CTX_LL2_CONNS_PF;
}
}
static enum core_error_handle
qed_ll2_get_error_choice(enum qed_ll2_error_handle err)
{
......@@ -1337,14 +1358,16 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
struct qed_hwfn *p_hwfn = cxt;
qed_int_comp_cb_t comp_rx_cb, comp_tx_cb;
struct qed_ll2_info *p_ll2_info = NULL;
u8 i, *p_tx_max;
u8 i, first_idx, last_idx, *p_tx_max;
int rc;
if (!data->p_connection_handle || !p_hwfn->p_ll2_info)
return -EINVAL;
_qed_ll2_calc_allowed_conns(p_hwfn, data, &first_idx, &last_idx);
/* Find a free connection to be used */
for (i = 0; (i < QED_MAX_NUM_OF_LL2_CONNECTIONS); i++) {
for (i = first_idx; i < last_idx; i++) {
mutex_lock(&p_hwfn->p_ll2_info[i].mutex);
if (p_hwfn->p_ll2_info[i].b_active) {
mutex_unlock(&p_hwfn->p_ll2_info[i].mutex);
......@@ -1448,6 +1471,7 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
enum qed_ll2_error_handle error_input;
enum core_error_handle error_mode;
u8 action_on_error = 0;
int rc;
if (!QED_LL2_RX_REGISTERED(p_ll2_conn))
return 0;
......@@ -1461,7 +1485,18 @@ static int qed_ll2_establish_connection_rx(struct qed_hwfn *p_hwfn,
error_mode = qed_ll2_get_error_choice(error_input);
SET_FIELD(action_on_error, CORE_RX_ACTION_ON_ERROR_NO_BUFF, error_mode);
return qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
rc = qed_sp_ll2_rx_queue_start(p_hwfn, p_ll2_conn, action_on_error);
if (rc)
return rc;
if (p_ll2_conn->rx_queue.ctx_based) {
rc = qed_db_recovery_add(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data,
DB_REC_WIDTH_64B, DB_REC_KERNEL);
}
return rc;
}
static void
......@@ -1475,13 +1510,41 @@ qed_ll2_establish_connection_ooo(struct qed_hwfn *p_hwfn,
qed_ooo_submit_rx_buffers(p_hwfn, p_ll2_conn);
}
static inline u8 qed_ll2_handle_to_queue_id(struct qed_hwfn *p_hwfn,
u8 handle,
u8 ll2_queue_type)
{
u8 qid;
if (ll2_queue_type == QED_LL2_RX_TYPE_LEGACY)
return p_hwfn->hw_info.resc_start[QED_LL2_RAM_QUEUE] + handle;
/* QED_LL2_RX_TYPE_CTX
 * FW distinguishes between the legacy queues (ram based) and the
 * ctx based queues by the queue_id.
 * The first MAX_NUM_LL2_RX_RAM_QUEUES queues are legacy
 * and the queue ids above that are ctx based.
 */
qid = p_hwfn->hw_info.resc_start[QED_LL2_CTX_QUEUE] +
MAX_NUM_LL2_RX_RAM_QUEUES;
/* See comment on the acquire connection for how the ll2
 * queue handles are divided.
 */
qid += (handle - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF);
return qid;
}
int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
{
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_info *p_ll2_conn;
struct e4_core_conn_context *p_cxt;
struct qed_ll2_tx_packet *p_pkt;
struct qed_ll2_info *p_ll2_conn;
struct qed_hwfn *p_hwfn = cxt;
struct qed_ll2_rx_queue *p_rx;
struct qed_ll2_tx_queue *p_tx;
struct qed_cxt_info cxt_info;
struct qed_ptt *p_ptt;
int rc = -EINVAL;
u32 i, capacity;
......@@ -1539,13 +1602,46 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
rc = qed_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_ll2_conn->cid);
if (rc)
goto out;
cxt_info.iid = p_ll2_conn->cid;
rc = qed_cxt_get_cid_info(p_hwfn, &cxt_info);
if (rc) {
DP_NOTICE(p_hwfn, "Cannot find context info for cid=%d\n",
p_ll2_conn->cid);
goto out;
}
p_cxt = cxt_info.p_cxt;
memset(p_cxt, 0, sizeof(*p_cxt));
qid = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] + connection_handle;
qid = qed_ll2_handle_to_queue_id(p_hwfn, connection_handle,
p_ll2_conn->input.rx_conn_type);
p_ll2_conn->queue_id = qid;
p_ll2_conn->tx_stats_id = qid;
p_rx->set_prod_addr = (u8 __iomem *)p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM +
TSTORM_LL2_RX_PRODS_OFFSET(qid);
DP_VERBOSE(p_hwfn, QED_MSG_LL2,
"Establishing ll2 queue. PF %d ctx_based=%d abs qid=%d\n",
p_hwfn->rel_pf_id, p_ll2_conn->input.rx_conn_type, qid);
if (p_ll2_conn->input.rx_conn_type == QED_LL2_RX_TYPE_LEGACY) {
p_rx->set_prod_addr = p_hwfn->regview +
GTT_BAR0_MAP_REG_TSDM_RAM + TSTORM_LL2_RX_PRODS_OFFSET(qid);
} else {
/* QED_LL2_RX_TYPE_CTX - using doorbell */
p_rx->ctx_based = 1;
p_rx->set_prod_addr = p_hwfn->doorbells +
p_hwfn->dpi_start_offset +
DB_ADDR_SHIFT(DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE);
/* prepare db data */
p_rx->db_data.icid = cpu_to_le16((u16)p_ll2_conn->cid);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_AGG_CMD, DB_AGG_CMD_SET);
SET_FIELD(p_rx->db_data.params,
CORE_PWM_PROD_UPDATE_DATA_RESERVED1, 0);
}
p_tx->doorbell_addr = (u8 __iomem *)p_hwfn->doorbells +
qed_db_addr(p_ll2_conn->cid,
DQ_DEMS_LEGACY);
......@@ -1556,7 +1652,6 @@ int qed_ll2_establish_connection(void *cxt, u8 connection_handle)
DQ_XCM_CORE_TX_BD_PROD_CMD);
p_tx->db_msg.agg_flags = DQ_XCM_CORE_DQ_CF_CMD;
rc = qed_ll2_establish_connection_rx(p_hwfn, p_ll2_conn);
if (rc)
goto out;
......@@ -1590,7 +1685,7 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
struct qed_ll2_rx_packet *p_curp)
{
struct qed_ll2_rx_packet *p_posting_packet = NULL;
struct core_ll2_rx_prod rx_prod = { 0, 0, 0 };
struct core_ll2_rx_prod rx_prod = { 0, 0 };
bool b_notify_fw = false;
u16 bd_prod, cq_prod;
......@@ -1615,13 +1710,27 @@ static void qed_ll2_post_rx_buffer_notify_fw(struct qed_hwfn *p_hwfn,
bd_prod = qed_chain_get_prod_idx(&p_rx->rxq_chain);
cq_prod = qed_chain_get_prod_idx(&p_rx->rcq_chain);
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
if (p_rx->ctx_based) {
/* update producer by giving a doorbell */
p_rx->db_data.prod.bd_prod = cpu_to_le16(bd_prod);
p_rx->db_data.prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the
* doorbell
*/
dma_wmb();
DIRECT_REG_WR64(p_rx->set_prod_addr,
*((u64 *)&p_rx->db_data));
} else {
rx_prod.bd_prod = cpu_to_le16(bd_prod);
rx_prod.cqe_prod = cpu_to_le16(cq_prod);
/* Make sure chain element is updated before ringing the doorbell */
dma_wmb();
/* Make sure chain element is updated before ringing the
* doorbell
*/
dma_wmb();
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
DIRECT_REG_WR(p_rx->set_prod_addr, *((u32 *)&rx_prod));
}
}
int qed_ll2_post_rx_buffer(void *cxt,
......@@ -1965,6 +2074,12 @@ int qed_ll2_terminate_connection(void *cxt, u8 connection_handle)
if (QED_LL2_RX_REGISTERED(p_ll2_conn)) {
p_ll2_conn->rx_queue.b_cb_registered = false;
smp_wmb(); /* Make sure this is seen by ll2_lb_rxq_completion */
if (p_ll2_conn->rx_queue.ctx_based)
qed_db_recovery_del(p_hwfn->cdev,
p_ll2_conn->rx_queue.set_prod_addr,
&p_ll2_conn->rx_queue.db_data);
rc = qed_sp_ll2_rx_queue_stop(p_hwfn, p_ll2_conn);
if (rc)
goto out;
......
......@@ -46,6 +46,18 @@
#include "qed_sp.h"
#define QED_MAX_NUM_OF_LL2_CONNECTIONS (4)
/* LL2 queue handles will be split as follows:
 * first will be the legacy queues, and then the ctx based queues.
 */
#define QED_MAX_NUM_OF_LL2_CONNS_PF (4)
#define QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF (3)
#define QED_MAX_NUM_OF_CTX_LL2_CONNS_PF \
(QED_MAX_NUM_OF_LL2_CONNS_PF - QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF)
#define QED_LL2_LEGACY_CONN_BASE_PF 0
#define QED_LL2_CTX_CONN_BASE_PF QED_MAX_NUM_OF_LEGACY_LL2_CONNS_PF
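/* For illustration, with the values above: PF handles 0..2 are the legacy
 * (RAM based) queues and handle 3 is the single ctx based queue, so in
 * qed_ll2_handle_to_queue_id() handle 3 maps to
 * resc_start[QED_LL2_CTX_QUEUE] + MAX_NUM_LL2_RX_RAM_QUEUES + (3 - 3).
 */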
struct qed_ll2_rx_packet {
struct list_head list_entry;
......@@ -79,6 +91,7 @@ struct qed_ll2_rx_queue {
struct qed_chain rxq_chain;
struct qed_chain rcq_chain;
u8 rx_sb_index;
u8 ctx_based;
bool b_cb_registered;
__le16 *p_fw_cons;
struct list_head active_descq;
......@@ -86,6 +99,7 @@ struct qed_ll2_rx_queue {
struct list_head posting_descq;
struct qed_ll2_rx_packet *descq_array;
void __iomem *set_prod_addr;
struct core_pwm_prod_update_data db_data;
};
struct qed_ll2_tx_queue {
......
......@@ -2637,7 +2637,7 @@ static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
if (!ptt)
return -EAGAIN;
rc = qed_dbg_grc_config(hwfn, ptt, cfg_id, val);
rc = qed_dbg_grc_config(hwfn, cfg_id, val);
qed_ptt_release(hwfn, ptt);
......
......@@ -48,6 +48,8 @@
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define GRCBASE_MCP 0xe00000
#define QED_MCP_RESP_ITER_US 10
#define QED_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */
......@@ -3165,6 +3167,9 @@ qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
case QED_NVM_IMAGE_FCOE_CFG:
type = NVM_TYPE_FCOE_CFG;
break;
case QED_NVM_IMAGE_MDUMP:
type = NVM_TYPE_MDUMP;
break;
case QED_NVM_IMAGE_NVM_CFG1:
type = NVM_TYPE_NVM_CFG1;
break;
......@@ -3261,9 +3266,12 @@ static enum resource_id_enum qed_mcp_get_mfw_res_id(enum qed_resources res_id)
case QED_ILT:
mfw_res_id = RESOURCE_ILT_E;
break;
case QED_LL2_QUEUE:
case QED_LL2_RAM_QUEUE:
mfw_res_id = RESOURCE_LL2_QUEUE_E;
break;
case QED_LL2_CTX_QUEUE:
mfw_res_id = RESOURCE_LL2_CQS_E;
break;
case QED_RDMA_CNQ_RAM:
case QED_CMDQS_CQS:
/* CNQ/CMDQS are the same resource */
......
......@@ -178,6 +178,8 @@
0x008c80UL
#define MCP_REG_SCRATCH \
0xe20000UL
#define MCP_REG_SCRATCH_SIZE \
57344
#define CNIG_REG_NW_PORT_MODE_BB \
0x218200UL
#define MISCS_REG_CHIP_NUM \
......@@ -212,6 +214,8 @@
0x580900UL
#define DBG_REG_CLIENT_ENABLE \
0x010004UL
#define DBG_REG_TIMESTAMP_VALID_EN \
0x010b58UL
#define DMAE_REG_INIT \
0x00c000UL
#define DORQ_REG_IFEN \
......@@ -350,6 +354,10 @@
0x24000cUL
#define PSWRQ2_REG_ILT_MEMORY \
0x260000UL
#define PSWRQ2_REG_ILT_MEMORY_SIZE_BB \
15200
#define PSWRQ2_REG_ILT_MEMORY_SIZE_K2 \
22000
#define PSWHST_REG_DISCARD_INTERNAL_WRITES \
0x2a0040UL
#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \
......@@ -1453,6 +1461,8 @@
0x1401404UL
#define XSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1401408UL
#define XSEM_REG_DBG_GPRE_VECT \
0x1401410UL
#define XSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1401420UL
#define XSEM_REG_FAST_MEMORY \
......@@ -1465,6 +1475,8 @@
0x1501404UL
#define YSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1501408UL
#define YSEM_REG_DBG_GPRE_VECT \
0x1501410UL
#define YSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1501420UL
#define YSEM_REG_FAST_MEMORY \
......@@ -1479,6 +1491,8 @@
0x1601404UL
#define PSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1601408UL
#define PSEM_REG_DBG_GPRE_VECT \
0x1601410UL
#define PSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1601420UL
#define PSEM_REG_FAST_MEMORY \
......@@ -1493,6 +1507,8 @@
0x1701404UL
#define TSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1701408UL
#define TSEM_REG_DBG_GPRE_VECT \
0x1701410UL
#define TSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1701420UL
#define TSEM_REG_FAST_MEMORY \
......@@ -1507,12 +1523,16 @@
0x1801404UL
#define MSEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1801408UL
#define MSEM_REG_DBG_GPRE_VECT \
0x1801410UL
#define MSEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1801420UL
#define MSEM_REG_FAST_MEMORY \
0x1840000UL
#define USEM_REG_SLOW_DBG_EMPTY_BB_K2 \
0x1901140UL
#define SEM_FAST_REG_INT_RAM_SIZE \
20480
#define USEM_REG_SYNC_DBG_EMPTY \
0x1901160UL
#define USEM_REG_SLOW_DBG_ACTIVE_BB_K2 \
......@@ -1521,14 +1541,26 @@
0x1901404UL
#define USEM_REG_DBG_FRAME_MODE_BB_K2 \
0x1901408UL
#define USEM_REG_DBG_GPRE_VECT \
0x1901410UL
#define USEM_REG_DBG_MODE1_CFG_BB_K2 \
0x1901420UL
#define USEM_REG_FAST_MEMORY \
0x1940000UL
#define SEM_FAST_REG_DBG_MODE23_SRC_DISABLE \
0x000748UL
#define SEM_FAST_REG_DBG_MODE4_SRC_DISABLE \
0x00074cUL
#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE \
0x000750UL
#define SEM_FAST_REG_DEBUG_ACTIVE \
0x000740UL
#define SEM_FAST_REG_INT_RAM \
0x020000UL
#define SEM_FAST_REG_INT_RAM_SIZE_BB_K2 \
20480
#define SEM_FAST_REG_RECORD_FILTER_ENABLE \
0x000768UL
#define GRC_REG_TRACE_FIFO_VALID_DATA \
0x050064UL
#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW \
......@@ -1583,14 +1615,20 @@
0x181530UL
#define DBG_REG_DBG_BLOCK_ON \
0x010454UL
#define DBG_REG_FILTER_ENABLE \
0x0109d0UL
#define DBG_REG_FRAMING_MODE \
0x010058UL
#define DBG_REG_TRIGGER_ENABLE \
0x01054cUL
#define SEM_FAST_REG_VFC_DATA_WR \
0x000b40UL
#define SEM_FAST_REG_VFC_ADDR \
0x000b44UL
#define SEM_FAST_REG_VFC_DATA_RD \
0x000b48UL
#define SEM_FAST_REG_VFC_STATUS \
0x000b4cUL
#define RSS_REG_RSS_RAM_DATA \
0x238c20UL
#define RSS_REG_RSS_RAM_DATA_SIZE \
......
......@@ -900,7 +900,7 @@ int qed_roce_query_qp(struct qed_hwfn *p_hwfn,
goto err_resp;
out_params->rq_psn = le32_to_cpu(p_resp_ramrod_res->psn);
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->err_flag),
rq_err_state = GET_FIELD(le32_to_cpu(p_resp_ramrod_res->flags),
ROCE_QUERY_QP_RESP_OUTPUT_PARAMS_ERROR_FLG);
dma_free_coherent(&p_hwfn->cdev->pdev->dev, sizeof(*p_resp_ramrod_res),
......
......@@ -120,9 +120,7 @@ union ramrod_data {
struct fcoe_conn_terminate_ramrod_params fcoe_conn_terminate;
struct fcoe_stat_ramrod_params fcoe_stat;
struct iscsi_slow_path_hdr iscsi_empty;
struct iscsi_init_ramrod_params iscsi_init;
struct iscsi_spe_func_dstry iscsi_destroy;
struct iscsi_spe_conn_offload iscsi_conn_offload;
struct iscsi_conn_update_ramrod_params iscsi_conn_update;
struct iscsi_spe_conn_mac_update iscsi_conn_mac_update;
......
......@@ -352,7 +352,7 @@ static int qed_iov_post_vf_bulletin(struct qed_hwfn *p_hwfn,
/* propagate bulletin board via dmae to vm memory */
memset(&params, 0, sizeof(params));
params.flags = QED_DMAE_FLAG_VF_DST;
SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = p_vf->abs_vf_id;
return qed_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys,
p_vf->vf_bulletin, p_vf->bulletin.size / 4,
......@@ -1225,8 +1225,8 @@ static void qed_iov_send_response(struct qed_hwfn *p_hwfn,
eng_vf_id = p_vf->abs_vf_id;
memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_DST;
memset(&params, 0, sizeof(params));
SET_FIELD(params.flags, QED_DMAE_PARAMS_DST_VF_VALID, 0x1);
params.dst_vfid = eng_vf_id;
qed_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
......@@ -4103,8 +4103,9 @@ static int qed_iov_copy_vf_msg(struct qed_hwfn *p_hwfn, struct qed_ptt *ptt,
if (!vf_info)
return -EINVAL;
memset(&params, 0, sizeof(struct qed_dmae_params));
params.flags = QED_DMAE_FLAG_VF_SRC | QED_DMAE_FLAG_COMPLETION_DST;
memset(&params, 0, sizeof(params));
SET_FIELD(params.flags, QED_DMAE_PARAMS_SRC_VF_VALID, 0x1);
SET_FIELD(params.flags, QED_DMAE_PARAMS_COMPLETION_DST, 0x1);
params.src_vfid = vf_info->abs_vf_id;
if (qed_dmae_host2host(p_hwfn, ptt,
......@@ -4354,9 +4355,9 @@ qed_iov_bulletin_get_forced_vlan(struct qed_hwfn *p_hwfn, u16 rel_vf_id)
static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
struct qed_ptt *p_ptt, int vfid, int val)
{
struct qed_mcp_link_state *p_link;
struct qed_vf_info *vf;
u8 abs_vp_id = 0;
u16 rl_id;
int rc;
vf = qed_iov_get_vf_info(p_hwfn, (u16)vfid, true);
......@@ -4367,10 +4368,8 @@ static int qed_iov_configure_tx_rate(struct qed_hwfn *p_hwfn,
if (rc)
return rc;
p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
return qed_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val,
p_link->speed);
rl_id = abs_vp_id; /* The "rl_id" is set as the "vport_id" */
return qed_init_global_rl(p_hwfn, p_ptt, rl_id, (u32)val);
}
static int
......
......@@ -848,13 +848,13 @@ static void qede_tpa_start(struct qede_dev *edev,
qede_set_gro_params(edev, tpa_info->skb, cqe);
cons_buf: /* We still need to handle bd_len_list to consume buffers */
if (likely(cqe->ext_bd_len_list[0]))
if (likely(cqe->bw_ext_bd_len_list[0]))
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->ext_bd_len_list[0]));
le16_to_cpu(cqe->bw_ext_bd_len_list[0]));
if (unlikely(cqe->ext_bd_len_list[1])) {
if (unlikely(cqe->bw_ext_bd_len_list[1])) {
DP_ERR(edev,
"Unlikely - got a TPA aggregation with more than one ext_bd_len_list entry in the TPA start\n");
"Unlikely - got a TPA aggregation with more than one bw_ext_bd_len_list entry in the TPA start\n");
tpa_info->state = QEDE_AGG_STATE_ERROR;
}
}
......
......@@ -76,7 +76,6 @@
#define FW_ASSERT_GENERAL_ATTN_IDX 32
#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
......@@ -105,12 +104,19 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
#define MAX_NUM_LL2_RX_QUEUES 48
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
/* Number of LL2 RAM based queues */
#define MAX_NUM_LL2_RX_RAM_QUEUES 32
/* Number of LL2 context based queues */
#define MAX_NUM_LL2_RX_CTX_QUEUES 208
#define MAX_NUM_LL2_RX_QUEUES \
(MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
#define FW_MINOR_VERSION 37
#define FW_REVISION_VERSION 7
#define FW_MINOR_VERSION 42
#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
......@@ -132,10 +138,10 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
......@@ -222,6 +228,7 @@
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
......@@ -340,6 +347,10 @@
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
/* DQ_DEMS_AGG_VAL_BASE */
#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
(DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
#define DQ_REGION_SHIFT (12)
/* DPM */
......@@ -395,6 +406,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
......@@ -425,8 +437,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
......@@ -439,8 +449,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
......@@ -652,8 +660,8 @@
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
#define BTB_MAX_BLOCKS 1440
#define BTB_MAX_BLOCKS_BB 1440
#define BTB_MAX_BLOCKS_K2 1840
/*****************/
/* PRS CONSTANTS */
/*****************/
......@@ -730,6 +738,8 @@ enum protocol_type {
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED1,
PROTOCOLID_RDMA,
PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
......@@ -750,6 +760,10 @@ union rdma_eqe_data {
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
struct tstorm_queue_zone {
__le32 reserved[2];
};
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
......@@ -872,8 +886,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
......
......@@ -38,9 +38,11 @@
/********************/
#define ETH_HSI_VER_MAJOR 3
#define ETH_HSI_VER_MINOR 10
#define ETH_HSI_VER_MINOR 11
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
/* Maximum number of pinned L2 connections (CIDs) */
#define ETH_PINNED_CONN_MAX_NUM 32
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
......@@ -61,6 +63,7 @@
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
......@@ -75,9 +78,8 @@
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
#define ETH_RX_BD_THRESHOLD 12
#define ETH_RX_BD_THRESHOLD 16
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
......@@ -96,24 +98,24 @@
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
#define ETH_TPA_MAX_AGGS_NUM 64
#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
enum dest_port_mode {
DEST_PORT_PHY,
DEST_PORT_LOOPBACK,
DEST_PORT_PHY_LOOPBACK,
DEST_PORT_DROP,
MAX_DEST_PORT_MODE
enum dst_port_mode {
DST_PORT_PHY,
DST_PORT_LOOPBACK,
DST_PORT_PHY_LOOPBACK,
DST_PORT_DROP,
MAX_DST_PORT_MODE
};
/* Ethernet address type */
......@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
......@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
u8 reserved;
__le16 flow_id;
u8 reserved1[11];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
......@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
__le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
__le16 flow_id;
u8 reserved;
__le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
__le16 reserved2;
__le32 flow_id_or_resource_id;
u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
......@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
/* The parsing information data for the fourth tx bd of a given packet. */
struct eth_tx_data_4th_bd {
u8 dst_vport_id;
u8 reserved4;
__le16 bitfields;
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
__le16 reserved3;
};
/* The fourth tx bd of a given packet */
struct eth_tx_4th_bd {
struct regpair addr; /* Single continuous buffer */
__le16 nbytes; /* Number of bytes in this BD */
struct eth_tx_data_4th_bd data; /* Parsing information data */
};
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
......@@ -431,6 +458,7 @@ union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
......@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
/* Mstorm Queue Zone */
struct mstorm_eth_queue_zone {
struct eth_rx_prod_data rx_producers;
__le32 reserved[3];
};
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
......
......@@ -999,7 +999,6 @@ struct iscsi_conn_offload_params {
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
__le32 initial_ack;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
......@@ -1011,10 +1010,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
__le16 reserved0;
__le32 stat_sn;
__le32 initial_ack;
};
/* iSCSI connection statistics */
......@@ -1029,25 +1028,14 @@ struct iscsi_conn_stats_params {
__le32 reserved;
};
/* spe message header */
struct iscsi_slow_path_hdr {
u8 op_code;
u8 flags;
#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
};
/* iSCSI connection update params passed by driver to FW in ISCSI update
*ramrod.
*/
struct iscsi_conn_update_ramrod_params {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
......@@ -1065,7 +1053,7 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
u8 reserved0[3];
u8 reserved3[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
......@@ -1251,22 +1239,22 @@ enum iscsi_ramrod_cmd_id {
/* iSCSI connection termination request */
struct iscsi_spe_conn_mac_update {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
u8 reserved0[2];
u8 reserved2[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
......@@ -1275,44 +1263,36 @@ struct iscsi_spe_conn_offload {
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 reset_stats;
u8 reserved0[7];
u8 reserved2[7];
struct regpair stats_cnts_addr;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le16 conn_id;
__le32 fw_cid;
__le32 reserved1;
u8 abortive;
u8 reserved0[7];
u8 reserved2[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
/* iSCSI firmware function destroy parameters */
struct iscsi_spe_func_dstry {
struct iscsi_slow_path_hdr hdr;
__le16 reserved0;
__le32 reserved1;
};
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
......@@ -1324,8 +1304,12 @@ struct iscsi_spe_func_init {
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
__le16 reserved1;
__le32 reserved2;
u8 params;
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
......
......@@ -159,6 +159,7 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
QED_NVM_IMAGE_MDUMP,
QED_NVM_IMAGE_NVM_CFG1,
QED_NVM_IMAGE_DEFAULT_CFG,
QED_NVM_IMAGE_NVM_META,
......@@ -463,7 +464,7 @@ enum qed_db_rec_space {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
(void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0x1FF
......@@ -1177,6 +1178,17 @@ struct qed_common_ops {
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
#define GET_MFW_FIELD(name, field) \
(((name) & (field ## _MASK)) >> (field ## _OFFSET))
#define SET_MFW_FIELD(name, field, value) \
do { \
(name) &= ~(field ## _MASK); \
(name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
} while (0)
#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
do { \
......
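
A minimal example of the new MFW field accessors' semantics (FOO_FIELD and
its mask/offset are hypothetical, purely for illustration):

	/* #define FOO_FIELD_MASK   0x00000ff0 */
	/* #define FOO_FIELD_OFFSET 4 */
	u32 reg = 0, val;

	SET_MFW_FIELD(reg, FOO_FIELD, 0x2a);	/* reg == 0x2a0 */
	val = GET_MFW_FIELD(reg, FOO_FIELD);	/* val == 0x2a */

Note the contrast with GET_FIELD/SET_FIELD, whose masks apply to the
already-shifted value (name ## _SHIFT) rather than in register position.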
......@@ -52,6 +52,12 @@ enum qed_ll2_conn_type {
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3,
MAX_QED_LL2_CONN_TYPE
};
enum qed_ll2_rx_conn_type {
QED_LL2_RX_TYPE_LEGACY,
QED_LL2_RX_TYPE_CTX,
MAX_QED_LL2_RX_CONN_TYPE
};
......@@ -165,6 +171,7 @@ struct qed_ll2_cbs {
};
struct qed_ll2_acquire_data_inputs {
enum qed_ll2_rx_conn_type rx_conn_type;
enum qed_ll2_conn_type conn_type;
u16 mtu;
u16 rx_num_desc;
......
......@@ -107,8 +107,9 @@ struct scsi_drv_cmdq {
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
u8 log_page_size_conn;
u8 debug_mode;
u8 reserved2[12];
u8 reserved2[11];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
......