Commit 4e3b95f1 authored by David S. Miller

Merge branch 'qed-Advance-to-FW-8.33.1.0'

Tomer Tayar says:

====================
qed*: Advance to FW 8.33.1.0

This series advances all qed* drivers to use firmware 8.33.1.0, which brings
new capabilities and initial support for new HW. The changes are mostly in
qed, and include changes to the FW interface files, as well as updates to the
FW initialization and debug collection code. The protocol drivers have
minor functional changes for this firmware.

Patch 1 rearranges and refactors the FW interface files in preparation for
the new FW (no functional change).
Patch 2 prepares the code for support of the new HW (no functional change).
Patch 3 is the actual utilization of the new FW.
Patch 4 advances the drivers' version.

v3->v4:
Fix a compilation issue which was reported by krobot (dependency on CRC8).

v2->v3:
Resend the series with a fixed title in the cover letter.

v1->v2:
- Break the previous single patch into several patches.
- Fix compilation issues which were reported by krobot.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bbb6189d 41e87c91
@@ -264,7 +264,7 @@ static int qedr_register_device(struct qedr_dev *dev)
 static int qedr_alloc_mem_sb(struct qedr_dev *dev,
                              struct qed_sb_info *sb_info, u16 sb_id)
 {
-    struct status_block *sb_virt;
+    struct status_block_e4 *sb_virt;
     dma_addr_t sb_phys;
     int rc;
...
@@ -164,6 +164,13 @@ struct rdma_srq_sge {
     __le32 l_key;
 };
 
+/* Rdma doorbell data for flags update */
+struct rdma_pwm_flags_data {
+    __le16 icid; /* internal CID */
+    u8 agg_flags; /* aggregative flags */
+    u8 reserved;
+};
+
 /* Rdma doorbell data for SQ and RQ */
 struct rdma_pwm_val16_data {
     __le16 icid;
@@ -184,8 +191,12 @@ struct rdma_pwm_val32_data {
 #define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT              0
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK             0x1
 #define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT            2
-#define RDMA_PWM_VAL32_DATA_RESERVED_MASK              0x1F
-#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT             3
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_MASK    0x1
+#define RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP_SHIFT   3
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_MASK           0x1
+#define RDMA_PWM_VAL32_DATA_SET_16B_VAL_SHIFT          4
+#define RDMA_PWM_VAL32_DATA_RESERVED_MASK              0x7
+#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT             5
     __le32 value;
 };
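These MASK/SHIFT pairs are consumed through the driver's generic bitfield
accessors. A minimal sketch follows; the SET_FIELD/GET_FIELD macros track
their definitions in the qed common HSI headers, and the assumption here is
that the flags byte of rdma_pwm_val32_data is its params member:

    #define GET_FIELD(value, name) \
            (((value) >> (name ## _SHIFT)) & name ## _MASK)
    #define SET_FIELD(value, name, flag)                           \
            do {                                                   \
                    (value) &= ~(name ## _MASK << name ## _SHIFT); \
                    (value) |= (((u64)(flag)) << (name ## _SHIFT)); \
            } while (0)

    /* Hypothetical usage: tag a 32-bit doorbell as an iWARP connection. */
    struct rdma_pwm_val32_data db = { 0 };
    SET_FIELD(db.params, RDMA_PWM_VAL32_DATA_CONN_TYPE_IS_IWARP, 1);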
@@ -492,9 +503,11 @@ struct rdma_sq_fmr_wqe {
 #define RDMA_SQ_FMR_WQE_DIF_VALIDATE_APP_TAG_SHIFT     5
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_MASK              0x1
 #define RDMA_SQ_FMR_WQE_DIF_CRC_SEED_SHIFT             6
-#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                 0x1FF
-#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                7
-    __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_MASK      0x1
+#define RDMA_SQ_FMR_WQE_DIF_RX_REF_TAG_CONST_SHIFT     7
+#define RDMA_SQ_FMR_WQE_RESERVED4_MASK                 0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                8
+    __le32 reserved5;
 };
 
 /* First element (16 bytes) of fmr wqe */
@@ -572,9 +585,11 @@ struct rdma_sq_fmr_wqe_3rd {
 #define RDMA_SQ_FMR_WQE_3RD_DIF_VALIDATE_APP_TAG_SHIFT 5
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_MASK          0x1
 #define RDMA_SQ_FMR_WQE_3RD_DIF_CRC_SEED_SHIFT         6
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK             0x1FF
-#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_SHIFT            7
-    __le32 Reserved5;
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_MASK  0x1
+#define RDMA_SQ_FMR_WQE_3RD_DIF_RX_REF_TAG_CONST_SHIFT 7
+#define RDMA_SQ_FMR_WQE_3RD_RESERVED4_MASK             0xFF
+#define RDMA_SQ_FMR_WQE_RESERVED4_SHIFT                8
+    __le32 reserved5;
 };
 
 struct rdma_sq_local_inv_wqe {
@@ -618,8 +633,10 @@ struct rdma_sq_rdma_wqe {
 #define RDMA_SQ_RDMA_WQE_INLINE_FLG_SHIFT              4
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_MASK          0x1
 #define RDMA_SQ_RDMA_WQE_DIF_ON_HOST_FLG_SHIFT         5
-#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                0x3
-#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT               6
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_MASK             0x1
+#define RDMA_SQ_RDMA_WQE_READ_INV_FLG_SHIFT            6
+#define RDMA_SQ_RDMA_WQE_RESERVED0_MASK                0x1
+#define RDMA_SQ_RDMA_WQE_RESERVED0_SHIFT               7
     u8 wqe_size;
     u8 prev_wqe_size;
     struct regpair remote_va;
...
@@ -85,6 +85,7 @@ config QED
     tristate "QLogic QED 25/40/100Gb core driver"
     depends on PCI
     select ZLIB_INFLATE
+    select CRC8
     ---help---
       This enables the support for ...
...
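The new "select CRC8" is what the v3->v4 respin in the cover letter refers
to: the 8.33.1.0 init code computes CRC8 validation bytes, so QED must pull
in the kernel's crc8 library. A hedged sketch of that library's API (the
polynomial below is illustrative, not necessarily the one qed uses):

    #include <linux/crc8.h>

    DECLARE_CRC8_TABLE(example_crc8_table); /* static u8 [CRC8_TABLE_SIZE] */

    static u8 example_crc8(u8 *buf, size_t len)
    {
            /* Build the MSB-first lookup table once for a polynomial. */
            crc8_populate_msb(example_crc8_table, 0x07);
            return crc8(example_crc8_table, buf, len, CRC8_INIT_VALUE);
    }

Without the select, configurations that do not otherwise enable CRC8 fail to
link, which is the kbuild-robot report mentioned above.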
@@ -53,9 +53,9 @@
 extern const struct qed_common_ops qed_common_ops_pass;
 
 #define QED_MAJOR_VERSION               8
-#define QED_MINOR_VERSION               10
-#define QED_REVISION_VERSION            11
-#define QED_ENGINEERING_VERSION         21
+#define QED_MINOR_VERSION               33
+#define QED_REVISION_VERSION            0
+#define QED_ENGINEERING_VERSION         20
 
 #define QED_VERSION                                              \
     ((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
@@ -778,8 +778,8 @@ static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
     return sw_fid;
 }
 
-#define PURE_LB_TC 8
 #define PKT_LB_TC 9
+#define MAX_NUM_VOQS_E4 20
 
 int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
 void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
...
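The version bump is easy to sanity-check by hand, assuming the tail of
QED_VERSION (truncated in the hunk above) ORs in
(QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION as in other qed
releases:

    /* 8.33.0.20 packed as a u32: */
    u32 v = (8 << 24) | (33 << 16) | (0 << 8) | 20;
    /* = 0x08000000 | 0x00210000 | 0x0 | 0x14 = 0x08210014 */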
@@ -86,22 +86,22 @@
 
 /* connection context union */
 union conn_context {
-    struct core_conn_context core_ctx;
-    struct eth_conn_context eth_ctx;
-    struct iscsi_conn_context iscsi_ctx;
-    struct fcoe_conn_context fcoe_ctx;
-    struct roce_conn_context roce_ctx;
+    struct e4_core_conn_context core_ctx;
+    struct e4_eth_conn_context eth_ctx;
+    struct e4_iscsi_conn_context iscsi_ctx;
+    struct e4_fcoe_conn_context fcoe_ctx;
+    struct e4_roce_conn_context roce_ctx;
 };
 
 /* TYPE-0 task context - iSCSI, FCOE */
 union type0_task_context {
-    struct iscsi_task_context iscsi_ctx;
-    struct fcoe_task_context fcoe_ctx;
+    struct e4_iscsi_task_context iscsi_ctx;
+    struct e4_fcoe_task_context fcoe_ctx;
 };
 
 /* TYPE-1 task context - ROCE */
 union type1_task_context {
-    struct rdma_task_context roce_ctx;
+    struct e4_rdma_task_context roce_ctx;
 };
 
 struct src_ent {
@@ -110,7 +110,7 @@ struct src_ent {
 };
 
 #define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */
-#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12))
+#define CDUT_SEG_ALIGNMET_IN_BYTES BIT(CDUT_SEG_ALIGNMET + 12)
 
 #define CONN_CXT_SIZE(p_hwfn) \
     ALIGNED_TYPE_SIZE(union conn_context, p_hwfn)
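The BIT() conversion is essentially cosmetic (BIT(n) expands to 1UL << n,
so the value only changes type, not magnitude):

    /* BIT(3 + 12) == 1UL << 15 == 0x8000 == 32 KB segment alignment */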
@@ -742,7 +742,7 @@ int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *line_count)
     p_blk = qed_cxt_set_blk(&p_cli->pf_blks[0]);
 
     qed_cxt_qm_iids(p_hwfn, &qm_iids);
-    total = qed_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids,
+    total = qed_qm_pf_mem_size(qm_iids.cids,
                                qm_iids.vf_cids, qm_iids.tids,
                                p_hwfn->qm_info.num_pqs,
                                p_hwfn->qm_info.num_vf_pqs);
@@ -1496,20 +1496,24 @@ static void qed_cdu_init_pf(struct qed_hwfn *p_hwfn)
     }
 }
 
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt, bool is_pf_loading)
 {
-    struct qed_qm_pf_rt_init_params params;
     struct qed_qm_info *qm_info = &p_hwfn->qm_info;
+    struct qed_qm_pf_rt_init_params params;
+    struct qed_mcp_link_state *p_link;
     struct qed_qm_iids iids;
 
     memset(&iids, 0, sizeof(iids));
     qed_cxt_qm_iids(p_hwfn, &iids);
 
+    p_link = &QED_LEADING_HWFN(p_hwfn->cdev)->mcp_info->link_output;
+
     memset(&params, 0, sizeof(params));
     params.port_id = p_hwfn->port_id;
     params.pf_id = p_hwfn->rel_pf_id;
     params.max_phys_tcs_per_port = qm_info->max_phys_tcs_per_port;
-    params.is_first_pf = p_hwfn->first_on_engine;
+    params.is_pf_loading = is_pf_loading;
     params.num_pf_cids = iids.cids;
     params.num_vf_cids = iids.vf_cids;
     params.num_tids = iids.tids;
@@ -1520,6 +1524,7 @@ void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
     params.num_vports = qm_info->num_vports;
     params.pf_wfq = qm_info->pf_wfq;
     params.pf_rl = qm_info->pf_rl;
+    params.link_speed = p_link->speed;
     params.pq_params = qm_info->qm_pq_params;
     params.vport_params = qm_info->qm_vport_params;
 
@@ -1883,7 +1888,7 @@ void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn)
 
 void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
-    qed_qm_init_pf(p_hwfn, p_ptt);
+    qed_qm_init_pf(p_hwfn, p_ptt, true);
     qed_cm_init_pf(p_hwfn);
     qed_dq_init_pf(p_hwfn);
     qed_cdu_init_pf(p_hwfn);
@@ -2326,7 +2331,7 @@ qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
     for (elem_i = 0; elem_i < elems_per_p; elem_i++) {
         elem = (union type1_task_context *)elem_start;
         SET_FIELD(elem->roce_ctx.tdif_context.flags1,
-                  TDIF_TASK_CONTEXT_REFTAGMASK, 0xf);
+                  TDIF_TASK_CONTEXT_REF_TAG_MASK, 0xf);
         elem_start += TYPE1_TASK_CXT_SIZE(p_hwfn);
     }
 }
...
@@ -169,8 +169,10 @@ void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
  *
  * @param p_hwfn
  * @param p_ptt
+ * @param is_pf_loading
  */
-void qed_qm_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt, bool is_pf_loading);
 
 /**
  * @brief Reconfigures QM pf on the fly
...
@@ -954,9 +954,7 @@ void qed_dcbx_set_pf_update_params(struct qed_dcbx_results *p_src,
                                    struct pf_update_ramrod_data *p_dest)
 {
     struct protocol_dcb_data *p_dcb_data;
-    bool update_flag = false;
-
-    p_dest->pf_id = p_src->pf_id;
+    u8 update_flag;
 
     update_flag = p_src->arr[DCBX_PROTOCOL_FCOE].update;
     p_dest->update_fcoe_dcb_data_mode = update_flag;
...
@@ -758,7 +758,7 @@ static void qed_init_qm_info(struct qed_hwfn *p_hwfn)
 /* This function reconfigures the QM pf on the fly.
  * For this purpose we:
  * 1. reconfigure the QM database
- * 2. set new values to runtime arrat
+ * 2. set new values to runtime array
  * 3. send an sdm_qm_cmd through the rbc interface to stop the QM
  * 4. activate init tool in QM_PF stage
  * 5. send an sdm_qm_cmd through rbc interface to release the QM
@@ -784,7 +784,7 @@ int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
     qed_init_clear_rt_data(p_hwfn);
 
     /* prepare QM portion of runtime array */
-    qed_qm_init_pf(p_hwfn, p_ptt);
+    qed_qm_init_pf(p_hwfn, p_ptt, false);
 
     /* activate init tool on runtime array */
     rc = qed_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
@@ -1515,7 +1515,7 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
                  NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, 1);
     }
 
-    /* Protocl Configuration */
+    /* Protocol Configuration */
     STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET,
                  (p_hwfn->hw_info.personality == QED_PCI_ISCSI) ? 1 : 0);
     STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_FCOE_RT_OFFSET,
@@ -1527,6 +1527,11 @@ static int qed_hw_init_pf(struct qed_hwfn *p_hwfn,
     if (rc)
         return rc;
 
+    /* Sanity check before the PF init sequence that uses DMAE */
+    rc = qed_dmae_sanity(p_hwfn, p_ptt, "pf_phase");
+    if (rc)
+        return rc;
+
     /* PF Init sequence */
     rc = qed_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode);
     if (rc)
@@ -2192,7 +2197,7 @@ qed_hw_set_soft_resc_size(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
         /* No need for a case for QED_CMDQS_CQS since
          * CNQ/CMDQS are the same resource.
          */
-        resc_max_val = NUM_OF_CMDQS_CQS;
+        resc_max_val = NUM_OF_GLOBAL_QUEUES;
         break;
     case QED_RDMA_STATS_QUEUE:
         resc_max_val = b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2
@@ -2267,7 +2272,7 @@ int qed_hw_get_dflt_resc(struct qed_hwfn *p_hwfn,
     case QED_RDMA_CNQ_RAM:
     case QED_CMDQS_CQS:
         /* CNQ/CMDQS are the same resource */
-        *p_resc_num = NUM_OF_CMDQS_CQS / num_funcs;
+        *p_resc_num = NUM_OF_GLOBAL_QUEUES / num_funcs;
         break;
     case QED_RDMA_STATS_QUEUE:
         *p_resc_num = (b_ah ? RDMA_NUM_STATISTIC_COUNTERS_K2 :
...
@@ -115,7 +115,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
     struct qed_fcoe_pf_params *fcoe_pf_params = NULL;
     struct fcoe_init_ramrod_params *p_ramrod = NULL;
     struct fcoe_init_func_ramrod_data *p_data;
-    struct fcoe_conn_context *p_cxt = NULL;
+    struct e4_fcoe_conn_context *p_cxt = NULL;
     struct qed_spq_entry *p_ent = NULL;
     struct qed_sp_init_data init_data;
     struct qed_cxt_info cxt_info;
@@ -167,7 +167,7 @@ qed_sp_fcoe_func_start(struct qed_hwfn *p_hwfn,
     }
     p_cxt = cxt_info.p_cxt;
     SET_FIELD(p_cxt->tstorm_ag_context.flags3,
-              TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
+              E4_TSTORM_FCOE_CONN_AG_CTX_DUMMY_TIMER_CF_EN, 1);
 
     fcoe_pf_params->dummy_icid = (u16)dummy_cid;
@@ -568,7 +568,7 @@ int qed_fcoe_alloc(struct qed_hwfn *p_hwfn)
 
 void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
 {
-    struct fcoe_task_context *p_task_ctx = NULL;
+    struct e4_fcoe_task_context *p_task_ctx = NULL;
     int rc;
     u32 i;
@@ -580,13 +580,13 @@ void qed_fcoe_setup(struct qed_hwfn *p_hwfn)
         if (rc)
             continue;
 
-        memset(p_task_ctx, 0, sizeof(struct fcoe_task_context));
+        memset(p_task_ctx, 0, sizeof(struct e4_fcoe_task_context));
         SET_FIELD(p_task_ctx->timer_context.logical_client_0,
                   TIMERS_CONTEXT_VALIDLC0, 1);
         SET_FIELD(p_task_ctx->timer_context.logical_client_1,
                   TIMERS_CONTEXT_VALIDLC1, 1);
         SET_FIELD(p_task_ctx->tstorm_ag_context.flags0,
-                  TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
+                  E4_TSTORM_FCOE_TASK_AG_CTX_CONNECTION_TYPE, 1);
     }
 }
...
@@ -807,3 +807,71 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
 
     return rc;
 }
+
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt, const char *phase)
+{
+    u32 size = PAGE_SIZE / 2, val;
+    struct qed_dmae_params params;
+    int rc = 0;
+    dma_addr_t p_phys;
+    void *p_virt;
+    u32 *p_tmp;
+
+    p_virt = dma_alloc_coherent(&p_hwfn->cdev->pdev->dev,
+                                2 * size, &p_phys, GFP_KERNEL);
+    if (!p_virt) {
+        DP_NOTICE(p_hwfn,
+                  "DMAE sanity [%s]: failed to allocate memory\n",
+                  phase);
+        return -ENOMEM;
+    }
+
+    /* Fill the bottom half of the allocated memory with a known pattern */
+    for (p_tmp = (u32 *)p_virt;
+         p_tmp < (u32 *)((u8 *)p_virt + size); p_tmp++) {
+        /* Save the address itself as the value */
+        val = (u32)(uintptr_t)p_tmp;
+        *p_tmp = val;
+    }
+
+    /* Zero the top half of the allocated memory */
+    memset((u8 *)p_virt + size, 0, size);
+
+    DP_VERBOSE(p_hwfn,
+               QED_MSG_SP,
+               "DMAE sanity [%s]: src_addr={phys 0x%llx, virt %p}, dst_addr={phys 0x%llx, virt %p}, size 0x%x\n",
+               phase,
+               (u64)p_phys,
+               p_virt, (u64)(p_phys + size), (u8 *)p_virt + size, size);
+
+    memset(&params, 0, sizeof(params));
+    rc = qed_dmae_host2host(p_hwfn, p_ptt, p_phys, p_phys + size,
+                            size / 4 /* size_in_dwords */, &params);
+    if (rc) {
+        DP_NOTICE(p_hwfn,
+                  "DMAE sanity [%s]: qed_dmae_host2host() failed. rc = %d.\n",
+                  phase, rc);
+        goto out;
+    }
+
+    /* Verify that the top half of the allocated memory has the pattern */
+    for (p_tmp = (u32 *)((u8 *)p_virt + size);
+         p_tmp < (u32 *)((u8 *)p_virt + (2 * size)); p_tmp++) {
+        /* The corresponding address in the bottom half */
+        val = (u32)(uintptr_t)p_tmp - size;
+
+        if (*p_tmp != val) {
+            DP_NOTICE(p_hwfn,
+                      "DMAE sanity [%s]: addr={phys 0x%llx, virt %p}, read_val 0x%08x, expected_val 0x%08x\n",
+                      phase,
+                      (u64)p_phys + ((u8 *)p_tmp - (u8 *)p_virt),
+                      p_tmp, *p_tmp, val);
+            rc = -EINVAL;
+            goto out;
+        }
+    }
+
+out:
+    dma_free_coherent(&p_hwfn->cdev->pdev->dev, 2 * size, p_virt, p_phys);
+    return rc;
+}
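The self-test's trick is to use each dword's own (truncated) virtual address
as the pattern, which makes any corruption self-describing: after the
host2host copy, the dword at a given destination offset must equal the
address of its source twin. Restated as a sketch of the invariant the loop
above checks:

    /* For every dword offset 0 <= off < size in the copied-to half: */
    u32 expected = (u32)(uintptr_t)((u8 *)p_virt + off);  /* source dword's address */
    u32 actual   = *(u32 *)((u8 *)p_virt + size + off);   /* destination dword */
    /* actual == expected iff the DMAE engine moved the data intact */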
@@ -299,4 +299,8 @@ union qed_qm_pq_params {
 int qed_init_fw_data(struct qed_dev *cdev,
                      const u8 *fw_data);
 
+int qed_dmae_sanity(struct qed_hwfn *p_hwfn,
+                    struct qed_ptt *p_ptt, const char *phase);
+
 #endif
@@ -414,11 +414,23 @@ static void qed_init_cmd_rd(struct qed_hwfn *p_hwfn,
 }
 
 /* init_ops callbacks entry point */
-static void qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
-                            struct qed_ptt *p_ptt,
-                            struct init_callback_op *p_cmd)
+static int qed_init_cmd_cb(struct qed_hwfn *p_hwfn,
+                           struct qed_ptt *p_ptt,
+                           struct init_callback_op *p_cmd)
 {
-    DP_NOTICE(p_hwfn, "Currently init values have no need of callbacks\n");
+    int rc;
+
+    switch (p_cmd->callback_id) {
+    case DMAE_READY_CB:
+        rc = qed_dmae_sanity(p_hwfn, p_ptt, "engine_phase");
+        break;
+    default:
+        DP_NOTICE(p_hwfn, "Unexpected init op callback ID %d\n",
+                  p_cmd->callback_id);
+        return -EINVAL;
+    }
+
+    return rc;
 }
 
 static u8 qed_init_cmd_mode_match(struct qed_hwfn *p_hwfn,
@@ -519,7 +531,7 @@ int qed_init_run(struct qed_hwfn *p_hwfn,
             break;
 
         case INIT_OP_CALLBACK:
-            qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
+            rc = qed_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback);
             break;
         }
...
@@ -62,7 +62,7 @@ struct qed_sb_sp_info {
     struct qed_sb_info sb_info;
 
     /* per protocol index data */
-    struct qed_pi_info pi_info_arr[PIS_PER_SB];
+    struct qed_pi_info pi_info_arr[PIS_PER_SB_E4];
 };
 
 enum qed_attention_type {
@@ -82,7 +82,7 @@ struct aeu_invert_reg_bit {
 #define ATTENTION_LENGTH_SHIFT          (4)
 #define ATTENTION_LENGTH(flags)         (((flags) & ATTENTION_LENGTH_MASK) >> \
                                          ATTENTION_LENGTH_SHIFT)
-#define ATTENTION_SINGLE                (1 << ATTENTION_LENGTH_SHIFT)
+#define ATTENTION_SINGLE                BIT(ATTENTION_LENGTH_SHIFT)
 #define ATTENTION_PAR                   (ATTENTION_SINGLE | ATTENTION_PARITY)
 #define ATTENTION_PAR_INT               ((2 << ATTENTION_LENGTH_SHIFT) | \
                                          ATTENTION_PARITY)
@@ -1313,7 +1313,7 @@ static void qed_int_cau_conf_pi(struct qed_hwfn *p_hwfn,
     if (IS_VF(p_hwfn->cdev))
         return;
 
-    sb_offset = igu_sb_id * PIS_PER_SB;
+    sb_offset = igu_sb_id * PIS_PER_SB_E4;
     memset(&pi_entry, 0, sizeof(struct cau_pi_entry));
 
     SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset);
...
@@ -197,7 +197,7 @@ void qed_int_disable_post_isr_release(struct qed_dev *cdev);
 #define QED_SB_EVENT_MASK       0x0003
 
 #define SB_ALIGNED_SIZE(p_hwfn) \
-    ALIGNED_TYPE_SIZE(struct status_block, p_hwfn)
+    ALIGNED_TYPE_SIZE(struct status_block_e4, p_hwfn)
 
 #define QED_SB_INVALID_IDX      0xffff
...
@@ -62,22 +62,6 @@
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"
 
-static int
-qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
-                      u8 fw_event_code,
-                      u16 echo, union event_ring_data *data, u8 fw_return_code)
-{
-    if (p_hwfn->p_iscsi_info->event_cb) {
-        struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-        return p_iscsi->event_cb(p_iscsi->event_context,
-                                 fw_event_code, data);
-    } else {
-        DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
-        return -EINVAL;
-    }
-}
-
 struct qed_iscsi_conn {
     struct list_head list_entry;
     bool free_on_delete;
@@ -105,7 +89,7 @@ struct qed_iscsi_conn {
     u8 local_mac[6];
     u8 remote_mac[6];
     u16 vlan_id;
-    u8 tcp_flags;
+    u16 tcp_flags;
     u8 ip_version;
     u32 remote_ip[4];
     u32 local_ip[4];
@@ -122,7 +106,6 @@ struct qed_iscsi_conn {
     u32 ss_thresh;
     u16 srtt;
     u16 rtt_var;
-    u32 ts_time;
     u32 ts_recent;
     u32 ts_recent_age;
     u32 total_rt;
@@ -144,7 +127,6 @@ struct qed_iscsi_conn {
     u16 mss;
     u8 snd_wnd_scale;
     u8 rcv_wnd_scale;
-    u32 ts_ticks_per_second;
     u16 da_timeout_value;
     u8 ack_frequency;
@@ -161,6 +143,22 @@ struct qed_iscsi_conn {
     u8 abortive_dsconnect;
 };
 
+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+                      u8 fw_event_code,
+                      u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+    if (p_hwfn->p_iscsi_info->event_cb) {
+        struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+        return p_iscsi->event_cb(p_iscsi->event_context,
+                                 fw_event_code, data);
+    } else {
+        DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+        return -EINVAL;
+    }
+}
+
 static int
 qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
                         enum spq_mode comp_mode,
@@ -214,9 +212,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
     p_init->num_sq_pages_in_ring = p_params->num_sq_pages_in_ring;
     p_init->num_r2tq_pages_in_ring = p_params->num_r2tq_pages_in_ring;
     p_init->num_uhq_pages_in_ring = p_params->num_uhq_pages_in_ring;
-    p_init->ooo_enable = p_params->ooo_enable;
     p_init->ll2_rx_queue_id = p_hwfn->hw_info.resc_start[QED_LL2_QUEUE] +
                               p_params->ll2_ooo_queue_id;
+
     p_init->func_params.log_page_size = p_params->log_page_size;
     val = p_params->num_tasks;
     p_init->func_params.num_tasks = cpu_to_le16(val);
@@ -276,7 +274,7 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
     p_ramrod->tcp_init.two_msl_timer = cpu_to_le32(p_params->two_msl_timer);
     val = p_params->tx_sws_timer;
     p_ramrod->tcp_init.tx_sws_timer = cpu_to_le16(val);
-    p_ramrod->tcp_init.maxfinrt = p_params->max_fin_rt;
+    p_ramrod->tcp_init.max_fin_rt = p_params->max_fin_rt;
 
     p_hwfn->p_iscsi_info->event_context = event_context;
     p_hwfn->p_iscsi_info->event_cb = async_event_cb;
@@ -304,8 +302,8 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
     int rc = 0;
     u32 dval;
     u16 wval;
-    u8 i;
     u16 *p;
+    u8 i;
 
     /* Get SPQ entry */
     memset(&init_data, 0, sizeof(init_data));
@@ -371,7 +369,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
 
         p_tcp->vlan_id = cpu_to_le16(p_conn->vlan_id);
 
-        p_tcp->flags = p_conn->tcp_flags;
+        p_tcp->flags = cpu_to_le16(p_conn->tcp_flags);
         p_tcp->ip_version = p_conn->ip_version;
         for (i = 0; i < 4; i++) {
             dval = p_conn->remote_ip[i];
@@ -436,7 +434,7 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
         p_tcp2->remote_mac_addr_lo = swab16(get_unaligned(p + 2));
 
         p_tcp2->vlan_id = cpu_to_le16(p_conn->vlan_id);
-        p_tcp2->flags = p_conn->tcp_flags;
+        p_tcp2->flags = cpu_to_le16(p_conn->tcp_flags);
 
         p_tcp2->ip_version = p_conn->ip_version;
         for (i = 0; i < 4; i++) {
@@ -458,6 +456,11 @@ static int qed_sp_iscsi_conn_offload(struct qed_hwfn *p_hwfn,
         p_tcp2->syn_ip_payload_length = cpu_to_le16(wval);
         p_tcp2->syn_phy_addr_lo = DMA_LO_LE(p_conn->syn_phy_addr);
         p_tcp2->syn_phy_addr_hi = DMA_HI_LE(p_conn->syn_phy_addr);
+        p_tcp2->cwnd = cpu_to_le32(p_conn->cwnd);
+        p_tcp2->ka_max_probe_cnt = p_conn->ka_probe_cnt;
+        p_tcp2->ka_timeout = cpu_to_le32(p_conn->ka_timeout);
+        p_tcp2->max_rt_time = cpu_to_le32(p_conn->max_rt_time);
+        p_tcp2->ka_interval = cpu_to_le32(p_conn->ka_interval);
     }
 
     return qed_spq_post(p_hwfn, p_ent, NULL);
@@ -692,8 +695,7 @@ static void __iomem *qed_iscsi_get_secondary_bdq_prod(struct qed_hwfn *p_hwfn,
     }
 }
 
-static int qed_iscsi_setup_connection(struct qed_hwfn *p_hwfn,
-                                      struct qed_iscsi_conn *p_conn)
+static int qed_iscsi_setup_connection(struct qed_iscsi_conn *p_conn)
 {
     if (!p_conn->queue_cnts_virt_addr)
         goto nomem;
@@ -844,7 +846,7 @@ static int qed_iscsi_acquire_connection(struct qed_hwfn *p_hwfn,
 
     rc = qed_iscsi_allocate_connection(p_hwfn, &p_conn);
     if (!rc)
-        rc = qed_iscsi_setup_connection(p_hwfn, p_conn);
+        rc = qed_iscsi_setup_connection(p_conn);
 
     if (rc) {
         spin_lock_bh(&p_hwfn->p_iscsi_info->lock);
@@ -1294,7 +1296,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
     con->ss_thresh = conn_info->ss_thresh;
     con->srtt = conn_info->srtt;
     con->rtt_var = conn_info->rtt_var;
-    con->ts_time = conn_info->ts_time;
     con->ts_recent = conn_info->ts_recent;
     con->ts_recent_age = conn_info->ts_recent_age;
     con->total_rt = conn_info->total_rt;
@@ -1316,7 +1317,6 @@ static int qed_iscsi_offload_conn(struct qed_dev *cdev,
     con->mss = conn_info->mss;
     con->snd_wnd_scale = conn_info->snd_wnd_scale;
    con->rcv_wnd_scale = conn_info->rcv_wnd_scale;
-    con->ts_ticks_per_second = conn_info->ts_ticks_per_second;
     con->da_timeout_value = conn_info->da_timeout_value;
     con->ack_frequency = conn_info->ack_frequency;
...
@@ -64,14 +64,21 @@ struct mpa_v2_hdr {
 #define QED_IWARP_INVALID_TCP_CID      0xffffffff
 #define QED_IWARP_RCV_WND_SIZE_DEF     (256 * 1024)
-#define QED_IWARP_RCV_WND_SIZE_MIN     (64 * 1024)
+#define QED_IWARP_RCV_WND_SIZE_MIN     (0xffff)
 #define TIMESTAMP_HEADER_SIZE          (12)
+#define QED_IWARP_MAX_FIN_RT_DEFAULT   (2)
 
 #define QED_IWARP_TS_EN                BIT(0)
 #define QED_IWARP_DA_EN                BIT(1)
 #define QED_IWARP_PARAM_CRC_NEEDED     (1)
 #define QED_IWARP_PARAM_P2P            (1)
 
+#define QED_IWARP_DEF_MAX_RT_TIME      (0)
+#define QED_IWARP_DEF_CWND_FACTOR      (4)
+#define QED_IWARP_DEF_KA_MAX_PROBE_CNT (5)
+#define QED_IWARP_DEF_KA_TIMEOUT       (1200000)        /* 20 min */
+#define QED_IWARP_DEF_KA_INTERVAL      (1000)           /* 1 sec */
+
 static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                  u8 fw_event_code, u16 echo,
                                  union event_ring_data *data,
@@ -120,11 +127,17 @@ static void qed_iwarp_cid_cleaned(struct qed_hwfn *p_hwfn, u32 cid)
     spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
 }
 
-void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                              struct iwarp_init_func_params *p_ramrod)
+void
+qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
+                         struct iwarp_init_func_ramrod_data *p_ramrod)
 {
-    p_ramrod->ll2_ooo_q_index = RESC_START(p_hwfn, QED_LL2_QUEUE) +
-                                p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+    p_ramrod->iwarp.ll2_ooo_q_index =
+        RESC_START(p_hwfn, QED_LL2_QUEUE) +
+        p_hwfn->p_rdma_info->iwarp.ll2_ooo_handle;
+
+    p_ramrod->tcp.max_fin_rt = QED_IWARP_MAX_FIN_RT_DEFAULT;
+
+    return;
 }
 
 static int qed_iwarp_alloc_cid(struct qed_hwfn *p_hwfn, u32 *cid)
@@ -699,6 +712,12 @@ qed_iwarp_tcp_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
     tcp->ttl = 0x40;
     tcp->tos_or_tc = 0;
 
+    tcp->max_rt_time = QED_IWARP_DEF_MAX_RT_TIME;
+    tcp->cwnd = QED_IWARP_DEF_CWND_FACTOR * tcp->mss;
+    tcp->ka_max_probe_cnt = QED_IWARP_DEF_KA_MAX_PROBE_CNT;
+    tcp->ka_timeout = QED_IWARP_DEF_KA_TIMEOUT;
+    tcp->ka_interval = QED_IWARP_DEF_KA_INTERVAL;
+
     tcp->rcv_wnd_scale = (u8)p_hwfn->p_rdma_info->iwarp.rcv_wnd_scale;
     tcp->connect_mode = ep->connect_mode;
@@ -807,6 +826,7 @@ static int
 qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
 {
     struct iwarp_mpa_offload_ramrod_data *p_mpa_ramrod;
+    struct qed_iwarp_info *iwarp_info;
     struct qed_sp_init_data init_data;
     dma_addr_t async_output_phys;
     struct qed_spq_entry *p_ent;
@@ -874,6 +894,8 @@ qed_iwarp_mpa_offload(struct qed_hwfn *p_hwfn, struct qed_iwarp_ep *ep)
         p_mpa_ramrod->common.reject = 1;
     }
 
+    iwarp_info = &p_hwfn->p_rdma_info->iwarp;
+    p_mpa_ramrod->rcv_wnd = iwarp_info->rcv_wnd_size;
     p_mpa_ramrod->mode = ep->mpa_rev;
     SET_FIELD(p_mpa_ramrod->rtr_pref,
               IWARP_MPA_OFFLOAD_RAMROD_DATA_RTR_SUPPORTED, ep->rtr_type);
@@ -2745,6 +2767,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
     /* value 0 is used for ilog2(QED_IWARP_RCV_WND_SIZE_MIN) */
     iwarp_info->rcv_wnd_scale = ilog2(rcv_wnd_size) -
         ilog2(QED_IWARP_RCV_WND_SIZE_MIN);
+    iwarp_info->rcv_wnd_size = rcv_wnd_size >> iwarp_info->rcv_wnd_scale;
     iwarp_info->crc_needed = QED_IWARP_PARAM_CRC_NEEDED;
     iwarp_info->mpa_rev = MPA_NEGOTIATION_TYPE_ENHANCED;
...
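The new keepalive defaults above are in milliseconds, matching the inline
comments. If one wanted to document that at compile time, a build-time check
(which must live inside a function) would look like this; it is illustrative
only and not part of the commit:

    BUILD_BUG_ON(QED_IWARP_DEF_KA_TIMEOUT != 20 * 60 * 1000);   /* 20 min */
    BUILD_BUG_ON(QED_IWARP_DEF_KA_INTERVAL != 1000);            /* 1 sec */

Note also that QED_IWARP_RCV_WND_SIZE_MIN drops from 64K to 0xffff,
presumably because an unscaled TCP receive window is a 16-bit quantity.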
@@ -95,6 +95,7 @@ struct qed_iwarp_info {
     spinlock_t iw_lock;     /* for iwarp resources */
     spinlock_t qp_lock;     /* for teardown races */
     u32 rcv_wnd_scale;
+    u16 rcv_wnd_size;
     u16 max_mtu;
     u8 mac_addr[ETH_ALEN];
     u8 crc_needed;
@@ -187,7 +188,7 @@ int qed_iwarp_setup(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
                     struct qed_rdma_start_in_params *params);
 
 void qed_iwarp_init_fw_ramrod(struct qed_hwfn *p_hwfn,
-                              struct iwarp_init_func_params *p_ramrod);
+                              struct iwarp_init_func_ramrod_data *p_ramrod);
 
 int qed_iwarp_stop(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
...
@@ -1969,33 +1969,45 @@ void qed_reset_vport_stats(struct qed_dev *cdev)
     _qed_get_vport_stats(cdev, cdev->reset_stats);
 }
 
-static void
-qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                        struct qed_arfs_config_params *p_cfg_params)
+static enum gft_profile_type
+qed_arfs_mode_to_hsi(enum qed_filter_config_mode mode)
+{
+    if (mode == QED_FILTER_CONFIG_MODE_5_TUPLE)
+        return GFT_PROFILE_TYPE_4_TUPLE;
+    if (mode == QED_FILTER_CONFIG_MODE_IP_DEST)
+        return GFT_PROFILE_TYPE_IP_DST_PORT;
+    return GFT_PROFILE_TYPE_L4_DST_PORT;
+}
+
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             struct qed_arfs_config_params *p_cfg_params)
 {
-    if (p_cfg_params->arfs_enable) {
-        qed_set_rfs_mode_enable(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
-                                p_cfg_params->tcp, p_cfg_params->udp,
-                                p_cfg_params->ipv4, p_cfg_params->ipv6);
-        DP_VERBOSE(p_hwfn, QED_MSG_SP,
-                   "tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s\n",
+    if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) {
+        qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
+                       p_cfg_params->tcp,
+                       p_cfg_params->udp,
+                       p_cfg_params->ipv4,
+                       p_cfg_params->ipv6,
+                       qed_arfs_mode_to_hsi(p_cfg_params->mode));
+        DP_VERBOSE(p_hwfn,
+                   QED_MSG_SP,
+                   "Configured Filtering: tcp = %s, udp = %s, ipv4 = %s, ipv6 =%s mode=%08x\n",
                    p_cfg_params->tcp ? "Enable" : "Disable",
                    p_cfg_params->udp ? "Enable" : "Disable",
                    p_cfg_params->ipv4 ? "Enable" : "Disable",
-                   p_cfg_params->ipv6 ? "Enable" : "Disable");
+                   p_cfg_params->ipv6 ? "Enable" : "Disable",
+                   (u32)p_cfg_params->mode);
     } else {
-        qed_set_rfs_mode_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
+        DP_VERBOSE(p_hwfn, QED_MSG_SP, "Disabled Filtering\n");
+        qed_gft_disable(p_hwfn, p_ptt, p_hwfn->rel_pf_id);
     }
-
-    DP_VERBOSE(p_hwfn, QED_MSG_SP, "Configured ARFS mode : %s\n",
-               p_cfg_params->arfs_enable ? "Enable" : "Disable");
 }
 
-static int
-qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
-                                struct qed_spq_comp_cb *p_cb,
-                                dma_addr_t p_addr, u16 length, u16 qid,
-                                u8 vport_id, bool b_is_add)
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+                                struct qed_spq_comp_cb *p_cb,
+                                struct qed_ntuple_filter_params *p_params)
 {
     struct rx_update_gft_filter_data *p_ramrod = NULL;
     struct qed_spq_entry *p_ent = NULL;
@@ -2004,13 +2016,15 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
     u8 abs_vport_id = 0;
     int rc = -EINVAL;
 
-    rc = qed_fw_vport(p_hwfn, vport_id, &abs_vport_id);
+    rc = qed_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id);
     if (rc)
         return rc;
 
-    rc = qed_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id);
-    if (rc)
-        return rc;
+    if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+        rc = qed_fw_l2_queue(p_hwfn, p_params->qid, &abs_rx_q_id);
+        if (rc)
+            return rc;
+    }
 
     /* Get SPQ entry */
     memset(&init_data, 0, sizeof(init_data));
@@ -2032,17 +2046,27 @@ qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
         return rc;
 
     p_ramrod = &p_ent->ramrod.rx_update_gft;
-    DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_addr);
-    p_ramrod->pkt_hdr_length = cpu_to_le16(length);
-    p_ramrod->rx_qid_or_action_icid = cpu_to_le16(abs_rx_q_id);
-    p_ramrod->vport_id = abs_vport_id;
-    p_ramrod->filter_type = RFS_FILTER_TYPE;
-    p_ramrod->filter_action = b_is_add ? GFT_ADD_FILTER : GFT_DELETE_FILTER;
+
+    DMA_REGPAIR_LE(p_ramrod->pkt_hdr_addr, p_params->addr);
+    p_ramrod->pkt_hdr_length = cpu_to_le16(p_params->length);
+
+    if (p_params->qid != QED_RFS_NTUPLE_QID_RSS) {
+        p_ramrod->rx_qid_valid = 1;
+        p_ramrod->rx_qid = cpu_to_le16(abs_rx_q_id);
+    }
+
+    p_ramrod->flow_id_valid = 0;
+    p_ramrod->flow_id = 0;
+
+    p_ramrod->vport_id = cpu_to_le16((u16)abs_vport_id);
+    p_ramrod->filter_action = p_params->b_is_add ? GFT_ADD_FILTER
+        : GFT_DELETE_FILTER;
 
     DP_VERBOSE(p_hwfn, QED_MSG_SP,
                "V[%0x], Q[%04x] - %s filter from 0x%llx [length %04xb]\n",
                abs_vport_id, abs_rx_q_id,
-               b_is_add ? "Adding" : "Removing", (u64)p_addr, length);
+               p_params->b_is_add ? "Adding" : "Removing",
+               (u64)p_params->addr, p_params->length);
 
     return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -2743,7 +2767,8 @@ static int qed_configure_filter(struct qed_dev *cdev,
     }
 }
 
-static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
+static int qed_configure_arfs_searcher(struct qed_dev *cdev,
+                                       enum qed_filter_config_mode mode)
 {
     struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
     struct qed_arfs_config_params arfs_config_params;
@@ -2753,8 +2778,7 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
     arfs_config_params.udp = true;
     arfs_config_params.ipv4 = true;
     arfs_config_params.ipv6 = true;
-    arfs_config_params.arfs_enable = en_searcher;
-
+    arfs_config_params.mode = mode;
     qed_arfs_mode_configure(p_hwfn, p_hwfn->p_arfs_ptt,
                             &arfs_config_params);
     return 0;
@@ -2762,8 +2786,8 @@ static int qed_configure_arfs_searcher(struct qed_dev *cdev, bool en_searcher)
 
 static void
 qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
-                             void *cookie, union event_ring_data *data,
-                             u8 fw_return_code)
+                             void *cookie,
+                             union event_ring_data *data, u8 fw_return_code)
 {
     struct qed_common_cb_ops *op = p_hwfn->cdev->protocol_ops.common;
     void *dev = p_hwfn->cdev->ops_cookie;
@@ -2771,10 +2795,10 @@ qed_arfs_sp_response_handler(struct qed_hwfn *p_hwfn,
     op->arfs_filter_op(dev, cookie, fw_return_code);
 }
 
-static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
-                                         dma_addr_t mapping, u16 length,
-                                         u16 vport_id, u16 rx_queue_id,
-                                         bool add_filter)
+static int
+qed_ntuple_arfs_filter_config(struct qed_dev *cdev,
+                              void *cookie,
+                              struct qed_ntuple_filter_params *params)
 {
     struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
     struct qed_spq_comp_cb cb;
@@ -2783,9 +2807,19 @@ static int qed_ntuple_arfs_filter_config(struct qed_dev *cdev, void *cookie,
     cb.function = qed_arfs_sp_response_handler;
     cb.cookie = cookie;
 
-    rc = qed_configure_rfs_ntuple_filter(p_hwfn, p_hwfn->p_arfs_ptt,
-                                         &cb, mapping, length, rx_queue_id,
-                                         vport_id, add_filter);
+    if (params->b_is_vf) {
+        if (!qed_iov_is_valid_vfid(p_hwfn, params->vf_id, false,
+                                   false)) {
+            DP_INFO(p_hwfn, "vfid 0x%02x is out of bounds\n",
                    params->vf_id);
+            return rc;
+        }
+
+        params->vport_id = params->vf_id + 1;
+        params->qid = QED_RFS_NTUPLE_QID_RSS;
+    }
+
+    rc = qed_configure_rfs_ntuple_filter(p_hwfn, &cb, params);
     if (rc)
         DP_NOTICE(p_hwfn,
                   "Failed to issue a-RFS filter configuration\n");
...
@@ -190,7 +190,7 @@ struct qed_arfs_config_params {
     bool udp;
     bool ipv4;
     bool ipv6;
-    bool arfs_enable;
+    enum qed_filter_config_mode mode;
 };
 
 struct qed_sp_vport_update_params {
@@ -277,6 +277,37 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
 
 void qed_reset_vport_stats(struct qed_dev *cdev);
 
+/**
+ * *@brief qed_arfs_mode_configure -
+ *
+ **Enable or disable rfs mode. It must accept atleast one of tcp or udp true
+ **and atleast one of ipv4 or ipv6 true to enable rfs mode.
+ *
+ **@param p_hwfn
+ **@param p_ptt
+ **@param p_cfg_params - arfs mode configuration parameters.
+ *
+ */
+void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn,
+                             struct qed_ptt *p_ptt,
+                             struct qed_arfs_config_params *p_cfg_params);
+
+/**
+ * @brief - qed_configure_rfs_ntuple_filter
+ *
+ * This ramrod should be used to add or remove arfs hw filter
+ *
+ * @params p_hwfn
+ * @params p_cb - Used for QED_SPQ_MODE_CB,where client would initialize
+ *                it with cookie and callback function address, if not
+ *                using this mode then client must pass NULL.
+ * @params p_params
+ */
+int
+qed_configure_rfs_ntuple_filter(struct qed_hwfn *p_hwfn,
+                                struct qed_spq_comp_cb *p_cb,
+                                struct qed_ntuple_filter_params *p_params);
+
 #define MAX_QUEUES_PER_QZONE    (sizeof(unsigned long) * 8)
 #define QED_QUEUE_CID_SELF     (0xff)
...
@@ -406,6 +406,9 @@ static void qed_ll2_rxq_parse_gsi(struct qed_hwfn *p_hwfn,
     data->opaque_data_0 = le32_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrhi);
     data->opaque_data_1 = le16_to_cpu(p_cqe->rx_cqe_gsi.src_mac_addrlo);
     data->u.data_length_error = p_cqe->rx_cqe_gsi.data_length_error;
+    data->qp_id = le16_to_cpu(p_cqe->rx_cqe_gsi.qp_id);
+
+    data->src_qp = le32_to_cpu(p_cqe->rx_cqe_gsi.src_qp);
 }
 
 static void qed_ll2_rxq_parse_reg(struct qed_hwfn *p_hwfn,
@@ -927,7 +930,8 @@ static int qed_sp_ll2_rx_queue_start(struct qed_hwfn *p_hwfn,
                    qed_chain_get_pbl_phys(&p_rx->rcq_chain));
 
     p_ramrod->drop_ttl0_flg = p_ll2_conn->input.rx_drop_ttl0_flg;
-    p_ramrod->inner_vlan_removal_en = p_ll2_conn->input.rx_vlan_removal_en;
+    p_ramrod->inner_vlan_stripping_en =
+        p_ll2_conn->input.rx_vlan_removal_en;
     p_ramrod->queue_id = p_ll2_conn->queue_id;
     p_ramrod->main_func_queue = p_ll2_conn->main_func_queue ? 1 : 0;
 
@@ -1299,8 +1303,20 @@ int qed_ll2_acquire_connection(void *cxt, struct qed_ll2_acquire_data *data)
 
     memcpy(&p_ll2_info->input, &data->input, sizeof(p_ll2_info->input));
 
-    p_ll2_info->tx_dest = (data->input.tx_dest == QED_LL2_TX_DEST_NW) ?
-                          CORE_TX_DEST_NW : CORE_TX_DEST_LB;
+    switch (data->input.tx_dest) {
+    case QED_LL2_TX_DEST_NW:
+        p_ll2_info->tx_dest = CORE_TX_DEST_NW;
+        break;
+    case QED_LL2_TX_DEST_LB:
+        p_ll2_info->tx_dest = CORE_TX_DEST_LB;
+        break;
+    case QED_LL2_TX_DEST_DROP:
+        p_ll2_info->tx_dest = CORE_TX_DEST_DROP;
+        break;
+    default:
+        return -EINVAL;
+    }
+
     if (data->input.conn_type == QED_LL2_TYPE_OOO ||
         data->input.secondary_queue)
         p_ll2_info->main_func_queue = false;
@@ -2281,8 +2297,7 @@ static int qed_ll2_start(struct qed_dev *cdev, struct qed_ll2_params *params)
         goto release_terminate;
     }
 
-    if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-        cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable) {
+    if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI) {
         DP_VERBOSE(cdev, QED_MSG_STORAGE, "Starting OOO LL2 queue\n");
         rc = qed_ll2_start_ooo(cdev, params);
         if (rc) {
@@ -2340,8 +2355,7 @@ static int qed_ll2_stop(struct qed_dev *cdev)
     qed_ptt_release(QED_LEADING_HWFN(cdev), p_ptt);
     eth_zero_addr(cdev->ll2_mac_address);
 
-    if (cdev->hwfns[0].hw_info.personality == QED_PCI_ISCSI &&
-        cdev->hwfns[0].pf_params.iscsi_pf_params.ooo_enable)
+    if (QED_LEADING_HWFN(cdev)->hw_info.personality == QED_PCI_ISCSI)
         qed_ll2_stop_ooo(cdev);
 
     rc = qed_ll2_terminate_connection(QED_LEADING_HWFN(cdev),
...
@@ -2234,7 +2234,7 @@ int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
                                         DRV_MSG_CODE_NVM_READ_NVRAM,
                                         addr + offset +
                                         (bytes_to_copy <<
-                                         DRV_MB_PARAM_NVM_LEN_SHIFT),
+                                         DRV_MB_PARAM_NVM_LEN_OFFSET),
                                         &resp, &resp_param,
                                         &read_len,
                                         (u32 *)(p_buf + offset));
...
@@ -553,7 +553,7 @@ static int qed_rdma_start_fw(struct qed_hwfn *p_hwfn,
     if (QED_IS_IWARP_PERSONALITY(p_hwfn)) {
         qed_iwarp_init_fw_ramrod(p_hwfn,
-                                 &p_ent->ramrod.iwarp_init_func.iwarp);
+                                 &p_ent->ramrod.iwarp_init_func);
         p_ramrod = &p_ent->ramrod.iwarp_init_func.rdma;
     } else {
         p_ramrod = &p_ent->ramrod.roce_init_func.rdma;
...
@@ -351,7 +351,9 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
         DP_NOTICE(p_hwfn, "Unsupported MF mode, init as DEFAULT\n");
         p_ramrod->mf_mode = MF_NPAR;
     }
-    p_ramrod->outer_tag = p_hwfn->hw_info.ovlan;
+
+    p_ramrod->outer_tag_config.outer_tag.tci =
+        cpu_to_le16(p_hwfn->hw_info.ovlan);
 
     /* Place EQ address in RAMROD */
     DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
@@ -396,8 +398,8 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
     p_ramrod->hsi_fp_ver.minor_ver_arr[ETH_VER_KEY] = ETH_HSI_VER_MINOR;
 
     DP_VERBOSE(p_hwfn, QED_MSG_SPQ,
-               "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n",
-               sb, sb_index, p_ramrod->outer_tag);
+               "Setting event_ring_sb [id %04x index %02x], outer_tag.tci [%d]\n",
+               sb, sb_index, p_ramrod->outer_tag_config.outer_tag.tci);
 
     rc = qed_spq_post(p_hwfn, p_ent, NULL);
...
@@ -215,7 +215,7 @@ static int qed_spq_fill_entry(struct qed_hwfn *p_hwfn,
 static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
                                   struct qed_spq *p_spq)
 {
-    struct core_conn_context *p_cxt;
+    struct e4_core_conn_context *p_cxt;
     struct qed_cxt_info cxt_info;
     u16 physical_q;
     int rc;
@@ -233,11 +233,11 @@ static void qed_spq_hw_initialize(struct qed_hwfn *p_hwfn,
     p_cxt = cxt_info.p_cxt;
 
     SET_FIELD(p_cxt->xstorm_ag_context.flags10,
-              XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
+              E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1);
     SET_FIELD(p_cxt->xstorm_ag_context.flags1,
-              XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
+              E4_XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1);
     SET_FIELD(p_cxt->xstorm_ag_context.flags9,
-              XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
+              E4_XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1);
 
     /* QM physical queue */
     physical_q = qed_get_cm_pq_idx(p_hwfn, PQ_FLAGS_LB);
...
@@ -273,6 +273,23 @@ enum qed_iov_wq_flag {
 };
 
 #ifdef CONFIG_QED_SRIOV
+/**
+ * @brief Check if given VF ID @vfid is valid
+ *        w.r.t. @b_enabled_only value
+ *        if b_enabled_only = true - only enabled VF id is valid
+ *        else any VF id less than max_vfs is valid
+ *
+ * @param p_hwfn
+ * @param rel_vf_id - Relative VF ID
+ * @param b_enabled_only - consider only enabled VF
+ * @param b_non_malicious - true iff we want to validate vf isn't malicious.
+ *
+ * @return bool - true for valid VF ID
+ */
+bool qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                           int rel_vf_id,
+                           bool b_enabled_only, bool b_non_malicious);
+
 /**
  * @brief - Given a VF index, return index of next [including that] active VF.
  *
@@ -376,6 +393,13 @@ void qed_vf_start_iov_wq(struct qed_dev *cdev);
 int qed_sriov_disable(struct qed_dev *cdev, bool pci_enabled);
 void qed_inform_vf_link_state(struct qed_hwfn *hwfn);
 #else
+static inline bool
+qed_iov_is_valid_vfid(struct qed_hwfn *p_hwfn,
+                      int rel_vf_id, bool b_enabled_only, bool b_non_malicious)
+{
+    return false;
+}
+
 static inline u16 qed_iov_get_next_active_vf(struct qed_hwfn *p_hwfn,
                                              u16 rel_vf_id)
 {
...