Commit 4914a4f6 authored by David S. Miller's avatar David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2021-04-08

This series contains updates to ice driver only.

Chinh adds retrying of sending some AQ commands when receiving EBUSY
error.

Victor modifies how nodes are added to reduce stack usage.

Ani renames some variables to either follow spec naming or to be in line
with naming in the rest of the driver. Ignores EMODE error as there are
cases where this error is expected. Performs some cleanup such as
removing unnecessary checks, doing variable assignments over copies, and
removing unneeded variables. Revises some error codes returned in link
settings to be more appropriate. He also implements support for a new
firmware option to get the default link configuration, which accounts for
any needed NVM based overrides for PHY configuration. He also removes
the rx_gro_dropped stat as the value no longer changes.

Jeb removes setting specific link modes on firmwares that no longer
require it.

Brett removes unnecessary checks when adding and removing VLANs.

Tony fixes a checkpatch warning for unnecessary blank line.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents 4667bf71 2e20521b
...@@ -265,7 +265,6 @@ struct ice_vsi { ...@@ -265,7 +265,6 @@ struct ice_vsi {
u32 tx_busy; u32 tx_busy;
u32 rx_buf_failed; u32 rx_buf_failed;
u32 rx_page_failed; u32 rx_page_failed;
u32 rx_gro_dropped;
u16 num_q_vectors; u16 num_q_vectors;
u16 base_vector; /* IRQ base for OS reserved vectors */ u16 base_vector; /* IRQ base for OS reserved vectors */
enum ice_vsi_type type; enum ice_vsi_type type;
......
...@@ -877,16 +877,18 @@ struct ice_aqc_get_phy_caps { ...@@ -877,16 +877,18 @@ struct ice_aqc_get_phy_caps {
__le16 param0; __le16 param0;
/* 18.0 - Report qualified modules */ /* 18.0 - Report qualified modules */
#define ICE_AQC_GET_PHY_RQM BIT(0) #define ICE_AQC_GET_PHY_RQM BIT(0)
/* 18.1 - 18.2 : Report mode /* 18.1 - 18.3 : Report mode
* 00b - Report NVM capabilities * 000b - Report NVM capabilities
* 01b - Report topology capabilities * 001b - Report topology capabilities
* 10b - Report SW configured * 010b - Report SW configured
* 100b - Report default capabilities
*/ */
#define ICE_AQC_REPORT_MODE_S 1 #define ICE_AQC_REPORT_MODE_S 1
#define ICE_AQC_REPORT_MODE_M (3 << ICE_AQC_REPORT_MODE_S) #define ICE_AQC_REPORT_MODE_M (7 << ICE_AQC_REPORT_MODE_S)
#define ICE_AQC_REPORT_NVM_CAP 0 #define ICE_AQC_REPORT_TOPO_CAP_NO_MEDIA 0
#define ICE_AQC_REPORT_TOPO_CAP BIT(1) #define ICE_AQC_REPORT_TOPO_CAP_MEDIA BIT(1)
#define ICE_AQC_REPORT_SW_CFG BIT(2) #define ICE_AQC_REPORT_ACTIVE_CFG BIT(2)
#define ICE_AQC_REPORT_DFLT_CFG BIT(3)
__le32 reserved1; __le32 reserved1;
__le32 addr_high; __le32 addr_high;
__le32 addr_low; __le32 addr_low;
......
...@@ -158,6 +158,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ...@@ -158,6 +158,10 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
hw = pi->hw; hw = pi->hw;
if (report_mode == ICE_AQC_REPORT_DFLT_CFG &&
!ice_fw_supports_report_dflt_cfg(hw))
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps); ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_phy_caps);
if (qual_mods) if (qual_mods)
...@@ -191,7 +195,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode, ...@@ -191,7 +195,7 @@ ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n", ice_debug(hw, ICE_DBG_LINK, " module_type[2] = 0x%x\n",
pcaps->module_type[2]); pcaps->module_type[2]);
if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP) { if (!status && report_mode == ICE_AQC_REPORT_TOPO_CAP_MEDIA) {
pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low); pi->phy.phy_type_low = le64_to_cpu(pcaps->phy_type_low);
pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high); pi->phy.phy_type_high = le64_to_cpu(pcaps->phy_type_high);
memcpy(pi->phy.link_info.module_type, &pcaps->module_type, memcpy(pi->phy.link_info.module_type, &pcaps->module_type,
...@@ -922,7 +926,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ...@@ -922,7 +926,8 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/* Initialize port_info struct with PHY capabilities */ /* Initialize port_info struct with PHY capabilities */
status = ice_aq_get_phy_caps(hw->port_info, false, status = ice_aq_get_phy_caps(hw->port_info, false,
ICE_AQC_REPORT_TOPO_CAP, pcaps, NULL); ICE_AQC_REPORT_TOPO_CAP_MEDIA, pcaps,
NULL);
devm_kfree(ice_hw_to_dev(hw), pcaps); devm_kfree(ice_hw_to_dev(hw), pcaps);
if (status) if (status)
dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n", dev_warn(ice_hw_to_dev(hw), "Get PHY capabilities failed status = %d, continuing anyway\n",
...@@ -1292,6 +1297,85 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = { ...@@ -1292,6 +1297,85 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
*/ */
DEFINE_MUTEX(ice_global_cfg_lock_sw); DEFINE_MUTEX(ice_global_cfg_lock_sw);
/**
 * ice_should_retry_sq_send_cmd - check if an AQ opcode warrants a retry
 * @opcode: AQ opcode
 *
 * Decide if we should retry the send command routine for the ATQ, depending
 * on the opcode.
 */
static bool ice_should_retry_sq_send_cmd(u16 opcode)
{
	/* Only a small allow-list of commands is worth re-sending on EBUSY */
	return opcode == ice_aqc_opc_get_link_topo ||
	       opcode == ice_aqc_opc_lldp_stop ||
	       opcode == ice_aqc_opc_lldp_start ||
	       opcode == ice_aqc_opc_lldp_filter_ctrl;
}
/**
 * ice_sq_send_cmd_retry - send command to Control Queue (ATQ)
 * @hw: pointer to the HW struct
 * @cq: pointer to the specific Control queue
 * @desc: prefilled descriptor describing the command
 * @buf: buffer to use for indirect commands (or NULL for direct commands)
 * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
 * @cd: pointer to command details structure
 *
 * Retry sending the FW Admin Queue command, multiple times, to the FW Admin
 * Queue if the EBUSY AQ error is returned.
 */
static enum ice_status
ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq,
		      struct ice_aq_desc *desc, void *buf, u16 buf_size,
		      struct ice_sq_cd *cd)
{
	struct ice_aq_desc desc_cpy;
	enum ice_status status;
	bool is_cmd_for_retry;
	u8 *buf_cpy = NULL;
	u8 idx = 0;
	u16 opcode;

	opcode = le16_to_cpu(desc->opcode);
	is_cmd_for_retry = ice_should_retry_sq_send_cmd(opcode);
	memset(&desc_cpy, 0, sizeof(desc_cpy));

	/* Snapshot the descriptor (and indirect buffer, if any) up front so
	 * each retry can restore them before re-sending; the loop below
	 * copies these snapshots back into @desc/@buf on every retry.
	 */
	if (is_cmd_for_retry) {
		if (buf) {
			buf_cpy = kzalloc(buf_size, GFP_KERNEL);
			if (!buf_cpy)
				return ICE_ERR_NO_MEMORY;
		}

		memcpy(&desc_cpy, desc, sizeof(desc_cpy));
	}

	do {
		status = ice_sq_send_cmd(hw, cq, desc, buf, buf_size, cd);

		/* Stop on success, on any non-EBUSY failure, or when the
		 * opcode is not on the retry allow-list.
		 * NOTE(review): the EBUSY check reads hw->adminq rather than
		 * the @cq actually used for the send -- confirm every caller
		 * passes the admin queue here.
		 */
		if (!is_cmd_for_retry || !status ||
		    hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY)
			break;

		/* restore descriptor/buffer to their pre-send contents */
		if (buf_cpy)
			memcpy(buf, buf_cpy, buf_size);

		memcpy(desc, &desc_cpy, sizeof(desc_cpy));

		mdelay(ICE_SQ_SEND_DELAY_TIME_MS);
	} while (++idx < ICE_SQ_SEND_MAX_EXECUTE);

	/* kfree(NULL) is a no-op, so this is safe for direct commands */
	kfree(buf_cpy);

	return status;
}
/** /**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue * ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
...@@ -1333,7 +1417,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf, ...@@ -1333,7 +1417,7 @@ ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
break; break;
} }
status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd); status = ice_sq_send_cmd_retry(hw, &hw->adminq, desc, buf, buf_size, cd);
if (lock_acquired) if (lock_acquired)
mutex_unlock(&ice_global_cfg_lock_sw); mutex_unlock(&ice_global_cfg_lock_sw);
...@@ -2655,7 +2739,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi) ...@@ -2655,7 +2739,7 @@ enum ice_status ice_update_link_info(struct ice_port_info *pi)
if (!pcaps) if (!pcaps)
return ICE_ERR_NO_MEMORY; return ICE_ERR_NO_MEMORY;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP_MEDIA,
pcaps, NULL); pcaps, NULL);
devm_kfree(ice_hw_to_dev(hw), pcaps); devm_kfree(ice_hw_to_dev(hw), pcaps);
...@@ -2815,8 +2899,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) ...@@ -2815,8 +2899,8 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
return ICE_ERR_NO_MEMORY; return ICE_ERR_NO_MEMORY;
/* Get the current PHY config */ /* Get the current PHY config */
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps, status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_ACTIVE_CFG,
NULL); pcaps, NULL);
if (status) { if (status) {
*aq_failures = ICE_SET_FC_AQ_FAIL_GET; *aq_failures = ICE_SET_FC_AQ_FAIL_GET;
goto out; goto out;
...@@ -2929,17 +3013,6 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi, ...@@ -2929,17 +3013,6 @@ ice_copy_phy_caps_to_cfg(struct ice_port_info *pi,
cfg->link_fec_opt = caps->link_fec_options; cfg->link_fec_opt = caps->link_fec_options;
cfg->module_compliance_enforcement = cfg->module_compliance_enforcement =
caps->module_compliance_enforcement; caps->module_compliance_enforcement;
if (ice_fw_supports_link_override(pi->hw)) {
struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi))
return;
if (tlv.options & ICE_LINK_OVERRIDE_STRICT_MODE)
cfg->module_compliance_enforcement |=
ICE_LINK_OVERRIDE_STRICT_MODE;
}
} }
/** /**
...@@ -2954,16 +3027,21 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, ...@@ -2954,16 +3027,21 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
{ {
struct ice_aqc_get_phy_caps_data *pcaps; struct ice_aqc_get_phy_caps_data *pcaps;
enum ice_status status; enum ice_status status;
struct ice_hw *hw;
if (!pi || !cfg) if (!pi || !cfg)
return ICE_ERR_BAD_PTR; return ICE_ERR_BAD_PTR;
hw = pi->hw;
pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL); pcaps = kzalloc(sizeof(*pcaps), GFP_KERNEL);
if (!pcaps) if (!pcaps)
return ICE_ERR_NO_MEMORY; return ICE_ERR_NO_MEMORY;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP, pcaps, status = ice_aq_get_phy_caps(pi, false,
NULL); (ice_fw_supports_report_dflt_cfg(hw) ?
ICE_AQC_REPORT_DFLT_CFG :
ICE_AQC_REPORT_TOPO_CAP_MEDIA), pcaps, NULL);
if (status) if (status)
goto out; goto out;
...@@ -3002,7 +3080,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg, ...@@ -3002,7 +3080,8 @@ ice_cfg_phy_fec(struct ice_port_info *pi, struct ice_aqc_set_phy_cfg_data *cfg,
break; break;
} }
if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(pi->hw)) { if (fec == ICE_FEC_AUTO && ice_fw_supports_link_override(hw) &&
!ice_fw_supports_report_dflt_cfg(hw)) {
struct ice_link_default_override_tlv tlv; struct ice_link_default_override_tlv tlv;
if (ice_get_link_default_override(&tlv, pi)) if (ice_get_link_default_override(&tlv, pi))
...@@ -4412,3 +4491,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add) ...@@ -4412,3 +4491,23 @@ ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add)
return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL); return ice_aq_send_cmd(hw, &desc, NULL, 0, NULL);
} }
/**
 * ice_fw_supports_report_dflt_cfg - check FW support for "report default
 * configuration"
 * @hw: pointer to the hardware structure
 *
 * Checks if the firmware supports report default configuration
 */
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw)
{
	/* Supported from FW API version
	 * ICE_FW_API_REPORT_DFLT_CFG_MAJ.MIN.PATCH onwards: compare the
	 * running API (major, minor, patch) tuple lexicographically.
	 */
	if (hw->api_maj_ver != ICE_FW_API_REPORT_DFLT_CFG_MAJ)
		return hw->api_maj_ver > ICE_FW_API_REPORT_DFLT_CFG_MAJ;

	if (hw->api_min_ver != ICE_FW_API_REPORT_DFLT_CFG_MIN)
		return hw->api_min_ver > ICE_FW_API_REPORT_DFLT_CFG_MIN;

	return hw->api_patch >= ICE_FW_API_REPORT_DFLT_CFG_PATCH;
}
...@@ -11,6 +11,9 @@ ...@@ -11,6 +11,9 @@
#include "ice_switch.h" #include "ice_switch.h"
#include <linux/avf/virtchnl.h> #include <linux/avf/virtchnl.h>
#define ICE_SQ_SEND_DELAY_TIME_MS 10
#define ICE_SQ_SEND_MAX_EXECUTE 3
enum ice_status ice_init_hw(struct ice_hw *hw); enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw); void ice_deinit_hw(struct ice_hw *hw);
enum ice_status ice_check_reset(struct ice_hw *hw); enum ice_status ice_check_reset(struct ice_hw *hw);
...@@ -176,4 +179,5 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size, ...@@ -176,4 +179,5 @@ ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw); bool ice_fw_supports_lldp_fltr_ctrl(struct ice_hw *hw);
enum ice_status enum ice_status
ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add); ice_lldp_fltr_add_remove(struct ice_hw *hw, u16 vsi_num, bool add);
bool ice_fw_supports_report_dflt_cfg(struct ice_hw *hw);
#endif /* _ICE_COMMON_H_ */ #endif /* _ICE_COMMON_H_ */
...@@ -892,7 +892,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq) ...@@ -892,7 +892,7 @@ static bool ice_sq_done(struct ice_hw *hw, struct ice_ctl_q_info *cq)
* ice_sq_send_cmd - send command to Control Queue (ATQ) * ice_sq_send_cmd - send command to Control Queue (ATQ)
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @cq: pointer to the specific Control queue * @cq: pointer to the specific Control queue
* @desc: prefilled descriptor describing the command (non DMA mem) * @desc: prefilled descriptor describing the command
* @buf: buffer to use for indirect commands (or NULL for direct commands) * @buf: buffer to use for indirect commands (or NULL for direct commands)
* @buf_size: size of buffer for indirect commands (or 0 for direct commands) * @buf_size: size of buffer for indirect commands (or 0 for direct commands)
* @cd: pointer to command details structure * @cd: pointer to command details structure
......
...@@ -3423,3 +3423,40 @@ int ice_clear_dflt_vsi(struct ice_sw *sw) ...@@ -3423,3 +3423,40 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
return 0; return 0;
} }
/**
 * ice_set_link - turn on/off physical link
 * @vsi: VSI to modify physical link on
 * @ena: turn on/off physical link
 *
 * Returns 0 on success (including the tolerated EMODE case), -EINVAL if
 * @vsi is not the PF VSI, or -EIO when the AQ call fails with a non-AQ
 * error status.
 */
int ice_set_link(struct ice_vsi *vsi, bool ena)
{
	struct device *dev = ice_pf_to_dev(vsi->back);
	struct ice_port_info *pi = vsi->port_info;
	struct ice_hw *hw = pi->hw;
	enum ice_status status;

	/* link control is only valid on the PF's own VSI */
	if (vsi->type != ICE_VSI_PF)
		return -EINVAL;

	status = ice_aq_set_link_restart_an(pi, ena, NULL);

	/* if link is owned by manageability, FW will return ICE_AQ_RC_EMODE.
	 * this is not a fatal error, so print a warning message and return
	 * a success code. Return an error if FW returns an error code other
	 * than ICE_AQ_RC_EMODE
	 *
	 * NOTE(review): as written, an ICE_ERR_AQ_ERROR status whose AQ error
	 * is NOT EMODE also falls through to "return 0" with no message --
	 * confirm whether such AQ errors should return -EIO per the comment
	 * above.
	 */
	if (status == ICE_ERR_AQ_ERROR) {
		if (hw->adminq.sq_last_status == ICE_AQ_RC_EMODE)
			dev_warn(dev, "can't set link to %s, err %s aq_err %s. not fatal, continuing\n",
				 (ena ? "ON" : "OFF"), ice_stat_str(status),
				 ice_aq_str(hw->adminq.sq_last_status));
	} else if (status) {
		dev_err(dev, "can't set link to %s, err %s aq_err %s\n",
			(ena ? "ON" : "OFF"), ice_stat_str(status),
			ice_aq_str(hw->adminq.sq_last_status));
		return -EIO;
	}

	return 0;
}
...@@ -45,6 +45,8 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); ...@@ -45,6 +45,8 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create); void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
int ice_set_link(struct ice_vsi *vsi, bool ena);
#ifdef CONFIG_DCB #ifdef CONFIG_DCB
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc); int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */ #endif /* CONFIG_DCB */
......
This diff is collapsed.
...@@ -919,7 +919,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ...@@ -919,7 +919,7 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
} }
/** /**
* ice_sched_add_nodes_to_layer - Add nodes to a given layer * ice_sched_add_nodes_to_hw_layer - Add nodes to HW layer
* @pi: port information structure * @pi: port information structure
* @tc_node: pointer to TC node * @tc_node: pointer to TC node
* @parent: pointer to parent node * @parent: pointer to parent node
...@@ -928,82 +928,106 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ...@@ -928,82 +928,106 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
* @first_node_teid: pointer to the first node TEID * @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added * @num_nodes_added: pointer to number of nodes added
* *
* This function add nodes to a given layer. * Add nodes into specific HW layer.
*/ */
static enum ice_status static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi, ice_sched_add_nodes_to_hw_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node, struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer, struct ice_sched_node *parent, u8 layer,
u16 num_nodes, u32 *first_node_teid, u16 num_nodes, u32 *first_node_teid,
u16 *num_nodes_added) u16 *num_nodes_added)
{ {
u32 *first_teid_ptr = first_node_teid; u16 max_child_nodes;
u16 new_num_nodes, max_child_nodes;
enum ice_status status = 0;
struct ice_hw *hw = pi->hw;
u16 num_added = 0;
u32 temp;
*num_nodes_added = 0; *num_nodes_added = 0;
if (!num_nodes) if (!num_nodes)
return status; return 0;
if (!parent || layer < hw->sw_entry_point_layer) if (!parent || layer < pi->hw->sw_entry_point_layer)
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
/* max children per node per layer */ /* max children per node per layer */
max_child_nodes = hw->max_children[parent->tx_sched_layer]; max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
/* current number of children + required nodes exceed max children ? */ /* current number of children + required nodes exceed max children */
if ((parent->num_children + num_nodes) > max_child_nodes) { if ((parent->num_children + num_nodes) > max_child_nodes) {
/* Fail if the parent is a TC node */ /* Fail if the parent is a TC node */
if (parent == tc_node) if (parent == tc_node)
return ICE_ERR_CFG; return ICE_ERR_CFG;
return ICE_ERR_MAX_LIMIT;
}
return ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
num_nodes_added, first_node_teid);
}
/**
* ice_sched_add_nodes_to_layer - Add nodes to a given layer
* @pi: port information structure
* @tc_node: pointer to TC node
* @parent: pointer to parent node
* @layer: layer number to add nodes
* @num_nodes: number of nodes to be added
* @first_node_teid: pointer to the first node TEID
* @num_nodes_added: pointer to number of nodes added
*
* This function add nodes to a given layer.
*/
static enum ice_status
ice_sched_add_nodes_to_layer(struct ice_port_info *pi,
struct ice_sched_node *tc_node,
struct ice_sched_node *parent, u8 layer,
u16 num_nodes, u32 *first_node_teid,
u16 *num_nodes_added)
{
u32 *first_teid_ptr = first_node_teid;
u16 new_num_nodes = num_nodes;
enum ice_status status = 0;
*num_nodes_added = 0;
while (*num_nodes_added < num_nodes) {
u16 max_child_nodes, num_added = 0;
u32 temp;
status = ice_sched_add_nodes_to_hw_layer(pi, tc_node, parent,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
if (!status)
*num_nodes_added += num_added;
/* added more nodes than requested ? */
if (*num_nodes_added > num_nodes) {
ice_debug(pi->hw, ICE_DBG_SCHED, "added extra nodes %d %d\n", num_nodes,
*num_nodes_added);
status = ICE_ERR_CFG;
break;
}
/* break if all the nodes are added successfully */
if (!status && (*num_nodes_added == num_nodes))
break;
/* break if the error is not max limit */
if (status && status != ICE_ERR_MAX_LIMIT)
break;
/* Exceeded the max children */
max_child_nodes = pi->hw->max_children[parent->tx_sched_layer];
/* utilize all the spaces if the parent is not full */ /* utilize all the spaces if the parent is not full */
if (parent->num_children < max_child_nodes) { if (parent->num_children < max_child_nodes) {
new_num_nodes = max_child_nodes - parent->num_children; new_num_nodes = max_child_nodes - parent->num_children;
/* this recursion is intentional, and wouldn't } else {
* go more than 2 calls /* This parent is full, try the next sibling */
parent = parent->sibling;
/* Don't modify the first node TEID memory if the
* first node was added already in the above call.
* Instead send some temp memory for all other
* recursive calls.
*/ */
status = ice_sched_add_nodes_to_layer(pi, tc_node, if (num_added)
parent, layer, first_teid_ptr = &temp;
new_num_nodes,
first_node_teid,
&num_added);
if (status)
return status;
*num_nodes_added += num_added; new_num_nodes = num_nodes - *num_nodes_added;
} }
/* Don't modify the first node TEID memory if the first node was
* added already in the above call. Instead send some temp
* memory for all other recursive calls.
*/
if (num_added)
first_teid_ptr = &temp;
new_num_nodes = num_nodes - num_added;
/* This parent is full, try the next sibling */
parent = parent->sibling;
/* this recursion is intentional, for 1024 queues
* per VSI, it goes max of 16 iterations.
* 1024 / 8 = 128 layer 8 nodes
* 128 /8 = 16 (add 8 nodes per iteration)
*/
status = ice_sched_add_nodes_to_layer(pi, tc_node, parent,
layer, new_num_nodes,
first_teid_ptr,
&num_added);
*num_nodes_added += num_added;
return status;
} }
status = ice_sched_add_elems(pi, tc_node, parent, layer, num_nodes,
num_nodes_added, first_node_teid);
return status; return status;
} }
......
...@@ -192,7 +192,6 @@ struct ice_rxq_stats { ...@@ -192,7 +192,6 @@ struct ice_rxq_stats {
u64 non_eop_descs; u64 non_eop_descs;
u64 alloc_page_failed; u64 alloc_page_failed;
u64 alloc_buf_failed; u64 alloc_buf_failed;
u64 gro_dropped; /* GRO returned dropped */
}; };
enum ice_ring_state_t { enum ice_ring_state_t {
......
...@@ -941,4 +941,9 @@ struct ice_aq_get_set_rss_lut_params { ...@@ -941,4 +941,9 @@ struct ice_aq_get_set_rss_lut_params {
#define ICE_FW_API_LLDP_FLTR_MIN 7 #define ICE_FW_API_LLDP_FLTR_MIN 7
#define ICE_FW_API_LLDP_FLTR_PATCH 1 #define ICE_FW_API_LLDP_FLTR_PATCH 1
/* AQ API version for report default configuration */
#define ICE_FW_API_REPORT_DFLT_CFG_MAJ 1
#define ICE_FW_API_REPORT_DFLT_CFG_MIN 7
#define ICE_FW_API_REPORT_DFLT_CFG_PATCH 3
#endif /* _ICE_TYPE_H_ */ #endif /* _ICE_TYPE_H_ */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment