Commit 7b9ffc76 authored by Anirudh Venkataramanan, committed by Jeff Kirsher

ice: Add code for DCB initialization part 3/4

This patch adds a new function ice_pf_dcb_cfg (and related helpers)
which applies the DCB configuration obtained from the firmware. As
part of this, VSIs/netdevs are updated with traffic class information.
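
For orientation, the initialization call chain introduced here (all of
these functions appear in the diff below) is roughly:

  ice_init_pf_dcb()
      ice_init_dcb()            query DCBX configuration from FW
      ice_dcb_init_cfg()
          ice_pf_dcb_cfg()      apply the config under rtnl_lock
              ice_pf_dcb_recfg()
                  ice_vsi_cfg_tc()   per-VSI TC and queue-map update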

This patch requires a bit of a refactor of existing code.

1. For a MIB change event, the associated VSI is closed and brought up
   again. The gap between closing and opening the VSI can cause a race
   condition. Fix this by grabbing the rtnl_lock prior to closing the
   VSI and releasing it only after the VSI has been re-opened during the
   MIB change event.
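
   In ice_pf_dcb_cfg (shown in full below) this becomes the following
   pattern, where the new bool argument tells the VSI helpers that rtnl
   is already held:

       rtnl_lock();
       ice_pf_dis_all_vsi(pf, true);
       /* push new DCB config to FW, query port ETS, reconfigure VSIs */
       ice_pf_ena_all_vsi(pf, true);
       rtnl_unlock();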

2. ice_sched_query_elem is used in ice_sched.c and with this patch, in
   ice_dcb.c as well. However, ice_dcb.c is not built when CONFIG_DCB is
   unset. This results in namespace warnings (ice_sched.o: Externally
   defined symbols with no external references) when CONFIG_DCB is unset.
   To avoid these warnings, move ice_sched_query_elem from ice_sched.c
   to ice_common.c.
Signed-off-by: Anirudh Venkataramanan <anirudh.venkataramanan@intel.com>
Tested-by: Andrew Bowers <andrewx.bowers@intel.com>
Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
parent 0ebd3ff1
@@ -378,6 +378,9 @@ struct ice_pf {
struct ice_hw_port_stats stats_prev;
struct ice_hw hw;
u8 stat_prev_loaded; /* has previous stats been loaded */
#ifdef CONFIG_DCB
u16 dcbx_cap;
#endif /* CONFIG_DCB */
u32 tx_timeout_count;
unsigned long tx_timeout_last_recovery;
u32 tx_timeout_recovery_level;
@@ -414,12 +417,6 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
wr32(hw, GLINT_DYN_CTL(vector), val);
}
static inline void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
vsi->tc_cfg.ena_tc = ICE_DFLT_TRAFFIC_CLASS;
vsi->tc_cfg.numtc = 1;
}
void ice_set_ethtool_ops(struct net_device *netdev);
int ice_up(struct ice_vsi *vsi);
int ice_down(struct ice_vsi *vsi);
@@ -428,5 +425,9 @@ int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
void ice_napi_del(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */
#endif /* _ICE_H_ */
@@ -747,6 +747,32 @@ struct ice_aqc_delete_elem {
__le32 teid[1];
};
/* Query Port ETS (indirect 0x040E)
*
* This indirect command is used to query port TC node configuration.
*/
struct ice_aqc_query_port_ets {
__le32 port_teid;
__le32 reserved;
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_port_ets_elem {
u8 tc_valid_bits;
u8 reserved[3];
/* 3 bits for UP per TC 0-7, 4th byte reserved */
__le32 up2tc;
u8 tc_bw_share[8];
__le32 port_eir_prof_id;
__le32 port_cir_prof_id;
/* 3 bits per Node priority to TC 0-7, 4th byte reserved */
__le32 tc_node_prio;
#define ICE_TC_NODE_PRIO_S 0x4
u8 reserved1[4];
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
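/* Illustration only, not part of this patch: given the 3-bits-per-UP
 * layout noted above, the TC assigned to user priority 'up' could be
 * pulled out of up2tc along these lines (helper name is hypothetical):
 */
static u8 ice_port_ets_up2tc(struct ice_aqc_port_ets_elem *buf, u8 up)
{
	u32 up2tc = le32_to_cpu(buf->up2tc);

	return (up2tc >> (up * 3)) & 0x7;
}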
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
@@ -1212,6 +1238,23 @@ struct ice_aqc_get_cee_dcb_cfg_resp {
u8 reserved[12];
};
/* Set Local LLDP MIB (indirect 0x0A08)
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx
*/
struct ice_aqc_lldp_set_local_mib {
u8 type;
#define SET_LOCAL_MIB_TYPE_DCBX_M BIT(0)
#define SET_LOCAL_MIB_TYPE_LOCAL_MIB 0
#define SET_LOCAL_MIB_TYPE_CEE_M BIT(1)
#define SET_LOCAL_MIB_TYPE_CEE_WILLING 0
#define SET_LOCAL_MIB_TYPE_CEE_NON_WILLING SET_LOCAL_MIB_TYPE_CEE_M
u8 reserved0;
__le16 length;
u8 reserved1[4];
__le32 addr_high;
__le32 addr_low;
};
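/* The wrapper that drives this descriptor lives in ice_dcb.c, whose diff
 * is collapsed in this view. A sketch following the driver's usual AQ
 * command pattern -- the body is a best guess, not the verbatim
 * implementation:
 */
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
		    struct ice_sq_cd *cd)
{
	struct ice_aqc_lldp_set_local_mib *cmd;
	struct ice_aq_desc desc;

	cmd = &desc.params.lldp_set_mib;

	if (buf_size == 0 || !buf)
		return ICE_ERR_PARAM;

	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);

	/* indirect command; FW reads the packed MIB from the buffer */
	desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
	desc.datalen = cpu_to_le16(buf_size);

	cmd->type = mib_type;
	cmd->length = cpu_to_le16(buf_size);

	return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}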
/* Stop/Start LLDP Agent (direct 0x0A09)
* Used for stopping/starting specific LLDP agent. e.g. DCBx.
* The same structure is used for the response, with the command field
@@ -1481,11 +1524,13 @@ struct ice_aq_desc {
struct ice_aqc_get_topo get_topo;
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_nvm nvm;
struct ice_aqc_pf_vf_msg virt;
struct ice_aqc_lldp_get_mib lldp_get_mib;
struct ice_aqc_lldp_set_mib_change lldp_set_event;
struct ice_aqc_lldp_start lldp_start;
struct ice_aqc_lldp_set_local_mib lldp_set_mib;
struct ice_aqc_lldp_stop_start_specific_agent lldp_agent_ctrl;
struct ice_aqc_get_set_rss_lut get_set_rss_lut;
struct ice_aqc_get_set_rss_key get_set_rss_key;
@@ -1573,6 +1618,7 @@ enum ice_adminq_opc {
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_query_sched_res = 0x0412,
@@ -1595,6 +1641,7 @@ enum ice_adminq_opc {
ice_aqc_opc_lldp_set_mib_change = 0x0A01,
ice_aqc_opc_lldp_start = 0x0A06,
ice_aqc_opc_get_cee_dcb_cfg = 0x0A07,
ice_aqc_opc_lldp_set_local_mib = 0x0A08,
ice_aqc_opc_lldp_stop_start_specific_agent = 0x0A09,
/* RSS commands */
...
@@ -3106,3 +3106,28 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
/* to manage the potential roll-over */
*cur_stat = (new_data + BIT_ULL(32)) - *prev_stat;
}
/**
* ice_sched_query_elem - query element information from HW
* @hw: pointer to the HW struct
* @node_teid: node TEID to be queried
* @buf: buffer to element information
*
* This function queries HW element information
*/
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
buf->generic[0].node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
@@ -118,4 +118,7 @@ ice_stat_update40(struct ice_hw *hw, u32 hireg, u32 loreg,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
#endif /* _ICE_COMMON_H_ */
This diff is collapsed.
@@ -70,6 +70,18 @@
#define ICE_IEEE_APP_PRIO_S 5
#define ICE_IEEE_APP_PRIO_M (0x7 << ICE_IEEE_APP_PRIO_S)
/* TLV definitions for preparing MIB */
#define ICE_IEEE_TLV_ID_ETS_CFG 3
#define ICE_IEEE_TLV_ID_ETS_REC 4
#define ICE_IEEE_TLV_ID_PFC_CFG 5
#define ICE_IEEE_TLV_ID_APP_PRI 6
#define ICE_TLV_ID_END_OF_LLDPPDU 7
#define ICE_TLV_ID_START ICE_IEEE_TLV_ID_ETS_CFG
#define ICE_IEEE_ETS_TLV_LEN 25
#define ICE_IEEE_PFC_TLV_LEN 6
#define ICE_IEEE_APP_TLV_LEN 11
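/* These lengths match the IEEE 802.1Qaz information-string sizes:
 * ETS: 3 (OUI) + 1 (subtype) + 1 (willing/CBS/max TC) + 4 (priority
 * assignment table) + 8 (TC bandwidth table) + 8 (TSA table) = 25 octets.
 * PFC: 3 + 1 + 1 (willing/MBC/capability) + 1 (enable bitmap) = 6 octets.
 * APP: 5-octet OUI/subtype/reserved header plus 3 octets per app entry.
 */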
/* IEEE 802.1AB LLDP Organization specific TLV */
struct ice_lldp_org_tlv {
__be16 typelen;
@@ -108,7 +120,12 @@ struct ice_cee_app_prio {
} __packed;
u8 ice_get_dcbx_status(struct ice_hw *hw);
enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi);
enum ice_status ice_init_dcb(struct ice_hw *hw);
enum ice_status
ice_query_port_ets(struct ice_port_info *pi,
struct ice_aqc_port_ets_elem *buf, u16 buf_size,
struct ice_sq_cd *cmd_details);
#ifdef CONFIG_DCB
enum ice_status ice_aq_start_lldp(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
...
@@ -3,6 +3,189 @@
#include "ice_dcb_lib.h"
/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg)
{
u8 i, num_tc, ena_tc = 1;
num_tc = ice_dcb_get_num_tc(dcbcfg);
for (i = 0; i < num_tc; i++)
ena_tc |= BIT(i);
return ena_tc;
}
/**
* ice_dcb_get_num_tc - Get the number of TCs from DCBX config
* @dcbcfg: config to retrieve number of TCs from
*/
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
{
bool tc_unused = false;
u8 num_tc = 0;
u8 ret = 0;
int i;
/* Scan the ETS Config Priority Table to find traffic classes
* enabled and create a bitmask of enabled TCs
*/
for (i = 0; i < CEE_DCBX_MAX_PRIO; i++)
num_tc |= BIT(dcbcfg->etscfg.prio_table[i]);
/* Scan bitmask for contiguous TCs starting with TC0 */
for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
if (num_tc & BIT(i)) {
if (!tc_unused) {
ret++;
} else {
pr_err("Non-contiguous TCs - Disabling DCB\n");
return 1;
}
} else {
tc_unused = true;
}
}
/* There is always at least 1 TC */
if (!ret)
ret = 1;
return ret;
}
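/* Worked example: a prio_table mapping UPs 0-7 to TCs 0,1,2,0,0,0,0,0
 * builds the bitmap 0x7; the scan finds TC0-TC2 contiguous and returns 3.
 * A table that uses only TCs 0 and 2 leaves BIT(1) clear, trips the
 * non-contiguous check, and DCB falls back to a single TC.
 */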
/**
* ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
* @pf: pointer to the PF struct
*
* Assumed caller has already disabled all VSIs before
* calling this function. Reconfiguring DCB based on
* local_dcbx_cfg.
*/
static void ice_pf_dcb_recfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
u8 tc_map = 0;
int v, ret;
/* Update each VSI */
ice_for_each_vsi(pf, v) {
if (!pf->vsi[v])
continue;
if (pf->vsi[v]->type == ICE_VSI_PF)
tc_map = ice_dcb_get_ena_tc(dcbcfg);
else
tc_map = ICE_DFLT_TRAFFIC_CLASS;
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
if (ret)
dev_err(&pf->pdev->dev,
"Failed to config TC for VSI index: %d\n",
pf->vsi[v]->idx);
else
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
}
}
/**
* ice_pf_dcb_cfg - Apply new DCB configuration
* @pf: pointer to the PF struct
* @new_cfg: DCBX config to apply
*/
static int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg)
{
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
int ret = 0;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
set_bit(ICE_FLAG_DCB_ENA, pf->flags);
} else {
dev_dbg(&pf->pdev->dev, "DCB tagging disabled (num TC = 1)\n");
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
if (!memcmp(new_cfg, curr_cfg, sizeof(*new_cfg))) {
dev_dbg(&pf->pdev->dev, "No change in DCB config required\n");
return ret;
}
/* Store old config in case FW config fails */
old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
*/
if (pf->hw.port_info->is_sw_lldp) {
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(&pf->pdev->dev, "Set DCB Config failed\n");
/* Restore previous settings to local config */
memcpy(curr_cfg, old_cfg, sizeof(*curr_cfg));
goto out;
}
}
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(&pf->pdev->dev, "Query Port ETS failed\n");
goto out;
}
ice_pf_dcb_recfg(pf);
out:
ice_pf_ena_all_vsi(pf, true);
rtnl_unlock();
devm_kfree(&pf->pdev->dev, old_cfg);
return ret;
}
/**
* ice_dcb_init_cfg - set the initial DCB config in SW
* @pf: pf to apply config to
*/
static int ice_dcb_init_cfg(struct ice_pf *pf)
{
struct ice_dcbx_cfg *newcfg;
struct ice_port_info *pi;
int ret = 0;
pi = pf->hw.port_info;
newcfg = devm_kzalloc(&pf->pdev->dev, sizeof(*newcfg), GFP_KERNEL);
if (!newcfg)
return -ENOMEM;
memcpy(newcfg, &pi->local_dcbx_cfg, sizeof(*newcfg));
memset(&pi->local_dcbx_cfg, 0, sizeof(*newcfg));
dev_info(&pf->pdev->dev, "Configuring initial DCB values\n");
if (ice_pf_dcb_cfg(pf, newcfg))
ret = -EINVAL;
devm_kfree(&pf->pdev->dev, newcfg);
return ret;
}
/**
* ice_init_pf_dcb - initialize DCB for a PF
* @pf: pf to initialize DCB for
@@ -12,6 +195,7 @@ int ice_init_pf_dcb(struct ice_pf *pf)
struct device *dev = &pf->pdev->dev;
struct ice_port_info *port_info;
struct ice_hw *hw = &pf->hw;
int err;
port_info = hw->port_info;
@@ -38,5 +222,23 @@ int ice_init_pf_dcb(struct ice_pf *pf)
ice_aq_start_stop_dcbx(hw, true, &dcbx_status, NULL);
}
err = ice_init_dcb(hw);
if (err)
goto dcb_init_err;
/* DCBX in FW and LLDP enabled in FW */
pf->dcbx_cap = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_IEEE;
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
err = ice_dcb_init_cfg(pf);
if (err)
goto dcb_init_err;
dev_info(&pf->pdev->dev, "DCBX offload supported\n");
return err;
dcb_init_err:
dev_err(dev, "DCB init failed\n");
return err;
}
@@ -8,12 +8,25 @@
#include "ice_lib.h"
#ifdef CONFIG_DCB
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
int ice_init_pf_dcb(struct ice_pf *pf);
#else
static inline u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return ICE_DFLT_TRAFFIC_CLASS;
}
static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
{
return 1;
}
static inline int ice_init_pf_dcb(struct ice_pf *pf)
{
dev_dbg(&pf->pdev->dev, "DCB not supported\n");
return -EOPNOTSUPP;
}
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
@@ -3,6 +3,7 @@
#include "ice.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
* ice_setup_rx_ctx - Configure a receive ring context
@@ -1301,7 +1302,11 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
* through the MSI-X enabling code. On a constrained vector budget, we map Tx
* and Rx rings to the vector as "efficiently" as possible.
*/
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#else
static void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi)
#endif /* CONFIG_DCB */
{
int q_vectors = vsi->num_q_vectors;
int tx_rings_rem, rx_rings_rem;
@@ -2172,6 +2177,14 @@ int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc)
return -EIO;
}
static void ice_vsi_set_tc_cfg(struct ice_vsi *vsi)
{
struct ice_dcbx_cfg *cfg = &vsi->port_info->local_dcbx_cfg;
vsi->tc_cfg.ena_tc = ice_dcb_get_ena_tc(cfg);
vsi->tc_cfg.numtc = ice_dcb_get_num_tc(cfg);
}
/**
* ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure
@@ -2815,3 +2828,125 @@ bool ice_is_reset_in_progress(unsigned long *state)
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
}
#ifdef CONFIG_DCB
/**
* ice_vsi_update_q_map - update our copy of the VSI info with new queue map
* @vsi: VSI being configured
* @ctx: the context buffer returned from AQ VSI update command
*/
static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
{
vsi->info.mapping_flags = ctx->info.mapping_flags;
memcpy(&vsi->info.q_mapping, &ctx->info.q_mapping,
sizeof(vsi->info.q_mapping));
memcpy(&vsi->info.tc_mapping, ctx->info.tc_mapping,
sizeof(vsi->info.tc_mapping));
}
/**
* ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
* @vsi: the VSI being configured
* @ena_tc: TC map to be enabled
*/
static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
{
struct net_device *netdev = vsi->netdev;
struct ice_pf *pf = vsi->back;
struct ice_dcbx_cfg *dcbcfg;
u8 netdev_tc;
int i;
if (!netdev)
return;
if (!ena_tc) {
netdev_reset_tc(netdev);
return;
}
if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
return;
dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
ice_for_each_traffic_class(i)
if (vsi->tc_cfg.ena_tc & BIT(i))
netdev_set_tc_queue(netdev,
vsi->tc_cfg.tc_info[i].netdev_tc,
vsi->tc_cfg.tc_info[i].qcount_tx,
vsi->tc_cfg.tc_info[i].qoffset);
for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
u8 ets_tc = dcbcfg->etscfg.prio_table[i];
/* Get the mapped netdev TC# for the UP */
netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
netdev_set_prio_tc_map(netdev, i, netdev_tc);
}
}
/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
*
* VSI queues expected to be quiesced before calling this function
*/
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
enum ice_status status;
int i, ret = 0;
u8 num_tc = 0;
ice_for_each_traffic_class(i) {
/* build bitmap of enabled TCs */
if (ena_tc & BIT(i))
num_tc++;
/* populate max_txqs per TC */
max_txqs[i] = pf->num_lan_tx;
}
vsi->tc_cfg.ena_tc = ena_tc;
vsi->tc_cfg.numtc = num_tc;
ctx = devm_kzalloc(&pf->pdev->dev, sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->vf_num = 0;
ctx->info = vsi->info;
ice_vsi_setup_q_map(vsi, ctx);
/* must indicate which sections of the VSI context are being modified */
ctx->info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_RXQ_MAP_VALID);
status = ice_update_vsi(&pf->hw, vsi->idx, ctx, NULL);
if (status) {
dev_info(&pf->pdev->dev, "Failed VSI Update\n");
ret = -EIO;
goto out;
}
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
dev_err(&pf->pdev->dev,
"VSI %d failed TC config, error %d\n",
vsi->vsi_num, status);
ret = -EIO;
goto out;
}
ice_vsi_update_q_map(vsi, ctx);
vsi->info.valid_sections = 0;
ice_vsi_cfg_netdev_tc(vsi, ena_tc);
out:
devm_kfree(&pf->pdev->dev, ctx);
return ret;
}
#endif /* CONFIG_DCB */
@@ -41,6 +41,10 @@ void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc);
#endif /* CONFIG_DCB */
struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id);
@@ -62,6 +66,10 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi);
void ice_vsi_put_qs(struct ice_vsi *vsi);
#ifdef CONFIG_DCB
void ice_vsi_map_rings_to_vectors(struct ice_vsi *vsi);
#endif /* CONFIG_DCB */
void ice_vsi_dis_irq(struct ice_vsi *vsi);
void ice_vsi_free_irq(struct ice_vsi *vsi);
...
@@ -31,7 +31,6 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
static struct workqueue_struct *ice_wq;
static const struct net_device_ops ice_netdev_ops;
static void ice_pf_dis_all_vsi(struct ice_pf *pf);
static void ice_rebuild(struct ice_pf *pf);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -397,6 +396,51 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
if (!locked) {
rtnl_lock();
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
rtnl_unlock();
} else {
vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
}
} else {
ice_vsi_close(vsi);
}
}
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], locked);
}
/**
* ice_prepare_for_reset - prep for the core to reset
* @pf: board private structure
@@ -417,7 +461,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
ice_vc_notify_reset(pf);
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
if (hw->port_info)
ice_sched_clear_port(hw->port_info);
@@ -3581,46 +3625,30 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
return err;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
struct net_device *netd = vsi->netdev;
if (netif_running(vsi->netdev)) {
if (locked) {
err = netd->netdev_ops->ndo_open(netd);
} else {
rtnl_lock();
err = netd->netdev_ops->ndo_open(netd);
rtnl_unlock();
}
} else {
err = ice_vsi_open(vsi);
}
@@ -3629,30 +3657,22 @@ static int ice_ena_vsi(struct ice_vsi *vsi)
return err;
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
*/
static void ice_pf_dis_all_vsi(struct ice_pf *pf)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
ice_dis_vsi(pf->vsi[v], false);
}
/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#else
static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
@@ -3800,7 +3820,7 @@ static void ice_rebuild(struct ice_pf *pf)
}
/* restart the VSIs that were rebuilt and running before the reset */
err = ice_pf_ena_all_vsi(pf, false);
if (err) {
dev_err(&pf->pdev->dev, "error enabling VSIs\n");
/* no need to disable VSIs in tear down path in ice_rebuild()
...
@@ -127,7 +127,7 @@ ice_aqc_send_sched_elem_cmd(struct ice_hw *hw, enum ice_adminq_opc cmd_opc,
*
* Query scheduling elements (0x0404)
*/
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd)
@@ -137,31 +137,6 @@ ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
elems_ret, cd);
}
/**
* ice_sched_query_elem - query element information from HW
* @hw: pointer to the HW struct
* @node_teid: node TEID to be queried
* @buf: buffer to element information
*
* This function queries HW element information
*/
static enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf)
{
u16 buf_size, num_elem_ret = 0;
enum ice_status status;
buf_size = sizeof(*buf);
memset(buf, 0, buf_size);
buf->generic[0].node_teid = cpu_to_le32(node_teid);
status = ice_aq_query_sched_elems(hw, 1, buf, buf_size, &num_elem_ret,
NULL);
if (status || num_elem_ret != 1)
ice_debug(hw, ICE_DBG_SCHED, "query element failed\n");
return status;
}
/**
* ice_sched_add_node - Insert the Tx scheduler node in SW DB
* @pi: port information structure
...
@@ -24,6 +24,10 @@ struct ice_sched_agg_info {
};
/* FW AQ command calls */
enum ice_status
ice_aq_query_sched_elems(struct ice_hw *hw, u16 elems_req,
struct ice_aqc_get_elem *buf, u16 buf_size,
u16 *elems_ret, struct ice_sq_cd *cd);
enum ice_status ice_sched_init_port(struct ice_port_info *pi);
enum ice_status ice_sched_query_res_alloc(struct ice_hw *hw);
void ice_sched_clear_port(struct ice_port_info *pi);
...
@@ -215,6 +215,8 @@ struct ice_nvm_info {
#define ice_for_each_traffic_class(_i) \
for ((_i) = 0; (_i) < ICE_MAX_TRAFFIC_CLASS; (_i)++)
#define ICE_INVAL_TEID 0xFFFFFFFF
struct ice_sched_node {
struct ice_sched_node *parent;
struct ice_sched_node *sibling; /* next sibling in the same layer */
...