Commit 8e4a2adc authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-08-26

This series contains updates to ice driver only.

Usha fixes the statistics reported on 4 port NICs, which were incorrect
because the wrong port identifier was used when reading the hardware
counters.

Victor fixes an issue when trying to traverse to the first node of a
requested layer by adding a sibling head pointer for each layer per
traffic class.

Anirudh cleans up the locking and logic for enabling and disabling
VSIs to make it more consistent.  Updates the driver to do dynamic
allocation of the queue management bitmaps and arrays, rather than
statically allocating them, which consumes more memory than required.
Also refactors the logic in ice_ena_msix_range() for clarity and adds
checks for when the requested resources exceed what is available.

Jesse updates the debugging print statements to make them more useful
when dealing with link and PHY related issues.

Krzysztof adds a local variable to the VSI rebuild path to improve
readability.

Akeem limits the reporting of MDD events from VFs so that the kernel
log is not clogged up with MDD events which are duplicate or potentially
false positives.  Also fixes a reset issue that could leave the system
in a state resolvable only by a reboot, by testing whether the VF is in
a disabled state during a reset.

Michal adds a check on the number of queue pairs to avoid trying to
access memory that has not been allocated.

Jake fixes a static analysis warning caused by a cast of a u8 to
unsigned long, by updating ice_is_tc_ena() to take an unsigned long so
that a cast is not necessary.

Colin Ian King fixes a potential infinite loop where a u8 is being
compared to an int.

Maciej refactors the queue handling functions that work on queue arrays
so that the logic can be done for a single queue.

Paul adds support for VFs to enable and disable single queues.

Henry fixes the order of operations in ice_remove(), which was trying to
use AdminQ operations that had already been disabled.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3894793e ae2bdbb4
@@ -73,8 +73,6 @@ extern const char ice_drv_ver[];
 #define ICE_MBXRQ_LEN		512
 #define ICE_MIN_MSIX		2
 #define ICE_NO_VSI		0xffff
-#define ICE_MAX_TXQS		2048
-#define ICE_MAX_RXQS		2048
 #define ICE_VSI_MAP_CONTIG	0
 #define ICE_VSI_MAP_SCATTER	1
 #define ICE_MAX_SCATTER_TXQS	16
@@ -284,8 +282,8 @@ struct ice_vsi {
 	/* queue information */
 	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
 	u8 rx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
-	u16 txq_map[ICE_MAX_TXQS];	/* index in pf->avail_txqs */
-	u16 rxq_map[ICE_MAX_RXQS];	/* index in pf->avail_rxqs */
+	u16 *txq_map;			/* index in pf->avail_txqs */
+	u16 *rxq_map;			/* index in pf->avail_rxqs */
 	u16 alloc_txq;			/* Allocated Tx queues */
 	u16 num_txq;			/* Used Tx queues */
 	u16 alloc_rxq;			/* Allocated Rx queues */
@@ -355,9 +353,9 @@ struct ice_pf {
 	u16 num_vf_qps;		/* num queue pairs per VF */
 	u16 num_vf_msix;	/* num vectors per VF */
 	DECLARE_BITMAP(state, __ICE_STATE_NBITS);
-	DECLARE_BITMAP(avail_txqs, ICE_MAX_TXQS);
-	DECLARE_BITMAP(avail_rxqs, ICE_MAX_RXQS);
 	DECLARE_BITMAP(flags, ICE_PF_FLAGS_NBITS);
+	unsigned long *avail_txqs;	/* bitmap to track PF Tx queue usage */
+	unsigned long *avail_rxqs;	/* bitmap to track PF Rx queue usage */
 	unsigned long serv_tmr_period;
 	unsigned long serv_tmr_prev;
 	struct timer_list serv_tmr;
@@ -368,6 +366,8 @@ struct ice_pf {
 	u32 hw_csum_rx_error;
 	u32 oicr_idx;		/* Other interrupt cause MSIX vector index */
 	u32 num_avail_sw_msix;	/* remaining MSIX SW vectors left unclaimed */
+	u16 max_pf_txqs;	/* Total Tx queues PF wide */
+	u16 max_pf_rxqs;	/* Total Rx queues PF wide */
 	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
 	u16 num_lan_tx;		/* num LAN Tx queues setup */
 	u16 num_lan_rx;		/* num LAN Rx queues setup */
...
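The ice.h hunks above replace the fixed 2048-entry maps and bitmaps with pointers sized from the device's capabilities at probe time. A userspace C analogue of that allocate-then-track bitmap pattern (helper names and the 288-queue figure are illustrative only, not from the driver):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

#define BITS_PER_LONG	(CHAR_BIT * sizeof(unsigned long))

/* rough equivalent of bitmap_zalloc(): a zeroed bitmap of nbits bits */
static unsigned long *bitmap_zalloc_user(unsigned int nbits)
{
	size_t nlongs = (nbits + BITS_PER_LONG - 1) / BITS_PER_LONG;

	return calloc(nlongs, sizeof(unsigned long));
}

/* find and claim the first free queue index, or -1 if none are left */
static int claim_first_free(unsigned long *map, unsigned int nbits)
{
	unsigned int i;

	for (i = 0; i < nbits; i++) {
		unsigned long mask = 1UL << (i % BITS_PER_LONG);

		if (!(map[i / BITS_PER_LONG] & mask)) {
			map[i / BITS_PER_LONG] |= mask;
			return (int)i;
		}
	}
	return -1;
}

int main(void)
{
	unsigned int max_txqs = 288;	/* e.g. read from device capabilities */
	unsigned long *avail_txqs = bitmap_zalloc_user(max_txqs);

	if (!avail_txqs)
		return 1;
	printf("claimed queue %d\n", claim_first_free(avail_txqs, max_txqs));
	free(avail_txqs);
	return 0;
}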
@@ -263,21 +263,23 @@ enum ice_status
 ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 		     struct ice_link_status *link, struct ice_sq_cd *cd)
 {
-	struct ice_link_status *hw_link_info_old, *hw_link_info;
 	struct ice_aqc_get_link_status_data link_data = { 0 };
 	struct ice_aqc_get_link_status *resp;
+	struct ice_link_status *li_old, *li;
 	enum ice_media_type *hw_media_type;
 	struct ice_fc_info *hw_fc_info;
 	bool tx_pause, rx_pause;
 	struct ice_aq_desc desc;
 	enum ice_status status;
+	struct ice_hw *hw;
 	u16 cmd_flags;
 
 	if (!pi)
 		return ICE_ERR_PARAM;
-	hw_link_info_old = &pi->phy.link_info_old;
+	hw = pi->hw;
+	li_old = &pi->phy.link_info_old;
 	hw_media_type = &pi->phy.media_type;
-	hw_link_info = &pi->phy.link_info;
+	li = &pi->phy.link_info;
 	hw_fc_info = &pi->fc;
 
 	ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_link_status);
@@ -286,27 +288,27 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 	resp->cmd_flags = cpu_to_le16(cmd_flags);
 	resp->lport_num = pi->lport;
 
-	status = ice_aq_send_cmd(pi->hw, &desc, &link_data, sizeof(link_data),
-				 cd);
+	status = ice_aq_send_cmd(hw, &desc, &link_data, sizeof(link_data), cd);
 	if (status)
 		return status;
 
 	/* save off old link status information */
-	*hw_link_info_old = *hw_link_info;
+	*li_old = *li;
 
 	/* update current link status information */
-	hw_link_info->link_speed = le16_to_cpu(link_data.link_speed);
-	hw_link_info->phy_type_low = le64_to_cpu(link_data.phy_type_low);
-	hw_link_info->phy_type_high = le64_to_cpu(link_data.phy_type_high);
+	li->link_speed = le16_to_cpu(link_data.link_speed);
+	li->phy_type_low = le64_to_cpu(link_data.phy_type_low);
+	li->phy_type_high = le64_to_cpu(link_data.phy_type_high);
 	*hw_media_type = ice_get_media_type(pi);
-	hw_link_info->link_info = link_data.link_info;
-	hw_link_info->an_info = link_data.an_info;
-	hw_link_info->ext_info = link_data.ext_info;
-	hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
-	hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
-	hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
-	hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
+	li->link_info = link_data.link_info;
+	li->an_info = link_data.an_info;
+	li->ext_info = link_data.ext_info;
+	li->max_frame_size = le16_to_cpu(link_data.max_frame_size);
+	li->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
+	li->topo_media_conflict = link_data.topo_media_conflict;
+	li->pacing = link_data.cfg & (ICE_AQ_CFG_PACING_M |
+				      ICE_AQ_CFG_PACING_TYPE_M);
 
 	/* update fc info */
 	tx_pause = !!(link_data.an_info & ICE_AQ_LINK_PAUSE_TX);
@@ -320,12 +322,24 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
 	else
 		hw_fc_info->current_mode = ICE_FC_NONE;
 
-	hw_link_info->lse_ena =
-		!!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+	li->lse_ena = !!(resp->cmd_flags & cpu_to_le16(ICE_AQ_LSE_IS_ENABLED));
+
+	ice_debug(hw, ICE_DBG_LINK, "link_speed = 0x%x\n", li->link_speed);
+	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+		  (unsigned long long)li->phy_type_low);
+	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+		  (unsigned long long)li->phy_type_high);
+	ice_debug(hw, ICE_DBG_LINK, "media_type = 0x%x\n", *hw_media_type);
+	ice_debug(hw, ICE_DBG_LINK, "link_info = 0x%x\n", li->link_info);
+	ice_debug(hw, ICE_DBG_LINK, "an_info = 0x%x\n", li->an_info);
+	ice_debug(hw, ICE_DBG_LINK, "ext_info = 0x%x\n", li->ext_info);
+	ice_debug(hw, ICE_DBG_LINK, "lse_ena = 0x%x\n", li->lse_ena);
+	ice_debug(hw, ICE_DBG_LINK, "max_frame = 0x%x\n", li->max_frame_size);
+	ice_debug(hw, ICE_DBG_LINK, "pacing = 0x%x\n", li->pacing);
 
 	/* save link status information */
 	if (link)
-		*link = *hw_link_info;
+		*link = *li;
 
 	/* flag cleared so calling functions don't call AQ again */
 	pi->phy.get_link_info = false;
@@ -2000,6 +2014,17 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
 	desc.params.set_phy.lport_num = lport;
 	desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
 
+	ice_debug(hw, ICE_DBG_LINK, "phy_type_low = 0x%llx\n",
+		  (unsigned long long)le64_to_cpu(cfg->phy_type_low));
+	ice_debug(hw, ICE_DBG_LINK, "phy_type_high = 0x%llx\n",
+		  (unsigned long long)le64_to_cpu(cfg->phy_type_high));
+	ice_debug(hw, ICE_DBG_LINK, "caps = 0x%x\n", cfg->caps);
+	ice_debug(hw, ICE_DBG_LINK, "low_power_ctrl = 0x%x\n",
+		  cfg->low_power_ctrl);
+	ice_debug(hw, ICE_DBG_LINK, "eee_cap = 0x%x\n", cfg->eee_cap);
+	ice_debug(hw, ICE_DBG_LINK, "eeer_value = 0x%x\n", cfg->eeer_value);
+	ice_debug(hw, ICE_DBG_LINK, "link_fec_opt = 0x%x\n", cfg->link_fec_opt);
+
 	return ice_aq_send_cmd(hw, &desc, cfg, sizeof(*cfg), cd);
 }
...
@@ -500,30 +500,31 @@ void ice_update_dcb_stats(struct ice_pf *pf)
 {
 	struct ice_hw_port_stats *prev_ps, *cur_ps;
 	struct ice_hw *hw = &pf->hw;
-	u8 pf_id = hw->pf_id;
+	u8 port;
 	int i;
 
+	port = hw->port_info->lport;
+
 	prev_ps = &pf->stats_prev;
 	cur_ps = &pf->stats;
 
 	for (i = 0; i < 8; i++) {
-		ice_stat_update32(hw, GLPRT_PXOFFRXC(pf_id, i),
+		ice_stat_update32(hw, GLPRT_PXOFFRXC(port, i),
 				  pf->stat_prev_loaded,
 				  &prev_ps->priority_xoff_rx[i],
 				  &cur_ps->priority_xoff_rx[i]);
-		ice_stat_update32(hw, GLPRT_PXONRXC(pf_id, i),
+		ice_stat_update32(hw, GLPRT_PXONRXC(port, i),
 				  pf->stat_prev_loaded,
 				  &prev_ps->priority_xon_rx[i],
 				  &cur_ps->priority_xon_rx[i]);
-		ice_stat_update32(hw, GLPRT_PXONTXC(pf_id, i),
+		ice_stat_update32(hw, GLPRT_PXONTXC(port, i),
 				  pf->stat_prev_loaded,
 				  &prev_ps->priority_xon_tx[i],
 				  &cur_ps->priority_xon_tx[i]);
-		ice_stat_update32(hw, GLPRT_PXOFFTXC(pf_id, i),
+		ice_stat_update32(hw, GLPRT_PXOFFTXC(port, i),
 				  pf->stat_prev_loaded,
 				  &prev_ps->priority_xoff_tx[i],
 				  &cur_ps->priority_xoff_tx[i]);
-		ice_stat_update32(hw, GLPRT_RXON2OFFCNT(pf_id, i),
+		ice_stat_update32(hw, GLPRT_RXON2OFFCNT(port, i),
 				  pf->stat_prev_loaded,
 				  &prev_ps->priority_xon_2_xoff[i],
 				  &cur_ps->priority_xon_2_xoff[i]);
...
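For context on the fix above: the GLPRT_* statistics registers are indexed by port, so reading them with the PF id only works when the two coincide, which is not guaranteed on 4-port cards. A toy C sketch of the indexing mistake (the base address and stride below are made up, not the real register map):

#include <stdint.h>
#include <stdio.h>

/* hypothetical layout: one counter register per port, fixed stride */
#define GLPRT_GORCL_BASE	0x00300000u
#define GLPRT_GORCL_STRIDE	0x8u

static uint32_t glprt_gorcl(unsigned int port)
{
	return GLPRT_GORCL_BASE + port * GLPRT_GORCL_STRIDE;
}

int main(void)
{
	unsigned int pf_id = 0;	/* function id */
	unsigned int lport = 2;	/* logical port the PF is attached to */

	/* on a 4-port NIC these can differ, so the old code read the
	 * wrong port's counters
	 */
	printf("pf_id-based reg: 0x%08x, lport-based reg: 0x%08x\n",
	       glprt_gorcl(pf_id), glprt_gorcl(lport));
	return 0;
}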
@@ -191,41 +191,58 @@ static int ice_pf_rxq_wait(struct ice_pf *pf, int pf_q, bool ena)
 }
 
 /**
- * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * ice_vsi_ctrl_rx_ring - Start or stop a VSI's Rx ring
  * @vsi: the VSI being configured
  * @ena: start or stop the Rx rings
+ * @rxq_idx: Rx queue index
  */
-static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
 {
+	int pf_q = vsi->rxq_map[rxq_idx];
 	struct ice_pf *pf = vsi->back;
 	struct ice_hw *hw = &pf->hw;
-	int i, ret = 0;
+	int ret = 0;
+	u32 rx_reg;
 
-	for (i = 0; i < vsi->num_rxq; i++) {
-		int pf_q = vsi->rxq_map[i];
-		u32 rx_reg;
+	rx_reg = rd32(hw, QRX_CTRL(pf_q));
 
-		rx_reg = rd32(hw, QRX_CTRL(pf_q));
+	/* Skip if the queue is already in the requested state */
+	if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
+		return 0;
 
-		/* Skip if the queue is already in the requested state */
-		if (ena == !!(rx_reg & QRX_CTRL_QENA_STAT_M))
-			continue;
+	/* turn on/off the queue */
+	if (ena)
+		rx_reg |= QRX_CTRL_QENA_REQ_M;
+	else
+		rx_reg &= ~QRX_CTRL_QENA_REQ_M;
+	wr32(hw, QRX_CTRL(pf_q), rx_reg);
 
-		/* turn on/off the queue */
-		if (ena)
-			rx_reg |= QRX_CTRL_QENA_REQ_M;
-		else
-			rx_reg &= ~QRX_CTRL_QENA_REQ_M;
-		wr32(hw, QRX_CTRL(pf_q), rx_reg);
+	/* wait for the change to finish */
+	ret = ice_pf_rxq_wait(pf, pf_q, ena);
+	if (ret)
+		dev_err(&pf->pdev->dev,
+			"VSI idx %d Rx ring %d %sable timeout\n",
+			vsi->idx, pf_q, (ena ? "en" : "dis"));
 
-		/* wait for the change to finish */
-		ret = ice_pf_rxq_wait(pf, pf_q, ena);
-		if (ret) {
-			dev_err(&pf->pdev->dev,
-				"VSI idx %d Rx ring %d %sable timeout\n",
-				vsi->idx, pf_q, (ena ? "en" : "dis"));
+	return ret;
+}
+
+/**
+ * ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
+ * @vsi: the VSI being configured
+ * @ena: start or stop the Rx rings
+ */
+static int ice_vsi_ctrl_rx_rings(struct ice_vsi *vsi, bool ena)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < vsi->num_rxq; i++) {
+		ret = ice_vsi_ctrl_rx_ring(vsi, ena, i);
+		if (ret)
 			break;
-		}
 	}
 
 	return ret;
@@ -246,12 +263,24 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	vsi->tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
 				     sizeof(*vsi->tx_rings), GFP_KERNEL);
 	if (!vsi->tx_rings)
-		goto err_txrings;
+		return -ENOMEM;
 
 	vsi->rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
 				     sizeof(*vsi->rx_rings), GFP_KERNEL);
 	if (!vsi->rx_rings)
-		goto err_rxrings;
+		goto err_rings;
+
+	vsi->txq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
+				    sizeof(*vsi->txq_map), GFP_KERNEL);
+	if (!vsi->txq_map)
+		goto err_txq_map;
+
+	vsi->rxq_map = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
+				    sizeof(*vsi->rxq_map), GFP_KERNEL);
+	if (!vsi->rxq_map)
+		goto err_rxq_map;
 
 	/* There is no need to allocate q_vectors for a loopback VSI. */
 	if (vsi->type == ICE_VSI_LB)
@@ -266,10 +295,13 @@ static int ice_vsi_alloc_arrays(struct ice_vsi *vsi)
 	return 0;
 
 err_vectors:
+	devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+err_rxq_map:
+	devm_kfree(&pf->pdev->dev, vsi->txq_map);
+err_txq_map:
 	devm_kfree(&pf->pdev->dev, vsi->rx_rings);
-err_rxrings:
+err_rings:
 	devm_kfree(&pf->pdev->dev, vsi->tx_rings);
-err_txrings:
 	return -ENOMEM;
 }
@@ -416,6 +448,14 @@ static void ice_vsi_free_arrays(struct ice_vsi *vsi)
 		devm_kfree(&pf->pdev->dev, vsi->rx_rings);
 		vsi->rx_rings = NULL;
 	}
+	if (vsi->txq_map) {
+		devm_kfree(&pf->pdev->dev, vsi->txq_map);
+		vsi->txq_map = NULL;
+	}
+	if (vsi->rxq_map) {
+		devm_kfree(&pf->pdev->dev, vsi->rxq_map);
+		vsi->rxq_map = NULL;
+	}
 }
 
 /**
@@ -647,7 +687,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
 	struct ice_qs_cfg tx_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
 		.pf_map = pf->avail_txqs,
-		.pf_map_size = ICE_MAX_TXQS,
+		.pf_map_size = pf->max_pf_txqs,
 		.q_count = vsi->alloc_txq,
 		.scatter_count = ICE_MAX_SCATTER_TXQS,
 		.vsi_map = vsi->txq_map,
@@ -657,7 +697,7 @@ static int ice_vsi_get_qs(struct ice_vsi *vsi)
 	struct ice_qs_cfg rx_qs_cfg = {
 		.qs_mutex = &pf->avail_q_mutex,
 		.pf_map = pf->avail_rxqs,
-		.pf_map_size = ICE_MAX_RXQS,
+		.pf_map_size = pf->max_pf_rxqs,
 		.q_count = vsi->alloc_rxq,
 		.scatter_count = ICE_MAX_SCATTER_RXQS,
 		.vsi_map = vsi->rxq_map,
@@ -1648,6 +1688,62 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 	return 0;
 }
 
+/**
+ * ice_vsi_cfg_txq - Configure single Tx queue
+ * @vsi: the VSI that queue belongs to
+ * @ring: Tx ring to be configured
+ * @tc_q_idx: queue index within given TC
+ * @qg_buf: queue group buffer
+ * @tc: TC that Tx ring belongs to
+ */
+static int
+ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring, u16 tc_q_idx,
+		struct ice_aqc_add_tx_qgrp *qg_buf, u8 tc)
+{
+	struct ice_tlan_ctx tlan_ctx = { 0 };
+	struct ice_aqc_add_txqs_perq *txq;
+	struct ice_pf *pf = vsi->back;
+	u8 buf_len = sizeof(*qg_buf);
+	enum ice_status status;
+	u16 pf_q;
+
+	pf_q = ring->reg_idx;
+	ice_setup_tx_ctx(ring, &tlan_ctx, pf_q);
+	/* copy context contents into the qg_buf */
+	qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
+	ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
+		    ice_tlan_ctx_info);
+
+	/* init queue specific tail reg. It is referred as
+	 * transmit comm scheduler queue doorbell.
+	 */
+	ring->tail = pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
+
+	/* Add unique software queue handle of the Tx queue per
+	 * TC into the VSI Tx ring
+	 */
+	ring->q_handle = tc_q_idx;
+
+	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
+				 1, qg_buf, buf_len, NULL);
+	if (status) {
+		dev_err(&pf->pdev->dev,
+			"Failed to set LAN Tx queue context, error: %d\n",
+			status);
+		return -ENODEV;
+	}
+
+	/* Add Tx Queue TEID into the VSI Tx ring from the
+	 * response. This will complete configuring and
+	 * enabling the queue.
+	 */
+	txq = &qg_buf->txqs[0];
+	if (pf_q == le16_to_cpu(txq->txq_id))
+		ring->txq_teid = le32_to_cpu(txq->q_teid);
+
+	return 0;
+}
+
 /**
  * ice_vsi_cfg_txqs - Configure the VSI for Tx
  * @vsi: the VSI being configured
@@ -1661,20 +1757,16 @@ static int
 ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
 {
 	struct ice_aqc_add_tx_qgrp *qg_buf;
-	struct ice_aqc_add_txqs_perq *txq;
 	struct ice_pf *pf = vsi->back;
-	u8 num_q_grps, q_idx = 0;
-	enum ice_status status;
-	u16 buf_len, i, pf_q;
-	int err = 0, tc;
+	u16 q_idx = 0, i;
+	int err = 0;
+	u8 tc;
 
-	buf_len = sizeof(*qg_buf);
-	qg_buf = devm_kzalloc(&pf->pdev->dev, buf_len, GFP_KERNEL);
+	qg_buf = devm_kzalloc(&pf->pdev->dev, sizeof(*qg_buf), GFP_KERNEL);
 	if (!qg_buf)
 		return -ENOMEM;
 
 	qg_buf->num_txqs = 1;
-	num_q_grps = 1;
 
 	/* set up and configure the Tx queues for each enabled TC */
 	ice_for_each_traffic_class(tc) {
@@ -1682,39 +1774,10 @@ ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
 			break;
 
 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
-			struct ice_tlan_ctx tlan_ctx = { 0 };
-
-			pf_q = vsi->txq_map[q_idx + offset];
-			ice_setup_tx_ctx(rings[q_idx], &tlan_ctx, pf_q);
-			/* copy context contents into the qg_buf */
-			qg_buf->txqs[0].txq_id = cpu_to_le16(pf_q);
-			ice_set_ctx((u8 *)&tlan_ctx, qg_buf->txqs[0].txq_ctx,
-				    ice_tlan_ctx_info);
-
-			/* init queue specific tail reg. It is referred as
-			 * transmit comm scheduler queue doorbell.
-			 */
-			rings[q_idx]->tail =
-				pf->hw.hw_addr + QTX_COMM_DBELL(pf_q);
-			status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc,
-						 i, num_q_grps, qg_buf,
-						 buf_len, NULL);
-			if (status) {
-				dev_err(&pf->pdev->dev,
-					"Failed to set LAN Tx queue context, error: %d\n",
-					status);
-				err = -ENODEV;
+			err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
+					      qg_buf, tc);
+			if (err)
 				goto err_cfg_txqs;
-			}
-
-			/* Add Tx Queue TEID into the VSI Tx ring from the
-			 * response. This will complete configuring and
-			 * enabling the queue.
-			 */
-			txq = &qg_buf->txqs[0];
-			if (pf_q == le16_to_cpu(txq->txq_id))
-				rings[q_idx]->txq_teid =
-					le32_to_cpu(txq->q_teid);
 
 			q_idx++;
 		}
@@ -2061,45 +2124,112 @@ void ice_trigger_sw_intr(struct ice_hw *hw, struct ice_q_vector *q_vector)
 }
 
 /**
- * ice_vsi_stop_tx_rings - Disable Tx rings
+ * ice_vsi_stop_tx_ring - Disable single Tx ring
  * @vsi: the VSI being configured
  * @rst_src: reset source
  * @rel_vmvf_num: Relative ID of VF/VM
- * @rings: Tx ring array to be stopped
- * @offset: offset within vsi->txq_map
+ * @ring: Tx ring to be stopped
+ * @txq_meta: Meta data of Tx ring to be stopped
 */
-static int
-ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
-		      u16 rel_vmvf_num, struct ice_ring **rings, int offset)
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+		     u16 rel_vmvf_num, struct ice_ring *ring,
+		     struct ice_txq_meta *txq_meta)
 {
 	struct ice_pf *pf = vsi->back;
+	struct ice_q_vector *q_vector;
 	struct ice_hw *hw = &pf->hw;
-	int tc, q_idx = 0, err = 0;
-	u16 *q_ids, *q_handles, i;
 	enum ice_status status;
-	u32 *q_teids, val;
+	u32 val;
 
-	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
-		return -EINVAL;
+	/* clear cause_ena bit for disabled queues */
+	val = rd32(hw, QINT_TQCTL(ring->reg_idx));
+	val &= ~QINT_TQCTL_CAUSE_ENA_M;
+	wr32(hw, QINT_TQCTL(ring->reg_idx), val);
 
-	q_teids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_teids),
-			       GFP_KERNEL);
-	if (!q_teids)
-		return -ENOMEM;
+	/* software is expected to wait for 100 ns */
+	ndelay(100);
 
-	q_ids = devm_kcalloc(&pf->pdev->dev, vsi->num_txq, sizeof(*q_ids),
-			     GFP_KERNEL);
-	if (!q_ids) {
-		err = -ENOMEM;
-		goto err_alloc_q_ids;
+	/* trigger a software interrupt for the vector
+	 * associated to the queue to schedule NAPI handler
+	 */
+	q_vector = ring->q_vector;
+	if (q_vector)
+		ice_trigger_sw_intr(hw, q_vector);
+
+	status = ice_dis_vsi_txq(vsi->port_info, txq_meta->vsi_idx,
+				 txq_meta->tc, 1, &txq_meta->q_handle,
+				 &txq_meta->q_id, &txq_meta->q_teid, rst_src,
+				 rel_vmvf_num, NULL);
+
+	/* if the disable queue command was exercised during an
+	 * active reset flow, ICE_ERR_RESET_ONGOING is returned.
+	 * This is not an error as the reset operation disables
+	 * queues at the hardware level anyway.
+	 */
+	if (status == ICE_ERR_RESET_ONGOING) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"Reset in progress. LAN Tx queues already disabled\n");
+	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
+		dev_dbg(&vsi->back->pdev->dev,
+			"LAN Tx queues do not exist, nothing to disable\n");
+	} else if (status) {
+		dev_err(&vsi->back->pdev->dev,
+			"Failed to disable LAN Tx queues, error: %d\n", status);
+		return -ENODEV;
 	}
 
-	q_handles = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
-				 sizeof(*q_handles), GFP_KERNEL);
-	if (!q_handles) {
-		err = -ENOMEM;
-		goto err_alloc_q_handles;
-	}
+	return 0;
+}
+
+/**
+ * ice_fill_txq_meta - Prepare the Tx queue's meta data
+ * @vsi: VSI that ring belongs to
+ * @ring: ring that txq_meta will be based on
+ * @txq_meta: a helper struct that wraps Tx queue's information
+ *
+ * Set up a helper struct that will contain all the necessary fields that
+ * are needed for stopping Tx queue
+ */
+#ifndef CONFIG_PCI_IOV
+static
+#endif /* !CONFIG_PCI_IOV */
+void
+ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+		  struct ice_txq_meta *txq_meta)
+{
+	u8 tc = 0;
+
+#ifdef CONFIG_DCB
+	tc = ring->dcb_tc;
+#endif /* CONFIG_DCB */
+	txq_meta->q_id = ring->reg_idx;
+	txq_meta->q_teid = ring->txq_teid;
+	txq_meta->q_handle = ring->q_handle;
+	txq_meta->vsi_idx = vsi->idx;
+	txq_meta->tc = tc;
+}
+
+/**
+ * ice_vsi_stop_tx_rings - Disable Tx rings
+ * @vsi: the VSI being configured
+ * @rst_src: reset source
+ * @rel_vmvf_num: Relative ID of VF/VM
+ * @rings: Tx ring array to be stopped
+ */
+static int
+ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+		      u16 rel_vmvf_num, struct ice_ring **rings)
+{
+	u16 i, q_idx = 0;
+	int status;
+	u8 tc;
+
+	if (vsi->num_txq > ICE_LAN_TXQ_MAX_QDIS)
+		return -EINVAL;
 
 	/* set up the Tx queue list to be disabled for each enabled TC */
 	ice_for_each_traffic_class(tc) {
@@ -2107,67 +2237,24 @@ ice_vsi_stop_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			break;
 
 		for (i = 0; i < vsi->tc_cfg.tc_info[tc].qcount_tx; i++) {
-			struct ice_q_vector *q_vector;
+			struct ice_txq_meta txq_meta = { };
 
-			if (!rings || !rings[q_idx]) {
-				err = -EINVAL;
-				goto err_out;
-			}
-
-			q_ids[i] = vsi->txq_map[q_idx + offset];
-			q_teids[i] = rings[q_idx]->txq_teid;
-			q_handles[i] = i;
+			if (!rings || !rings[q_idx])
+				return -EINVAL;
 
-			/* clear cause_ena bit for disabled queues */
-			val = rd32(hw, QINT_TQCTL(rings[i]->reg_idx));
-			val &= ~QINT_TQCTL_CAUSE_ENA_M;
-			wr32(hw, QINT_TQCTL(rings[i]->reg_idx), val);
-
-			/* software is expected to wait for 100 ns */
-			ndelay(100);
-
-			/* trigger a software interrupt for the vector
-			 * associated to the queue to schedule NAPI handler
-			 */
-			q_vector = rings[i]->q_vector;
-			if (q_vector)
-				ice_trigger_sw_intr(hw, q_vector);
+			ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
+			status = ice_vsi_stop_tx_ring(vsi, rst_src,
+						      rel_vmvf_num,
+						      rings[q_idx], &txq_meta);
+			if (status)
+				return status;
 
 			q_idx++;
 		}
-
-		status = ice_dis_vsi_txq(vsi->port_info, vsi->idx, tc,
-					 vsi->num_txq, q_handles, q_ids,
-					 q_teids, rst_src, rel_vmvf_num, NULL);
-
-		/* if the disable queue command was exercised during an active
-		 * reset flow, ICE_ERR_RESET_ONGOING is returned. This is not
-		 * an error as the reset operation disables queues at the
-		 * hardware level anyway.
-		 */
-		if (status == ICE_ERR_RESET_ONGOING) {
-			dev_dbg(&pf->pdev->dev,
-				"Reset in progress. LAN Tx queues already disabled\n");
-		} else if (status == ICE_ERR_DOES_NOT_EXIST) {
-			dev_dbg(&pf->pdev->dev,
-				"LAN Tx queues does not exist, nothing to disabled\n");
-		} else if (status) {
-			dev_err(&pf->pdev->dev,
-				"Failed to disable LAN Tx queues, error: %d\n",
-				status);
-			err = -ENODEV;
-		}
 	}
 
-err_out:
-	devm_kfree(&pf->pdev->dev, q_handles);
-
-err_alloc_q_handles:
-	devm_kfree(&pf->pdev->dev, q_ids);
-
-err_alloc_q_ids:
-	devm_kfree(&pf->pdev->dev, q_teids);
-
-	return err;
+	return 0;
 }
 
 /**
@@ -2180,8 +2267,7 @@ int
 ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 			  u16 rel_vmvf_num)
 {
-	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings,
-				     0);
+	return ice_vsi_stop_tx_rings(vsi, rst_src, rel_vmvf_num, vsi->tx_rings);
 }
 
 /**
@@ -2955,6 +3041,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 		vsi->base_vector = 0;
 	}
 
+	ice_vsi_put_qs(vsi);
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi);
 	ice_dev_onetime_setup(&pf->hw);
@@ -2962,6 +3049,12 @@
 		ice_vsi_set_num_qs(vsi, vf->vf_id);
 	else
 		ice_vsi_set_num_qs(vsi, ICE_INVAL_VFID);
+
+	ret = ice_vsi_alloc_arrays(vsi);
+	if (ret < 0)
+		goto err_vsi;
+
+	ice_vsi_get_qs(vsi);
 	ice_vsi_set_tc_cfg(vsi);
 
 	/* Initialize VSI struct elements and create VSI in FW */
@@ -2969,9 +3062,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
 	if (ret < 0)
 		goto err_vsi;
 
-	ret = ice_vsi_alloc_arrays(vsi);
-	if (ret < 0)
-		goto err_vsi;
-
 	switch (vsi->type) {
 	case ICE_VSI_PF:
...
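The ice_lib.c hunks above are the refactor described in the cover letter: per-queue helpers (ice_vsi_cfg_txq(), ice_vsi_ctrl_rx_ring(), ice_vsi_stop_tx_ring()) now own the per-queue work and the per-VSI loops delegate to them, which is what lets a VF enable or disable one queue at a time. A compilable toy sketch of that shape (stand-in names, not the driver's code):

#include <stdio.h>

static int cfg_one_txq(int q_idx)
{
	/* stand-in for programming one queue's context, doorbell, TEID */
	printf("configured Tx queue %d\n", q_idx);
	return 0;
}

static int cfg_all_txqs(int num_txq)
{
	int i, err;

	for (i = 0; i < num_txq; i++) {
		err = cfg_one_txq(i);	/* single-queue helper does the work */
		if (err)
			return err;
	}
	return 0;
}

int main(void)
{
	return cfg_all_txqs(4);
}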
@@ -6,8 +6,22 @@
 #include "ice.h"
 
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
-			const u8 *macaddr);
+struct ice_txq_meta {
+	/* Tx-scheduler element identifier */
+	u32 q_teid;
+	/* Entry in VSI's txq_map bitmap */
+	u16 q_id;
+	/* Relative index of Tx queue within TC */
+	u16 q_handle;
+	/* VSI index that Tx queue belongs to */
+	u16 vsi_idx;
+	/* TC number that Tx queue belongs to */
+	u8 tc;
+};
+
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+		    const u8 *macaddr);
 
 void ice_free_fltr_list(struct device *dev, struct list_head *h);
@@ -25,6 +39,16 @@ ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
 void
 ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
 
+int
+ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
+		     u16 rel_vmvf_num, struct ice_ring *ring,
+		     struct ice_txq_meta *txq_meta);
+
+void ice_fill_txq_meta(struct ice_vsi *vsi, struct ice_ring *ring,
+		       struct ice_txq_meta *txq_meta);
+
+int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx);
+
 #endif /* CONFIG_PCI_IOV */
 
 int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
...
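struct ice_txq_meta above bundles everything ice_dis_vsi_txq() needs to stop one queue. A userspace sketch of the fill-then-use pattern (stand-in types; field meanings follow the comments in the header above):

#include <stdint.h>
#include <stdio.h>

struct txq_meta {
	uint32_t q_teid;	/* Tx-scheduler element identifier */
	uint16_t q_id;		/* entry in the VSI's txq_map */
	uint16_t q_handle;	/* index of the queue within its TC */
	uint16_t vsi_idx;
	uint8_t tc;
};

struct ring {
	uint32_t txq_teid;
	uint16_t reg_idx;
	uint16_t q_handle;
	uint8_t dcb_tc;
};

static void fill_txq_meta(uint16_t vsi_idx, const struct ring *r,
			  struct txq_meta *m)
{
	m->q_id = r->reg_idx;
	m->q_teid = r->txq_teid;
	m->q_handle = r->q_handle;
	m->vsi_idx = vsi_idx;
	m->tc = r->dcb_tc;
}

int main(void)
{
	struct ring r = { .txq_teid = 0x42, .reg_idx = 7, .q_handle = 1 };
	struct txq_meta m;

	fill_txq_meta(3, &r, &m);
	printf("stop q_id %d (teid 0x%lx) on VSI %d\n",
	       m.q_id, (unsigned long)m.q_teid, m.vsi_idx);
	return 0;
}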
@@ -436,13 +436,13 @@ static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
 	if (vsi->type == ICE_VSI_PF && vsi->netdev) {
 		if (netif_running(vsi->netdev)) {
-			if (!locked) {
+			if (!locked)
 				rtnl_lock();
-				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
+
+			ice_stop(vsi->netdev);
+
+			if (!locked)
 				rtnl_unlock();
-			} else {
-				vsi->netdev->netdev_ops->ndo_stop(vsi->netdev);
-			}
 		} else {
 			ice_vsi_close(vsi);
 		}
@@ -477,7 +477,7 @@ static void
 ice_prepare_for_reset(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	u8 i;
+	int i;
 
 	/* already prepared for reset */
 	if (test_bit(__ICE_PREPARED_FOR_RESET, pf->state))
@@ -489,7 +489,7 @@ ice_prepare_for_reset(struct ice_pf *pf)
 	/* Disable VFs until reset is completed */
 	for (i = 0; i < pf->num_alloc_vfs; i++)
-		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
+		ice_set_vf_state_qs_dis(&pf->vf[i]);
 
 	/* disable the VSIs and their queues that are not already DOWN */
 	ice_pf_dis_all_vsi(pf, false);
@@ -1315,8 +1315,10 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
 		if (vf_mdd_detected) {
 			vf->num_mdd_events++;
-			if (vf->num_mdd_events > 1)
-				dev_info(&pf->pdev->dev, "VF %d has had %llu MDD events since last boot\n",
+			if (vf->num_mdd_events &&
+			    vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
+				dev_info(&pf->pdev->dev,
+					 "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
 					 i, vf->num_mdd_events);
 		}
 	}
@@ -2205,13 +2207,23 @@ static void ice_deinit_pf(struct ice_pf *pf)
 	ice_service_task_stop(pf);
 	mutex_destroy(&pf->sw_mutex);
 	mutex_destroy(&pf->avail_q_mutex);
+
+	if (pf->avail_txqs) {
+		bitmap_free(pf->avail_txqs);
+		pf->avail_txqs = NULL;
+	}
+
+	if (pf->avail_rxqs) {
+		bitmap_free(pf->avail_rxqs);
+		pf->avail_rxqs = NULL;
+	}
 }
 
 /**
  * ice_init_pf - Initialize general software structures (struct ice_pf)
  * @pf: board private structure to initialize
 */
-static void ice_init_pf(struct ice_pf *pf)
+static int ice_init_pf(struct ice_pf *pf)
 {
 	bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
 #ifdef CONFIG_PCI_IOV
@@ -2227,12 +2239,6 @@ static void ice_init_pf(struct ice_pf *pf)
 	mutex_init(&pf->sw_mutex);
 	mutex_init(&pf->avail_q_mutex);
 
-	/* Clear avail_[t|r]x_qs bitmaps (set all to avail) */
-	mutex_lock(&pf->avail_q_mutex);
-	bitmap_zero(pf->avail_txqs, ICE_MAX_TXQS);
-	bitmap_zero(pf->avail_rxqs, ICE_MAX_RXQS);
-	mutex_unlock(&pf->avail_q_mutex);
-
 	if (pf->hw.func_caps.common_cap.rss_table_size)
 		set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2241,6 +2247,22 @@ static void ice_init_pf(struct ice_pf *pf)
 	pf->serv_tmr_period = HZ;
 	INIT_WORK(&pf->serv_task, ice_service_task);
 	clear_bit(__ICE_SERVICE_SCHED, pf->state);
+
+	pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
+	pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
+
+	pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
+	if (!pf->avail_txqs)
+		return -ENOMEM;
+
+	pf->avail_rxqs = bitmap_zalloc(pf->max_pf_rxqs, GFP_KERNEL);
+	if (!pf->avail_rxqs) {
+		devm_kfree(&pf->pdev->dev, pf->avail_txqs);
+		pf->avail_txqs = NULL;
+		return -ENOMEM;
+	}
+
+	return 0;
 }
 
 /**
@@ -2259,13 +2281,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
 	/* reserve one vector for miscellaneous handler */
 	needed = 1;
+	if (v_left < needed)
+		goto no_hw_vecs_left_err;
 	v_budget += needed;
 	v_left -= needed;
 
 	/* reserve vectors for LAN traffic */
-	pf->num_lan_msix = min_t(int, num_online_cpus(), v_left);
-	v_budget += pf->num_lan_msix;
-	v_left -= pf->num_lan_msix;
+	needed = min_t(int, num_online_cpus(), v_left);
+	if (v_left < needed)
+		goto no_hw_vecs_left_err;
+	pf->num_lan_msix = needed;
+	v_budget += needed;
+	v_left -= needed;
 
 	pf->msix_entries = devm_kcalloc(&pf->pdev->dev, v_budget,
 					sizeof(*pf->msix_entries), GFP_KERNEL);
@@ -2290,18 +2317,18 @@ static int ice_ena_msix_range(struct ice_pf *pf)
 	if (v_actual < v_budget) {
 		dev_warn(&pf->pdev->dev,
-			 "not enough vectors. requested = %d, obtained = %d\n",
+			 "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
 			 v_budget, v_actual);
-		if (v_actual >= (pf->num_lan_msix + 1)) {
-			pf->num_avail_sw_msix = v_actual -
-						(pf->num_lan_msix + 1);
-		} else if (v_actual >= 2) {
-			pf->num_lan_msix = 1;
-			pf->num_avail_sw_msix = v_actual - 2;
-		} else {
+/* 2 vectors for LAN (traffic + OICR) */
+#define ICE_MIN_LAN_VECS 2
+
+		if (v_actual < ICE_MIN_LAN_VECS) {
+			/* error if we can't get minimum vectors */
 			pci_disable_msix(pf->pdev);
 			err = -ERANGE;
 			goto msix_err;
+		} else {
+			pf->num_lan_msix = ICE_MIN_LAN_VECS;
 		}
 	}
@@ -2311,6 +2338,11 @@ static int ice_ena_msix_range(struct ice_pf *pf)
 	devm_kfree(&pf->pdev->dev, pf->msix_entries);
 	goto exit_err;
 
+no_hw_vecs_left_err:
+	dev_err(&pf->pdev->dev,
+		"not enough device MSI-X vectors. requested = %d, available = %d\n",
+		needed, v_left);
+	err = -ERANGE;
 exit_err:
 	pf->num_lan_msix = 0;
 	return err;
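The reworked ice_ena_msix_range() above reserves vectors in stages and bails out through no_hw_vecs_left_err as soon as the device cannot cover a request. Its accounting, reduced to a runnable toy (hypothetical numbers):

#include <stdio.h>

static int reserve(int *v_budget, int *v_left, int needed)
{
	if (*v_left < needed)
		return -1;	/* -> no_hw_vecs_left_err in the driver */
	*v_budget += needed;
	*v_left -= needed;
	return 0;
}

int main(void)
{
	int v_budget = 0, v_left = 8;	/* vectors the device offers */
	int lan_needed;

	if (reserve(&v_budget, &v_left, 1))	/* misc/OICR vector */
		goto err;

	lan_needed = 4 < v_left ? 4 : v_left;	/* min(num_cpus, v_left) */
	if (reserve(&v_budget, &v_left, lan_needed))	/* LAN traffic */
		goto err;

	printf("budget=%d, left=%d\n", v_budget, v_left);
	return 0;
err:
	fprintf(stderr, "not enough device MSI-X vectors\n");
	return 1;
}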
@@ -2465,7 +2497,11 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
 		 hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
 		 hw->api_maj_ver, hw->api_min_ver);
 
-	ice_init_pf(pf);
+	err = ice_init_pf(pf);
+	if (err) {
+		dev_err(dev, "ice_init_pf failed: %d\n", err);
+		goto err_init_pf_unroll;
+	}
 
 	err = ice_init_pf_dcb(pf, false);
 	if (err) {
@@ -2597,9 +2633,9 @@ static void ice_remove(struct pci_dev *pdev)
 			continue;
 		ice_vsi_free_q_vectors(pf->vsi[i]);
 	}
-	ice_clear_interrupt_scheme(pf);
 	ice_deinit_pf(pf);
 	ice_deinit_hw(&pf->hw);
+	ice_clear_interrupt_scheme(pf);
 	pci_disable_pcie_error_reporting(pdev);
 }
@@ -3262,25 +3298,25 @@ void ice_update_pf_stats(struct ice_pf *pf)
 {
 	struct ice_hw_port_stats *prev_ps, *cur_ps;
 	struct ice_hw *hw = &pf->hw;
-	u8 pf_id;
+	u8 port;
 
+	port = hw->port_info->lport;
 	prev_ps = &pf->stats_prev;
 	cur_ps = &pf->stats;
-	pf_id = hw->pf_id;
 
-	ice_stat_update40(hw, GLPRT_GORCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_GORCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.rx_bytes,
 			  &cur_ps->eth.rx_bytes);
 
-	ice_stat_update40(hw, GLPRT_UPRCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_UPRCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.rx_unicast,
 			  &cur_ps->eth.rx_unicast);
 
-	ice_stat_update40(hw, GLPRT_MPRCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_MPRCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.rx_multicast,
 			  &cur_ps->eth.rx_multicast);
 
-	ice_stat_update40(hw, GLPRT_BPRCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_BPRCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.rx_broadcast,
 			  &cur_ps->eth.rx_broadcast);
@@ -3288,109 +3324,109 @@ void ice_update_pf_stats(struct ice_pf *pf)
 			  &prev_ps->eth.rx_discards,
 			  &cur_ps->eth.rx_discards);
 
-	ice_stat_update40(hw, GLPRT_GOTCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_GOTCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.tx_bytes,
 			  &cur_ps->eth.tx_bytes);
 
-	ice_stat_update40(hw, GLPRT_UPTCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_UPTCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.tx_unicast,
 			  &cur_ps->eth.tx_unicast);
 
-	ice_stat_update40(hw, GLPRT_MPTCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_MPTCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.tx_multicast,
 			  &cur_ps->eth.tx_multicast);
 
-	ice_stat_update40(hw, GLPRT_BPTCL(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_BPTCL(port), pf->stat_prev_loaded,
 			  &prev_ps->eth.tx_broadcast,
 			  &cur_ps->eth.tx_broadcast);
 
-	ice_stat_update32(hw, GLPRT_TDOLD(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_TDOLD(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_dropped_link_down,
 			  &cur_ps->tx_dropped_link_down);
 
-	ice_stat_update40(hw, GLPRT_PRC64L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC64L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_64, &cur_ps->rx_size_64);
 
-	ice_stat_update40(hw, GLPRT_PRC127L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC127L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_127, &cur_ps->rx_size_127);
 
-	ice_stat_update40(hw, GLPRT_PRC255L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC255L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_255, &cur_ps->rx_size_255);
 
-	ice_stat_update40(hw, GLPRT_PRC511L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC511L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_511, &cur_ps->rx_size_511);
 
-	ice_stat_update40(hw, GLPRT_PRC1023L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC1023L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_1023, &cur_ps->rx_size_1023);
 
-	ice_stat_update40(hw, GLPRT_PRC1522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC1522L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_1522, &cur_ps->rx_size_1522);
 
-	ice_stat_update40(hw, GLPRT_PRC9522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PRC9522L(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_size_big, &cur_ps->rx_size_big);
 
-	ice_stat_update40(hw, GLPRT_PTC64L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC64L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_64, &cur_ps->tx_size_64);
 
-	ice_stat_update40(hw, GLPRT_PTC127L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC127L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_127, &cur_ps->tx_size_127);
 
-	ice_stat_update40(hw, GLPRT_PTC255L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC255L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_255, &cur_ps->tx_size_255);
 
-	ice_stat_update40(hw, GLPRT_PTC511L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC511L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_511, &cur_ps->tx_size_511);
 
-	ice_stat_update40(hw, GLPRT_PTC1023L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC1023L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_1023, &cur_ps->tx_size_1023);
 
-	ice_stat_update40(hw, GLPRT_PTC1522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC1522L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_1522, &cur_ps->tx_size_1522);
 
-	ice_stat_update40(hw, GLPRT_PTC9522L(pf_id), pf->stat_prev_loaded,
+	ice_stat_update40(hw, GLPRT_PTC9522L(port), pf->stat_prev_loaded,
 			  &prev_ps->tx_size_big, &cur_ps->tx_size_big);
 
-	ice_stat_update32(hw, GLPRT_LXONRXC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_LXONRXC(port), pf->stat_prev_loaded,
 			  &prev_ps->link_xon_rx, &cur_ps->link_xon_rx);
 
-	ice_stat_update32(hw, GLPRT_LXOFFRXC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_LXOFFRXC(port), pf->stat_prev_loaded,
 			  &prev_ps->link_xoff_rx, &cur_ps->link_xoff_rx);
 
-	ice_stat_update32(hw, GLPRT_LXONTXC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_LXONTXC(port), pf->stat_prev_loaded,
 			  &prev_ps->link_xon_tx, &cur_ps->link_xon_tx);
 
-	ice_stat_update32(hw, GLPRT_LXOFFTXC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_LXOFFTXC(port), pf->stat_prev_loaded,
 			  &prev_ps->link_xoff_tx, &cur_ps->link_xoff_tx);
 
 	ice_update_dcb_stats(pf);
 
-	ice_stat_update32(hw, GLPRT_CRCERRS(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_CRCERRS(port), pf->stat_prev_loaded,
 			  &prev_ps->crc_errors, &cur_ps->crc_errors);
 
-	ice_stat_update32(hw, GLPRT_ILLERRC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_ILLERRC(port), pf->stat_prev_loaded,
 			  &prev_ps->illegal_bytes, &cur_ps->illegal_bytes);
 
-	ice_stat_update32(hw, GLPRT_MLFC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_MLFC(port), pf->stat_prev_loaded,
 			  &prev_ps->mac_local_faults,
 			  &cur_ps->mac_local_faults);
 
-	ice_stat_update32(hw, GLPRT_MRFC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_MRFC(port), pf->stat_prev_loaded,
 			  &prev_ps->mac_remote_faults,
 			  &cur_ps->mac_remote_faults);
 
-	ice_stat_update32(hw, GLPRT_RLEC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_RLEC(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_len_errors, &cur_ps->rx_len_errors);
 
-	ice_stat_update32(hw, GLPRT_RUC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_RUC(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_undersize, &cur_ps->rx_undersize);
 
-	ice_stat_update32(hw, GLPRT_RFC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_RFC(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_fragments, &cur_ps->rx_fragments);
 
-	ice_stat_update32(hw, GLPRT_ROC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_ROC(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_oversize, &cur_ps->rx_oversize);
 
-	ice_stat_update32(hw, GLPRT_RJC(pf_id), pf->stat_prev_loaded,
+	ice_stat_update32(hw, GLPRT_RJC(port), pf->stat_prev_loaded,
 			  &prev_ps->rx_jabber, &cur_ps->rx_jabber);
 
 	pf->stat_prev_loaded = true;
@@ -3654,21 +3690,19 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
 	int err = 0;
 
 	if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
-		return err;
+		return 0;
 
 	clear_bit(__ICE_NEEDS_RESTART, vsi->state);
 
 	if (vsi->netdev && vsi->type == ICE_VSI_PF) {
-		struct net_device *netd = vsi->netdev;
-
 		if (netif_running(vsi->netdev)) {
-			if (locked) {
-				err = netd->netdev_ops->ndo_open(netd);
-			} else {
+			if (!locked)
 				rtnl_lock();
-				err = netd->netdev_ops->ndo_open(netd);
+
+			err = ice_open(vsi->netdev);
+
+			if (!locked)
 				rtnl_unlock();
-			}
 		}
 	}
@@ -3706,22 +3740,23 @@ static int ice_vsi_rebuild_all(struct ice_pf *pf)
 	/* loop through pf->vsi array and reinit the VSI if found */
 	ice_for_each_vsi(pf, i) {
+		struct ice_vsi *vsi = pf->vsi[i];
 		int err;
 
-		if (!pf->vsi[i])
+		if (!vsi)
 			continue;
 
-		err = ice_vsi_rebuild(pf->vsi[i]);
+		err = ice_vsi_rebuild(vsi);
 		if (err) {
 			dev_err(&pf->pdev->dev,
 				"VSI at index %d rebuild failed\n",
-				pf->vsi[i]->idx);
+				vsi->idx);
 			return err;
 		}
 
 		dev_info(&pf->pdev->dev,
 			 "VSI at index %d rebuilt. vsi_num = 0x%x\n",
-			 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+			 vsi->idx, vsi->vsi_num);
 	}
 
 	return 0;
@@ -3739,25 +3774,27 @@ static int ice_vsi_replay_all(struct ice_pf *pf)
 	/* loop through pf->vsi array and replay the VSI if found */
 	ice_for_each_vsi(pf, i) {
-		if (!pf->vsi[i])
+		struct ice_vsi *vsi = pf->vsi[i];
+
+		if (!vsi)
 			continue;
 
-		ret = ice_replay_vsi(hw, pf->vsi[i]->idx);
+		ret = ice_replay_vsi(hw, vsi->idx);
 		if (ret) {
 			dev_err(&pf->pdev->dev,
 				"VSI at index %d replay failed %d\n",
-				pf->vsi[i]->idx, ret);
+				vsi->idx, ret);
 			return -EIO;
 		}
 
 		/* Re-map HW VSI number, using VSI handle that has been
 		 * previously validated in ice_replay_vsi() call above
 		 */
-		pf->vsi[i]->vsi_num = ice_get_hw_vsi_num(hw, pf->vsi[i]->idx);
+		vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
 
 		dev_info(&pf->pdev->dev,
 			 "VSI at index %d filter replayed successfully - vsi_num %i\n",
-			 pf->vsi[i]->idx, pf->vsi[i]->vsi_num);
+			 vsi->idx, vsi->vsi_num);
 	}
 
 	/* Clean up replay filter after successful re-configuration */
...
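ice_dis_vsi() and ice_ena_vsi() above use a locked flag so a caller that already holds the RTNL lock is not locked against itself. The same convention in a standalone pthread sketch (an analogy, not kernel code):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t rtnl = PTHREAD_MUTEX_INITIALIZER;

static void stop_netdev(void)
{
	/* stand-in for ice_stop(); must run under the lock */
}

static void dis_vsi(bool locked)
{
	if (!locked)
		pthread_mutex_lock(&rtnl);

	stop_netdev();		/* always reached with the lock held */

	if (!locked)
		pthread_mutex_unlock(&rtnl);
}

int main(void)
{
	dis_vsi(false);			/* caller does not hold the lock */

	pthread_mutex_lock(&rtnl);
	dis_vsi(true);			/* caller already holds it */
	pthread_mutex_unlock(&rtnl);
	return 0;
}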
...@@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent, ...@@ -260,33 +260,17 @@ ice_sched_remove_elems(struct ice_hw *hw, struct ice_sched_node *parent,
/** /**
* ice_sched_get_first_node - get the first node of the given layer * ice_sched_get_first_node - get the first node of the given layer
* @hw: pointer to the HW struct * @pi: port information structure
* @parent: pointer the base node of the subtree * @parent: pointer the base node of the subtree
* @layer: layer number * @layer: layer number
* *
* This function retrieves the first node of the given layer from the subtree * This function retrieves the first node of the given layer from the subtree
*/ */
static struct ice_sched_node * static struct ice_sched_node *
ice_sched_get_first_node(struct ice_hw *hw, struct ice_sched_node *parent, ice_sched_get_first_node(struct ice_port_info *pi,
u8 layer) struct ice_sched_node *parent, u8 layer)
{ {
u8 i; return pi->sib_head[parent->tc_num][layer];
if (layer < hw->sw_entry_point_layer)
return NULL;
for (i = 0; i < parent->num_children; i++) {
struct ice_sched_node *node = parent->children[i];
if (node) {
if (node->tx_sched_layer == layer)
return node;
/* this recursion is intentional, and wouldn't
* go more than 9 calls
*/
return ice_sched_get_first_node(hw, node, layer);
}
}
return NULL;
} }
/** /**
...@@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) ...@@ -342,7 +326,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
parent = node->parent; parent = node->parent;
/* root has no parent */ /* root has no parent */
if (parent) { if (parent) {
struct ice_sched_node *p, *tc_node; struct ice_sched_node *p;
/* update the parent */ /* update the parent */
for (i = 0; i < parent->num_children; i++) for (i = 0; i < parent->num_children; i++)
...@@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) ...@@ -354,16 +338,7 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
break; break;
} }
/* search for previous sibling that points to this node and p = ice_sched_get_first_node(pi, node, node->tx_sched_layer);
* remove the reference
*/
tc_node = ice_sched_get_tc_node(pi, node->tc_num);
if (!tc_node) {
ice_debug(hw, ICE_DBG_SCHED,
"Invalid TC number %d\n", node->tc_num);
goto err_exit;
}
p = ice_sched_get_first_node(hw, tc_node, node->tx_sched_layer);
while (p) { while (p) {
if (p->sibling == node) { if (p->sibling == node) {
p->sibling = node->sibling; p->sibling = node->sibling;
...@@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node) ...@@ -371,8 +346,13 @@ void ice_free_sched_node(struct ice_port_info *pi, struct ice_sched_node *node)
} }
p = p->sibling; p = p->sibling;
} }
/* update the sibling head if head is getting removed */
if (pi->sib_head[node->tc_num][node->tx_sched_layer] == node)
pi->sib_head[node->tc_num][node->tx_sched_layer] =
node->sibling;
} }
err_exit:
/* leaf nodes have no children */ /* leaf nodes have no children */
if (node->children) if (node->children)
devm_kfree(ice_hw_to_dev(hw), node->children); devm_kfree(ice_hw_to_dev(hw), node->children);
...@@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node, ...@@ -743,13 +723,17 @@ ice_sched_add_elems(struct ice_port_info *pi, struct ice_sched_node *tc_node,
/* add it to previous node sibling pointer */ /* add it to previous node sibling pointer */
/* Note: siblings are not linked across branches */ /* Note: siblings are not linked across branches */
prev = ice_sched_get_first_node(hw, tc_node, layer); prev = ice_sched_get_first_node(pi, tc_node, layer);
if (prev && prev != new_node) { if (prev && prev != new_node) {
while (prev->sibling) while (prev->sibling)
prev = prev->sibling; prev = prev->sibling;
prev->sibling = new_node; prev->sibling = new_node;
} }
/* initialize the sibling head */
if (!pi->sib_head[tc_node->tc_num][layer])
pi->sib_head[tc_node->tc_num][layer] = new_node;
if (i == 0) if (i == 0)
*first_node_teid = teid; *first_node_teid = teid;
} }
@@ -1160,7 +1144,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
 		goto lan_q_exit;

 	/* get the first queue group node from VSI sub-tree */
-	qgrp_node = ice_sched_get_first_node(pi->hw, vsi_node, qgrp_layer);
+	qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
 	while (qgrp_node) {
 		/* make sure the qgroup node is part of the VSI subtree */
 		if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
@@ -1191,7 +1175,7 @@ ice_sched_get_vsi_node(struct ice_hw *hw, struct ice_sched_node *tc_node,
 	u8 vsi_layer;

 	vsi_layer = ice_sched_get_vsi_layer(hw);
-	node = ice_sched_get_first_node(hw, tc_node, vsi_layer);
+	node = ice_sched_get_first_node(hw->port_info, tc_node, vsi_layer);

 	/* Check whether it already exists */
 	while (node) {
@@ -1316,7 +1300,8 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 		/* If intermediate nodes are reached max children
 		 * then add a new one.
 		 */
-		node = ice_sched_get_first_node(hw, tc_node, (u8)i);
+		node = ice_sched_get_first_node(hw->port_info, tc_node,
+						(u8)i);
 		/* scan all the siblings */
 		while (node) {
 			if (node->num_children < hw->max_children[i])
@@ -13,9 +13,9 @@
 #define ICE_BYTES_PER_WORD	2
 #define ICE_BYTES_PER_DWORD	4

-static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
+static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
 {
-	return test_bit(tc, (unsigned long *)&bitmap);
+	return test_bit(tc, &bitmap);
 }
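The old prototype took the TC bitmap as a u8 and cast its address to unsigned long *; test_bit() then reads a full word through that pointer, touching bytes beyond the u8, which is exactly the cast static analysis flagged. Passing the bitmap by value as unsigned long keeps the access in bounds and drops the cast. A small userspace model of the fix; the simplified test_bit() stand-in is an assumption, valid only for bit numbers below BITS_PER_LONG:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* simplified stand-in for the kernel's test_bit() */
static bool test_bit(unsigned int nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

static inline bool ice_is_tc_ena(unsigned long bitmap, uint8_t tc)
{
	return test_bit(tc, &bitmap);	/* &bitmap is a real unsigned long * */
}

int main(void)
{
	uint8_t ena_tc = 1U << 2;	/* only TC 2 enabled */

	/* the u8 argument is widened by value at the call site */
	printf("TC2: %d, TC3: %d\n", ice_is_tc_ena(ena_tc, 2),
	       ice_is_tc_ena(ena_tc, 3));	/* prints: TC2: 1, TC3: 0 */
	return 0;
}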
 /* Driver always calls main vsi_handle first */
@@ -347,6 +347,8 @@ struct ice_port_info {
 	struct ice_mac_info mac;
 	struct ice_phy_info phy;
 	struct mutex sched_lock;	/* protect access to TXSched tree */
+	struct ice_sched_node *
+		sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
 	struct ice_dcbx_cfg local_dcbx_cfg;	/* Oper/Local Cfg */
 	/* DCBX info */
 	struct ice_dcbx_cfg remote_dcbx_cfg;	/* Peer Cfg */
@@ -251,6 +251,35 @@ static int ice_sriov_free_msix_res(struct ice_pf *pf)
 	return 0;
 }
+/**
+ * ice_set_vf_state_qs_dis - Set VF queues state to disabled
+ * @vf: pointer to the VF structure
+ */
+void ice_set_vf_state_qs_dis(struct ice_vf *vf)
+{
+	/* Clear Rx/Tx enabled queues flag */
+	bitmap_zero(vf->txq_ena, ICE_MAX_BASE_QS_PER_VF);
+	bitmap_zero(vf->rxq_ena, ICE_MAX_BASE_QS_PER_VF);
+	vf->num_qs_ena = 0;
+	clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);
+}
+
+/**
+ * ice_dis_vf_qs - Disable the VF queues
+ * @vf: pointer to the VF structure
+ */
+static void ice_dis_vf_qs(struct ice_vf *vf)
+{
+	struct ice_pf *pf = vf->pf;
+	struct ice_vsi *vsi;
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+
+	ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id);
+	ice_vsi_stop_rx_rings(vsi);
+	ice_set_vf_state_qs_dis(vf);
+}
+
 /**
  * ice_free_vfs - Free all VFs
  * @pf: pointer to the PF structure
@@ -267,19 +296,9 @@ void ice_free_vfs(struct ice_pf *pf)
 		usleep_range(1000, 2000);

 	/* Avoid wait time by stopping all VFs at the same time */
-	for (i = 0; i < pf->num_alloc_vfs; i++) {
-		struct ice_vsi *vsi;
-
-		if (!test_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states))
-			continue;
-
-		vsi = pf->vsi[pf->vf[i].lan_vsi_idx];
-		/* stop rings without wait time */
-		ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, i);
-		ice_vsi_stop_rx_rings(vsi);
-
-		clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
-	}
+	for (i = 0; i < pf->num_alloc_vfs; i++)
+		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[i].vf_states))
+			ice_dis_vf_qs(&pf->vf[i]);

 	/* Disable IOV before freeing resources. This lets any VF drivers
 	 * running in the host get themselves cleaned up before we yank
@@ -1055,17 +1074,9 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	for (v = 0; v < pf->num_alloc_vfs; v++)
 		ice_trigger_vf_reset(&pf->vf[v], is_vflr);

-	for (v = 0; v < pf->num_alloc_vfs; v++) {
-		struct ice_vsi *vsi;
-
-		vf = &pf->vf[v];
-		vsi = pf->vsi[vf->lan_vsi_idx];
-		if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
-			ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
-			ice_vsi_stop_rx_rings(vsi);
-			clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
-		}
-	}
+	for (v = 0; v < pf->num_alloc_vfs; v++)
+		if (test_bit(ICE_VF_STATE_QS_ENA, pf->vf[v].vf_states))
+			ice_dis_vf_qs(&pf->vf[v]);

 	/* HW requires some time to make sure it can flush the FIFO for a VF
 	 * when it resets it. Poll the VPGEN_VFRSTAT register for each VF in
@@ -1141,27 +1152,31 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 	u32 reg;
 	int i;

-	/* If the VFs have been disabled, this means something else is
-	 * resetting the VF, so we shouldn't continue.
+	/* If the PF has been disabled, there is no need resetting VF until
+	 * PF is active again.
 	 */
-	if (test_and_set_bit(__ICE_VF_DIS, pf->state))
+	if (test_bit(__ICE_VF_DIS, pf->state))
+		return false;
+
+	/* If the VF has been disabled, this means something else is
+	 * resetting the VF, so we shouldn't continue. Otherwise, set
+	 * disable VF state bit for actual reset, and continue.
+	 */
+	if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
 		return false;

 	ice_trigger_vf_reset(vf, is_vflr);

 	vsi = pf->vsi[vf->lan_vsi_idx];

-	if (test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
-		ice_vsi_stop_lan_tx_rings(vsi, ICE_VF_RESET, vf->vf_id);
-		ice_vsi_stop_rx_rings(vsi);
-		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
-	} else {
+	if (test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states))
+		ice_dis_vf_qs(vf);
+	else
 		/* Call Disable LAN Tx queue AQ call even when queues are not
-		 * enabled. This is needed for successful completiom of VFR
+		 * enabled. This is needed for successful completion of VFR
 		 */
 		ice_dis_vsi_txq(vsi->port_info, vsi->idx, 0, 0, NULL, NULL,
 				NULL, ICE_VF_RESET, vf->vf_id, NULL);
-	}

 	hw = &pf->hw;
 	/* poll VPGEN_VFRSTAT reg to make sure
@@ -1210,7 +1225,6 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
 	ice_cleanup_and_realloc_vf(vf);
 	ice_flush(hw);
-	clear_bit(__ICE_VF_DIS, pf->state);

 	return true;
 }
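Net effect of the two hunks above: the PF-wide __ICE_VF_DIS bit is now only tested here (it stays owned by the paths that disable all VFs at once), while per-VF serialization moves to a test-and-set of ICE_VF_STATE_DIS, which presumably stays set until the VF is cleaned up and reallocated. A condensed sketch of the gating, not verbatim driver code:

if (test_bit(__ICE_VF_DIS, pf->state))
	return false;	/* an all-VF disable is in progress; don't race it */

if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
	return false;	/* this VF is already mid-reset; only one caller wins */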
@@ -1712,6 +1726,21 @@ static bool ice_vc_isvalid_q_id(struct ice_vf *vf, u16 vsi_id, u8 qid)
 	return (vsi && (qid < vsi->alloc_txq));
 }
+/**
+ * ice_vc_isvalid_ring_len
+ * @ring_len: length of ring
+ *
+ * check for the valid ring count, should be multiple of ICE_REQ_DESC_MULTIPLE
+ * or zero
+ */
+static bool ice_vc_isvalid_ring_len(u16 ring_len)
+{
+	return ring_len == 0 ||
+	       (ring_len >= ICE_MIN_NUM_DESC &&
+		ring_len <= ICE_MAX_NUM_DESC &&
+		!(ring_len % ICE_REQ_DESC_MULTIPLE));
+}
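To make the predicate concrete, here is a userspace check of the accepted values. The numeric limits (ICE_MIN_NUM_DESC 64, ICE_MAX_NUM_DESC 8160, ICE_REQ_DESC_MULTIPLE 32) are quoted from the ice headers of this period and should be treated as assumptions:

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define ICE_MIN_NUM_DESC	64
#define ICE_MAX_NUM_DESC	8160
#define ICE_REQ_DESC_MULTIPLE	32

static bool ice_vc_isvalid_ring_len(uint16_t ring_len)
{
	return ring_len == 0 ||
	       (ring_len >= ICE_MIN_NUM_DESC &&
		ring_len <= ICE_MAX_NUM_DESC &&
		!(ring_len % ICE_REQ_DESC_MULTIPLE));
}

int main(void)
{
	assert(ice_vc_isvalid_ring_len(0));	/* zero = leave unconfigured */
	assert(ice_vc_isvalid_ring_len(512));	/* in range, multiple of 32 */
	assert(!ice_vc_isvalid_ring_len(100));	/* not a multiple of 32 */
	assert(!ice_vc_isvalid_ring_len(32));	/* below ICE_MIN_NUM_DESC */
	return 0;
}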
 /**
  * ice_vc_config_rss_key
  * @vf: pointer to the VF info
@@ -1864,6 +1893,8 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 		(struct virtchnl_queue_select *)msg;
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
+	unsigned long q_map;
+	u16 vf_q_id;

 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
@@ -1896,12 +1927,48 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 	 * Tx queue group list was configured and the context bits were
 	 * programmed using ice_vsi_cfg_txqs
 	 */
-	if (ice_vsi_start_rx_rings(vsi))
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	q_map = vqs->rx_queues;
+	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			goto error_param;
+		}
+
+		/* Skip queue if enabled */
+		if (test_bit(vf_q_id, vf->rxq_ena))
+			continue;
+
+		if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
+			dev_err(&vsi->back->pdev->dev,
+				"Failed to enable Rx ring %d on VSI %d\n",
+				vf_q_id, vsi->vsi_num);
+			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			goto error_param;
+		}
+
+		set_bit(vf_q_id, vf->rxq_ena);
+		vf->num_qs_ena++;
+	}
+
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	q_map = vqs->tx_queues;
+	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+		if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+			goto error_param;
+		}
+
+		/* Skip queue if enabled */
+		if (test_bit(vf_q_id, vf->txq_ena))
+			continue;
+
+		set_bit(vf_q_id, vf->txq_ena);
+		vf->num_qs_ena++;
+	}

 	/* Set flag to indicate that queues are enabled */
 	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
-		set_bit(ICE_VF_STATE_ENA, vf->vf_states);
+		set_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

 error_param:
 	/* send the response to the VF */
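One detail worth calling out in both the enable and disable paths: virtchnl carries the queue selections as 32-bit masks, while for_each_set_bit() walks an unsigned long bitmap, hence the local q_map copy. A sketch of the pattern, assuming virtchnl_queue_select's u32 rx_queues/tx_queues fields:

	unsigned long q_map = vqs->rx_queues;	/* widen the u32 mask safely */
	u16 vf_q_id;

	for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
		/* visits each requested VF-relative queue id, lowest bit first */
	}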
@@ -1924,9 +1991,11 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 		(struct virtchnl_queue_select *)msg;
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
+	unsigned long q_map;
+	u16 vf_q_id;

 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) &&
-	    !test_bit(ICE_VF_STATE_ENA, vf->vf_states)) {
+	    !test_bit(ICE_VF_STATE_QS_ENA, vf->vf_states)) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}
@@ -1953,23 +2022,69 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 		goto error_param;
 	}

-	if (ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, vf->vf_id)) {
-		dev_err(&vsi->back->pdev->dev,
-			"Failed to stop tx rings on VSI %d\n",
-			vsi->vsi_num);
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	if (vqs->tx_queues) {
+		q_map = vqs->tx_queues;
+
+		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+			struct ice_ring *ring = vsi->tx_rings[vf_q_id];
+			struct ice_txq_meta txq_meta = { 0 };
+
+			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Skip queue if not enabled */
+			if (!test_bit(vf_q_id, vf->txq_ena))
+				continue;
+
+			ice_fill_txq_meta(vsi, ring, &txq_meta);
+
+			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
+						 ring, &txq_meta)) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed to stop Tx ring %d on VSI %d\n",
+					vf_q_id, vsi->vsi_num);
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Clear enabled queues flag */
+			clear_bit(vf_q_id, vf->txq_ena);
+			vf->num_qs_ena--;
+		}
 	}

-	if (ice_vsi_stop_rx_rings(vsi)) {
-		dev_err(&vsi->back->pdev->dev,
-			"Failed to stop rx rings on VSI %d\n",
-			vsi->vsi_num);
-		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+	if (vqs->rx_queues) {
+		q_map = vqs->rx_queues;
+
+		for_each_set_bit(vf_q_id, &q_map, ICE_MAX_BASE_QS_PER_VF) {
+			if (!ice_vc_isvalid_q_id(vf, vqs->vsi_id, vf_q_id)) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Skip queue if not enabled */
+			if (!test_bit(vf_q_id, vf->rxq_ena))
+				continue;
+
+			if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
+				dev_err(&vsi->back->pdev->dev,
+					"Failed to stop Rx ring %d on VSI %d\n",
+					vf_q_id, vsi->vsi_num);
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+
+			/* Clear enabled queues flag */
+			clear_bit(vf_q_id, vf->rxq_ena);
+			vf->num_qs_ena--;
+		}
 	}

 	/* Clear enabled queues flag */
-	if (v_ret == VIRTCHNL_STATUS_SUCCESS)
-		clear_bit(ICE_VF_STATE_ENA, vf->vf_states);
+	if (v_ret == VIRTCHNL_STATUS_SUCCESS && !vf->num_qs_ena)
+		clear_bit(ICE_VF_STATE_QS_ENA, vf->vf_states);

 error_param:
 	/* send the response to the VF */
@@ -2093,6 +2208,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	struct virtchnl_vsi_queue_config_info *qci =
 		(struct virtchnl_vsi_queue_config_info *)msg;
 	struct virtchnl_queue_pair_info *qpi;
+	u16 num_rxq = 0, num_txq = 0;
 	struct ice_pf *pf = vf->pf;
 	struct ice_vsi *vsi;
 	int i;
@@ -2107,16 +2223,17 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		goto error_param;
 	}

-	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF) {
-		dev_err(&pf->pdev->dev,
-			"VF-%d requesting more than supported number of queues: %d\n",
-			vf->vf_id, qci->num_queue_pairs);
+	vsi = pf->vsi[vf->lan_vsi_idx];
+	if (!vsi) {
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}

-	vsi = pf->vsi[vf->lan_vsi_idx];
-	if (!vsi) {
+	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
+	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
+		dev_err(&pf->pdev->dev,
+			"VF-%d requesting more than supported number of queues: %d\n",
+			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;
 	}
@@ -2127,37 +2244,51 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		    qpi->rxq.vsi_id != qci->vsi_id ||
 		    qpi->rxq.queue_id != qpi->txq.queue_id ||
 		    qpi->txq.headwb_enabled ||
+		    !ice_vc_isvalid_ring_len(qpi->txq.ring_len) ||
+		    !ice_vc_isvalid_ring_len(qpi->rxq.ring_len) ||
 		    !ice_vc_isvalid_q_id(vf, qci->vsi_id, qpi->txq.queue_id)) {
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;
 		}

 		/* copy Tx queue info from VF into VSI */
-		vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
-		vsi->tx_rings[i]->count = qpi->txq.ring_len;
-		/* copy Rx queue info from VF into VSI */
-		vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
-		vsi->rx_rings[i]->count = qpi->rxq.ring_len;
-		if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
-			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-			goto error_param;
+		if (qpi->txq.ring_len > 0) {
+			num_txq++;
+			vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
+			vsi->tx_rings[i]->count = qpi->txq.ring_len;
 		}
-		vsi->rx_buf_len = qpi->rxq.databuffer_size;
-		if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
-		    qpi->rxq.max_pkt_size < 64) {
-			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-			goto error_param;
+
+		/* copy Rx queue info from VF into VSI */
+		if (qpi->rxq.ring_len > 0) {
+			num_rxq++;
+			vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
+			vsi->rx_rings[i]->count = qpi->rxq.ring_len;
+
+			if (qpi->rxq.databuffer_size != 0 &&
+			    (qpi->rxq.databuffer_size > ((16 * 1024) - 128) ||
+			     qpi->rxq.databuffer_size < 1024)) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
+			vsi->rx_buf_len = qpi->rxq.databuffer_size;
+			vsi->rx_rings[i]->rx_buf_len = vsi->rx_buf_len;
+			if (qpi->rxq.max_pkt_size >= (16 * 1024) ||
+			    qpi->rxq.max_pkt_size < 64) {
+				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
+				goto error_param;
+			}
 		}
+
 		vsi->max_frame = qpi->rxq.max_pkt_size;
 	}

 	/* VF can request to configure less than allocated queues
 	 * or default allocated queues. So update the VSI with new number
 	 */
-	vsi->num_txq = qci->num_queue_pairs;
-	vsi->num_rxq = qci->num_queue_pairs;
+	vsi->num_txq = num_txq;
+	vsi->num_rxq = num_rxq;
 	/* All queues of VF VSI are in TC 0 */
-	vsi->tc_cfg.tc_info[0].qcount_tx = qci->num_queue_pairs;
-	vsi->tc_cfg.tc_info[0].qcount_rx = qci->num_queue_pairs;
+	vsi->tc_cfg.tc_info[0].qcount_tx = num_txq;
+	vsi->tc_cfg.tc_info[0].qcount_rx = num_rxq;

 	if (ice_vsi_cfg_lan_txqs(vsi) || ice_vsi_cfg_rxqs(vsi))
 		v_ret = VIRTCHNL_STATUS_ERR_ADMIN_QUEUE_ERROR;
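The upshot of the ring_len handling: a pair whose ring_len is zero is now treated as "leave unconfigured" rather than an error, and only configured pairs are counted into num_txq/num_rxq. A hypothetical request illustrating this; the values and the zeroed fields are illustrative only, and a real request would also fill queue and VSI ids:

	/* VF with 4 allocated pairs configures only the first two; the zeroed
	 * entries pass ice_vc_isvalid_ring_len() and are skipped, so the VSI
	 * ends up with num_txq == num_rxq == 2. */
	struct virtchnl_queue_pair_info qpi[4] = {
		[0] = { .txq = { .ring_len = 512 }, .rxq = { .ring_len = 512 } },
		[1] = { .txq = { .ring_len = 512 }, .rxq = { .ring_len = 512 } },
		/* [2] and [3] stay zeroed: ring_len == 0, not configured */
	};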
@@ -15,8 +15,8 @@
 #define ICE_MAX_MACADDR_PER_VF		12

 /* Malicious Driver Detection */
-#define ICE_DFLT_NUM_MDD_EVENTS_ALLOWED	3
 #define ICE_DFLT_NUM_INVAL_MSGS_ALLOWED	10
+#define ICE_MDD_EVENTS_THRESHOLD	30

 /* Static VF transaction/status register def */
 #define VF_DEVICE_STATUS		0xAA
@@ -41,9 +41,9 @@
 /* Specific VF states */
 enum ice_vf_states {
-	ICE_VF_STATE_INIT = 0,
-	ICE_VF_STATE_ACTIVE,
-	ICE_VF_STATE_ENA,
+	ICE_VF_STATE_INIT = 0,		/* PF is initializing VF */
+	ICE_VF_STATE_ACTIVE,		/* VF resources are allocated for use */
+	ICE_VF_STATE_QS_ENA,		/* VF queue(s) enabled */
 	ICE_VF_STATE_DIS,
 	ICE_VF_STATE_MC_PROMISC,
 	ICE_VF_STATE_UC_PROMISC,
@@ -68,6 +68,8 @@ struct ice_vf {
 	struct virtchnl_version_info vf_ver;
 	u32 driver_caps;		/* reported by VF driver */
 	struct virtchnl_ether_addr dflt_lan_addr;
+	DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF);
+	DECLARE_BITMAP(rxq_ena, ICE_MAX_BASE_QS_PER_VF);
 	u16 port_vlan_id;
 	u8 pf_set_mac:1;		/* VF MAC address set by VMM admin */
 	u8 trusted:1;
@@ -90,6 +92,7 @@ struct ice_vf {
 	u16 num_mac;
 	u16 num_vlan;
 	u16 num_vf_qs;			/* num of queue configured per VF */
+	u16 num_qs_ena;			/* total num of Tx/Rx queue enabled */
 };
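The two DECLARE_BITMAP fields are what make the per-queue tracking cheap: the macro expands to an unsigned long array sized to hold the requested number of bits, so txq_ena/rxq_ena work directly with set_bit(), clear_bit(), test_bit() and bitmap_zero(). Roughly, per the definition in linux/types.h:

	/* DECLARE_BITMAP(txq_ena, ICE_MAX_BASE_QS_PER_VF) expands to about: */
	unsigned long txq_ena[BITS_TO_LONGS(ICE_MAX_BASE_QS_PER_VF)];
	/* i.e. a single word on 64-bit when ICE_MAX_BASE_QS_PER_VF <= 64 */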
 #ifdef CONFIG_PCI_IOV
@@ -116,12 +119,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state);
 int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena);
 int ice_calc_vf_reg_idx(struct ice_vf *vf, struct ice_q_vector *q_vector);
+void ice_set_vf_state_qs_dis(struct ice_vf *vf);
 #else /* CONFIG_PCI_IOV */
 #define ice_process_vflr_event(pf) do {} while (0)
 #define ice_free_vfs(pf) do {} while (0)
 #define ice_vc_process_vf_msg(pf, event) do {} while (0)
 #define ice_vc_notify_link_state(pf) do {} while (0)
 #define ice_vc_notify_reset(pf) do {} while (0)
+#define ice_set_vf_state_qs_dis(vf) do {} while (0)

 static inline bool
 ice_reset_all_vfs(struct ice_pf __always_unused *pf,