Commit 84115f0e authored by Jakub Kicinski

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue

Tony Nguyen says:

====================
Intel Wired LAN Driver Updates 2023-01-27 (ice)

This series contains updates to ice driver only.

Dave prevents modifying channels when RDMA is active, as this would break
RDMA traffic (a short sketch of the added guard follows the commit metadata below).

Michal fixes a broken URL.

* '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/tnguy/net-queue:
  ice: Fix broken link in ice NAPI doc
  ice: Prevent set_channel from changing queues while RDMA active
====================

Link: https://lore.kernel.org/r/20230127225333.1534783-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents 73a87602 53b9b77d
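
Background for the diff below: the guard added to ice_set_channels() amounts to the pattern sketched here. This is only an illustrative sketch, not driver code; ice_rdma_guard() is a hypothetical name invented for it, while pf->adev, pf->adev_mutex, device_lock() and the pf->adev->dev.driver check are the ones actually used in the hunk.

/* Illustrative sketch only (hypothetical helper; the real change open-codes
 * this in ice_set_channels()): decide whether queue reconfiguration may
 * proceed while an auxiliary (RDMA) device exists.  Returns 0 if it is safe
 * to continue, -EBUSY if an aux driver such as irdma is currently bound.
 * On return, *locked reports whether adev_mutex and the aux device_lock are
 * held and must be released on every exit path.
 */
static int ice_rdma_guard(struct ice_pf *pf, struct net_device *dev,
                          bool *locked)
{
        *locked = false;

        if (!pf->adev)
                return 0;                /* no auxiliary device plugged in */

        mutex_lock(&pf->adev_mutex);     /* serialize with aux device (un)plug */
        device_lock(&pf->adev->dev);     /* freeze the aux driver bind state */
        *locked = true;

        if (pf->adev->dev.driver) {
                /* an RDMA aux driver is bound; changing queues would break it */
                netdev_err(dev, "Cannot change channels when RDMA is active\n");
                return -EBUSY;
        }

        return 0;                        /* locks stay held across the rebuild */
}

The same locking is why a bool locked parameter is threaded through ice_vsi_recfg_qs() and ice_pf_dcb_recfg() below: when ice_set_channels() already holds the adev locks, ice_pf_dcb_recfg() skips the IIDC_EVENT_AFTER_TC_CHANGE notification, which would otherwise re-acquire those locks via ice_send_event_to_aux().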
@@ -819,7 +819,7 @@ NAPI
 ----
 This driver supports NAPI (Rx polling mode).
 For more information on NAPI, see
-https://www.linuxfoundation.org/collaborate/workgroups/networking/napi
+https://wiki.linuxfoundation.org/networking/napi
 
 MACVLAN
@@ -880,7 +880,7 @@ void ice_set_ethtool_repr_ops(struct net_device *netdev);
 void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
 u16 ice_get_avail_txq_count(struct ice_pf *pf);
 u16 ice_get_avail_rxq_count(struct ice_pf *pf);
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx);
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked);
 void ice_update_vsi_stats(struct ice_vsi *vsi);
 void ice_update_pf_stats(struct ice_pf *pf);
 void
@@ -441,7 +441,7 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
                 goto out;
         }
 
-        ice_pf_dcb_recfg(pf);
+        ice_pf_dcb_recfg(pf, false);
 
 out:
         /* enable previously downed VSIs */
@@ -731,12 +731,13 @@ static int ice_dcb_noncontig_cfg(struct ice_pf *pf)
 /**
  * ice_pf_dcb_recfg - Reconfigure all VEBs and VSIs
  * @pf: pointer to the PF struct
+ * @locked: is adev device lock held
  *
  * Assumed caller has already disabled all VSIs before
  * calling this function. Reconfiguring DCB based on
  * local_dcbx_cfg.
  */
-void ice_pf_dcb_recfg(struct ice_pf *pf)
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked)
 {
         struct ice_dcbx_cfg *dcbcfg = &pf->hw.port_info->qos_cfg.local_dcbx_cfg;
         struct iidc_event *event;
@@ -783,14 +784,16 @@ void ice_pf_dcb_recfg(struct ice_pf *pf)
                 if (vsi->type == ICE_VSI_PF)
                         ice_dcbnl_set_all(vsi);
         }
-        /* Notify the AUX drivers that TC change is finished */
-        event = kzalloc(sizeof(*event), GFP_KERNEL);
-        if (!event)
-                return;
+        if (!locked) {
+                /* Notify the AUX drivers that TC change is finished */
+                event = kzalloc(sizeof(*event), GFP_KERNEL);
+                if (!event)
+                        return;
 
-        set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
-        ice_send_event_to_aux(pf, event);
-        kfree(event);
+                set_bit(IIDC_EVENT_AFTER_TC_CHANGE, event->type);
+                ice_send_event_to_aux(pf, event);
+                kfree(event);
+        }
 }
 
 /**
@@ -1044,7 +1047,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
         }
 
         /* changes in configuration update VSI */
-        ice_pf_dcb_recfg(pf);
+        ice_pf_dcb_recfg(pf, false);
 
         /* enable previously downed VSIs */
         ice_dcb_ena_dis_vsi(pf, true, true);
@@ -23,7 +23,7 @@ u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
 int
 ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
 int ice_dcb_bwchk(struct ice_pf *pf, struct ice_dcbx_cfg *dcbcfg);
-void ice_pf_dcb_recfg(struct ice_pf *pf);
+void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked);
 void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
 int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
 void ice_update_dcb_stats(struct ice_pf *pf);
@@ -128,7 +128,7 @@ static inline u8 ice_get_pfc_mode(struct ice_pf *pf)
         return 0;
 }
 
-static inline void ice_pf_dcb_recfg(struct ice_pf *pf) { }
+static inline void ice_pf_dcb_recfg(struct ice_pf *pf, bool locked) { }
 static inline void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi) { }
 static inline void ice_update_dcb_stats(struct ice_pf *pf) { }
 static inline void
@@ -3641,7 +3641,9 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
         struct ice_vsi *vsi = np->vsi;
         struct ice_pf *pf = vsi->back;
         int new_rx = 0, new_tx = 0;
+        bool locked = false;
         u32 curr_combined;
+        int ret = 0;
 
         /* do not support changing channels in Safe Mode */
         if (ice_is_safe_mode(pf)) {
@@ -3705,15 +3707,33 @@ static int ice_set_channels(struct net_device *dev, struct ethtool_channels *ch)
                 return -EINVAL;
         }
 
-        ice_vsi_recfg_qs(vsi, new_rx, new_tx);
+        if (pf->adev) {
+                mutex_lock(&pf->adev_mutex);
+                device_lock(&pf->adev->dev);
+                locked = true;
+                if (pf->adev->dev.driver) {
+                        netdev_err(dev, "Cannot change channels when RDMA is active\n");
+                        ret = -EBUSY;
+                        goto adev_unlock;
+                }
+        }
 
-        if (!netif_is_rxfh_configured(dev))
-                return ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+        ice_vsi_recfg_qs(vsi, new_rx, new_tx, locked);
+
+        if (!netif_is_rxfh_configured(dev)) {
+                ret = ice_vsi_set_dflt_rss_lut(vsi, new_rx);
+                goto adev_unlock;
+        }
 
         /* Update rss_size due to change in Rx queues */
         vsi->rss_size = ice_get_valid_rss_size(&pf->hw, new_rx);
 
-        return 0;
+adev_unlock:
+        if (locked) {
+                device_unlock(&pf->adev->dev);
+                mutex_unlock(&pf->adev_mutex);
+        }
+        return ret;
 }
 
 /**
@@ -4195,12 +4195,13 @@ bool ice_is_wol_supported(struct ice_hw *hw)
  * @vsi: VSI being changed
  * @new_rx: new number of Rx queues
  * @new_tx: new number of Tx queues
+ * @locked: is adev device_lock held
  *
  * Only change the number of queues if new_tx, or new_rx is non-0.
  *
  * Returns 0 on success.
  */
-int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
+int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx, bool locked)
 {
         struct ice_pf *pf = vsi->back;
         int err = 0, timeout = 50;
@@ -4229,7 +4230,7 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
         ice_vsi_close(vsi);
         ice_vsi_rebuild(vsi, false);
-        ice_pf_dcb_recfg(pf);
+        ice_pf_dcb_recfg(pf, locked);
         ice_vsi_open(vsi);
 
 done:
         clear_bit(ICE_CFG_BUSY, pf->state);