Commit 89e960b5 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2020-02-12

This series contains fixes to only the ice driver.

Dave fixes logic flaws in the DCB rebuild function, which is used after
a reset.  He also fixes a configuration issue when switching between
firmware and software LLDP mode, where the number of TLVs configured
was getting out of sync with what lldpad thinks is configured.
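
The reordering behind the LLDP fix is visible in the ice_dcbnl_delapp()
hunk later in this diff; paraphrased, the driver now checks its own APP
table before deleting the entry from the kernel DCB table, so the two
counts can no longer diverge:

	mutex_lock(&pf->tc_mutex);
	old_cfg = &pf->hw.port_info->local_dcbx_cfg;

	/* keep the last APP TLV; bail out before touching the
	 * kernel's table so both sides stay in sync
	 */
	if (old_cfg->numapps <= 1)
		goto delapp_out;

	ret = dcb_ieee_delapp(netdev, app);
	if (ret)
		goto delapp_out;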

Paul fixes how the driver displays the supported and advertised link
modes by basing them on the PHY capabilities, cleaning up a lot of code
in the process.

Brett fixes duplicate receive tail bumps by comparing the value we are
writing to tail with the previously written tail value.  He also cleans
up workarounds that are no longer needed with the latest NVM images.
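
The tail-bump fix lands in ice_release_rx_desc() near the end of this
diff.  Only the prev_ntu line is visible there; the rest of this sketch
paraphrases the surrounding logic, on the assumption that the hardware
ignores the low three bits of the Rx tail register:

	u16 prev_ntu = rx_ring->next_to_use & ~0x7;

	rx_ring->next_to_use = val;

	/* only bump tail when it crosses an 8-descriptor boundary,
	 * since writes inside a boundary are no-ops to the hardware
	 */
	val &= ~0x7;
	if (prev_ntu != val) {
		wmb();	/* descriptor writes must land before the bump */
		writel(val, rx_ring->tail);
	}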

Anirudh cleans up unnecessary CONFIG_PCI_IOV wrappers, updates the
driver to use ice_pf_to_dev() instead of &pf->pdev->dev or
&vsi->back->pdev->dev, and cleans up the string format in print
function calls to remove newlines where applicable.
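
For reference, ice_pf_to_dev() is a small accessor, presumably along
these lines in ice.h (a sketch, not the verbatim definition):

	static inline struct device *ice_pf_to_dev(struct ice_pf *pf)
	{
		return &pf->pdev->dev;
	}

Funneling all call sites through one helper keeps the two pointer
spellings from proliferating and shortens the dev_err()/dev_info()
lines touched throughout this series.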

Akeem updates the link message logging to include "Full Duplex" and
"Negotiated", to help distinguish the negotiated FEC mode from the
"Requested" one.

Bruce fixes and consolidates the logging of firmware/NVM information
during driver load, since the information was duplicative of what is
available via ethtool.  Fixed the checking of the Unit Load Status bits
after reset to ensure they are 0x7FF before continuing, by updating the
mask.  Cleaned up possible NULL dereferences that were created by a
previous commit.

Ben fixes the driver to use the correct netif_msg_tx/rx_error() to
determine whether to print the MDD event type.

Tony provides several trivial fixes, which include whitespace, typos,
function header comments, and reverse Christmas tree issues.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b9287f2a 4ee656bb
@@ -1660,6 +1660,7 @@ struct ice_aqc_get_pkg_info_resp {
 	__le32 count;
 	struct ice_aqc_get_pkg_info pkg_info[1];
 };
+
 /**
  * struct ice_aq_desc - Admin Queue (AQ) descriptor
  * @flags: ICE_AQ_FLAG_* flags

@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 		if (err)
 			return err;
 
-		dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+		dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
 			 ring->q_index);
 	} else {
 		ring->zca.free = NULL;

@@ -405,8 +405,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* Absolute queue number out of 2K needs to be passed */
 	err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
 	if (err) {
-		dev_err(&vsi->back->pdev->dev,
-			"Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
 			pf_q, err);
 		return -EIO;
 	}

@@ -428,8 +427,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
 		ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
 		ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
 	if (err)
-		dev_info(&vsi->back->pdev->dev,
-			 "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+		dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
 			 ring->xsk_umem ? "UMEM enabled " : "",
 			 ring->q_index, pf_q);

@@ -490,8 +488,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
 	/* wait for the change to finish */
 	ret = ice_pf_rxq_wait(pf, pf_q, ena);
 	if (ret)
-		dev_err(ice_pf_to_dev(pf),
-			"VSI idx %d Rx ring %d %sable timeout\n",
+		dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
 			vsi->idx, pf_q, (ena ? "en" : "dis"));
 
 	return ret;

@@ -506,20 +503,15 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
  */
 int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
 {
-	struct ice_pf *pf = vsi->back;
-	int v_idx = 0, num_q_vectors;
-	struct device *dev;
-	int err;
+	struct device *dev = ice_pf_to_dev(vsi->back);
+	int v_idx, err;
 
-	dev = ice_pf_to_dev(pf);
 	if (vsi->q_vectors[0]) {
 		dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
 		return -EEXIST;
 	}
 
-	num_q_vectors = vsi->num_q_vectors;
-
-	for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+	for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
 		err = ice_vsi_alloc_q_vector(vsi, v_idx);
 		if (err)
 			goto err_out;

@@ -648,8 +640,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
 	status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
 				 1, qg_buf, buf_len, NULL);
 	if (status) {
-		dev_err(ice_pf_to_dev(pf),
-			"Failed to set LAN Tx queue context, error: %d\n",
+		dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
 			status);
 		return -ENODEV;
 	}

@@ -815,14 +806,12 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
 	 * queues at the hardware level anyway.
 	 */
 	if (status == ICE_ERR_RESET_ONGOING) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"Reset in progress. LAN Tx queues already disabled\n");
+		dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
 	} else if (status == ICE_ERR_DOES_NOT_EXIST) {
-		dev_dbg(&vsi->back->pdev->dev,
-			"LAN Tx queues do not exist, nothing to disable\n");
+		dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
 	} else if (status) {
-		dev_err(&vsi->back->pdev->dev,
-			"Failed to disable LAN Tx queues, error: %d\n", status);
+		dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+			status);
 		return -ENODEV;
 	}

@@ -24,20 +24,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 	return 0;
 }
 
-/**
- * ice_dev_onetime_setup - Temporary HW/FW workarounds
- * @hw: pointer to the HW structure
- *
- * This function provides temporary workarounds for certain issues
- * that are expected to be fixed in the HW/FW.
- */
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-#define MBX_PF_VT_PFALLOC	0x00231E80
-	/* set VFs per PF */
-	wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
-}
-
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure

@@ -602,10 +588,10 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
 }
 
 /**
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
  * @hw: pointer to the HW struct
  *
- * Determines the ITR/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
  * bandwidth according to the device's configuration during power-on.
  */
 static void ice_get_itr_intrl_gran(struct ice_hw *hw)

@@ -763,8 +749,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	if (status)
 		goto err_unroll_sched;
 
-	ice_dev_onetime_setup(hw);
-
 	/* Get MAC information */
 	/* A single port can report up to two (LAN and WoL) addresses */
 	mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,

@@ -834,7 +818,7 @@ void ice_deinit_hw(struct ice_hw *hw)
  */
 enum ice_status ice_check_reset(struct ice_hw *hw)
 {
-	u32 cnt, reg = 0, grst_delay;
+	u32 cnt, reg = 0, grst_delay, uld_mask;
 
 	/* Poll for Device Active state in case a recent CORER, GLOBR,
 	 * or EMPR has occurred. The grst delay value is in 100ms units.

@@ -856,13 +840,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
 		return ICE_ERR_RESET_FAILED;
 	}
 
-#define ICE_RESET_DONE_MASK	(GLNVM_ULD_CORER_DONE_M | \
-				 GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK	(GLNVM_ULD_PCIER_DONE_M |\
+				 GLNVM_ULD_PCIER_DONE_1_M |\
+				 GLNVM_ULD_CORER_DONE_M |\
+				 GLNVM_ULD_GLOBR_DONE_M |\
+				 GLNVM_ULD_POR_DONE_M |\
+				 GLNVM_ULD_POR_DONE_1_M |\
+				 GLNVM_ULD_PCIER_DONE_2_M)
+
+	uld_mask = ICE_RESET_DONE_MASK;
 
 	/* Device is Active; check Global Reset processes are done */
 	for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
-		reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
-		if (reg == ICE_RESET_DONE_MASK) {
+		reg = rd32(hw, GLNVM_ULD) & uld_mask;
+		if (reg == uld_mask) {
 			ice_debug(hw, ICE_DBG_INIT,
 				  "Global reset processes done. %d\n", cnt);
 			break;

@@ -54,8 +54,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
 
 void ice_set_safe_mode_caps(struct ice_hw *hw);
 
-void ice_dev_onetime_setup(struct ice_hw *hw);
-
 enum ice_status
 ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
 		  u32 rxq_index);

@@ -1323,13 +1323,13 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
 }
 
 /**
- * ice_aq_query_port_ets - query port ets configuration
+ * ice_aq_query_port_ets - query port ETS configuration
  * @pi: port information structure
  * @buf: pointer to buffer
  * @buf_size: buffer size in bytes
  * @cd: pointer to command details structure or NULL
  *
- * query current port ets configuration
+ * query current port ETS configuration
  */
 static enum ice_status
 ice_aq_query_port_ets(struct ice_port_info *pi,

@@ -1416,13 +1416,13 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
 }
 
 /**
- * ice_query_port_ets - query port ets configuration
+ * ice_query_port_ets - query port ETS configuration
  * @pi: port information structure
  * @buf: pointer to buffer
  * @buf_size: buffer size in bytes
 * @cd: pointer to command details structure or NULL
 *
- * query current port ets configuration and update the
+ * query current port ETS configuration and update the
 * SW DB with the TC changes
 */
 enum ice_status

@@ -315,9 +315,9 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
  */
 void ice_dcb_rebuild(struct ice_pf *pf)
 {
-	struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
 	struct ice_aqc_port_ets_elem buf = { 0 };
 	struct device *dev = ice_pf_to_dev(pf);
+	struct ice_dcbx_cfg *err_cfg;
 	enum ice_status ret;
 
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);

@@ -330,53 +330,25 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 	if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
 		return;
 
-	local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
-	desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+	mutex_lock(&pf->tc_mutex);
 
-	/* Save current willing state and force FW to unwilling */
-	local_dcbx_cfg->etscfg.willing = 0x0;
-	local_dcbx_cfg->pfc.willing = 0x0;
-	local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+	if (!pf->hw.port_info->is_sw_lldp)
+		ice_cfg_etsrec_defaults(pf->hw.port_info);
 
-	ice_cfg_etsrec_defaults(pf->hw.port_info);
 	ret = ice_set_dcb_cfg(pf->hw.port_info);
 	if (ret) {
-		dev_err(dev, "Failed to set DCB to unwilling\n");
+		dev_err(dev, "Failed to set DCB config in rebuild\n");
 		goto dcb_error;
 	}
 
-	/* Retrieve DCB config and ensure same as current in SW */
-	prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
-	if (!prev_cfg)
-		goto dcb_error;
-
-	ice_init_dcb(&pf->hw, true);
-	if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
-		pf->hw.port_info->is_sw_lldp = true;
-	else
-		pf->hw.port_info->is_sw_lldp = false;
-
-	if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
-		/* difference in cfg detected - disable DCB till next MIB */
-		dev_err(dev, "Set local MIB not accurate\n");
-		kfree(prev_cfg);
-		goto dcb_error;
+	if (!pf->hw.port_info->is_sw_lldp) {
+		ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+		if (ret && !pf->hw.port_info->is_sw_lldp) {
+			dev_err(dev, "Failed to register for MIB changes\n");
+			goto dcb_error;
+		}
 	}
 
-	/* fetched config congruent to previous configuration */
-	kfree(prev_cfg);
-
-	/* Set the local desired config */
-	if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
-		memcpy(local_dcbx_cfg, desired_dcbx_cfg,
-		       sizeof(*local_dcbx_cfg));
-
-	ice_cfg_etsrec_defaults(pf->hw.port_info);
-	ret = ice_set_dcb_cfg(pf->hw.port_info);
-	if (ret) {
-		dev_err(dev, "Failed to set desired config\n");
-		goto dcb_error;
-	}
-
 	dev_info(dev, "DCB restored after reset\n");
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {

@@ -384,26 +356,32 @@ void ice_dcb_rebuild(struct ice_pf *pf)
 		goto dcb_error;
 	}
 
+	mutex_unlock(&pf->tc_mutex);
 	return;
 
 dcb_error:
 	dev_err(dev, "Disabling DCB until new settings occur\n");
-	prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
-	if (!prev_cfg)
+	err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
+	if (!err_cfg) {
+		mutex_unlock(&pf->tc_mutex);
 		return;
+	}
 
-	prev_cfg->etscfg.willing = true;
-	prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
-	prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
-	memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+	err_cfg->etscfg.willing = true;
+	err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+	err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+	memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
 
 	/* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
 	 * here as is done for other calls to that function. That check is
 	 * not necessary since this is in this function's error cleanup path.
 	 * Suppress the Coverity warning with the following comment...
 	 */
 	/* coverity[check_return] */
-	ice_pf_dcb_cfg(pf, prev_cfg, false);
-	kfree(prev_cfg);
+	ice_pf_dcb_cfg(pf, err_cfg, false);
+	kfree(err_cfg);
+
+	mutex_unlock(&pf->tc_mutex);
 }

@@ -434,9 +412,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
 }
 
 /**
- * ice_dcb_sw_default_config - Apply a default DCB config
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
  * @pf: PF to apply config to
- * @ets_willing: configure ets willing
+ * @ets_willing: configure ETS willing
 * @locked: was this function called with RTNL held
 */
 static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)

@@ -599,8 +577,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 		goto dcb_init_err;
 	}
 
-	dev_info(dev,
-		 "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+	dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
 		 pf->hw.func_caps.common_cap.maxtc);
 	if (err) {
 		struct ice_vsi *pf_vsi;

@@ -610,8 +587,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
 		clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
 		err = ice_dcb_sw_dflt_cfg(pf, true, locked);
 		if (err) {
-			dev_err(dev,
-				"Failed to set local DCB config %d\n", err);
+			dev_err(dev, "Failed to set local DCB config %d\n",
+				err);
 			err = -EIO;
 			goto dcb_init_err;
 		}

@@ -777,6 +754,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 		}
 	}
 
+	mutex_lock(&pf->tc_mutex);
+
 	/* store the old configuration */
 	tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
 
@@ -787,20 +766,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	ret = ice_get_dcb_cfg(pf->hw.port_info);
 	if (ret) {
 		dev_err(dev, "Failed to get DCB config\n");
-		return;
+		goto out;
 	}
 
 	/* No change detected in DCBX configs */
 	if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
 		dev_dbg(dev, "No change detected in DCBX configuration.\n");
-		return;
+		goto out;
 	}
 
 	need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
 					   &pi->local_dcbx_cfg);
 	ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
 	if (!need_reconfig)
-		return;
+		goto out;

@@ -814,7 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	pf_vsi = ice_get_main_vsi(pf);
 	if (!pf_vsi) {
 		dev_dbg(dev, "PF VSI doesn't exist\n");
-		return;
+		goto out;
 	}
 
 	rtnl_lock();

@@ -823,13 +802,15 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
 	ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
 	if (ret) {
 		dev_err(dev, "Query Port ETS failed\n");
-		rtnl_unlock();
-		return;
+		goto unlock_rtnl;
 	}
 
 	/* changes in configuration update VSI */
 	ice_pf_dcb_recfg(pf);
 
 	ice_ena_vsi(pf_vsi, true);
+
+unlock_rtnl:
 	rtnl_unlock();
+out:
+	mutex_unlock(&pf->tc_mutex);
 }
@@ -297,8 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
 		return;
 
 	*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
-	dev_dbg(ice_pf_to_dev(pf),
-		"Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+	dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
 		prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
 }

@@ -418,8 +417,8 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
 		return;
 
 	*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
-	dev_dbg(ice_pf_to_dev(pf),
-		"Get PG config prio=%d tc=%d\n", prio, *pgid);
+	dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+		*pgid);
 }

@@ -713,13 +712,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
 		return -EINVAL;
 
 	mutex_lock(&pf->tc_mutex);
-	ret = dcb_ieee_delapp(netdev, app);
-	if (ret)
-		goto delapp_out;
-
 	old_cfg = &pf->hw.port_info->local_dcbx_cfg;
 
-	if (old_cfg->numapps == 1)
+	if (old_cfg->numapps <= 1)
+		goto delapp_out;
+
+	ret = dcb_ieee_delapp(netdev, app);
+	if (ret)
 		goto delapp_out;
 
 	new_cfg = &pf->hw.port_info->desired_dcbx_cfg;

@@ -882,8 +881,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
 	sapp.protocol = app->prot_id;
 	sapp.priority = app->priority;
 	err = ice_dcbnl_delapp(vsi->netdev, &sapp);
-	dev_dbg(&vsi->back->pdev->dev,
-		"Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+	dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
 		vsi->idx, err, app->selector, app->prot_id, app->priority);
 }

@@ -267,8 +267,14 @@
 #define GLNVM_GENS_SR_SIZE_S		5
 #define GLNVM_GENS_SR_SIZE_M		ICE_M(0x7, 5)
 #define GLNVM_ULD			0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M		BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M	BIT(1)
 #define GLNVM_ULD_CORER_DONE_M		BIT(3)
 #define GLNVM_ULD_GLOBR_DONE_M		BIT(4)
+#define GLNVM_ULD_POR_DONE_M		BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M		BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M	BIT(9)
+#define GLNVM_ULD_PE_DONE_M		BIT(10)
 #define GLPCI_CNF2			0x000BE004
 #define GLPCI_CNF2_CACHELINE_SIZE_M	BIT(1)
 #define PF_FUNC_RID			0x0009E880

@@ -331,7 +337,6 @@
 #define GLV_TEPC(_VSI)			(0x00312000 + ((_VSI) * 4))
 #define GLV_UPRCL(_i)			(0x003B2000 + ((_i) * 8))
 #define GLV_UPTCL(_i)			(0x0030A000 + ((_i) * 8))
-#define PF_VT_PFALLOC_HIF		0x0009DD80
 #define VSIQF_HKEY_MAX_INDEX		12
 #define VSIQF_HLUT_MAX_INDEX		15
 #define VFINT_DYN_CTLN(_i)		(0x00003800 + ((_i) * 4))

@@ -117,8 +117,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
 		vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
 		break;
 	default:
-		dev_dbg(&vsi->back->pdev->dev,
-			"Not setting number of Tx/Rx descriptors for VSI type %d\n",
+		dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
 			vsi->type);
 		break;
 	}

@@ -724,7 +723,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 	vsi->num_txq = tx_count;
 
 	if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
-		dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+		dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
 		/* since there is a chance that num_rxq could have been changed
 		 * in the above for loop, make num_txq equal to num_rxq.
 		 */

@@ -929,8 +928,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
 	vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
 				       vsi->idx);
 	if (vsi->base_vector < 0) {
-		dev_err(dev,
-			"Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+		dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
 			num_q_vectors, vsi->vsi_num, vsi->base_vector);
 		return -ENOENT;
 	}

@@ -1232,7 +1230,8 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
 *
 * Returns 0 on success or ENOMEM on failure.
 */
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
 		    const u8 *macaddr)
 {
 	struct ice_fltr_list_entry *tmp;

@@ -1392,12 +1391,10 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
 	status = ice_remove_vlan(&pf->hw, &tmp_add_list);
 	if (status == ICE_ERR_DOES_NOT_EXIST) {
-		dev_dbg(dev,
-			"Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+		dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
 			vid, vsi->vsi_num, status);
 	} else if (status) {
-		dev_err(dev,
-			"Error removing VLAN %d on vsi %i error: %d\n",
+		dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
 			vid, vsi->vsi_num, status);
 		err = -EIO;
 	}

@@ -1453,8 +1450,7 @@ int ice_vsi_cfg_rxqs(struct ice_vsi *vsi)
 		err = ice_setup_rx_ctx(vsi->rx_rings[i]);
 		if (err) {
-			dev_err(&vsi->back->pdev->dev,
-				"ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+			dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
 				i, err);
 			return err;
 		}

@@ -1623,7 +1619,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 	if (status) {
-		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+		dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
 			status, hw->adminq.sq_last_status);
 		ret = -EIO;
 		goto out;

@@ -1669,7 +1665,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 	if (status) {
-		dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+		dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
 			ena, status, hw->adminq.sq_last_status);
 		ret = -EIO;
 		goto out;

@@ -1834,8 +1830,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
 		struct ice_q_vector *q_vector = vsi->q_vectors[i];
 
 		if (!q_vector) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to set reg_idx on q_vector %d VSI %d\n",
+			dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
 				i, vsi->vsi_num);
 			goto clear_reg_idx;
 		}

@@ -1898,8 +1893,7 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
 		status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
 
 	if (status)
-		dev_err(dev,
-			"Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+		dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
 			vsi->vsi_num, status);
 
 	ice_free_fltr_list(dev, &tmp_add_list);

@@ -2384,8 +2378,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
 		return -EINVAL;
 
 	if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
-		dev_err(ice_pf_to_dev(pf),
-			"param err: needed=%d, num_entries = %d id=0x%04x\n",
+		dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
 			needed, res->num_entries, id);
 		return -EINVAL;
 	}

@@ -2686,7 +2679,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	ice_vsi_put_qs(vsi);
 	ice_vsi_clear_rings(vsi);
 	ice_vsi_free_arrays(vsi);
-	ice_dev_onetime_setup(&pf->hw);
 	if (vsi->type == ICE_VSI_VF)
 		ice_vsi_set_num_qs(vsi, vf->vf_id);
 	else

@@ -2765,8 +2757,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
 	status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
 				 max_txqs);
 	if (status) {
-		dev_err(ice_pf_to_dev(pf),
-			"VSI %d failed lan queue config, error %d\n",
+		dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
 			vsi->vsi_num, status);
 		if (init_vsi) {
 			ret = -EIO;

@@ -2834,8 +2825,8 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
 int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 {
 	u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
-	struct ice_vsi_ctx *ctx;
 	struct ice_pf *pf = vsi->back;
+	struct ice_vsi_ctx *ctx;
 	enum ice_status status;
 	struct device *dev;
 	int i, ret = 0;

@@ -2891,25 +2882,6 @@ int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
 }
 #endif /* CONFIG_DCB */
 
-/**
- * ice_nvm_version_str - format the NVM version strings
- * @hw: ptr to the hardware info
- */
-char *ice_nvm_version_str(struct ice_hw *hw)
-{
-	u8 oem_ver, oem_patch, ver_hi, ver_lo;
-	static char buf[ICE_NVM_VER_LEN];
-	u16 oem_build;
-
-	ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
-			    &ver_lo);
-
-	snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
-		 hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
-
-	return buf;
-}
-
 /**
  * ice_update_ring_stats - Update ring statistics
  * @ring: ring to update

@@ -2981,7 +2953,7 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
 		status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
 
 cfg_mac_fltr_exit:
-	ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+	ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
 	return status;
 }

@@ -3043,16 +3015,14 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
 	/* another VSI is already the default VSI for this switch */
 	if (ice_is_dflt_vsi_in_use(sw)) {
-		dev_err(dev,
-			"Default forwarding VSI %d already in use, disable it and try again\n",
+		dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
 			sw->dflt_vsi->vsi_num);
 		return -EEXIST;
 	}
 
 	status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
 	if (status) {
-		dev_err(dev,
-			"Failed to set VSI %d as the default forwarding VSI, error %d\n",
+		dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
 			vsi->vsi_num, status);
 		return -EIO;
 	}

@@ -3091,8 +3061,7 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
 	status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
 				  ICE_FLTR_RX);
 	if (status) {
-		dev_err(dev,
-			"Failed to clear the default forwarding VSI %d, error %d\n",
+		dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
 			dflt_vsi->vsi_num, status);
 		return -EIO;
 	}

@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
 
 u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
 
-char *ice_nvm_version_str(struct ice_hw *hw);
-
 enum ice_status
 ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);

@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
 * Update the offset within page so that Rx buf will be ready to be reused.
 * For systems with PAGE_SIZE < 8192 this function will flip the page offset
 * so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by the @size bytes
+ * the offset is moved by "size" bytes
 */
 static void
 ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)

@@ -1078,8 +1078,6 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
 			skb = ice_build_skb(rx_ring, rx_buf, &xdp);
 		else
 			skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
-	} else {
-		skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
 	}
 
 	/* exit if we failed to retrieve a buffer */
 	if (!skb) {

@@ -1621,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 {
 	u64 td_offset, td_tag, td_cmd;
 	u16 i = tx_ring->next_to_use;
-	skb_frag_t *frag;
 	unsigned int data_len, size;
 	struct ice_tx_desc *tx_desc;
 	struct ice_tx_buf *tx_buf;
 	struct sk_buff *skb;
+	skb_frag_t *frag;
 	dma_addr_t dma;
 
 	td_tag = off->td_l2tag1;

@@ -1738,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
 	ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
 	/* notify HW of packet */
-	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+	if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
 		writel(i, tx_ring->tail);
-	}
 
 	return;

@@ -2078,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
 	frag = &skb_shinfo(skb)->frags[0];
 
 	/* Initialize size to the negative value of gso_size minus 1. We
-	 * use this as the worst case scenerio in which the frag ahead
+	 * use this as the worst case scenario in which the frag ahead
 	 * of us only provides one byte which is why we are limited to 6
 	 * descriptors for a single transmit as the header and previous
 	 * fragment are already consuming 2 descriptors.

@@ -10,7 +10,7 @@
 */
 void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
 {
-	u16 prev_ntu = rx_ring->next_to_use;
+	u16 prev_ntu = rx_ring->next_to_use & ~0x7;
 
 	rx_ring->next_to_use = val;

@@ -517,7 +517,7 @@ struct ice_hw {
 	struct ice_fw_log_cfg fw_log;
 
 /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the ITR/intrl granularity during
+ * register. Used for determining the ITR/INTRL granularity during
 * initialization.
 */
 #define ICE_MAX_AGG_BW_200G	0x0

@@ -199,8 +199,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
 	if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
 		wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
 	else
-		dev_err(dev,
-			"Scattered mode for VF Rx queues is not yet implemented\n");
+		dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
 }

@@ -402,8 +401,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
 		if ((reg & VF_TRANS_PENDING_M) == 0)
 			break;
 
-		dev_err(dev,
-			"VF %d PCI transactions stuck\n", vf->vf_id);
+		dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
 		udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
 	}
 }

@@ -462,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
 	status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
 	if (status) {
-		dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+		dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
 			 status, hw->adminq.sq_last_status);
 		ret = -EIO;
 		goto out;

@@ -1095,7 +1093,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
 	 * finished resetting.
 	 */
 	for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-
 		/* Check each VF in sequence */
 		while (v < pf->num_alloc_vfs) {
 			u32 reg;

@@ -1553,8 +1550,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
 		dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
 			 v_opcode, v_retval);
 		if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
-			dev_err(dev,
-				"Number of invalid messages exceeded for VF %d\n",
+			dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
 				vf->vf_id);
 			dev_err(dev, "Use PF Control I/F to enable the VF\n");
 			set_bit(ICE_VF_STATE_DIS, vf->vf_states);

@@ -1569,8 +1565,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
 	aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
 				       msg, msglen, NULL);
 	if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
-		dev_info(dev,
-			 "Unable to send the message to VF %d ret %d aq_err %d\n",
+		dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
 			 vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
 		return -EIO;
 	}

@@ -1914,8 +1909,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 	}
 
 	if (vf_vsi->type != ICE_VSI_VF) {
-		netdev_err(netdev,
-			   "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+		netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
 			   vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
 		return -ENODEV;
 	}

@@ -1945,8 +1939,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
 	status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
 	if (status) {
-		dev_err(dev,
-			"Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+		dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
 			ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
 		ret = -EIO;
 		goto out;

@@ -2063,8 +2056,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
 			continue;
 
 		if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
-			dev_err(&vsi->back->pdev->dev,
-				"Failed to enable Rx ring %d on VSI %d\n",
+			dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
 				vf_q_id, vsi->vsi_num);
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 			goto error_param;

@@ -2166,8 +2158,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 			if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
 						 ring, &txq_meta)) {
-				dev_err(&vsi->back->pdev->dev,
-					"Failed to stop Tx ring %d on VSI %d\n",
+				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
 					vf_q_id, vsi->vsi_num);
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;

@@ -2193,8 +2184,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
 				continue;
 
 			if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
-				dev_err(&vsi->back->pdev->dev,
-					"Failed to stop Rx ring %d on VSI %d\n",
+				dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
 					vf_q_id, vsi->vsi_num);
 				v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 				goto error_param;

@@ -2357,8 +2347,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 	if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
 	    qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
-		dev_err(ice_pf_to_dev(pf),
-			"VF-%d requesting more than supported number of queues: %d\n",
+		dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
 			vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto error_param;

@@ -2570,8 +2559,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
 	 */
 	if (set && !ice_is_vf_trusted(vf) &&
 	    (vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
-		dev_err(ice_pf_to_dev(pf),
-			"Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+		dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
 			vf->vf_id);
 		v_ret = VIRTCHNL_STATUS_ERR_PARAM;
 		goto handle_mac_exit;

@@ -2648,8 +2636,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 	struct ice_pf *pf = vf->pf;
 	u16 max_allowed_vf_queues;
 	u16 tx_rx_queue_left;
-	u16 cur_queues;
 	struct device *dev;
+	u16 cur_queues;
 
 	dev = ice_pf_to_dev(pf);
 	if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {

@@ -2670,8 +2658,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 		vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
 	} else if (req_queues > cur_queues &&
 		   req_queues - cur_queues > tx_rx_queue_left) {
-		dev_warn(dev,
-			 "VF %d requested %u more queues, but only %u left.\n",
+		dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
 			 vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
 		vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
 					       ICE_MAX_BASE_QS_PER_VF);

@@ -2821,8 +2808,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 	for (i = 0; i < vfl->num_elements; i++) {
 		if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
 			v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-			dev_err(dev,
-				"invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+			dev_err(dev, "invalid VF VLAN id %d\n",
+				vfl->vlan_id[i]);
 			goto error_param;
 		}
 	}

@@ -2836,8 +2823,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 	if (add_v && !ice_is_vf_trusted(vf) &&
 	    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
-		dev_info(dev,
-			 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+		dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
 			 vf->vf_id);
 		/* There is no need to let VF know about being not trusted,
 		 * so we can just return success message here
 		 */

@@ -2860,8 +2846,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 			if (!ice_is_vf_trusted(vf) &&
 			    vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
-				dev_info(dev,
-					 "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+				dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
 					 vf->vf_id);
 				/* There is no need to let VF know about being
 				 * not trusted, so we can just return success

@@ -2889,8 +2874,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 				status = ice_cfg_vlan_pruning(vsi, true, false);
 				if (status) {
 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-					dev_err(dev,
-						"Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+					dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
 						vid, status);
 					goto error_param;
 				}

@@ -2903,8 +2887,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
 							     promisc_m, vid);
 				if (status) {
 					v_ret = VIRTCHNL_STATUS_ERR_PARAM;
-					dev_err(dev,
-						"Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+					dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
 						vid, status);
 				}
 			}

@@ -3140,8 +3123,7 @@ void ice_vc_process_vf_msg(struct ice_pf *pf, struct ice_rq_event_info *event)
 	case VIRTCHNL_OP_GET_VF_RESOURCES:
 		err = ice_vc_get_vf_res_msg(vf, msg);
 		if (ice_vf_init_vlan_stripping(vf))
-			dev_err(dev,
-				"Failed to initialize VLAN stripping for VF %d\n",
+			dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
 				vf->vf_id);
 		ice_vc_notify_vf_link_state(vf);
 		break;

@@ -3313,8 +3295,7 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 	 */
 	ether_addr_copy(vf->dflt_lan_addr.addr, mac);
 	vf->pf_set_mac = true;
-	netdev_info(netdev,
-		    "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+	netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
 		    vf_id, mac);
 
 	ice_vc_reset_vf(vf);

@@ -3332,10 +3313,8 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
 int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 {
 	struct ice_pf *pf = ice_netdev_to_pf(netdev);
-	struct device *dev;
 	struct ice_vf *vf;
 
-	dev = ice_pf_to_dev(pf);
 	if (ice_validate_vf_id(pf, vf_id))
 		return -EINVAL;

@@ -3358,7 +3337,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
 	vf->trusted = trusted;
 	ice_vc_reset_vf(vf);
-	dev_info(dev, "VF %u is now %strusted\n",
+	dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
 		 vf_id, trusted ? "" : "un");
 
 	return 0;

@@ -338,8 +338,8 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
 					   DMA_BIDIRECTIONAL,
 					   ICE_RX_DMA_ATTR);
 		if (dma_mapping_error(dev, dma)) {
-			dev_dbg(dev,
-				"XSK UMEM DMA mapping error on page num %d", i);
+			dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
+				i);
 			goto out_unmap;
 		}