Commit a41cf09b authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Tony Nguyen says:

====================
100GbE Intel Wired LAN Driver Updates 2020-07-29

This series contains updates to the ice driver only.

Dave works around LFC settings not being preserved through link events,
and fixes link issues with GLOBR reset and the handling of multiple link events.

Nick restores VF MSI-X after PCI reset.

Kiran corrects the error code returned in ice_aq_sw_rules if the rule
does not exist.

Paul prevents overwriting of user-set descriptor values.

Tarun adds masking before accessing rate limiting profile types and
corrects queue bandwidth configuration.

Victor modifies Tx queue scheduler distribution to spread more evenly
across queue group nodes.

Krzysztof sets need_wakeup flag for Tx AF_XDP.

Brett allows VLANs in safe mode.

Marcin cleans up VSIs on probe failure.

Bruce reduces the scope of a variable.

Ben removes a FW workaround.

Tony fixes an unused parameter warning.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 5ba2254b 6221595f
@@ -611,8 +611,6 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_fw_logging_info);
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
status = ice_aq_send_cmd(hw, &desc, config, size, NULL);
if (!status) {
u16 i;
@@ -3907,7 +3905,18 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
* Without setting the generic section as valid in valid_sections, the
* Admin queue command will fail with error code ICE_AQ_RC_EINVAL.
*/
buf->txqs[0].info.valid_sections = ICE_AQC_ELEM_VALID_GENERIC;
buf->txqs[0].info.valid_sections =
ICE_AQC_ELEM_VALID_GENERIC | ICE_AQC_ELEM_VALID_CIR |
ICE_AQC_ELEM_VALID_EIR;
buf->txqs[0].info.generic = 0;
buf->txqs[0].info.cir_bw.bw_profile_idx =
cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
buf->txqs[0].info.cir_bw.bw_alloc =
cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
buf->txqs[0].info.eir_bw.bw_profile_idx =
cpu_to_le16(ICE_SCHED_DFLT_RL_PROF_ID);
buf->txqs[0].info.eir_bw.bw_alloc =
cpu_to_le16(ICE_SCHED_DFLT_BW_WT);
/* add the LAN queue */
status = ice_aq_add_lan_txq(hw, num_qgrps, buf, buf_size, cd);
@@ -4357,3 +4366,36 @@ bool ice_is_phy_caps_an_enabled(struct ice_aqc_get_phy_caps_data *caps)
return false;
}
/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
* @buf: pointer to the caller-supplied buffer to store the MIB block
* @buf_size: size of the buffer (in bytes)
* @cd: pointer to command details structure or NULL
*
* Set the LLDP MIB. (0x0A08)
*/
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
cmd->length = cpu_to_le16(buf_size);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
@@ -172,4 +172,7 @@ ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_txsched_elem_data *buf);
enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd);
#endif /* _ICE_COMMON_H_ */
@@ -312,9 +312,10 @@ ice_cfg_rq_regs(struct ice_hw *hw, struct ice_ctl_q_info *cq)
#define ICE_FREE_CQ_BUFS(hw, qi, ring) \
do { \
int i; \
/* free descriptors */ \
if ((qi)->ring.r.ring##_bi) \
if ((qi)->ring.r.ring##_bi) { \
int i; \
\
for (i = 0; i < (qi)->num_##ring##_entries; i++) \
if ((qi)->ring.r.ring##_bi[i].pa) { \
dmam_free_coherent(ice_hw_to_dev(hw), \
@@ -325,6 +326,7 @@ do { \
(qi)->ring.r.ring##_bi[i].pa = 0;\
(qi)->ring.r.ring##_bi[i].size = 0;\
} \
} \
/* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
@@ -134,39 +134,6 @@ ice_aq_start_lldp(struct ice_hw *hw, bool persist, struct ice_sq_cd *cd)
return ice_aq_send_cmd(hw, &desc, NULL, 0, cd);
}
/**
* ice_aq_set_lldp_mib - Set the LLDP MIB
* @hw: pointer to the HW struct
* @mib_type: Local, Remote or both Local and Remote MIBs
* @buf: pointer to the caller-supplied buffer to store the MIB block
* @buf_size: size of the buffer (in bytes)
* @cd: pointer to command details structure or NULL
*
* Set the LLDP MIB. (0x0A08)
*/
static enum ice_status
ice_aq_set_lldp_mib(struct ice_hw *hw, u8 mib_type, void *buf, u16 buf_size,
struct ice_sq_cd *cd)
{
struct ice_aqc_lldp_set_local_mib *cmd;
struct ice_aq_desc desc;
cmd = &desc.params.lldp_set_mib;
if (buf_size == 0 || !buf)
return ICE_ERR_PARAM;
ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_lldp_set_local_mib);
desc.flags |= cpu_to_le16((u16)ICE_AQ_FLAG_RD);
desc.datalen = cpu_to_le16(buf_size);
cmd->type = mib_type;
cmd->length = cpu_to_le16(buf_size);
return ice_aq_send_cmd(hw, &desc, buf, buf_size, cd);
}
/**
* ice_get_dcbx_status
* @hw: pointer to the HW struct
@@ -444,10 +444,6 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
/* If DCB was not enabled previously, we are done */
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
mutex_lock(&pf->tc_mutex);
if (!pf->hw.port_info->is_sw_lldp)
@@ -467,7 +463,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
}
}
dev_info(dev, "DCB restored after reset\n");
dev_info(dev, "DCB info restored\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
@@ -53,6 +53,12 @@ ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
tlan_ctx->cgd_num = ring->dcb_tc;
}
static inline bool ice_is_dcb_active(struct ice_pf *pf)
{
return (test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags) ||
test_bit(ICE_FLAG_DCB_ENA, pf->flags));
}
#else
#define ice_dcb_rebuild(pf) do {} while (0)
@@ -95,6 +101,11 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
return 0;
}
static inline bool ice_is_dcb_active(struct ice_pf __always_unused *pf)
{
return false;
}
static inline bool
ice_is_pfc_causing_hung_q(struct ice_pf __always_unused *pf,
unsigned int __always_unused txqueue)
@@ -127,7 +127,13 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
case ICE_VSI_PF:
case ICE_VSI_CTRL:
case ICE_VSI_LB:
/* a user could change the values of num_[tr]x_desc using
* ethtool -G so we should keep those values instead of
* overwriting them with the defaults.
*/
if (!vsi->num_rx_desc)
vsi->num_rx_desc = ICE_DFLT_NUM_RX_DESC;
if (!vsi->num_tx_desc)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
@@ -766,6 +766,100 @@ static void ice_vsi_link_event(struct ice_vsi *vsi, bool link_up)
}
}
/**
* ice_set_dflt_mib - send a default config MIB to the FW
* @pf: private PF struct
*
* This function sends a default configuration MIB to the FW.
*
* If this function errors out at any point, the driver is still able to
* function. The main impact is that LFC may not operate as expected.
* Therefore, an error here is only logged with a dbg message, and the
* driver continues with its rebuild/re-enable.
*/
static void ice_set_dflt_mib(struct ice_pf *pf)
{
struct device *dev = ice_pf_to_dev(pf);
u8 mib_type, *buf, *lldpmib = NULL;
u16 len, typelen, offset = 0;
struct ice_lldp_org_tlv *tlv;
struct ice_hw *hw;
u32 ouisubtype;
if (!pf) {
dev_dbg(dev, "%s NULL pf pointer\n", __func__);
return;
}
hw = &pf->hw;
mib_type = SET_LOCAL_MIB_TYPE_LOCAL_MIB;
lldpmib = kzalloc(ICE_LLDPDU_SIZE, GFP_KERNEL);
if (!lldpmib) {
dev_dbg(dev, "%s Failed to allocate MIB memory\n",
__func__);
return;
}
/* Add ETS CFG TLV */
tlv = (struct ice_lldp_org_tlv *)lldpmib;
typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
ICE_IEEE_ETS_TLV_LEN);
tlv->typelen = htons(typelen);
ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
ICE_IEEE_SUBTYPE_ETS_CFG);
tlv->ouisubtype = htonl(ouisubtype);
buf = tlv->tlvinfo;
buf[0] = 0;
/* ETS CFG all UPs map to TC 0. Next 4 (1 - 4) Octets = 0.
* Octets 5 - 12 are BW values, set octet 5 to 100% BW.
* Octets 13 - 20 are TSA values - leave as zeros
*/
buf[5] = 0x64;
len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
offset += len + 2;
tlv = (struct ice_lldp_org_tlv *)
((char *)tlv + sizeof(tlv->typelen) + len);
/* Add ETS REC TLV */
buf = tlv->tlvinfo;
tlv->typelen = htons(typelen);
ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
ICE_IEEE_SUBTYPE_ETS_REC);
tlv->ouisubtype = htonl(ouisubtype);
/* First octet of buf is reserved
* Octets 1 - 4 map UP to TC - all UPs map to zero
* Octets 5 - 12 are BW values - set TC 0 to 100%.
* Octets 13 - 20 are TSA value - leave as zeros
*/
buf[5] = 0x64;
offset += len + 2;
tlv = (struct ice_lldp_org_tlv *)
((char *)tlv + sizeof(tlv->typelen) + len);
/* Add PFC CFG TLV */
typelen = ((ICE_TLV_TYPE_ORG << ICE_LLDP_TLV_TYPE_S) |
ICE_IEEE_PFC_TLV_LEN);
tlv->typelen = htons(typelen);
ouisubtype = ((ICE_IEEE_8021QAZ_OUI << ICE_LLDP_TLV_OUI_S) |
ICE_IEEE_SUBTYPE_PFC_CFG);
tlv->ouisubtype = htonl(ouisubtype);
/* Octet 1 left as all zeros - PFC disabled */
buf[0] = 0x08;
len = (typelen & ICE_LLDP_TLV_LEN_M) >> ICE_LLDP_TLV_LEN_S;
offset += len + 2;
if (ice_aq_set_lldp_mib(hw, mib_type, (void *)lldpmib, offset, NULL))
dev_dbg(dev, "%s Failed to set default LLDP MIB\n", __func__);
kfree(lldpmib);
}
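
To make the TLV arithmetic in ice_set_dflt_mib easier to follow, here is a minimal stand-alone sketch (user-space C; the constants below only approximate the driver's ICE_LLDP_TLV_* and ICE_IEEE_* macros and are assumptions, not the driver's definitions) of how an IEEE 802.1AB organizationally specific TLV packs a 7-bit type and 9-bit length into typelen, and a 24-bit OUI plus an 8-bit subtype into ouisubtype:

#include <stdio.h>

/* Illustrative constants; the driver defines its own equivalents. */
#define LLDP_TLV_TYPE_ORG	127	/* organizationally specific TLV */
#define LLDP_TLV_TYPE_SHIFT	9	/* the length occupies the low 9 bits */
#define LLDP_TLV_LEN_MASK	0x01FF
#define IEEE_8021QAZ_OUI	0x0080C2
#define IEEE_SUBTYPE_ETS_CFG	9
#define IEEE_ETS_TLV_LEN	25	/* OUI + subtype + ETS payload, in bytes */

int main(void)
{
	/* 16 bits on the wire: type in the top 7 bits, length in the low 9 */
	unsigned int typelen = (LLDP_TLV_TYPE_ORG << LLDP_TLV_TYPE_SHIFT) |
			       IEEE_ETS_TLV_LEN;
	/* 32 bits on the wire: OUI in the top 24 bits, subtype in the low 8 */
	unsigned int ouisubtype = (IEEE_8021QAZ_OUI << 8) |
				  IEEE_SUBTYPE_ETS_CFG;
	unsigned int len = typelen & LLDP_TLV_LEN_MASK;

	printf("typelen=0x%04x (type=%u len=%u)\n",
	       typelen, typelen >> LLDP_TLV_TYPE_SHIFT, len);
	printf("ouisubtype=0x%08x\n", ouisubtype);
	/* each TLV occupies a 2-byte header plus its payload, hence len + 2 */
	printf("next TLV starts %u bytes further on\n", len + 2);
	return 0;
}

This is also why the function above advances offset by len + 2 after each TLV it appends to lldpmib.
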
/**
* ice_link_event - process the link event
* @pf: PF that the link event is associated with
@@ -800,6 +894,12 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
/* Check if the link state is up after updating link info, and treat
* this event as an UP event since the link is actually UP now.
*/
if (phy_info->link_info.link_info & ICE_AQ_LINK_UP)
link_up = true;
vsi = ice_get_main_vsi(pf);
if (!vsi || !vsi->port_info)
return -EINVAL;
@@ -821,7 +921,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
if (link_up == old_link && link_speed == old_link_speed)
return result;
if (ice_is_dcb_active(pf)) {
if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
ice_dcb_rebuild(pf);
} else {
if (link_up)
ice_set_dflt_mib(pf);
}
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -3477,6 +3583,60 @@ int ice_vsi_recfg_qs(struct ice_vsi *vsi, int new_rx, int new_tx)
return err;
}
/**
* ice_set_safe_mode_vlan_cfg - configure PF VSI to allow all VLANs in safe mode
* @pf: PF to configure
*
* No VLAN offloads/filtering are advertised in safe mode so make sure the PF
* VSI can still Tx/Rx VLAN tagged packets.
*/
static void ice_set_safe_mode_vlan_cfg(struct ice_pf *pf)
{
struct ice_vsi *vsi = ice_get_main_vsi(pf);
struct ice_vsi_ctx *ctxt;
enum ice_status status;
struct ice_hw *hw;
if (!vsi)
return;
ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
if (!ctxt)
return;
hw = &pf->hw;
ctxt->info = vsi->info;
ctxt->info.valid_sections =
cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID |
ICE_AQ_VSI_PROP_SECURITY_VALID |
ICE_AQ_VSI_PROP_SW_VALID);
/* disable VLAN anti-spoof */
ctxt->info.sec_flags &= ~(ICE_AQ_VSI_SEC_TX_VLAN_PRUNE_ENA <<
ICE_AQ_VSI_SEC_TX_PRUNE_ENA_S);
/* disable VLAN pruning and keep all other settings */
ctxt->info.sw_flags2 &= ~ICE_AQ_VSI_SW_FLAG_RX_VLAN_PRUNE_ENA;
/* allow all VLANs on Tx and don't strip on Rx */
ctxt->info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL |
ICE_AQ_VSI_VLAN_EMOD_NOTHING;
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
dev_err(ice_pf_to_dev(vsi->back), "Failed to update VSI for safe mode VLANs, err %s aq_err %s\n",
ice_stat_str(status),
ice_aq_str(hw->adminq.sq_last_status));
} else {
vsi->info.sec_flags = ctxt->info.sec_flags;
vsi->info.sw_flags2 = ctxt->info.sw_flags2;
vsi->info.vlan_flags = ctxt->info.vlan_flags;
}
kfree(ctxt);
}
/**
* ice_log_pkg_init - log result of DDP package load
* @hw: pointer to hardware info
@@ -3974,7 +4134,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
if (err) {
dev_err(dev, "probe failed sending driver version %s. error: %d\n",
UTS_RELEASE, err);
goto err_alloc_sw_unroll;
goto err_send_version_unroll;
}
/* since everything is good, start the service timer */
@@ -3983,19 +4143,19 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_link_events(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_link_events failed: %d\n", err);
goto err_alloc_sw_unroll;
goto err_send_version_unroll;
}
err = ice_init_nvm_phy_type(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_nvm_phy_type failed: %d\n", err);
goto err_alloc_sw_unroll;
goto err_send_version_unroll;
}
err = ice_update_link_info(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_update_link_info failed: %d\n", err);
goto err_alloc_sw_unroll;
goto err_send_version_unroll;
}
ice_init_link_dflt_override(pf->hw.port_info);
@@ -4006,7 +4166,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
err = ice_init_phy_user_cfg(pf->hw.port_info);
if (err) {
dev_err(dev, "ice_init_phy_user_cfg failed: %d\n", err);
goto err_alloc_sw_unroll;
goto err_send_version_unroll;
}
if (!test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, pf->flags)) {
@@ -4033,9 +4193,10 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* Disable WoL at init, wait for user to enable */
device_set_wakeup_enable(dev, false);
/* If no DDP driven features have to be setup, we are done with probe */
if (ice_is_safe_mode(pf))
if (ice_is_safe_mode(pf)) {
ice_set_safe_mode_vlan_cfg(pf);
goto probe_done;
}
/* initialize DDP driven features */
@@ -4059,6 +4220,8 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
clear_bit(__ICE_DOWN, pf->state);
return 0;
err_send_version_unroll:
ice_vsi_release_all(pf);
err_alloc_sw_unroll:
ice_devlink_destroy_port(pf);
set_bit(__ICE_SERVICE_DIS, pf->state);
@@ -4517,6 +4680,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
return;
}
ice_restore_all_vfs_msi_state(pdev);
ice_do_reset(pf, ICE_RESET_PFR);
ice_service_task_restart(pf);
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -5813,10 +5978,6 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
if (err)
goto err_sched_init_port;
err = ice_update_link_info(hw->port_info);
if (err)
dev_err(dev, "Get link status error %d\n", err);
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
@@ -1275,6 +1275,53 @@ ice_sched_find_node_in_subtree(struct ice_hw *hw, struct ice_sched_node *base,
return false;
}
/**
* ice_sched_get_free_qgrp - Scan all queue group siblings and find a free node
* @pi: port information structure
* @vsi_node: software VSI handle
* @qgrp_node: first queue group node identified for scanning
* @owner: LAN or RDMA
*
* This function retrieves a free LAN or RDMA queue group node by scanning
* qgrp_node and its siblings for the queue group with the fewest queues
* currently assigned.
*/
static struct ice_sched_node *
ice_sched_get_free_qgrp(struct ice_port_info *pi,
struct ice_sched_node *vsi_node,
struct ice_sched_node *qgrp_node, u8 owner)
{
struct ice_sched_node *min_qgrp;
u8 min_children;
if (!qgrp_node)
return qgrp_node;
min_children = qgrp_node->num_children;
if (!min_children)
return qgrp_node;
min_qgrp = qgrp_node;
/* scan all queue groups until we find a node that has fewer than the
* current minimum number of children. This way all queue group nodes get
* an equal number of shares and stay active, and bandwidth is distributed
* equally across all queues.
*/
while (qgrp_node) {
/* make sure the qgroup node is part of the VSI subtree */
if (ice_sched_find_node_in_subtree(pi->hw, vsi_node, qgrp_node))
if (qgrp_node->num_children < min_children &&
qgrp_node->owner == owner) {
/* replace the new min queue group node */
min_qgrp = qgrp_node;
min_children = min_qgrp->num_children;
/* break if it has no children */
if (!min_children)
break;
}
qgrp_node = qgrp_node->sibling;
}
return min_qgrp;
}
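
As a stand-alone illustration of the selection logic in ice_sched_get_free_qgrp, the sketch below (plain user-space C with a hypothetical struct, not the driver's ice_sched_node; the owner and subtree-membership checks are omitted) walks a sibling list, keeps the node with the fewest children, and stops early when it finds an empty one:

#include <stdio.h>
#include <stddef.h>

/* Simplified stand-in for a scheduler node; the driver's
 * struct ice_sched_node carries far more state.
 */
struct qgrp {
	struct qgrp *sibling;
	unsigned int num_children;
	const char *name;
};

static struct qgrp *pick_least_loaded(struct qgrp *node)
{
	struct qgrp *min_node = node;
	unsigned int min_children;

	if (!node || !node->num_children)
		return node;

	min_children = node->num_children;
	for (; node; node = node->sibling) {
		if (node->num_children < min_children) {
			min_node = node;
			min_children = node->num_children;
			if (!min_children)
				break;	/* cannot do better than empty */
		}
	}
	return min_node;
}

int main(void)
{
	struct qgrp c = { NULL, 3, "C" };
	struct qgrp b = { &c, 1, "B" };
	struct qgrp a = { &b, 4, "A" };

	printf("least loaded sibling: %s\n", pick_least_loaded(&a)->name);
	return 0;
}

Placing new queues under the least-loaded sibling is what gives the more even spread across queue group nodes described in the cover letter.
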
/**
* ice_sched_get_free_qparent - Get a free LAN or RDMA queue group node
* @pi: port information structure
@@ -1288,7 +1335,7 @@ struct ice_sched_node *
ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u8 owner)
{
struct ice_sched_node *vsi_node, *qgrp_node = NULL;
struct ice_sched_node *vsi_node, *qgrp_node;
struct ice_vsi_ctx *vsi_ctx;
u16 max_children;
u8 qgrp_layer;
@@ -1302,7 +1349,7 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
vsi_node = vsi_ctx->sched.vsi_node[tc];
/* validate invalid VSI ID */
if (!vsi_node)
goto lan_q_exit;
return NULL;
/* get the first queue group node from VSI sub-tree */
qgrp_node = ice_sched_get_first_node(pi, vsi_node, qgrp_layer);
@@ -1315,8 +1362,8 @@ ice_sched_get_free_qparent(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
qgrp_node = qgrp_node->sibling;
}
lan_q_exit:
return qgrp_node;
/* Select the best queue group */
return ice_sched_get_free_qgrp(pi, vsi_node, qgrp_node, owner);
}
/**
@@ -2153,8 +2200,8 @@ ice_sched_add_rl_profile(struct ice_port_info *pi,
hw = pi->hw;
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
if (rl_prof_elem->profile.flags == profile_type &&
rl_prof_elem->bw == bw)
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type && rl_prof_elem->bw == bw)
/* Return existing profile ID info */
return rl_prof_elem;
@@ -2384,7 +2431,8 @@ ice_sched_rm_rl_profile(struct ice_port_info *pi, u8 layer_num, u8 profile_type,
/* Check the existing list for RL profile */
list_for_each_entry(rl_prof_elem, &pi->rl_prof_list[layer_num],
list_entry)
if (rl_prof_elem->profile.flags == profile_type &&
if ((rl_prof_elem->profile.flags & ICE_AQC_RL_PROFILE_TYPE_M) ==
profile_type &&
le16_to_cpu(rl_prof_elem->profile.profile_id) ==
profile_id) {
if (rl_prof_elem->prof_id_ref)
@@ -2546,8 +2594,8 @@ ice_sched_set_node_bw(struct ice_port_info *pi, struct ice_sched_node *node,
return 0;
return ice_sched_rm_rl_profile(pi, layer_num,
rl_prof_info->profile.flags,
old_id);
rl_prof_info->profile.flags &
ICE_AQC_RL_PROFILE_TYPE_M, old_id);
}
/**
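
The masking added in the rate-limit profile hunks above matters because the profile flags byte carries more than just the profile type, so comparing the whole byte can miss a match once any other attribute bit is set. A minimal sketch with illustrative values (the constants below are hypothetical, not the driver's ICE_AQC_RL_PROFILE_* definitions):

#include <stdint.h>
#include <stdio.h>

#define RL_PROFILE_TYPE_M	0x3	/* illustrative: low 2 bits hold the type */
#define RL_PROFILE_TYPE_CIR	0x1	/* illustrative type value */
#define RL_PROFILE_OTHER_FLAG	0x80	/* some unrelated attribute bit */

int main(void)
{
	uint8_t flags = RL_PROFILE_TYPE_CIR | RL_PROFILE_OTHER_FLAG;

	/* full-byte comparison: misses the match once the extra bit is set */
	printf("unmasked match: %d\n", flags == RL_PROFILE_TYPE_CIR);

	/* masked comparison: looks only at the type bits */
	printf("masked match:   %d\n",
	       (flags & RL_PROFILE_TYPE_M) == RL_PROFILE_TYPE_CIR);
	return 0;
}
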
@@ -495,6 +495,7 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
u8 num_rules, enum ice_adminq_opc opc, struct ice_sq_cd *cd)
{
struct ice_aq_desc desc;
enum ice_status status;
if (opc != ice_aqc_opc_add_sw_rules &&
opc != ice_aqc_opc_update_sw_rules &&
@@ -506,7 +507,12 @@ ice_aq_sw_rules(struct ice_hw *hw, void *rule_list, u16 rule_list_sz,
desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
desc.params.sw_rules.num_rules_fltr_entry_index =
cpu_to_le16(num_rules);
return ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
status = ice_aq_send_cmd(hw, &desc, rule_list, rule_list_sz, cd);
if (opc != ice_aqc_opc_add_sw_rules &&
hw->adminq.sq_last_status == ICE_AQ_RC_ENOENT)
status = ICE_ERR_DOES_NOT_EXIST;
return status;
}
/* ice_init_port_info - Initialize port_info with switch configuration data
@@ -509,8 +509,8 @@ static unsigned int ice_rx_offset(struct ice_ring *rx_ring)
return 0;
}
static unsigned int ice_rx_frame_truesize(struct ice_ring *rx_ring,
unsigned int size)
static unsigned int
ice_rx_frame_truesize(struct ice_ring *rx_ring, unsigned int __maybe_unused size)
{
unsigned int truesize;
@@ -409,7 +409,7 @@ enum ice_rl_type {
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
#define ICE_SCHED_DFLT_BW_WT 4
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
@@ -4071,3 +4071,33 @@ void ice_print_vfs_mdd_events(struct ice_pf *pf)
}
}
}
/**
* ice_restore_all_vfs_msi_state - restore VF MSI state after PF FLR
* @pdev: pointer to a pci_dev structure
*
* Called when recovering from a PF FLR to restore interrupt capability to
* the VFs.
*/
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev)
{
struct pci_dev *vfdev;
u16 vf_id;
int pos;
if (!pci_num_vf(pdev))
return;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
if (pos) {
pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID,
&vf_id);
vfdev = pci_get_device(pdev->vendor, vf_id, NULL);
while (vfdev) {
if (vfdev->is_virtfn && vfdev->physfn == pdev)
pci_restore_msi_state(vfdev);
vfdev = pci_get_device(pdev->vendor, vf_id,
vfdev);
}
}
}
@@ -114,6 +114,7 @@ void ice_vc_notify_link_state(struct ice_pf *pf);
void ice_vc_notify_reset(struct ice_pf *pf);
bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr);
bool ice_reset_vf(struct ice_vf *vf, bool is_vflr);
void ice_restore_all_vfs_msi_state(struct pci_dev *pdev);
int
ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
@@ -146,6 +147,7 @@ void ice_print_vf_rx_mdd_event(struct ice_vf *vf);
#define ice_vf_lan_overflow_event(pf, event) do {} while (0)
#define ice_print_vfs_mdd_events(pf) do {} while (0)
#define ice_print_vf_rx_mdd_event(vf) do {} while (0)
#define ice_restore_all_vfs_msi_state(pdev) do {} while (0)
static inline bool
ice_reset_all_vfs(struct ice_pf __always_unused *pf,
@@ -706,8 +706,6 @@ static bool ice_xmit_zc(struct ice_ring *xdp_ring, int budget)
if (tx_desc) {
ice_xdp_ring_update_tail(xdp_ring);
xsk_umem_consume_tx_done(xdp_ring->xsk_umem);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
return budget > 0 && work_done;
@@ -783,12 +781,8 @@ bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget)
if (xsk_frames)
xsk_umem_complete_tx(xdp_ring->xsk_umem, xsk_frames);
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem)) {
if (xdp_ring->next_to_clean == xdp_ring->next_to_use)
if (xsk_umem_uses_need_wakeup(xdp_ring->xsk_umem))
xsk_set_tx_need_wakeup(xdp_ring->xsk_umem);
else
xsk_clear_tx_need_wakeup(xdp_ring->xsk_umem);
}
ice_update_tx_ring_stats(xdp_ring, total_packets, total_bytes);
xmit_done = ice_xmit_zc(xdp_ring, ICE_DFLT_IRQ_WORK);