Commit f1ff4e80 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-11-08

Another series that contains updates to the ice driver only.

Anirudh cleans up the CONFIG_DCB ifdef wrappers by moving the code that
DCB needs to disable and enable the PF VSI for configuration.  Implements
ice_vsi_type_str() to convert a VSI type enum value to its string
equivalent, to help identify VSI types in driver print statements.

Usha and Tarun add support for setting the maximum per-queue bit rate on
transmit queues.

Dave implements dcb_nl set functions and the supporting software DCB
functions needed to back the callbacks defined in the dcbnl_rtnl_ops
structure.
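
For orientation only, here is a minimal sketch of how a driver typically
backs these callbacks: fill a dcbnl_rtnl_ops table and point
netdev->dcbnl_ops at it once the netdev exists.  The example_* names below
are hypothetical; the real handlers live in ice_dcb_nl.c (whose diff is
collapsed further down).

#include <linux/netdevice.h>
#include <net/dcbnl.h>

static int example_ieee_getets(struct net_device *dev, struct ieee_ets *ets)
{
	/* report the driver's cached local DCBX ETS configuration */
	return 0;
}

static int example_ieee_setets(struct net_device *dev, struct ieee_ets *ets)
{
	/* validate the request, build a new DCBX config, commit it to HW */
	return 0;
}

static const struct dcbnl_rtnl_ops example_dcbnl_ops = {
	.ieee_getets	= example_ieee_getets,
	.ieee_setets	= example_ieee_setets,
	/* .ieee_getpfc, .ieee_setpfc, .getdcbx, .setdcbx, ... */
};

static void example_dcbnl_setup(struct net_device *netdev)
{
	/* called once per PF netdev, e.g. from the VSI setup path */
	netdev->dcbnl_ops = &example_dcbnl_ops;
}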

Henry adds a check to ensure we are not trying to configure the device
while it is resetting, returning -EBUSY while a reset is in progress.

Usha fixes a call trace seen when the receive/transmit descriptor count is
changed via ethtool while DCB is configured, by using the number of enabled
queues rather than the total number of allocated queues.

Paul cleans up and refactors the software LLDP configuration to handle the
case where the firmware DCBX engine is disabled.

Akeem adds checks to ensure the VF or PF is not disabled before honoring
mailbox messages to configure the VF.

Brett corrects the check to make sure the vector_id passed down from
iavf is less than the max allowed interrupts per VF.  Updates a flag bit
to align with the current specification.

Bruce updates a switch statement to use the stored status of the Download
Package AQ command instead of the last AdminQ status.  Also does some
housekeeping by removing a conditional check that is not needed.

Mitch shortens the delay between polls for SQ responses to resolve issues
with VF resets failing.

Jake cleans up the code to reduce namespace pollution and simplifies
ice_debug_cq(), which always uses the same mask, so there is no need to
pass it in.  Improves debugging by adding the command opcode to the debug
messages that print an error code.

v2: fixed reverse christmas tree issue in patch 3 of the series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents bbab7ef2 fb0254b2
......@@ -20,5 +20,5 @@ ice-y := ice_main.o \
ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_nl.o ice_dcb_lib.o
ice-$(CONFIG_XDP_SOCKETS) += ice_xsk.o
......@@ -180,6 +180,7 @@ enum ice_state {
__ICE_NEEDS_RESTART,
__ICE_PREPARED_FOR_RESET, /* set by driver when prepared */
__ICE_RESET_OICR_RECV, /* set by driver after rcv reset OICR */
__ICE_DCBNL_DEVRESET, /* set by dcbnl devreset */
__ICE_PFR_REQ, /* set by driver and peers */
__ICE_CORER_REQ, /* set by driver and peers */
__ICE_GLOBR_REQ, /* set by driver and peers */
......@@ -365,6 +366,7 @@ struct ice_pf {
struct work_struct serv_task;
struct mutex avail_q_mutex; /* protects access to avail_[rx|tx]qs */
struct mutex sw_mutex; /* lock for protecting VSI alloc flow */
struct mutex tc_mutex; /* lock to protect TC changes */
u32 msg_enable;
u32 hw_csum_rx_error;
u32 oicr_idx; /* Other interrupt cause MSIX vector index */
......@@ -502,10 +504,6 @@ int ice_set_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
int ice_get_rss(struct ice_vsi *vsi, u8 *seed, u8 *lut, u16 lut_size);
void ice_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size);
void ice_print_link_msg(struct ice_vsi *vsi, bool isup);
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked);
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked);
#endif /* CONFIG_DCB */
int ice_open(struct net_device *netdev);
int ice_stop(struct net_device *netdev);
......
......@@ -742,6 +742,10 @@ struct ice_aqc_add_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
struct ice_aqc_conf_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
struct ice_aqc_get_elem {
struct ice_aqc_txsched_elem_data generic[1];
};
......@@ -783,6 +787,44 @@ struct ice_aqc_port_ets_elem {
__le32 tc_node_teid[8]; /* Used for response, reserved in command */
};
/* Rate limiting profile for
* Add RL profile (indirect 0x0410)
* Query RL profile (indirect 0x0411)
* Remove RL profile (indirect 0x0415)
* These indirect commands act on single or multiple
* RL profiles with specified data.
*/
struct ice_aqc_rl_profile {
__le16 num_profiles;
__le16 num_processed; /* Only for response. Reserved in Command. */
u8 reserved[4];
__le32 addr_high;
__le32 addr_low;
};
struct ice_aqc_rl_profile_elem {
u8 level;
u8 flags;
#define ICE_AQC_RL_PROFILE_TYPE_S 0x0
#define ICE_AQC_RL_PROFILE_TYPE_M (0x3 << ICE_AQC_RL_PROFILE_TYPE_S)
#define ICE_AQC_RL_PROFILE_TYPE_CIR 0
#define ICE_AQC_RL_PROFILE_TYPE_EIR 1
#define ICE_AQC_RL_PROFILE_TYPE_SRL 2
/* The following flag is used for Query RL Profile Data */
#define ICE_AQC_RL_PROFILE_INVAL_S 0x7
#define ICE_AQC_RL_PROFILE_INVAL_M (0x1 << ICE_AQC_RL_PROFILE_INVAL_S)
__le16 profile_id;
__le16 max_burst_size;
__le16 rl_multiply;
__le16 wake_up_calc;
__le16 rl_encode;
};
struct ice_aqc_rl_profile_generic_elem {
struct ice_aqc_rl_profile_elem generic[1];
};
/* Query Scheduler Resource Allocation (indirect 0x0412)
* This indirect command retrieves the scheduler resources allocated by
* EMP Firmware to the given PF.
......@@ -1657,6 +1699,7 @@ struct ice_aq_desc {
struct ice_aqc_sched_elem_cmd sched_elem_cmd;
struct ice_aqc_query_txsched_res query_sched_res;
struct ice_aqc_query_port_ets port_ets;
struct ice_aqc_rl_profile rl_profile;
struct ice_aqc_nvm nvm;
struct ice_aqc_nvm_checksum nvm_checksum;
struct ice_aqc_pf_vf_msg virt;
......@@ -1758,12 +1801,15 @@ enum ice_adminq_opc {
/* transmit scheduler commands */
ice_aqc_opc_get_dflt_topo = 0x0400,
ice_aqc_opc_add_sched_elems = 0x0401,
ice_aqc_opc_cfg_sched_elems = 0x0403,
ice_aqc_opc_get_sched_elems = 0x0404,
ice_aqc_opc_suspend_sched_elems = 0x0409,
ice_aqc_opc_resume_sched_elems = 0x040A,
ice_aqc_opc_query_port_ets = 0x040E,
ice_aqc_opc_delete_sched_elems = 0x040F,
ice_aqc_opc_add_rl_profiles = 0x0410,
ice_aqc_opc_query_sched_res = 0x0412,
ice_aqc_opc_remove_rl_profiles = 0x0415,
/* PHY commands */
ice_aqc_opc_get_phy_caps = 0x0600,
......
......@@ -855,6 +855,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
goto err_unroll_sched;
}
INIT_LIST_HEAD(&hw->agg_list);
/* Initialize max burst size */
if (!hw->max_burst_size)
ice_cfg_rl_burst_size(hw, ICE_SCHED_DFLT_BURST_SIZE);
status = ice_init_fltr_mgmt_struct(hw);
if (status)
......@@ -1248,56 +1251,6 @@ const struct ice_ctx_ele ice_tlan_ctx_info[] = {
{ 0 }
};
/**
* ice_debug_cq
* @hw: pointer to the hardware structure
* @mask: debug mask
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
*
* Dumps debug log about control command with descriptor contents.
*/
void
ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
#ifndef CONFIG_DYNAMIC_DEBUG
if (!(mask & hw->debug_mask))
return;
#endif
if (!desc)
return;
len = le16_to_cpu(cq_desc->datalen);
ice_debug(hw, mask,
"CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(cq_desc->opcode),
le16_to_cpu(cq_desc->flags),
le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
ice_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->cookie_high),
le32_to_cpu(cq_desc->cookie_low));
ice_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->params.generic.param0),
le32_to_cpu(cq_desc->params.generic.param1));
ice_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
if (buf && cq_desc->datalen != 0) {
ice_debug(hw, mask, "Buffer:\n");
if (buf_len < len)
len = buf_len;
ice_debug_array(hw, mask, 16, 1, (u8 *)buf, len);
}
}
/* FW Admin Queue command wrappers */
/* Software lock/mutex that is meant to be held while the Global Config Lock
......@@ -3260,7 +3213,7 @@ ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info)
* @tc: TC number
* @q_handle: software queue handle
*/
static struct ice_q_ctx *
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle)
{
struct ice_vsi_ctx *vsi;
......@@ -3357,9 +3310,12 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
node.node_teid = buf->txqs[0].q_teid;
node.data.elem_type = ICE_AQC_ELEM_TYPE_LEAF;
q_ctx->q_handle = q_handle;
q_ctx->q_teid = le32_to_cpu(node.node_teid);
/* add a leaf node into schduler tree queue layer */
/* add a leaf node into scheduler tree queue layer */
status = ice_sched_add_node(pi, hw->num_tx_sched_layers - 1, &node);
if (!status)
status = ice_sched_replay_q_bw(pi, q_ctx);
ena_txq_exit:
mutex_unlock(&pi->sched_lock);
......
......@@ -13,8 +13,6 @@
enum ice_status ice_nvm_validate_checksum(struct ice_hw *hw);
void
ice_debug_cq(struct ice_hw *hw, u32 mask, void *desc, void *buf, u16 buf_len);
enum ice_status ice_init_hw(struct ice_hw *hw);
void ice_deinit_hw(struct ice_hw *hw);
enum ice_status
......@@ -141,6 +139,8 @@ ice_ena_vsi_txq(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 q_handle,
enum ice_status ice_replay_vsi(struct ice_hw *hw, u16 vsi_handle);
void ice_replay_post(struct ice_hw *hw);
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf);
struct ice_q_ctx *
ice_get_lan_q_ctx(struct ice_hw *hw, u16 vsi_handle, u8 tc, u16 q_handle);
void
ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
......
......@@ -809,6 +809,52 @@ static u16 ice_clean_sq(struct ice_hw *hw, struct ice_ctl_q_info *cq)
return ICE_CTL_Q_DESC_UNUSED(sq);
}
/**
* ice_debug_cq
* @hw: pointer to the hardware structure
* @desc: pointer to control queue descriptor
* @buf: pointer to command buffer
* @buf_len: max length of buf
*
* Dumps debug log about control command with descriptor contents.
*/
static void ice_debug_cq(struct ice_hw *hw, void *desc, void *buf, u16 buf_len)
{
struct ice_aq_desc *cq_desc = (struct ice_aq_desc *)desc;
u16 len;
if (!IS_ENABLED(CONFIG_DYNAMIC_DEBUG) &&
!((ICE_DBG_AQ_DESC | ICE_DBG_AQ_DESC_BUF) & hw->debug_mask))
return;
if (!desc)
return;
len = le16_to_cpu(cq_desc->datalen);
ice_debug(hw, ICE_DBG_AQ_DESC,
"CQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
le16_to_cpu(cq_desc->opcode),
le16_to_cpu(cq_desc->flags),
le16_to_cpu(cq_desc->datalen), le16_to_cpu(cq_desc->retval));
ice_debug(hw, ICE_DBG_AQ_DESC, "\tcookie (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->cookie_high),
le32_to_cpu(cq_desc->cookie_low));
ice_debug(hw, ICE_DBG_AQ_DESC, "\tparam (0,1) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->params.generic.param0),
le32_to_cpu(cq_desc->params.generic.param1));
ice_debug(hw, ICE_DBG_AQ_DESC, "\taddr (h,l) 0x%08X 0x%08X\n",
le32_to_cpu(cq_desc->params.generic.addr_high),
le32_to_cpu(cq_desc->params.generic.addr_low));
if (buf && cq_desc->datalen != 0) {
ice_debug(hw, ICE_DBG_AQ_DESC_BUF, "Buffer:\n");
if (buf_len < len)
len = buf_len;
ice_debug_array(hw, ICE_DBG_AQ_DESC_BUF, 16, 1, (u8 *)buf, len);
}
}
/**
* ice_sq_done - check if FW has processed the Admin Send Queue (ATQ)
* @hw: pointer to the HW struct
......@@ -934,10 +980,10 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
}
/* Debug desc and buffer */
ice_debug(hw, ICE_DBG_AQ_MSG,
ice_debug(hw, ICE_DBG_AQ_DESC,
"ATQ: Control Send queue desc and buffer:\n");
ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc_on_ring, buf, buf_size);
ice_debug_cq(hw, (void *)desc_on_ring, buf, buf_size);
(cq->sq.next_to_use)++;
if (cq->sq.next_to_use == cq->sq.count)
......@@ -948,7 +994,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (ice_sq_done(hw, cq))
break;
mdelay(1);
udelay(ICE_CTL_Q_SQ_CMD_USEC);
total_delay++;
} while (total_delay < cq->sq_cmd_timeout);
......@@ -971,7 +1017,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
retval = le16_to_cpu(desc->retval);
if (retval) {
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Send Queue command completed with error 0x%x\n",
"Control Send Queue command 0x%04X completed with error 0x%X\n",
le16_to_cpu(desc->opcode),
retval);
/* strip off FW internal code */
......@@ -986,7 +1033,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
ice_debug(hw, ICE_DBG_AQ_MSG,
"ATQ: desc and buffer writeback:\n");
ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, buf, buf_size);
ice_debug_cq(hw, (void *)desc, buf, buf_size);
/* save writeback AQ if requested */
if (details->wb_desc)
......@@ -1075,7 +1122,8 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (flags & ICE_AQ_FLAG_ERR) {
ret_code = ICE_ERR_AQ_ERROR;
ice_debug(hw, ICE_DBG_AQ_MSG,
"Control Receive Queue Event received with error 0x%x\n",
"Control Receive Queue Event 0x%04X received with error 0x%X\n",
le16_to_cpu(desc->opcode),
cq->rq_last_status);
}
memcpy(&e->desc, desc, sizeof(e->desc));
......@@ -1084,10 +1132,9 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
if (e->msg_buf && e->msg_len)
memcpy(e->msg_buf, cq->rq.r.rq_bi[desc_idx].va, e->msg_len);
ice_debug(hw, ICE_DBG_AQ_MSG, "ARQ: desc and buffer:\n");
ice_debug(hw, ICE_DBG_AQ_DESC, "ARQ: desc and buffer:\n");
ice_debug_cq(hw, ICE_DBG_AQ_CMD, (void *)desc, e->msg_buf,
cq->rq_buf_size);
ice_debug_cq(hw, (void *)desc, e->msg_buf, cq->rq_buf_size);
/* Restore the original datalen and buffer address in the desc,
* FW updates datalen to indicate the event message size
......
......@@ -31,8 +31,9 @@ enum ice_ctl_q {
ICE_CTL_Q_MAILBOX,
};
/* Control Queue default settings */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
/* Control Queue timeout settings - max delay 250ms */
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 2500 /* Count 2500 times */
#define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */
struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to DMA head */
......
......@@ -2,6 +2,7 @@
/* Copyright (c) 2019, Intel Corporation. */
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
static void ice_pf_dcb_recfg(struct ice_pf *pf);
......@@ -101,6 +102,16 @@ u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg)
return ret;
}
/**
* ice_dcb_get_tc - Get the TC associated with the queue
* @vsi: ptr to the VSI
* @queue_index: queue number associated with VSI
*/
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index)
{
return vsi->tx_rings[queue_index]->dcb_tc;
}
/**
* ice_vsi_cfg_dcb_rings - Update rings to reflect DCB TC
* @vsi: VSI owner of rings being updated
......@@ -145,15 +156,19 @@ void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi)
* @new_cfg: DCBX config to apply
* @locked: is the RTNL held
*/
static
int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
{
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
int ret = 0;
struct ice_dcbx_cfg *old_cfg, *curr_cfg;
int ret = ICE_DCB_NO_HW_CHG;
struct ice_vsi *pf_vsi;
curr_cfg = &pf->hw.port_info->local_dcbx_cfg;
/* FW does not care if change happened */
if (!pf->hw.port_info->is_sw_lldp)
ret = ICE_DCB_HW_CHG_RST;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(new_cfg) > 1) {
dev_dbg(&pf->pdev->dev, "DCB tagging enabled (num TC > 1)\n");
......@@ -169,18 +184,28 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
}
/* Store old config in case FW config fails */
old_cfg = devm_kzalloc(&pf->pdev->dev, sizeof(*old_cfg), GFP_KERNEL);
memcpy(old_cfg, curr_cfg, sizeof(*old_cfg));
old_cfg = kmemdup(curr_cfg, sizeof(*old_cfg), GFP_KERNEL);
if (!old_cfg)
return -ENOMEM;
dev_info(&pf->pdev->dev, "Commit DCB Configuration to the hardware\n");
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
ret = -EINVAL;
goto free_cfg;
}
/* avoid race conditions by holding the lock while disabling and
* re-enabling the VSI
*/
if (!locked)
rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
ice_dis_vsi(pf_vsi, true);
memcpy(curr_cfg, new_cfg, sizeof(*curr_cfg));
memcpy(&curr_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
memcpy(&new_cfg->etsrec, &curr_cfg->etscfg, sizeof(curr_cfg->etsrec));
/* Only send new config to HW if we are in SW LLDP mode. Otherwise,
* the new config came from the HW in the first place.
......@@ -204,10 +229,11 @@ int ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked)
ice_pf_dcb_recfg(pf);
out:
ice_pf_ena_all_vsi(pf, true);
ice_ena_vsi(pf_vsi, true);
if (!locked)
rtnl_unlock();
devm_kfree(&pf->pdev->dev, old_cfg);
free_cfg:
kfree(old_cfg);
return ret;
}
......@@ -539,6 +565,8 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
}
ice_vsi_map_rings_to_vectors(pf->vsi[v]);
if (pf->vsi[v]->type == ICE_VSI_PF)
ice_dcbnl_set_all(pf->vsi[v]);
}
}
......@@ -566,6 +594,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
"DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
struct ice_vsi *pf_vsi;
/* FW LLDP is disabled, activate SW DCBX/LLDP mode */
dev_info(&pf->pdev->dev,
"FW LLDP is disabled, DCBx/LLDP in SW mode.\n");
......@@ -578,6 +608,19 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
/* If the FW DCBX engine is not running then Rx LLDP packets
* need to be redirected up the stack.
*/
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_err(&pf->pdev->dev,
"Failed to set local DCB config\n");
err = -EIO;
goto dcb_init_err;
}
ice_cfg_sw_lldp(pf_vsi, false, true);
pf->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
return 0;
}
......@@ -690,6 +733,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_dcbx_cfg tmp_dcbx_cfg;
bool need_reconfig = false;
struct ice_port_info *pi;
struct ice_vsi *pf_vsi;
u8 type;
int ret;
......@@ -749,6 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
return;
......@@ -761,8 +806,14 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
}
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(&pf->pdev->dev, "PF VSI doesn't exist\n");
return;
}
rtnl_lock();
ice_pf_dis_all_vsi(pf, true);
ice_dis_vsi(pf_vsi, true);
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
......@@ -774,6 +825,6 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
ice_pf_ena_all_vsi(pf, true);
ice_ena_vsi(pf_vsi, true);
rtnl_unlock();
}
......@@ -9,11 +9,17 @@
#include "ice_lib.h"
#ifdef CONFIG_DCB
#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
#define ICE_TC_MAX_BW 100 /* Default Max BW percentage */
#define ICE_DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */
#define ICE_DCB_NO_HW_CHG 1 /* DCB configuration did not change */
#define ICE_DCB_HW_CHG 2 /* DCB configuration changed, no reset */
void ice_dcb_rebuild(struct ice_pf *pf);
u8 ice_dcb_get_ena_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg *dcbcfg);
u8 ice_dcb_get_tc(struct ice_vsi *vsi, int queue_index);
int
ice_pf_dcb_cfg(struct ice_pf *pf, struct ice_dcbx_cfg *new_cfg, bool locked);
void ice_vsi_cfg_dcb_rings(struct ice_vsi *vsi);
int ice_init_pf_dcb(struct ice_pf *pf, bool locked);
void ice_update_dcb_stats(struct ice_pf *pf);
......@@ -42,6 +48,13 @@ static inline u8 ice_dcb_get_num_tc(struct ice_dcbx_cfg __always_unused *dcbcfg)
return 1;
}
static inline u8
ice_dcb_get_tc(struct ice_vsi __always_unused *vsi,
int __always_unused queue_index)
{
return 0;
}
static inline int
ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
{
......@@ -49,6 +62,14 @@ ice_init_pf_dcb(struct ice_pf *pf, bool __always_unused locked)
return -EOPNOTSUPP;
}
static inline int
ice_pf_dcb_cfg(struct ice_pf __always_unused *pf,
struct ice_dcbx_cfg __always_unused *new_cfg,
bool __always_unused locked)
{
return -EOPNOTSUPP;
}
static inline int
ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
struct ice_tx_buf __always_unused *first)
......
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019, Intel Corporation. */
#ifndef _ICE_DCB_NL_H_
#define _ICE_DCB_NL_H_
#ifdef CONFIG_DCB
void ice_dcbnl_setup(struct ice_vsi *vsi);
void ice_dcbnl_set_all(struct ice_vsi *vsi);
void
ice_dcbnl_flush_apps(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
struct ice_dcbx_cfg *new_cfg);
#else
#define ice_dcbnl_setup(vsi) do {} while (0)
#define ice_dcbnl_set_all(vsi) do {} while (0)
#define ice_dcbnl_flush_apps(pf, old_cfg, new_cfg) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_NL_H_ */
......@@ -1206,11 +1206,6 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true);
if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
/* Forward LLDP packets to default VSI so that they
* are passed up the stack
*/
ice_cfg_sw_lldp(vsi, false, true);
} else {
enum ice_status status;
bool dcbx_agent_status;
......@@ -2654,14 +2649,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
netdev_info(netdev, "Changing Tx descriptor count from %d to %d\n",
vsi->tx_rings[0]->count, new_tx_cnt);
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_txq,
tx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_txq,
sizeof(*tx_rings), GFP_KERNEL);
if (!tx_rings) {
err = -ENOMEM;
goto done;
}
for (i = 0; i < vsi->alloc_txq; i++) {
ice_for_each_txq(vsi, i) {
/* clone ring and setup updated count */
tx_rings[i] = *vsi->tx_rings[i];
tx_rings[i].count = new_tx_cnt;
......@@ -2714,14 +2709,14 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
netdev_info(netdev, "Changing Rx descriptor count from %d to %d\n",
vsi->rx_rings[0]->count, new_rx_cnt);
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->alloc_rxq,
rx_rings = devm_kcalloc(&pf->pdev->dev, vsi->num_rxq,
sizeof(*rx_rings), GFP_KERNEL);
if (!rx_rings) {
err = -ENOMEM;
goto done;
}
for (i = 0; i < vsi->alloc_rxq; i++) {
ice_for_each_rxq(vsi, i) {
/* clone ring and setup updated count */
rx_rings[i] = *vsi->rx_rings[i];
rx_rings[i].count = new_rx_cnt;
......@@ -2759,7 +2754,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
ice_down(vsi);
if (tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++) {
ice_for_each_txq(vsi, i) {
ice_free_tx_ring(vsi->tx_rings[i]);
*vsi->tx_rings[i] = tx_rings[i];
}
......@@ -2767,7 +2762,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
}
if (rx_rings) {
for (i = 0; i < vsi->alloc_rxq; i++) {
ice_for_each_rxq(vsi, i) {
ice_free_rx_ring(vsi->rx_rings[i]);
/* copy the real tail offset */
rx_rings[i].tail = vsi->rx_rings[i]->tail;
......@@ -2801,7 +2796,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
free_tx:
/* error cleanup if the Rx allocations failed after getting Tx */
if (tx_rings) {
for (i = 0; i < vsi->alloc_txq; i++)
ice_for_each_txq(vsi, i)
ice_free_tx_ring(&tx_rings[i]);
devm_kfree(&pf->pdev->dev, tx_rings);
}
......
......@@ -52,6 +52,9 @@
#define PF_MBX_ATQLEN_ATQLEN_M ICE_M(0x3FF, 0)
#define PF_MBX_ATQLEN_ATQENABLE_M BIT(31)
#define PF_MBX_ATQT 0x0022E300
#define PRTDCB_GENC 0x00083000
#define PRTDCB_GENC_PFCLDA_S 16
#define PRTDCB_GENC_PFCLDA_M ICE_M(0xFFFF, 16)
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
......
......@@ -211,7 +211,7 @@ enum ice_flex_rx_mdid {
/* Rx/Tx Flag64 packet flag bits */
enum ice_flg64_bits {
ICE_FLG_PKT_DSI = 0,
ICE_FLG_EVLAN_x8100 = 15,
ICE_FLG_EVLAN_x8100 = 14,
ICE_FLG_EVLAN_x9100,
ICE_FLG_VLAN_x8100,
ICE_FLG_TNL_MAC = 22,
......
......@@ -6,6 +6,24 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
/**
* ice_vsi_type_str - maps VSI type enum to string equivalents
* @type: VSI type enum
*/
const char *ice_vsi_type_str(enum ice_vsi_type type)
{
switch (type) {
case ICE_VSI_PF:
return "ICE_VSI_PF";
case ICE_VSI_VF:
return "ICE_VSI_VF";
case ICE_VSI_LB:
return "ICE_VSI_LB";
default:
return "unknown";
}
}
/**
* ice_vsi_ctrl_rx_rings - Start or stop a VSI's Rx rings
* @vsi: the VSI being configured
......@@ -700,7 +718,8 @@ static void ice_set_rss_vsi_ctx(struct ice_vsi_ctx *ctxt, struct ice_vsi *vsi)
hash_type = ICE_AQ_VSI_Q_OPT_RSS_TPLZ;
break;
case ICE_VSI_LB:
dev_dbg(&pf->pdev->dev, "Unsupported VSI type %d\n", vsi->type);
dev_dbg(&pf->pdev->dev, "Unsupported VSI type %s\n",
ice_vsi_type_str(vsi->type));
return;
default:
dev_warn(&pf->pdev->dev, "Unknown VSI type %d\n", vsi->type);
......@@ -1881,23 +1900,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action...
* Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
* need to be dropped so that VFs cannot send LLDP packets to reconfig
* DCB settings in the HW. Also, if the FW DCBX engine is not running
* then Rx LLDP packets need to be redirected up the stack.
* Also add rules to handle LLDP Tx packets. Tx LLDP packets need to
* be dropped so that VFs cannot send LLDP packets to reconfig DCB
* settings in the HW.
*/
if (!ice_is_safe_mode(pf)) {
if (!ice_is_safe_mode(pf))
if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_cfg_sw_lldp(vsi, true, true);
/* Rx LLDP packets */
if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
}
return vsi;
......@@ -2040,6 +2053,62 @@ void ice_vsi_close(struct ice_vsi *vsi)
ice_vsi_free_rx_rings(vsi);
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resumed
* @locked: is the rtnl_lock already held
*/
int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
return 0;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
err = ice_open(vsi->netdev);
if (!locked)
rtnl_unlock();
}
}
return err;
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
ice_stop(vsi->netdev);
if (!locked)
rtnl_unlock();
} else {
ice_vsi_close(vsi);
}
}
}
/**
* ice_free_res - free a block of resources
* @res: pointer to the resource
......@@ -2431,6 +2500,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
bool ice_is_reset_in_progress(unsigned long *state)
{
return test_bit(__ICE_RESET_OICR_RECV, state) ||
test_bit(__ICE_DCBNL_DEVRESET, state) ||
test_bit(__ICE_PFR_REQ, state) ||
test_bit(__ICE_CORER_REQ, state) ||
test_bit(__ICE_GLOBR_REQ, state);
......
......@@ -6,6 +6,8 @@
#include "ice.h"
const char *ice_vsi_type_str(enum ice_vsi_type type);
int
ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
const u8 *macaddr);
......@@ -62,6 +64,10 @@ int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi);
int ice_ena_vsi(struct ice_vsi *vsi, bool locked);
void ice_dis_vsi(struct ice_vsi *vsi, bool locked);
int ice_free_res(struct ice_res_tracker *res, u16 index, u16 id);
int
......
......@@ -9,6 +9,7 @@
#include "ice_base.h"
#include "ice_lib.h"
#include "ice_dcb_lib.h"
#include "ice_dcb_nl.h"
#define DRV_VERSION_MAJOR 0
#define DRV_VERSION_MINOR 8
......@@ -435,43 +436,12 @@ static void ice_sync_fltr_subtask(struct ice_pf *pf)
}
}
/**
* ice_dis_vsi - pause a VSI
* @vsi: the VSI being paused
* @locked: is the rtnl_lock already held
*/
static void ice_dis_vsi(struct ice_vsi *vsi, bool locked)
{
if (test_bit(__ICE_DOWN, vsi->state))
return;
set_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->type == ICE_VSI_PF && vsi->netdev) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
ice_stop(vsi->netdev);
if (!locked)
rtnl_unlock();
} else {
ice_vsi_close(vsi);
}
}
}
/**
* ice_pf_dis_all_vsi - Pause all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
#ifdef CONFIG_DCB
void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#else
static void ice_pf_dis_all_vsi(struct ice_pf *pf, bool locked)
#endif /* CONFIG_DCB */
{
int v;
......@@ -2547,6 +2517,9 @@ static int ice_setup_pf_sw(struct ice_pf *pf)
/* netdev has to be configured before setting frame size */
ice_vsi_cfg_frame_size(vsi);
/* Setup DCB netlink interface */
ice_dcbnl_setup(vsi);
/* registering the NAPI handler requires both the queues and
* netdev to be created, which are done in ice_pf_vsi_setup()
* and ice_cfg_netdev() respectively
......@@ -2627,6 +2600,7 @@ static void ice_deinit_pf(struct ice_pf *pf)
{
ice_service_task_stop(pf);
mutex_destroy(&pf->sw_mutex);
mutex_destroy(&pf->tc_mutex);
mutex_destroy(&pf->avail_q_mutex);
if (pf->avail_txqs) {
......@@ -2676,6 +2650,7 @@ static int ice_init_pf(struct ice_pf *pf)
ice_set_pf_caps(pf);
mutex_init(&pf->sw_mutex);
mutex_init(&pf->tc_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
......@@ -2925,7 +2900,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->adminq.sq_last_status) {
switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
dev_err(dev,
......@@ -3679,6 +3654,48 @@ static void ice_set_rx_mode(struct net_device *netdev)
ice_service_task_schedule(vsi->back);
}
/**
* ice_set_tx_maxrate - NDO callback to set the maximum per-queue bitrate
* @netdev: network interface device structure
* @queue_index: Queue ID
* @maxrate: maximum bandwidth in Mbps
*/
static int
ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
enum ice_status status;
u16 q_handle;
u8 tc;
/* Validate maxrate requested is within permitted range */
if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
netdev_err(netdev,
"Invalid max rate %d specified for the queue %d\n",
maxrate, queue_index);
return -EINVAL;
}
q_handle = vsi->tx_rings[queue_index]->q_handle;
tc = ice_dcb_get_tc(vsi, queue_index);
/* Set BW back to default, when user set maxrate to 0 */
if (!maxrate)
status = ice_cfg_q_bw_dflt_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW);
else
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
netdev_err(netdev,
"Unable to set Tx max rate, error %d\n", status);
return -EIO;
}
return 0;
}
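/* The maxrate argument arrives in Mbps (e.g. from a write to
* /sys/class/net/<dev>/queues/tx-<n>/tx_maxrate), hence the maxrate * 1000
* conversion to Kbps for the Tx scheduler and the ICE_SCHED_MAX_BW / 1000
* (100 Gbps) upper bound above.
*/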
/**
* ice_fdb_add - add an entry to the hardware database
* @ndm: the input from the stack
......@@ -3759,6 +3776,7 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
int ret = 0;
/* Don't set any netdev advanced features with device in Safe Mode */
......@@ -3768,6 +3786,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
dev_err(&vsi->back->pdev->dev,
"Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
......@@ -4440,54 +4465,6 @@ static void ice_vsi_release_all(struct ice_pf *pf)
}
}
/**
* ice_ena_vsi - resume a VSI
* @vsi: the VSI being resume
* @locked: is the rtnl_lock already held
*/
static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
{
int err = 0;
if (!test_bit(__ICE_NEEDS_RESTART, vsi->state))
return 0;
clear_bit(__ICE_NEEDS_RESTART, vsi->state);
if (vsi->netdev && vsi->type == ICE_VSI_PF) {
if (netif_running(vsi->netdev)) {
if (!locked)
rtnl_lock();
err = ice_open(vsi->netdev);
if (!locked)
rtnl_unlock();
}
}
return err;
}
/**
* ice_pf_ena_all_vsi - Resume all VSIs on a PF
* @pf: the PF
* @locked: is the rtnl_lock already held
*/
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
{
int v;
ice_for_each_vsi(pf, v)
if (pf->vsi[v])
if (ice_ena_vsi(pf->vsi[v], locked))
return -EIO;
return 0;
}
#endif /* CONFIG_DCB */
/**
* ice_vsi_rebuild_by_type - Rebuild VSI of a given type
* @pf: pointer to the PF instance
......@@ -4510,8 +4487,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
"rebuild VSI failed, err %d, VSI index %d, type %d\n",
err, vsi->idx, type);
"rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
......@@ -4519,8 +4496,8 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
dev_err(&pf->pdev->dev,
"replay VSI failed, status %d, VSI index %d, type %d\n",
status, vsi->idx, type);
"replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
......@@ -4533,13 +4510,13 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
err = ice_ena_vsi(vsi, false);
if (err) {
dev_err(&pf->pdev->dev,
"enable VSI failed, err %d, VSI index %d, type %d\n",
err, vsi->idx, type);
"enable VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
vsi->idx, type);
dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %s\n",
vsi->idx, ice_vsi_type_str(type));
}
return 0;
......@@ -5238,6 +5215,7 @@ static const struct net_device_ops ice_netdev_ops = {
.ndo_validate_addr = eth_validate_addr,
.ndo_change_mtu = ice_change_mtu,
.ndo_get_stats64 = ice_get_stats64,
.ndo_set_tx_maxrate = ice_set_tx_maxrate,
.ndo_set_vf_spoofchk = ice_set_vf_spoofchk,
.ndo_set_vf_mac = ice_set_vf_mac,
.ndo_get_vf_config = ice_get_vf_cfg,
......
This diff is collapsed.
......@@ -8,6 +8,36 @@
#define ICE_QGRP_LAYER_OFFSET 2
#define ICE_VSI_LAYER_OFFSET 4
#define ICE_SCHED_INVAL_LAYER_NUM 0xFF
/* Burst size is a 12 bits register that is configured while creating the RL
* profile(s). MSB is a granularity bit and tells the granularity type
* 0 - LSB bits are in 64 bytes granularity
* 1 - LSB bits are in 1K bytes granularity
*/
#define ICE_64_BYTE_GRANULARITY 0
#define ICE_KBYTE_GRANULARITY BIT(11)
#define ICE_MIN_BURST_SIZE_ALLOWED 64 /* In Bytes */
#define ICE_MAX_BURST_SIZE_ALLOWED \
((BIT(11) - 1) * 1024) /* In Bytes */
#define ICE_MAX_BURST_SIZE_64_BYTE_GRANULARITY \
((BIT(11) - 1) * 64) /* In Bytes */
#define ICE_MAX_BURST_SIZE_KBYTE_GRANULARITY ICE_MAX_BURST_SIZE_ALLOWED
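/* With 11 value bits, the largest encodable burst is (BIT(11) - 1) * 64 =
* 131,008 bytes in 64-byte granularity and (BIT(11) - 1) * 1024 = 2,096,128
* bytes in 1K granularity, which is where the two maximums above come from.
*/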
#define ICE_RL_PROF_FREQUENCY 446000000
#define ICE_RL_PROF_ACCURACY_BYTES 128
#define ICE_RL_PROF_MULTIPLIER 10000
#define ICE_RL_PROF_TS_MULTIPLIER 32
#define ICE_RL_PROF_FRACTION 512
/* BW rate limit profile parameters list entry along
* with bandwidth maintained per layer in port info
*/
struct ice_aqc_rl_profile_info {
struct ice_aqc_rl_profile_elem profile;
struct list_head list_entry;
u32 bw; /* requested */
u16 prof_id_ref; /* profile ID to node association ref count */
};
struct ice_sched_agg_vsi_info {
struct list_head list_entry;
......@@ -48,4 +78,13 @@ enum ice_status
ice_sched_cfg_vsi(struct ice_port_info *pi, u16 vsi_handle, u8 tc, u16 maxqs,
u8 owner, bool enable);
enum ice_status ice_rm_vsi_lan_cfg(struct ice_port_info *pi, u16 vsi_handle);
enum ice_status
ice_cfg_q_bw_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type, u32 bw);
enum ice_status
ice_cfg_q_bw_dflt_lmt(struct ice_port_info *pi, u16 vsi_handle, u8 tc,
u16 q_handle, enum ice_rl_type rl_type);
enum ice_status ice_cfg_rl_burst_size(struct ice_hw *hw, u32 bytes);
enum ice_status
ice_sched_replay_q_bw(struct ice_port_info *pi, struct ice_q_ctx *q_ctx);
#endif /* _ICE_SCHED_H_ */
......@@ -416,8 +416,7 @@ ice_add_vsi(struct ice_hw *hw, u16 vsi_handle, struct ice_vsi_ctx *vsi_ctx,
ice_save_vsi_ctx(hw, vsi_handle, tmp_vsi_ctx);
} else {
/* update with new HW VSI num */
if (tmp_vsi_ctx->vsi_num != vsi_ctx->vsi_num)
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
tmp_vsi_ctx->vsi_num = vsi_ctx->vsi_num;
}
return 0;
......
......@@ -14,11 +14,6 @@
#define ICE_VSI_INVAL_ID 0xffff
#define ICE_INVAL_Q_HANDLE 0xFFFF
/* VSI queue context structure */
struct ice_q_ctx {
u16 q_handle;
};
/* VSI context structure for add/get/update/free operations */
struct ice_vsi_ctx {
u16 vsi_num;
......
......@@ -19,6 +19,17 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
return test_bit(tc, &bitmap);
}
static inline u64 round_up_64bit(u64 a, u32 b)
{
return div64_long(((a) + (b) / 2), (b));
}
static inline u32 ice_round_to_num(u32 N, u32 R)
{
return ((((N) % (R)) < ((R) / 2)) ? (((N) / (R)) * (R)) :
((((N) + (R) - 1) / (R)) * (R)));
}
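/* Worked examples: round_up_64bit(100, 3) = (100 + 1) / 3 = 33, i.e. the
* quotient rounded to the nearest integer; ice_round_to_num(130, 64) = 128
* because the remainder 2 is below 32, while ice_round_to_num(160, 64) = 192
* because the remainder 32 rounds up.
*/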
/* Driver always calls main vsi_handle first */
#define ICE_MAIN_VSI_HANDLE 0
......@@ -35,6 +46,8 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_DESC BIT_ULL(25)
#define ICE_DBG_AQ_DESC_BUF BIT_ULL(26)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
#define ICE_DBG_USER BIT_ULL(31)
......@@ -272,10 +285,56 @@ enum ice_agg_type {
ICE_AGG_TYPE_QG
};
/* Rate limit types */
enum ice_rl_type {
ICE_UNKNOWN_BW = 0,
ICE_MIN_BW, /* for CIR profile */
ICE_MAX_BW, /* for EIR profile */
ICE_SHARED_BW /* for shared profile */
};
#define ICE_SCHED_MIN_BW 500 /* in Kbps */
#define ICE_SCHED_MAX_BW 100000000 /* in Kbps */
#define ICE_SCHED_DFLT_BW 0xFFFFFFFF /* unlimited */
#define ICE_SCHED_DFLT_RL_PROF_ID 0
#define ICE_SCHED_NO_SHARED_RL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BW_WT 1
#define ICE_SCHED_INVAL_PROF_ID 0xFFFF
#define ICE_SCHED_DFLT_BURST_SIZE (15 * 1024) /* in bytes (15k) */
/* VSI type list entry to locate corresponding VSI/ag nodes */
/* Data structure for saving BW information */
enum ice_bw_type {
ICE_BW_TYPE_PRIO,
ICE_BW_TYPE_CIR,
ICE_BW_TYPE_CIR_WT,
ICE_BW_TYPE_EIR,
ICE_BW_TYPE_EIR_WT,
ICE_BW_TYPE_SHARED,
ICE_BW_TYPE_CNT /* This must be last */
};
struct ice_bw {
u32 bw;
u16 bw_alloc;
};
struct ice_bw_type_info {
DECLARE_BITMAP(bw_t_bitmap, ICE_BW_TYPE_CNT);
u8 generic;
struct ice_bw cir_bw;
struct ice_bw eir_bw;
u32 shared_bw;
};
/* VSI queue context structure for given TC */
struct ice_q_ctx {
u16 q_handle;
u32 q_teid;
/* bw_t_info saves queue BW information */
struct ice_bw_type_info bw_t_info;
};
/* VSI type list entry to locate corresponding VSI/aggregator nodes */
struct ice_sched_vsi_info {
struct ice_sched_node *vsi_node[ICE_MAX_TRAFFIC_CLASS];
struct ice_sched_node *ag_node[ICE_MAX_TRAFFIC_CLASS];
......@@ -364,6 +423,8 @@ struct ice_port_info {
struct mutex sched_lock; /* protect access to TXSched tree */
struct ice_sched_node *
sib_head[ICE_MAX_TRAFFIC_CLASS][ICE_AQC_TOPO_MAX_LEVEL_NUM];
/* List contain profile ID(s) and other params per layer */
struct list_head rl_prof_list[ICE_AQC_TOPO_MAX_LEVEL_NUM];
struct ice_dcbx_cfg local_dcbx_cfg; /* Oper/Local Cfg */
/* DCBX info */
struct ice_dcbx_cfg remote_dcbx_cfg; /* Peer Cfg */
......@@ -415,6 +476,8 @@ struct ice_hw {
u8 pf_id; /* device profile info */
u16 max_burst_size; /* driver sets this value */
/* Tx Scheduler values */
u16 num_tx_sched_layers;
u16 num_tx_sched_phys_layers;
......
......@@ -1151,6 +1151,25 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
return true;
}
/**
* ice_is_vf_disabled
* @vf: pointer to the VF info
*
* Returns true if the PF or VF is disabled, false otherwise.
*/
static bool ice_is_vf_disabled(struct ice_vf *vf)
{
struct ice_pf *pf = vf->pf;
/* If the PF has been disabled, there is no need to reset the VF until
* the PF is active again. Similarly, if the VF has been disabled, this
* means something else is resetting the VF, so we shouldn't continue.
* Otherwise, set disable VF state bit for actual reset, and continue.
*/
return (test_bit(__ICE_VF_DIS, pf->state) ||
test_bit(ICE_VF_STATE_DIS, vf->vf_states));
}
/**
* ice_reset_vf - Reset a particular VF
* @vf: pointer to the VF structure
......@@ -1168,19 +1187,15 @@ static bool ice_reset_vf(struct ice_vf *vf, bool is_vflr)
u32 reg;
int i;
/* If the PF has been disabled, there is no need resetting VF until
* PF is active again.
*/
if (test_bit(__ICE_VF_DIS, pf->state))
return false;
/* If the VF has been disabled, this means something else is
* resetting the VF, so we shouldn't continue. Otherwise, set
* disable VF state bit for actual reset, and continue.
*/
if (test_and_set_bit(ICE_VF_STATE_DIS, vf->vf_states))
return false;
if (ice_is_vf_disabled(vf)) {
dev_dbg(&pf->pdev->dev,
"VF is already disabled, there is no need for resetting it, telling VM, all is fine %d\n",
vf->vf_id);
return true;
}
/* Set VF disable bit state here, before triggering reset */
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
ice_trigger_vf_reset(vf, is_vflr, false);
vsi = pf->vsi[vf->lan_vsi_idx];
......@@ -2158,9 +2173,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vector_id = map->vector_id;
vsi_id = map->vsi_id;
/* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
/* vector_id is always 0-based for each VF, and can never be
* larger than or equal to the max allowed interrupts per VF
*/
if (!(vector_id < ICE_MAX_INTR_PER_VF) ||
!ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
......@@ -3122,6 +3139,23 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
return ret;
}
/**
* ice_wait_on_vf_reset
* @vf: the VF being reset
*
* Poll to make sure a given VF is ready after reset
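* Worst case this polls ICE_MAX_VF_RESET_WAIT (15) times with a 20 ms sleep,
* i.e. roughly 300 ms, which covers the ~250 ms a VF reset can take.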
*/
static void ice_wait_on_vf_reset(struct ice_vf *vf)
{
int i;
for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
break;
msleep(20);
}
}
/**
* ice_set_vf_mac
* @netdev: network interface device structure
......@@ -3145,6 +3179,15 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
}
vf = &pf->vf[vf_id];
/* Don't set MAC on disabled VF */
if (ice_is_vf_disabled(vf))
return -EINVAL;
/* In case VF is in reset mode, wait until it is completed. Depending
* on factors like queue disabling routine, this could take ~250ms
*/
ice_wait_on_vf_reset(vf);
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
netdev_err(netdev, "VF %d in reset. Try again.\n", vf_id);
return -EBUSY;
......@@ -3192,6 +3235,15 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
}
vf = &pf->vf[vf_id];
/* Don't set Trusted Mode on disabled VF */
if (ice_is_vf_disabled(vf))
return -EINVAL;
/* In case VF is in reset mode, wait until it is completed. Depending
* on factors like queue disabling routine, this could take ~250ms
*/
ice_wait_on_vf_reset(vf);
if (!test_bit(ICE_VF_STATE_INIT, vf->vf_states)) {
dev_err(&pf->pdev->dev, "VF %d in reset. Try again.\n", vf_id);
return -EBUSY;
......
......@@ -38,6 +38,7 @@
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
#define ICE_MAX_VF_RESET_WAIT 15
/* Specific VF states */
enum ice_vf_states {
......