Commit 7b3ed2a1 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2019-05-30

This series contains updates to ice driver only.

Brett continues his work with interrupt handling by fixing an issue
where we were writing to the incorrect register to disable all VF
interrupts.

Tony consolidates the unicast and multicast MAC filters into a single
new function.

Anirudh adds support for virtual channel vector mapping to receive and
transmit queues.  This uses a bitmap to associate the indicated queues
with the specified vector.  He also makes several cosmetic code cleanups
and updates the driver to align with the current specification for
managing MAC operation codes (opcodes).
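
As a rough illustration of that mapping (a simplified sketch, not the
exact driver code; the ICE_MAX_VF_QS bound below is a placeholder), each
virtchnl vector map carries an rxq_map/txq_map bitmap, and every set bit
names a queue to be tied to that vector:

    /* Simplified sketch: associate each Rx queue set in the bitmap with
     * the given vector and program its interrupt cause.
     */
    static void map_rx_queues_to_vector(struct ice_vsi *vsi,
                                        struct virtchnl_vector_map *map,
                                        struct ice_q_vector *q_vector,
                                        u16 vector_id)
    {
            unsigned long qmap = map->rxq_map;
            u16 q_id;

            for_each_set_bit(q_id, &qmap, ICE_MAX_VF_QS /* placeholder */) {
                    vsi->rx_rings[q_id]->q_vector = q_vector;
                    q_vector->num_ring_rx++;
                    ice_cfg_rxq_interrupt(vsi, q_id, vector_id,
                                          q_vector->rx.itr_idx);
            }
    }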

Paul adds support for Forward Error Correction (FEC) and also adds
ethtool get and set handlers to query and modify FEC parameters.
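
Once those handlers are wired up, FEC can be inspected and changed from
user space with ethtool, for example (the interface name here is just a
placeholder):

    ethtool --show-fec eth0
    ethtool --set-fec eth0 encoding rs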

Bruce cleans up the driver code to fix a number of issues, such as
reducing the scope of some local variables, reducing the number of
de-references by changing a local variable, and reordering code to
remove unnecessary gotos.

Dave adds switch rules to handle LLDP packets and, in the process, fixes
a couple of issues found along the way, such as no longer treating a
DCBX state of "not started" as an error and no longer hard coding the
filter information flag to transmit.

Jacob updates the driver to allow for more granular debugging by using a
distinct debug bit for dumping firmware logs.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 84a32ede 2f2da36e
...@@ -44,7 +44,7 @@ ...@@ -44,7 +44,7 @@
extern const char ice_drv_ver[]; extern const char ice_drv_ver[];
#define ICE_BAR0 0 #define ICE_BAR0 0
#define ICE_REQ_DESC_MULTIPLE 32 #define ICE_REQ_DESC_MULTIPLE 32
#define ICE_MIN_NUM_DESC ICE_REQ_DESC_MULTIPLE #define ICE_MIN_NUM_DESC 64
#define ICE_MAX_NUM_DESC 8160 #define ICE_MAX_NUM_DESC 8160
#define ICE_DFLT_MIN_RX_DESC 512 #define ICE_DFLT_MIN_RX_DESC 512
/* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the /* if the default number of Rx descriptors between ICE_MAX_NUM_DESC and the
......
...@@ -35,8 +35,8 @@ struct ice_aqc_get_ver { ...@@ -35,8 +35,8 @@ struct ice_aqc_get_ver {
/* Queue Shutdown (direct 0x0003) */ /* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown { struct ice_aqc_q_shutdown {
#define ICE_AQC_DRIVER_UNLOADING BIT(0)
__le32 driver_unloading; __le32 driver_unloading;
#define ICE_AQC_DRIVER_UNLOADING BIT(0)
u8 reserved[12]; u8 reserved[12];
}; };
...@@ -120,11 +120,9 @@ struct ice_aqc_manage_mac_read { ...@@ -120,11 +120,9 @@ struct ice_aqc_manage_mac_read {
#define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7) #define ICE_AQC_MAN_MAC_WOL_ADDR_VALID BIT(7)
#define ICE_AQC_MAN_MAC_READ_S 4 #define ICE_AQC_MAN_MAC_READ_S 4
#define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S) #define ICE_AQC_MAN_MAC_READ_M (0xF << ICE_AQC_MAN_MAC_READ_S)
u8 lport_num; u8 rsvd[2];
u8 lport_num_valid;
#define ICE_AQC_MAN_MAC_PORT_NUM_IS_VALID BIT(0)
u8 num_addr; /* Used in response */ u8 num_addr; /* Used in response */
u8 reserved[3]; u8 rsvd1[3];
__le32 addr_high; __le32 addr_high;
__le32 addr_low; __le32 addr_low;
}; };
...@@ -140,7 +138,7 @@ struct ice_aqc_manage_mac_read_resp { ...@@ -140,7 +138,7 @@ struct ice_aqc_manage_mac_read_resp {
/* Manage MAC address, write command - direct (0x0108) */ /* Manage MAC address, write command - direct (0x0108) */
struct ice_aqc_manage_mac_write { struct ice_aqc_manage_mac_write {
u8 port_num; u8 rsvd;
u8 flags; u8 flags;
#define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0) #define ICE_AQC_MAN_MAC_WR_MC_MAG_EN BIT(0)
#define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1) #define ICE_AQC_MAN_MAC_WR_WOL_LAA_PFR_KEEP BIT(1)
...@@ -920,6 +918,8 @@ struct ice_aqc_get_phy_caps_data { ...@@ -920,6 +918,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EN_LINK BIT(3) #define ICE_AQC_PHY_EN_LINK BIT(3)
#define ICE_AQC_PHY_AN_MODE BIT(4) #define ICE_AQC_PHY_AN_MODE BIT(4)
#define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5) #define ICE_AQC_GET_PHY_EN_MOD_QUAL BIT(5)
#define ICE_AQC_PHY_EN_AUTO_FEC BIT(7)
#define ICE_AQC_PHY_CAPS_MASK ICE_M(0xff, 0)
u8 low_power_ctrl; u8 low_power_ctrl;
#define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0) #define ICE_AQC_PHY_EN_D3COLD_LOW_POWER_AUTONEG BIT(0)
__le16 eee_cap; __le16 eee_cap;
...@@ -932,6 +932,7 @@ struct ice_aqc_get_phy_caps_data { ...@@ -932,6 +932,7 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6) #define ICE_AQC_PHY_EEE_EN_40GBASE_KR4 BIT(6)
__le16 eeer_value; __le16 eeer_value;
u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */ u8 phy_id_oui[4]; /* PHY/Module ID connected on the port */
u8 phy_fw_ver[8];
u8 link_fec_options; u8 link_fec_options;
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0) #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN BIT(0)
#define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1) #define ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ BIT(1)
...@@ -940,6 +941,8 @@ struct ice_aqc_get_phy_caps_data { ...@@ -940,6 +941,8 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4) #define ICE_AQC_PHY_FEC_25G_RS_544_REQ BIT(4)
#define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6) #define ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN BIT(6)
#define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7) #define ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN BIT(7)
#define ICE_AQC_PHY_FEC_MASK ICE_M(0xdf, 0)
u8 rsvd1; /* Byte 35 reserved */
u8 extended_compliance_code; u8 extended_compliance_code;
#define ICE_MODULE_TYPE_TOTAL_BYTE 3 #define ICE_MODULE_TYPE_TOTAL_BYTE 3
u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE]; u8 module_type[ICE_MODULE_TYPE_TOTAL_BYTE];
...@@ -954,13 +957,14 @@ struct ice_aqc_get_phy_caps_data { ...@@ -954,13 +957,14 @@ struct ice_aqc_get_phy_caps_data {
#define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0 #define ICE_AQC_MOD_TYPE_BYTE2_SFP_PLUS 0xA0
#define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86 #define ICE_AQC_MOD_TYPE_BYTE2_QSFP_PLUS 0x86
u8 qualified_module_count; u8 qualified_module_count;
u8 rsvd2[7]; /* Bytes 47:41 reserved */
#define ICE_AQC_QUAL_MOD_COUNT_MAX 16 #define ICE_AQC_QUAL_MOD_COUNT_MAX 16
struct { struct {
u8 v_oui[3]; u8 v_oui[3];
u8 rsvd1; u8 rsvd3;
u8 v_part[16]; u8 v_part[16];
__le32 v_rev; __le32 v_rev;
__le64 rsvd8; __le64 rsvd4;
} qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX]; } qual_modules[ICE_AQC_QUAL_MOD_COUNT_MAX];
}; };
...@@ -1062,6 +1066,7 @@ struct ice_aqc_get_link_status_data { ...@@ -1062,6 +1066,7 @@ struct ice_aqc_get_link_status_data {
#define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0) #define ICE_AQ_LINK_25G_KR_FEC_EN BIT(0)
#define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1) #define ICE_AQ_LINK_25G_RS_528_FEC_EN BIT(1)
#define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2) #define ICE_AQ_LINK_25G_RS_544_FEC_EN BIT(2)
#define ICE_AQ_FEC_MASK ICE_M(0x7, 0)
/* Pacing Config */ /* Pacing Config */
#define ICE_AQ_CFG_PACING_S 3 #define ICE_AQ_CFG_PACING_S 3
#define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S) #define ICE_AQ_CFG_PACING_M (0xF << ICE_AQ_CFG_PACING_S)
...@@ -1268,7 +1273,7 @@ struct ice_aqc_get_cee_dcb_cfg_resp { ...@@ -1268,7 +1273,7 @@ struct ice_aqc_get_cee_dcb_cfg_resp {
}; };
/* Set Local LLDP MIB (indirect 0x0A08) /* Set Local LLDP MIB (indirect 0x0A08)
* Used to replace the local MIB of a given LLDP agent. e.g. DCBx * Used to replace the local MIB of a given LLDP agent. e.g. DCBX
*/ */
struct ice_aqc_lldp_set_local_mib { struct ice_aqc_lldp_set_local_mib {
u8 type; u8 type;
...@@ -1285,7 +1290,7 @@ struct ice_aqc_lldp_set_local_mib { ...@@ -1285,7 +1290,7 @@ struct ice_aqc_lldp_set_local_mib {
}; };
/* Stop/Start LLDP Agent (direct 0x0A09) /* Stop/Start LLDP Agent (direct 0x0A09)
* Used for stopping/starting specific LLDP agent. e.g. DCBx. * Used for stopping/starting specific LLDP agent. e.g. DCBX.
* The same structure is used for the response, with the command field * The same structure is used for the response, with the command field
* being used as the status field. * being used as the status field.
*/ */
......
...@@ -304,6 +304,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse, ...@@ -304,6 +304,8 @@ ice_aq_get_link_info(struct ice_port_info *pi, bool ena_lse,
hw_link_info->an_info = link_data.an_info; hw_link_info->an_info = link_data.an_info;
hw_link_info->ext_info = link_data.ext_info; hw_link_info->ext_info = link_data.ext_info;
hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size); hw_link_info->max_frame_size = le16_to_cpu(link_data.max_frame_size);
hw_link_info->fec_info = link_data.cfg & ICE_AQ_FEC_MASK;
hw_link_info->topo_media_conflict = link_data.topo_media_conflict;
hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M; hw_link_info->pacing = link_data.cfg & ICE_AQ_CFG_PACING_M;
/* update fc info */ /* update fc info */
...@@ -497,7 +499,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw) ...@@ -497,7 +499,7 @@ static enum ice_status ice_get_fw_log_cfg(struct ice_hw *hw)
if (!status) { if (!status) {
u16 i; u16 i;
/* Save fw logging information into the hw structure */ /* Save FW logging information into the HW structure */
for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) { for (i = 0; i < ICE_AQC_FW_LOG_ID_MAX; i++) {
u16 v, m, flgs; u16 v, m, flgs;
...@@ -679,17 +681,17 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable) ...@@ -679,17 +681,17 @@ static enum ice_status ice_cfg_fw_log(struct ice_hw *hw, bool enable)
*/ */
void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf) void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
{ {
ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg Start ]\n"); ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg Start ]\n");
ice_debug_array(hw, ICE_DBG_AQ_MSG, 16, 1, (u8 *)buf, ice_debug_array(hw, ICE_DBG_FW_LOG, 16, 1, (u8 *)buf,
le16_to_cpu(desc->datalen)); le16_to_cpu(desc->datalen));
ice_debug(hw, ICE_DBG_AQ_MSG, "[ FW Log Msg End ]\n"); ice_debug(hw, ICE_DBG_FW_LOG, "[ FW Log Msg End ]\n");
} }
/** /**
* ice_get_itr_intrl_gran - determine int/intrl granularity * ice_get_itr_intrl_gran - determine int/intrl granularity
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* *
* Determines the itr/intrl granularities based on the maximum aggregate * Determines the ITR/intrl granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on. * bandwidth according to the device's configuration during power-on.
*/ */
static void ice_get_itr_intrl_gran(struct ice_hw *hw) static void ice_get_itr_intrl_gran(struct ice_hw *hw)
...@@ -860,6 +862,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw) ...@@ -860,6 +862,10 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
/** /**
* ice_deinit_hw - unroll initialization operations done by ice_init_hw * ice_deinit_hw - unroll initialization operations done by ice_init_hw
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
*
* This should be called only during nominal operation, not as a result of
* ice_init_hw() failing since ice_init_hw() will take care of unrolling
* applicable initializations if it fails for any reason.
*/ */
void ice_deinit_hw(struct ice_hw *hw) void ice_deinit_hw(struct ice_hw *hw)
{ {
...@@ -1996,36 +2002,37 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, ...@@ -1996,36 +2002,37 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
*/ */
enum ice_status ice_update_link_info(struct ice_port_info *pi) enum ice_status ice_update_link_info(struct ice_port_info *pi)
{ {
struct ice_aqc_get_phy_caps_data *pcaps; struct ice_link_status *li;
struct ice_phy_info *phy_info;
enum ice_status status; enum ice_status status;
struct ice_hw *hw;
if (!pi) if (!pi)
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
hw = pi->hw; li = &pi->phy.link_info;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps), GFP_KERNEL);
if (!pcaps)
return ICE_ERR_NO_MEMORY;
phy_info = &pi->phy;
status = ice_aq_get_link_info(pi, true, NULL, NULL); status = ice_aq_get_link_info(pi, true, NULL, NULL);
if (status) if (status)
goto out; return status;
if (li->link_info & ICE_AQ_MEDIA_AVAILABLE) {
struct ice_aqc_get_phy_caps_data *pcaps;
struct ice_hw *hw;
hw = pi->hw;
pcaps = devm_kzalloc(ice_hw_to_dev(hw), sizeof(*pcaps),
GFP_KERNEL);
if (!pcaps)
return ICE_ERR_NO_MEMORY;
if (phy_info->link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
pcaps, NULL); pcaps, NULL);
if (status) if (!status)
goto out; memcpy(li->module_type, &pcaps->module_type,
sizeof(li->module_type));
memcpy(phy_info->link_info.module_type, &pcaps->module_type,
sizeof(phy_info->link_info.module_type));
}
out:
devm_kfree(ice_hw_to_dev(hw), pcaps); devm_kfree(ice_hw_to_dev(hw), pcaps);
}
return status; return status;
} }
...@@ -2129,6 +2136,74 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update) ...@@ -2129,6 +2136,74 @@ ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, bool ena_auto_link_update)
return status; return status;
} }
/**
* ice_copy_phy_caps_to_cfg - Copy PHY ability data to configuration data
* @caps: PHY ability structure to copy data from
* @cfg: PHY configuration structure to copy data to
*
* Helper function to copy AQC PHY get ability data to PHY set configuration
* data structure
*/
void
ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg)
{
if (!caps || !cfg)
return;
cfg->phy_type_low = caps->phy_type_low;
cfg->phy_type_high = caps->phy_type_high;
cfg->caps = caps->caps;
cfg->low_power_ctrl = caps->low_power_ctrl;
cfg->eee_cap = caps->eee_cap;
cfg->eeer_value = caps->eeer_value;
cfg->link_fec_opt = caps->link_fec_options;
}
/**
* ice_cfg_phy_fec - Configure PHY FEC data based on FEC mode
* @cfg: PHY configuration data to set FEC mode
* @fec: FEC mode to configure
*
* Caller should copy ice_aqc_get_phy_caps_data.caps ICE_AQC_PHY_EN_AUTO_FEC
* (bit 7) and ice_aqc_get_phy_caps_data.link_fec_options to cfg.caps
* ICE_AQ_PHY_ENA_AUTO_FEC (bit 7) and cfg.link_fec_options before calling.
*/
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec)
{
switch (fec) {
case ICE_FEC_BASER:
/* Clear auto FEC and RS bits, and AND BASE-R ability
* bits and OR request bits.
*/
cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN |
ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ |
ICE_AQC_PHY_FEC_25G_KR_REQ;
break;
case ICE_FEC_RS:
/* Clear auto FEC and BASE-R bits, and AND RS ability
* bits and OR request bits.
*/
cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN;
cfg->link_fec_opt |= ICE_AQC_PHY_FEC_25G_RS_528_REQ |
ICE_AQC_PHY_FEC_25G_RS_544_REQ;
break;
case ICE_FEC_NONE:
/* Clear auto FEC and all FEC option bits. */
cfg->caps &= ~ICE_AQC_PHY_EN_AUTO_FEC;
cfg->link_fec_opt &= ~ICE_AQC_PHY_FEC_MASK;
break;
case ICE_FEC_AUTO:
/* AND auto FEC bit, and all caps bits. */
cfg->caps &= ICE_AQC_PHY_CAPS_MASK;
break;
}
}
/** /**
* ice_get_link_status - get status of the HW network link * ice_get_link_status - get status of the HW network link
* @pi: port information structure * @pi: port information structure
...@@ -2624,7 +2699,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps, ...@@ -2624,7 +2699,7 @@ ice_aq_dis_lan_txq(struct ice_hw *hw, u8 num_qgrps,
ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n", ice_debug(hw, ICE_DBG_SCHED, "VM%d disable failed %d\n",
vmvf_num, hw->adminq.sq_last_status); vmvf_num, hw->adminq.sq_last_status);
else else
ice_debug(hw, ICE_DBG_SCHED, "disable Q %d failed %d\n", ice_debug(hw, ICE_DBG_SCHED, "disable queue %d failed %d\n",
le16_to_cpu(qg_list[0].q_id[0]), le16_to_cpu(qg_list[0].q_id[0]),
hw->adminq.sq_last_status); hw->adminq.sq_last_status);
} }
......
...@@ -86,7 +86,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport, ...@@ -86,7 +86,11 @@ ice_aq_set_phy_cfg(struct ice_hw *hw, u8 lport,
enum ice_status enum ice_status
ice_set_fc(struct ice_port_info *pi, u8 *aq_failures, ice_set_fc(struct ice_port_info *pi, u8 *aq_failures,
bool ena_auto_link_update); bool ena_auto_link_update);
void
ice_cfg_phy_fec(struct ice_aqc_set_phy_cfg_data *cfg, enum ice_fec_mode fec);
void
ice_copy_phy_caps_to_cfg(struct ice_aqc_get_phy_caps_data *caps,
struct ice_aqc_set_phy_cfg_data *cfg);
enum ice_status enum ice_status
ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link, ice_aq_set_link_restart_an(struct ice_port_info *pi, bool ena_link,
struct ice_sq_cd *cd); struct ice_sq_cd *cd);
......
...@@ -439,7 +439,7 @@ do { \ ...@@ -439,7 +439,7 @@ do { \
/* free the buffer info list */ \ /* free the buffer info list */ \
if ((qi)->ring.cmd_buf) \ if ((qi)->ring.cmd_buf) \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.cmd_buf); \
/* free dma head */ \ /* free DMA head */ \
devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \ devm_kfree(ice_hw_to_dev(hw), (qi)->ring.dma_head); \
} while (0) } while (0)
......
...@@ -35,7 +35,7 @@ enum ice_ctl_q { ...@@ -35,7 +35,7 @@ enum ice_ctl_q {
#define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */ #define ICE_CTL_Q_SQ_CMD_TIMEOUT 250 /* msecs */
struct ice_ctl_q_ring { struct ice_ctl_q_ring {
void *dma_head; /* Virtual address to dma head */ void *dma_head; /* Virtual address to DMA head */
struct ice_dma_mem desc_buf; /* descriptor ring memory */ struct ice_dma_mem desc_buf; /* descriptor ring memory */
void *cmd_buf; /* command buffer memory */ void *cmd_buf; /* command buffer memory */
......
...@@ -669,7 +669,7 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg) ...@@ -669,7 +669,7 @@ ice_lldp_to_dcb_cfg(u8 *lldpmib, struct ice_dcbx_cfg *dcbcfg)
/** /**
* ice_aq_get_dcb_cfg * ice_aq_get_dcb_cfg
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @mib_type: mib type for the query * @mib_type: MIB type for the query
* @bridgetype: bridge type for the query (remote) * @bridgetype: bridge type for the query (remote)
* @dcbcfg: store for LLDPDU data * @dcbcfg: store for LLDPDU data
* *
...@@ -700,13 +700,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype, ...@@ -700,13 +700,13 @@ ice_aq_get_dcb_cfg(struct ice_hw *hw, u8 mib_type, u8 bridgetype,
} }
/** /**
* ice_aq_start_stop_dcbx - Start/Stop DCBx service in FW * ice_aq_start_stop_dcbx - Start/Stop DCBX service in FW
* @hw: pointer to the HW struct * @hw: pointer to the HW struct
* @start_dcbx_agent: True if DCBx Agent needs to be started * @start_dcbx_agent: True if DCBX Agent needs to be started
* False if DCBx Agent needs to be stopped * False if DCBX Agent needs to be stopped
* @dcbx_agent_status: FW indicates back the DCBx agent status * @dcbx_agent_status: FW indicates back the DCBX agent status
* True if DCBx Agent is active * True if DCBX Agent is active
* False if DCBx Agent is stopped * False if DCBX Agent is stopped
* @cd: pointer to command details structure or NULL * @cd: pointer to command details structure or NULL
* *
* Start/Stop the embedded dcbx Agent. In case that this wrapper function * Start/Stop the embedded dcbx Agent. In case that this wrapper function
......
...@@ -120,11 +120,13 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf) ...@@ -120,11 +120,13 @@ static void ice_pf_dcb_recfg(struct ice_pf *pf)
tc_map = ICE_DFLT_TRAFFIC_CLASS; tc_map = ICE_DFLT_TRAFFIC_CLASS;
ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map); ret = ice_vsi_cfg_tc(pf->vsi[v], tc_map);
if (ret) if (ret) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"Failed to config TC for VSI index: %d\n", "Failed to config TC for VSI index: %d\n",
pf->vsi[v]->idx); pf->vsi[v]->idx);
else continue;
}
ice_vsi_map_rings_to_vectors(pf->vsi[v]); ice_vsi_map_rings_to_vectors(pf->vsi[v]);
} }
} }
...@@ -281,7 +283,7 @@ void ice_dcb_rebuild(struct ice_pf *pf) ...@@ -281,7 +283,7 @@ void ice_dcb_rebuild(struct ice_pf *pf)
/** /**
* ice_dcb_init_cfg - set the initial DCB config in SW * ice_dcb_init_cfg - set the initial DCB config in SW
* @pf: pf to apply config to * @pf: PF to apply config to
* @locked: Is the RTNL held * @locked: Is the RTNL held
*/ */
static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
...@@ -309,7 +311,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked) ...@@ -309,7 +311,7 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
/** /**
* ice_dcb_sw_default_config - Apply a default DCB config * ice_dcb_sw_default_config - Apply a default DCB config
* @pf: pf to apply config to * @pf: PF to apply config to
* @locked: was this function called with RTNL held * @locked: was this function called with RTNL held
*/ */
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
...@@ -354,7 +356,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked) ...@@ -354,7 +356,7 @@ static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool locked)
/** /**
* ice_init_pf_dcb - initialize DCB for a PF * ice_init_pf_dcb - initialize DCB for a PF
* @pf: pf to initiialize DCB for * @pf: PF to initialize DCB for
* @locked: Was function called with RTNL held * @locked: Was function called with RTNL held
*/ */
int ice_init_pf_dcb(struct ice_pf *pf, bool locked) int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
...@@ -369,7 +371,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) ...@@ -369,7 +371,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
err = ice_init_dcb(hw); err = ice_init_dcb(hw);
if (err) { if (err) {
/* FW LLDP is not active, default to SW DCBx/LLDP */ /* FW LLDP is not active, default to SW DCBX/LLDP */
dev_info(&pf->pdev->dev, "FW LLDP is not active\n"); dev_info(&pf->pdev->dev, "FW LLDP is not active\n");
hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED; hw->port_info->dcbx_status = ICE_DCBX_STATUS_NOT_STARTED;
hw->port_info->is_sw_lldp = true; hw->port_info->is_sw_lldp = true;
...@@ -387,10 +389,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked) ...@@ -387,10 +389,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags); set_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags);
} }
if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED) { if (port_info->dcbx_status == ICE_DCBX_STATUS_NOT_STARTED)
sw_default = 1;
dev_info(&pf->pdev->dev, "DCBX not started\n"); dev_info(&pf->pdev->dev, "DCBX not started\n");
}
if (sw_default) { if (sw_default) {
err = ice_dcb_sw_dflt_cfg(pf, locked); err = ice_dcb_sw_dflt_cfg(pf, locked);
...@@ -604,10 +604,10 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf, ...@@ -604,10 +604,10 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
/* store the old configuration */ /* store the old configuration */
tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg; tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
/* Reset the old DCBx configuration data */ /* Reset the old DCBX configuration data */
memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg)); memset(&pi->local_dcbx_cfg, 0, sizeof(pi->local_dcbx_cfg));
/* Get updated DCBx data from firmware */ /* Get updated DCBX data from firmware */
ret = ice_get_dcb_cfg(pf->hw.port_info); ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) { if (ret) {
dev_err(&pf->pdev->dev, "Failed to get DCB config\n"); dev_err(&pf->pdev->dev, "Failed to get DCB config\n");
......
...@@ -959,6 +959,185 @@ ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) ...@@ -959,6 +959,185 @@ ice_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state)
return 0; return 0;
} }
/**
* ice_set_fec_cfg - Set link FEC options
* @netdev: network interface device structure
* @req_fec: FEC mode to configure
*/
static int ice_set_fec_cfg(struct net_device *netdev, enum ice_fec_mode req_fec)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_set_phy_cfg_data config = { 0 };
struct ice_aqc_get_phy_caps_data *caps;
struct ice_vsi *vsi = np->vsi;
u8 sw_cfg_caps, sw_cfg_fec;
struct ice_port_info *pi;
enum ice_status status;
int err = 0;
pi = vsi->port_info;
if (!pi)
return -EOPNOTSUPP;
/* Changing the FEC parameters is not supported if not the PF VSI */
if (vsi->type != ICE_VSI_PF) {
netdev_info(netdev, "Changing FEC parameters only supported for PF VSI\n");
return -EOPNOTSUPP;
}
/* Get last SW configuration */
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG,
caps, NULL);
if (status) {
err = -EAGAIN;
goto done;
}
/* Copy SW configuration returned from PHY caps to PHY config */
ice_copy_phy_caps_to_cfg(caps, &config);
sw_cfg_caps = caps->caps;
sw_cfg_fec = caps->link_fec_options;
/* Get topology caps, then copy PHY FEC topology caps to PHY config */
memset(caps, 0, sizeof(*caps));
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
caps, NULL);
if (status) {
err = -EAGAIN;
goto done;
}
config.caps |= (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC);
config.link_fec_opt = caps->link_fec_options;
ice_cfg_phy_fec(&config, req_fec);
/* If FEC mode has changed, then set PHY configuration and enable AN. */
if ((config.caps & ICE_AQ_PHY_ENA_AUTO_FEC) !=
(sw_cfg_caps & ICE_AQC_PHY_EN_AUTO_FEC) ||
config.link_fec_opt != sw_cfg_fec) {
if (caps->caps & ICE_AQC_PHY_AN_MODE)
config.caps |= ICE_AQ_PHY_ENA_AUTO_LINK_UPDT;
status = ice_aq_set_phy_cfg(pi->hw, pi->lport, &config, NULL);
if (status)
err = -EAGAIN;
}
done:
devm_kfree(&vsi->back->pdev->dev, caps);
return err;
}
/**
* ice_set_fecparam - Set FEC link options
* @netdev: network interface device structure
* @fecparam: Ethtool structure to retrieve FEC parameters
*/
static int
ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_vsi *vsi = np->vsi;
enum ice_fec_mode fec;
switch (fecparam->fec) {
case ETHTOOL_FEC_AUTO:
fec = ICE_FEC_AUTO;
break;
case ETHTOOL_FEC_RS:
fec = ICE_FEC_RS;
break;
case ETHTOOL_FEC_BASER:
fec = ICE_FEC_BASER;
break;
case ETHTOOL_FEC_OFF:
case ETHTOOL_FEC_NONE:
fec = ICE_FEC_NONE;
break;
default:
dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
fecparam->fec);
return -EINVAL;
}
return ice_set_fec_cfg(netdev, fec);
}
/**
* ice_get_fecparam - Get link FEC options
* @netdev: network interface device structure
* @fecparam: Ethtool structure to retrieve FEC parameters
*/
static int
ice_get_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
int err = 0;
pi = vsi->port_info;
if (!pi)
return -EOPNOTSUPP;
link_info = &pi->phy.link_info;
/* Set FEC mode based on negotiated link info */
switch (link_info->fec_info) {
case ICE_AQ_LINK_25G_KR_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_BASER;
break;
case ICE_AQ_LINK_25G_RS_528_FEC_EN:
/* fall through */
case ICE_AQ_LINK_25G_RS_544_FEC_EN:
fecparam->active_fec = ETHTOOL_FEC_RS;
break;
default:
fecparam->active_fec = ETHTOOL_FEC_OFF;
break;
}
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
if (!caps)
return -ENOMEM;
status = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_TOPO_CAP,
caps, NULL);
if (status) {
err = -EAGAIN;
goto done;
}
/* Set supported/configured FEC modes based on PHY capability */
if (caps->caps & ICE_AQC_PHY_EN_AUTO_FEC)
fecparam->fec |= ETHTOOL_FEC_AUTO;
if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
fecparam->fec |= ETHTOOL_FEC_BASER;
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
fecparam->fec |= ETHTOOL_FEC_RS;
if (caps->link_fec_options == 0)
fecparam->fec |= ETHTOOL_FEC_OFF;
done:
devm_kfree(&vsi->back->pdev->dev, caps);
return err;
}
/** /**
* ice_get_priv_flags - report device private flags * ice_get_priv_flags - report device private flags
* @netdev: network interface device structure * @netdev: network interface device structure
...@@ -1026,6 +1205,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -1026,6 +1205,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) { if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags)) {
enum ice_status status; enum ice_status status;
/* Disable FW LLDP engine */
status = ice_aq_cfg_lldp_mib_change(&pf->hw, false, status = ice_aq_cfg_lldp_mib_change(&pf->hw, false,
NULL); NULL);
/* If unregistering for LLDP events fails, this is /* If unregistering for LLDP events fails, this is
...@@ -1050,6 +1230,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -1050,6 +1230,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true); status = ice_init_pf_dcb(pf, true);
if (status) if (status)
dev_warn(&pf->pdev->dev, "Fail to init DCB\n"); dev_warn(&pf->pdev->dev, "Fail to init DCB\n");
/* Forward LLDP packets to default VSI so that they
* are passed up the stack
*/
ice_cfg_sw_lldp(vsi, false, true);
} else { } else {
enum ice_status status; enum ice_status status;
bool dcbx_agent_status; bool dcbx_agent_status;
...@@ -1062,7 +1247,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -1062,7 +1247,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
dev_warn(&pf->pdev->dev, dev_warn(&pf->pdev->dev,
"Fail to start LLDP Agent\n"); "Fail to start LLDP Agent\n");
/* AQ command to start FW DCBx agent will fail if /* AQ command to start FW DCBX agent will fail if
* the agent is already started * the agent is already started
*/ */
status = ice_aq_start_stop_dcbx(&pf->hw, true, status = ice_aq_start_stop_dcbx(&pf->hw, true,
...@@ -1083,6 +1268,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags) ...@@ -1083,6 +1268,11 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
status = ice_init_pf_dcb(pf, true); status = ice_init_pf_dcb(pf, true);
if (status) if (status)
dev_dbg(&pf->pdev->dev, "Fail to init DCB\n"); dev_dbg(&pf->pdev->dev, "Fail to init DCB\n");
/* Remove rule to direct LLDP packets to default VSI.
* The FW LLDP engine will now be consuming them.
*/
ice_cfg_sw_lldp(vsi, false, false);
} }
} }
clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags); clear_bit(ICE_FLAG_ETHTOOL_CTXT, pf->flags);
...@@ -1885,6 +2075,7 @@ ice_get_link_ksettings(struct net_device *netdev, ...@@ -1885,6 +2075,7 @@ ice_get_link_ksettings(struct net_device *netdev,
struct ethtool_link_ksettings *ks) struct ethtool_link_ksettings *ks)
{ {
struct ice_netdev_priv *np = netdev_priv(netdev); struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_aqc_get_phy_caps_data *caps;
struct ice_link_status *hw_link_info; struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi; struct ice_vsi *vsi = np->vsi;
...@@ -1955,6 +2146,40 @@ ice_get_link_ksettings(struct net_device *netdev, ...@@ -1955,6 +2146,40 @@ ice_get_link_ksettings(struct net_device *netdev,
break; break;
} }
caps = devm_kzalloc(&vsi->back->pdev->dev, sizeof(*caps), GFP_KERNEL);
if (!caps)
goto done;
if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_TOPO_CAP,
caps, NULL))
netdev_info(netdev, "Get phy capability failed.\n");
/* Set supported FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_NONE);
if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_EN ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_CLAUSE74_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_BASER);
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_CLAUSE91_EN)
ethtool_link_ksettings_add_link_mode(ks, supported, FEC_RS);
if (ice_aq_get_phy_caps(vsi->port_info, false, ICE_AQC_REPORT_SW_CFG,
caps, NULL))
netdev_info(netdev, "Get phy capability failed.\n");
/* Set advertised FEC modes based on PHY capability */
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_NONE);
if (caps->link_fec_options & ICE_AQC_PHY_FEC_10G_KR_40G_KR4_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_KR_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising,
FEC_BASER);
if (caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_528_REQ ||
caps->link_fec_options & ICE_AQC_PHY_FEC_25G_RS_544_REQ)
ethtool_link_ksettings_add_link_mode(ks, advertising, FEC_RS);
done:
devm_kfree(&vsi->back->pdev->dev, caps);
return 0; return 0;
} }
...@@ -3167,6 +3392,8 @@ static const struct ethtool_ops ice_ethtool_ops = { ...@@ -3167,6 +3392,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_ts_info = ethtool_op_get_ts_info, .get_ts_info = ethtool_op_get_ts_info,
.get_per_queue_coalesce = ice_get_per_q_coalesce, .get_per_queue_coalesce = ice_get_per_q_coalesce,
.set_per_queue_coalesce = ice_set_per_q_coalesce, .set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
}; };
/** /**
......
...@@ -321,10 +321,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id) ...@@ -321,10 +321,10 @@ static void ice_vsi_set_num_qs(struct ice_vsi *vsi, u16 vf_id)
vsi->alloc_rxq = vf->num_vf_qs; vsi->alloc_rxq = vf->num_vf_qs;
/* pf->num_vf_msix includes (VF miscellaneous vector + /* pf->num_vf_msix includes (VF miscellaneous vector +
* data queue interrupts). Since vsi->num_q_vectors is number * data queue interrupts). Since vsi->num_q_vectors is number
* of queues vectors, subtract 1 from the original vector * of queues vectors, subtract 1 (ICE_NONQ_VECS_VF) from the
* count * original vector count
*/ */
vsi->num_q_vectors = pf->num_vf_msix - 1; vsi->num_q_vectors = pf->num_vf_msix - ICE_NONQ_VECS_VF;
break; break;
case ICE_VSI_LB: case ICE_VSI_LB:
vsi->alloc_txq = 1; vsi->alloc_txq = 1;
...@@ -1835,9 +1835,74 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector) ...@@ -1835,9 +1835,74 @@ ice_cfg_itr(struct ice_hw *hw, struct ice_q_vector *q_vector)
} }
} }
/**
* ice_cfg_txq_interrupt - configure interrupt on Tx queue
* @vsi: the VSI being configured
* @txq: Tx queue being mapped to MSI-X vector
* @msix_idx: MSI-X vector index within the function
* @itr_idx: ITR index of the interrupt cause
*
* Configure interrupt on Tx queue by associating Tx queue to MSI-X vector
* within the function space.
*/
#ifdef CONFIG_PCI_IOV
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
itr_idx = (itr_idx << QINT_TQCTL_ITR_INDX_S) & QINT_TQCTL_ITR_INDX_M;
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
((msix_idx << QINT_TQCTL_MSIX_INDX_S) & QINT_TQCTL_MSIX_INDX_M);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
}
/**
* ice_cfg_rxq_interrupt - configure interrupt on Rx queue
* @vsi: the VSI being configured
* @rxq: Rx queue being mapped to MSI-X vector
* @msix_idx: MSI-X vector index within the function
* @itr_idx: ITR index of the interrupt cause
*
* Configure interrupt on Rx queue by associating Rx queue to MSI-X vector
* within the function space.
*/
#ifdef CONFIG_PCI_IOV
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#else
static void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx)
#endif /* CONFIG_PCI_IOV */
{
struct ice_pf *pf = vsi->back;
struct ice_hw *hw = &pf->hw;
u32 val;
itr_idx = (itr_idx << QINT_RQCTL_ITR_INDX_S) & QINT_RQCTL_ITR_INDX_M;
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
((msix_idx << QINT_RQCTL_MSIX_INDX_S) & QINT_RQCTL_MSIX_INDX_M);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
ice_flush(hw);
}
/** /**
* ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW * ice_vsi_cfg_msix - MSIX mode Interrupt Config in the HW
* @vsi: the VSI being configured * @vsi: the VSI being configured
*
* This configures MSIX mode interrupts for the PF VSI, and should not be used
* for the VF VSI.
*/ */
void ice_vsi_cfg_msix(struct ice_vsi *vsi) void ice_vsi_cfg_msix(struct ice_vsi *vsi)
{ {
...@@ -1850,7 +1915,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ...@@ -1850,7 +1915,6 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector = vsi->q_vectors[i];
u16 reg_idx = q_vector->reg_idx; u16 reg_idx = q_vector->reg_idx;
if (vsi->type != ICE_VSI_VF)
ice_cfg_itr(hw, q_vector); ice_cfg_itr(hw, q_vector);
wr32(hw, GLINT_RATE(reg_idx), wr32(hw, GLINT_RATE(reg_idx),
...@@ -1868,43 +1932,17 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi) ...@@ -1868,43 +1932,17 @@ void ice_vsi_cfg_msix(struct ice_vsi *vsi)
* tracked for this PF. * tracked for this PF.
*/ */
for (q = 0; q < q_vector->num_ring_tx; q++) { for (q = 0; q < q_vector->num_ring_tx; q++) {
int itr_idx = (q_vector->tx.itr_idx << ice_cfg_txq_interrupt(vsi, txq, reg_idx,
QINT_TQCTL_ITR_INDX_S) & q_vector->tx.itr_idx);
QINT_TQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
(((i + 1) << QINT_TQCTL_MSIX_INDX_S) &
QINT_TQCTL_MSIX_INDX_M);
else
val = QINT_TQCTL_CAUSE_ENA_M | itr_idx |
((reg_idx << QINT_TQCTL_MSIX_INDX_S) &
QINT_TQCTL_MSIX_INDX_M);
wr32(hw, QINT_TQCTL(vsi->txq_map[txq]), val);
txq++; txq++;
} }
for (q = 0; q < q_vector->num_ring_rx; q++) { for (q = 0; q < q_vector->num_ring_rx; q++) {
int itr_idx = (q_vector->rx.itr_idx << ice_cfg_rxq_interrupt(vsi, rxq, reg_idx,
QINT_RQCTL_ITR_INDX_S) & q_vector->rx.itr_idx);
QINT_RQCTL_ITR_INDX_M;
u32 val;
if (vsi->type == ICE_VSI_VF)
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
(((i + 1) << QINT_RQCTL_MSIX_INDX_S) &
QINT_RQCTL_MSIX_INDX_M);
else
val = QINT_RQCTL_CAUSE_ENA_M | itr_idx |
((reg_idx << QINT_RQCTL_MSIX_INDX_S) &
QINT_RQCTL_MSIX_INDX_M);
wr32(hw, QINT_RQCTL(vsi->rxq_map[rxq]), val);
rxq++; rxq++;
} }
} }
ice_flush(hw);
} }
/** /**
...@@ -2307,6 +2345,56 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule) ...@@ -2307,6 +2345,56 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list); ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
} }
#define ICE_ETH_P_LLDP 0x88CC
/**
* ice_cfg_sw_lldp - Config switch rules for LLDP packet handling
* @vsi: the VSI being configured
* @tx: bool to determine Tx or Rx rule
* @create: bool to determine create or remove Rule
*/
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create)
{
struct ice_fltr_list_entry *list;
struct ice_pf *pf = vsi->back;
LIST_HEAD(tmp_add_list);
enum ice_status status;
list = devm_kzalloc(&pf->pdev->dev, sizeof(*list), GFP_KERNEL);
if (!list)
return;
list->fltr_info.lkup_type = ICE_SW_LKUP_ETHERTYPE;
list->fltr_info.vsi_handle = vsi->idx;
list->fltr_info.l_data.ethertype_mac.ethertype = ICE_ETH_P_LLDP;
if (tx) {
list->fltr_info.fltr_act = ICE_DROP_PACKET;
list->fltr_info.flag = ICE_FLTR_TX;
list->fltr_info.src_id = ICE_SRC_ID_VSI;
} else {
list->fltr_info.fltr_act = ICE_FWD_TO_VSI;
list->fltr_info.flag = ICE_FLTR_RX;
list->fltr_info.src_id = ICE_SRC_ID_LPORT;
}
INIT_LIST_HEAD(&list->list_entry);
list_add(&list->list_entry, &tmp_add_list);
if (create)
status = ice_add_eth_mac(&pf->hw, &tmp_add_list);
else
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
dev_err(&pf->pdev->dev,
"Fail %s %s LLDP rule on VSI %i error: %d\n",
create ? "adding" : "removing", tx ? "TX" : "RX",
vsi->vsi_num, status);
ice_free_fltr_list(&pf->pdev->dev, &tmp_add_list);
}
/** /**
* ice_vsi_setup - Set up a VSI by a given type * ice_vsi_setup - Set up a VSI by a given type
* @pf: board private structure * @pf: board private structure
...@@ -2327,6 +2415,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ...@@ -2327,6 +2415,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
{ {
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct device *dev = &pf->pdev->dev; struct device *dev = &pf->pdev->dev;
enum ice_status status;
struct ice_vsi *vsi; struct ice_vsi *vsi;
int ret, i; int ret, i;
...@@ -2434,12 +2523,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ...@@ -2434,12 +2523,12 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
for (i = 0; i < vsi->tc_cfg.numtc; i++) for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx; max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs); max_txqs);
if (ret) { if (status) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n", "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, ret); vsi->vsi_num, status);
goto unroll_vector_base; goto unroll_vector_base;
} }
...@@ -2448,10 +2537,22 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ...@@ -2448,10 +2537,22 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* out PAUSE or PFC frames. If enabled, FW can still send FC frames. * out PAUSE or PFC frames. If enabled, FW can still send FC frames.
* The rule is added once for PF VSI in order to create appropriate * The rule is added once for PF VSI in order to create appropriate
* recipe, since VSI/VSI list is ignored with drop action... * recipe, since VSI/VSI list is ignored with drop action...
* Also add rules to handle LLDP Tx and Rx packets. Tx LLDP packets
* need to be dropped so that VFs cannot send LLDP packets to reconfig
* DCB settings in the HW. Also, if the FW DCBX engine is not running
* then Rx LLDP packets need to be redirected up the stack.
*/ */
if (vsi->type == ICE_VSI_PF) if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, true); ice_vsi_add_rem_eth_mac(vsi, true);
/* Tx LLDP packets */
ice_cfg_sw_lldp(vsi, true, true);
/* Rx LLDP packets */
if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
ice_cfg_sw_lldp(vsi, false, true);
}
return vsi; return vsi;
unroll_vector_base: unroll_vector_base:
...@@ -2733,6 +2834,21 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi) ...@@ -2733,6 +2834,21 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
} }
} }
/**
* ice_napi_del - Remove NAPI handler for the VSI
* @vsi: VSI for which NAPI handler is to be removed
*/
void ice_napi_del(struct ice_vsi *vsi)
{
int v_idx;
if (!vsi->netdev)
return;
ice_for_each_q_vector(vsi, v_idx)
netif_napi_del(&vsi->q_vectors[v_idx]->napi);
}
/** /**
* ice_vsi_release - Delete a VSI and free its resources * ice_vsi_release - Delete a VSI and free its resources
* @vsi: the VSI being removed * @vsi: the VSI being removed
...@@ -2775,8 +2891,15 @@ int ice_vsi_release(struct ice_vsi *vsi) ...@@ -2775,8 +2891,15 @@ int ice_vsi_release(struct ice_vsi *vsi)
pf->num_avail_sw_msix += vsi->num_q_vectors; pf->num_avail_sw_msix += vsi->num_q_vectors;
} }
if (vsi->type == ICE_VSI_PF) if (vsi->type == ICE_VSI_PF) {
ice_vsi_add_rem_eth_mac(vsi, false); ice_vsi_add_rem_eth_mac(vsi, false);
ice_cfg_sw_lldp(vsi, true, false);
/* The Rx rule will only exist to remove if the LLDP FW
* engine is currently stopped
*/
if (!test_bit(ICE_FLAG_ENABLE_FW_LLDP, pf->flags))
ice_cfg_sw_lldp(vsi, false, false);
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx); ice_remove_vsi_fltr(&pf->hw, vsi->idx);
ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx); ice_rm_vsi_lan_cfg(vsi->port_info, vsi->idx);
...@@ -2815,6 +2938,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ...@@ -2815,6 +2938,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
{ {
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 }; u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
struct ice_vf *vf = NULL; struct ice_vf *vf = NULL;
enum ice_status status;
struct ice_pf *pf; struct ice_pf *pf;
int ret, i; int ret, i;
...@@ -2908,12 +3032,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ...@@ -2908,12 +3032,12 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
for (i = 0; i < vsi->tc_cfg.numtc; i++) for (i = 0; i < vsi->tc_cfg.numtc; i++)
max_txqs[i] = pf->num_lan_tx; max_txqs[i] = pf->num_lan_tx;
ret = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc, status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs); max_txqs);
if (ret) { if (status) {
dev_err(&pf->pdev->dev, dev_err(&pf->pdev->dev,
"VSI %d failed lan queue config, error %d\n", "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, ret); vsi->vsi_num, status);
goto err_vectors; goto err_vectors;
} }
return 0; return 0;
...@@ -2935,7 +3059,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi) ...@@ -2935,7 +3059,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi)
/** /**
* ice_is_reset_in_progress - check for a reset in progress * ice_is_reset_in_progress - check for a reset in progress
* @state: pf state field * @state: PF state field
*/ */
bool ice_is_reset_in_progress(unsigned long *state) bool ice_is_reset_in_progress(unsigned long *state)
{ {
......
...@@ -19,6 +19,14 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi); ...@@ -19,6 +19,14 @@ int ice_vsi_cfg_lan_txqs(struct ice_vsi *vsi);
void ice_vsi_cfg_msix(struct ice_vsi *vsi); void ice_vsi_cfg_msix(struct ice_vsi *vsi);
#ifdef CONFIG_PCI_IOV
void
ice_cfg_txq_interrupt(struct ice_vsi *vsi, u16 txq, u16 msix_idx, u16 itr_idx);
void
ice_cfg_rxq_interrupt(struct ice_vsi *vsi, u16 rxq, u16 msix_idx, u16 itr_idx);
#endif /* CONFIG_PCI_IOV */
int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid); int ice_vsi_add_vlan(struct ice_vsi *vsi, u16 vid);
int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid); int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid);
...@@ -37,6 +45,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src, ...@@ -37,6 +45,8 @@ ice_vsi_stop_lan_tx_rings(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc); int ice_cfg_vlan_pruning(struct ice_vsi *vsi, bool ena, bool vlan_promisc);
void ice_cfg_sw_lldp(struct ice_vsi *vsi, bool tx, bool create);
void ice_vsi_delete(struct ice_vsi *vsi); void ice_vsi_delete(struct ice_vsi *vsi);
int ice_vsi_clear(struct ice_vsi *vsi); int ice_vsi_clear(struct ice_vsi *vsi);
...@@ -49,6 +59,8 @@ struct ice_vsi * ...@@ -49,6 +59,8 @@ struct ice_vsi *
ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi, ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
enum ice_vsi_type type, u16 vf_id); enum ice_vsi_type type, u16 vf_id);
void ice_napi_del(struct ice_vsi *vsi);
int ice_vsi_release(struct ice_vsi *vsi); int ice_vsi_release(struct ice_vsi *vsi);
void ice_vsi_close(struct ice_vsi *vsi); void ice_vsi_close(struct ice_vsi *vsi);
......
...@@ -1973,6 +1973,10 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list) ...@@ -1973,6 +1973,10 @@ ice_add_vlan(struct ice_hw *hw, struct list_head *v_list)
* ice_add_eth_mac - Add ethertype and MAC based filter rule * ice_add_eth_mac - Add ethertype and MAC based filter rule
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
* @em_list: list of ether type MAC filter, MAC is optional * @em_list: list of ether type MAC filter, MAC is optional
*
* This function requires the caller to populate the entries in
* the filter list with the necessary fields (including flags to
* indicate Tx or Rx rules).
*/ */
enum ice_status enum ice_status
ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
...@@ -1990,7 +1994,6 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list) ...@@ -1990,7 +1994,6 @@ ice_add_eth_mac(struct ice_hw *hw, struct list_head *em_list)
l_type != ICE_SW_LKUP_ETHERTYPE) l_type != ICE_SW_LKUP_ETHERTYPE)
return ICE_ERR_PARAM; return ICE_ERR_PARAM;
em_list_itr->fltr_info.flag = ICE_FLTR_TX;
em_list_itr->status = ice_add_rule_internal(hw, l_type, em_list_itr->status = ice_add_rule_internal(hw, l_type,
em_list_itr); em_list_itr);
if (em_list_itr->status) if (em_list_itr->status)
......
...@@ -55,7 +55,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring) ...@@ -55,7 +55,7 @@ void ice_clean_tx_ring(struct ice_ring *tx_ring)
if (!tx_ring->tx_buf) if (!tx_ring->tx_buf)
return; return;
/* Free all the Tx ring sk_bufss */ /* Free all the Tx ring sk_buffs */
for (i = 0; i < tx_ring->count; i++) for (i = 0; i < tx_ring->count; i++)
ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]); ice_unmap_and_free_tx_buf(tx_ring, &tx_ring->tx_buf[i]);
...@@ -1101,7 +1101,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget) ...@@ -1101,7 +1101,7 @@ static int ice_clean_rx_irq(struct ice_ring *rx_ring, int budget)
* ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic * ice_adjust_itr_by_size_and_speed - Adjust ITR based on current traffic
* @port_info: port_info structure containing the current link speed * @port_info: port_info structure containing the current link speed
* @avg_pkt_size: average size of Tx or Rx packets based on clean routine * @avg_pkt_size: average size of Tx or Rx packets based on clean routine
* @itr: itr value to update * @itr: ITR value to update
* *
* Calculate how big of an increment should be applied to the ITR value passed * Calculate how big of an increment should be applied to the ITR value passed
* in based on wmem_default, SKB overhead, Ethernet overhead, and the current * in based on wmem_default, SKB overhead, Ethernet overhead, and the current
...@@ -1316,7 +1316,7 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc) ...@@ -1316,7 +1316,7 @@ ice_update_itr(struct ice_q_vector *q_vector, struct ice_ring_container *rc)
*/ */
static u32 ice_buildreg_itr(u16 itr_idx, u16 itr) static u32 ice_buildreg_itr(u16 itr_idx, u16 itr)
{ {
/* The itr value is reported in microseconds, and the register value is /* The ITR value is reported in microseconds, and the register value is
* recorded in 2 microsecond units. For this reason we only need to * recorded in 2 microsecond units. For this reason we only need to
* shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this * shift by the GLINT_DYN_CTL_INTERVAL_S - ICE_ITR_GRAN_S to apply this
* granularity as a shift instead of division. The mask makes sure the * granularity as a shift instead of division. The mask makes sure the
...@@ -1645,7 +1645,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first, ...@@ -1645,7 +1645,7 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
return; return;
dma_error: dma_error:
/* clear dma mappings for failed tx_buf map */ /* clear DMA mappings for failed tx_buf map */
for (;;) { for (;;) {
tx_buf = &tx_ring->tx_buf[i]; tx_buf = &tx_ring->tx_buf[i];
ice_unmap_and_free_tx_buf(tx_ring, tx_buf); ice_unmap_and_free_tx_buf(tx_ring, tx_buf);
......
...@@ -23,6 +23,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc) ...@@ -23,6 +23,7 @@ static inline bool ice_is_tc_ena(u8 bitmap, u8 tc)
/* debug masks - set these bits in hw->debug_mask to control output */ /* debug masks - set these bits in hw->debug_mask to control output */
#define ICE_DBG_INIT BIT_ULL(1) #define ICE_DBG_INIT BIT_ULL(1)
#define ICE_DBG_FW_LOG BIT_ULL(3)
#define ICE_DBG_LINK BIT_ULL(4) #define ICE_DBG_LINK BIT_ULL(4)
#define ICE_DBG_PHY BIT_ULL(5) #define ICE_DBG_PHY BIT_ULL(5)
#define ICE_DBG_QCTX BIT_ULL(6) #define ICE_DBG_QCTX BIT_ULL(6)
...@@ -61,6 +62,13 @@ enum ice_fc_mode { ...@@ -61,6 +62,13 @@ enum ice_fc_mode {
ICE_FC_DFLT ICE_FC_DFLT
}; };
enum ice_fec_mode {
ICE_FEC_NONE = 0,
ICE_FEC_RS,
ICE_FEC_BASER,
ICE_FEC_AUTO
};
enum ice_set_fc_aq_failures { enum ice_set_fc_aq_failures {
ICE_SET_FC_AQ_FAIL_NONE = 0, ICE_SET_FC_AQ_FAIL_NONE = 0,
ICE_SET_FC_AQ_FAIL_GET, ICE_SET_FC_AQ_FAIL_GET,
...@@ -93,6 +101,7 @@ struct ice_link_status { ...@@ -93,6 +101,7 @@ struct ice_link_status {
/* Refer to ice_aq_phy_type for bits definition */ /* Refer to ice_aq_phy_type for bits definition */
u64 phy_type_low; u64 phy_type_low;
u64 phy_type_high; u64 phy_type_high;
u8 topo_media_conflict;
u16 max_frame_size; u16 max_frame_size;
u16 link_speed; u16 link_speed;
u16 req_speeds; u16 req_speeds;
...@@ -100,6 +109,7 @@ struct ice_link_status { ...@@ -100,6 +109,7 @@ struct ice_link_status {
u8 link_info; u8 link_info;
u8 an_info; u8 an_info;
u8 ext_info; u8 ext_info;
u8 fec_info;
u8 pacing; u8 pacing;
/* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of /* Refer to #define from module_type[ICE_MODULE_TYPE_TOTAL_BYTE] of
* ice_aqc_get_phy_caps structure * ice_aqc_get_phy_caps structure
...@@ -424,7 +434,7 @@ struct ice_hw { ...@@ -424,7 +434,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log; struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL /* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
* register. Used for determining the itr/intrl granularity during * register. Used for determining the ITR/intrl granularity during
* initialization. * initialization.
*/ */
#define ICE_MAX_AGG_BW_200G 0x0 #define ICE_MAX_AGG_BW_200G 0x0
......
...@@ -103,7 +103,7 @@ ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe, ...@@ -103,7 +103,7 @@ ice_set_pfe_link_forced(struct ice_vf *vf, struct virtchnl_pf_event *pfe,
u16 link_speed; u16 link_speed;
if (link_up) if (link_up)
link_speed = ICE_AQ_LINK_SPEED_40GB; link_speed = ICE_AQ_LINK_SPEED_100GB;
else else
link_speed = ICE_AQ_LINK_SPEED_UNKNOWN; link_speed = ICE_AQ_LINK_SPEED_UNKNOWN;
...@@ -140,18 +140,6 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf) ...@@ -140,18 +140,6 @@ static void ice_vc_notify_vf_link_state(struct ice_vf *vf)
sizeof(pfe), NULL); sizeof(pfe), NULL);
} }
/**
* ice_get_vf_vector - get VF interrupt vector register offset
* @vf_msix: number of MSIx vector per VF on a PF
* @vf_id: VF identifier
* @i: index of MSIx vector
*/
static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
{
return ((i == 0) ? VFINT_DYN_CTLN(vf_id) :
VFINT_DYN_CTLN(((vf_msix - 1) * (vf_id)) + (i - 1)));
}
/** /**
* ice_free_vf_res - Free a VF's resources * ice_free_vf_res - Free a VF's resources
* @vf: pointer to the VF info * @vf: pointer to the VF info
...@@ -159,14 +147,14 @@ static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i) ...@@ -159,14 +147,14 @@ static u32 ice_get_vf_vector(int vf_msix, int vf_id, int i)
static void ice_free_vf_res(struct ice_vf *vf) static void ice_free_vf_res(struct ice_vf *vf)
{ {
struct ice_pf *pf = vf->pf; struct ice_pf *pf = vf->pf;
int i, pf_vf_msix; int i, last_vector_idx;
/* First, disable VF's configuration API to prevent OS from /* First, disable VF's configuration API to prevent OS from
* accessing the VF's VSI after it's freed or invalidated. * accessing the VF's VSI after it's freed or invalidated.
*/ */
clear_bit(ICE_VF_STATE_INIT, vf->vf_states); clear_bit(ICE_VF_STATE_INIT, vf->vf_states);
/* free vsi & disconnect it from the parent uplink */ /* free VSI and disconnect it from the parent uplink */
if (vf->lan_vsi_idx) { if (vf->lan_vsi_idx) {
ice_vsi_release(pf->vsi[vf->lan_vsi_idx]); ice_vsi_release(pf->vsi[vf->lan_vsi_idx]);
vf->lan_vsi_idx = 0; vf->lan_vsi_idx = 0;
...@@ -174,13 +162,10 @@ static void ice_free_vf_res(struct ice_vf *vf) ...@@ -174,13 +162,10 @@ static void ice_free_vf_res(struct ice_vf *vf)
vf->num_mac = 0; vf->num_mac = 0;
} }
pf_vf_msix = pf->num_vf_msix; last_vector_idx = vf->first_vector_idx + pf->num_vf_msix - 1;
/* Disable interrupts so that VF starts in a known state */ /* Disable interrupts so that VF starts in a known state */
for (i = 0; i < pf_vf_msix; i++) { for (i = vf->first_vector_idx; i <= last_vector_idx; i++) {
u32 reg_idx; wr32(&pf->hw, GLINT_DYN_CTL(i), GLINT_DYN_CTL_CLEARPBA_M);
reg_idx = ice_get_vf_vector(pf_vf_msix, vf->vf_id, i);
wr32(&pf->hw, reg_idx, VFINT_DYN_CTLN_CLEARPBA_M);
ice_flush(&pf->hw); ice_flush(&pf->hw);
} }
/* reset some of the state variables keeping track of the resources */ /* reset some of the state variables keeping track of the resources */
...@@ -281,15 +266,6 @@ void ice_free_vfs(struct ice_pf *pf) ...@@ -281,15 +266,6 @@ void ice_free_vfs(struct ice_pf *pf)
while (test_and_set_bit(__ICE_VF_DIS, pf->state)) while (test_and_set_bit(__ICE_VF_DIS, pf->state))
usleep_range(1000, 2000); usleep_range(1000, 2000);
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
/* Avoid wait time by stopping all VFs at the same time */ /* Avoid wait time by stopping all VFs at the same time */
for (i = 0; i < pf->num_alloc_vfs; i++) { for (i = 0; i < pf->num_alloc_vfs; i++) {
struct ice_vsi *vsi; struct ice_vsi *vsi;
...@@ -305,6 +281,15 @@ void ice_free_vfs(struct ice_pf *pf) ...@@ -305,6 +281,15 @@ void ice_free_vfs(struct ice_pf *pf)
clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states); clear_bit(ICE_VF_STATE_ENA, pf->vf[i].vf_states);
} }
/* Disable IOV before freeing resources. This lets any VF drivers
* running in the host get themselves cleaned up before we yank
* the carpet out from underneath their feet.
*/
if (!pci_vfs_assigned(pf->pdev))
pci_disable_sriov(pf->pdev);
else
dev_warn(&pf->pdev->dev, "VFs are assigned - not disabling SR-IOV\n");
tmp = pf->num_alloc_vfs; tmp = pf->num_alloc_vfs;
pf->num_vf_qps = 0; pf->num_vf_qps = 0;
pf->num_alloc_vfs = 0; pf->num_alloc_vfs = 0;
...@@ -529,7 +514,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf) ...@@ -529,7 +514,6 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf); vf->first_vector_idx = ice_calc_vf_first_vector_idx(pf, vf);
vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id); vsi = ice_vf_vsi_setup(pf, pf->hw.port_info, vf->vf_id);
if (!vsi) { if (!vsi) {
dev_err(&pf->pdev->dev, "Failed to create VF VSI\n"); dev_err(&pf->pdev->dev, "Failed to create VF VSI\n");
return -ENOMEM; return -ENOMEM;
...@@ -1327,8 +1311,8 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs) ...@@ -1327,8 +1311,8 @@ static int ice_alloc_vfs(struct ice_pf *pf, u16 num_alloc_vfs)
} }
/** /**
* ice_pf_state_is_nominal - checks the pf for nominal state * ice_pf_state_is_nominal - checks the PF for nominal state
* @pf: pointer to pf to check * @pf: pointer to PF to check
* *
* Check the PF's state for a collection of bits that would indicate * Check the PF's state for a collection of bits that would indicate
* the PF is in a state that would inhibit normal operation for * the PF is in a state that would inhibit normal operation for
...@@ -1655,7 +1639,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf) ...@@ -1655,7 +1639,7 @@ static void ice_vc_reset_vf_msg(struct ice_vf *vf)
/** /**
* ice_find_vsi_from_id * ice_find_vsi_from_id
* @pf: the pf structure to search for the VSI * @pf: the PF structure to search for the VSI
* @id: ID of the VSI it is searching for * @id: ID of the VSI it is searching for
* *
* searches for the VSI with the given ID * searches for the VSI with the given ID
...@@ -1970,24 +1954,33 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ...@@ -1970,24 +1954,33 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
u16 vsi_id, vsi_q_id, vector_id; u16 vsi_id, vsi_q_id, vector_id;
struct virtchnl_vector_map *map; struct virtchnl_vector_map *map;
struct ice_pf *pf = vf->pf; struct ice_pf *pf = vf->pf;
u16 num_q_vectors_mapped;
struct ice_vsi *vsi; struct ice_vsi *vsi;
unsigned long qmap; unsigned long qmap;
u16 num_q_vectors;
int i; int i;
irqmap_info = (struct virtchnl_irq_map_info *)msg; irqmap_info = (struct virtchnl_irq_map_info *)msg;
num_q_vectors = irqmap_info->num_vectors - ICE_NONQ_VECS_VF; num_q_vectors_mapped = irqmap_info->num_vectors;
vsi = pf->vsi[vf->lan_vsi_idx]; vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* Check to make sure number of VF vectors mapped is not greater than
* number of VF vectors originally allocated, and check that
* there is actually at least a single VF queue vector mapped
*/
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) || if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states) ||
!vsi || vsi->num_q_vectors < num_q_vectors || pf->num_vf_msix < num_q_vectors_mapped ||
irqmap_info->num_vectors == 0) { !irqmap_info->num_vectors) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param; goto error_param;
} }
for (i = 0; i < num_q_vectors; i++) { for (i = 0; i < num_q_vectors_mapped; i++) {
struct ice_q_vector *q_vector = vsi->q_vectors[i]; struct ice_q_vector *q_vector;
map = &irqmap_info->vecmap[i]; map = &irqmap_info->vecmap[i];
...@@ -1995,7 +1988,21 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ...@@ -1995,7 +1988,21 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
vsi_id = map->vsi_id; vsi_id = map->vsi_id;
/* validate msg params */ /* validate msg params */
if (!(vector_id < pf->hw.func_caps.common_cap if (!(vector_id < pf->hw.func_caps.common_cap
.num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id)) { .num_msix_vectors) || !ice_vc_isvalid_vsi_id(vf, vsi_id) ||
(!vector_id && (map->rxq_map || map->txq_map))) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
}
/* No need to map VF miscellaneous or rogue vector */
if (!vector_id)
continue;
/* Subtract non queue vector from vector_id passed by VF
* to get actual number of VSI queue vector array index
*/
q_vector = vsi->q_vectors[vector_id - ICE_NONQ_VECS_VF];
if (!q_vector) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM; v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param; goto error_param;
} }
...@@ -2011,6 +2018,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ...@@ -2011,6 +2018,8 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_rx++; q_vector->num_ring_rx++;
q_vector->rx.itr_idx = map->rxitr_idx; q_vector->rx.itr_idx = map->rxitr_idx;
vsi->rx_rings[vsi_q_id]->q_vector = q_vector; vsi->rx_rings[vsi_q_id]->q_vector = q_vector;
ice_cfg_rxq_interrupt(vsi, vsi_q_id, vector_id,
q_vector->rx.itr_idx);
} }
qmap = map->txq_map; qmap = map->txq_map;
...@@ -2023,11 +2032,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg) ...@@ -2023,11 +2032,11 @@ static int ice_vc_cfg_irq_map_msg(struct ice_vf *vf, u8 *msg)
q_vector->num_ring_tx++; q_vector->num_ring_tx++;
q_vector->tx.itr_idx = map->txitr_idx; q_vector->tx.itr_idx = map->txitr_idx;
vsi->tx_rings[vsi_q_id]->q_vector = q_vector; vsi->tx_rings[vsi_q_id]->q_vector = q_vector;
ice_cfg_txq_interrupt(vsi, vsi_q_id, vector_id,
q_vector->tx.itr_idx);
} }
} }
if (vsi)
ice_vsi_cfg_msix(vsi);
error_param: error_param:
/* send the response to the VF */ /* send the response to the VF */
return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret, return ice_vc_send_msg_to_vf(vf, VIRTCHNL_OP_CONFIG_IRQ_MAP, v_ret,
......