Commit c08eebad authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Fixes 2018-08-23

This series contains bug fixes to the ice driver.

Anirudh provides several fixes, starting with static analysis fixes
that replace a bitwise-and with the intended constant value and replace
"magic" numbers with defines.  Fixed the control queue processing by
removing unnecessary reads/writes to registers, as well as getting an
accurate value for "pending".  Added additional checks to avoid NULL
pointer dereferences.  Fixed up code formatting issues by cleaning up
code comments and coding style.

Bruce cleans up a duplicate check for owner within the same function.
He also removes interrupt causes that are not handled or not applicable.
Fixed a checkpatch warning about the use of bool in structures, due to
the wasted space and the implementation-defined size of bool, by
converting the affected struct members to u8 instead.

Jake fixes a number of potential bugs in the reporting of stats via
ethtool by simply reporting all the queue statistics, even for the
queues that are not activated.  Fixed a compiler warning, and made the
code a bit cleaner by just using order_base_2() to calculate the
rounded-up power-of-2.

Preethi adds a check to avoid a NULL pointer dereference crash during
initialization.

Brett clarifies the code when it comes to port VLANs and regular VLANs
by renaming defines and field values to match their intended use.

Jesse initializes a variable to avoid garbage values being returned to
the caller.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0d092f06 3968540b
@@ -89,6 +89,13 @@ extern const char ice_drv_ver[];
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
+/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+#define ice_for_each_alloc_txq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
+
+#define ice_for_each_alloc_rxq(vsi, i) \
+	for ((i) = 0; (i) < (vsi)->alloc_rxq; (i)++)
+
 struct ice_tc_info {
 	u16 qoffset;
 	u16 qcount;
@@ -189,9 +196,9 @@ struct ice_vsi {
 	struct list_head tmp_sync_list;		/* MAC filters to be synced */
 	struct list_head tmp_unsync_list;	/* MAC filters to be unsynced */
 
-	bool irqs_ready;
-	bool current_isup;		/* Sync 'link up' logging */
-	bool stat_offsets_loaded;
+	u8 irqs_ready;
+	u8 current_isup;		/* Sync 'link up' logging */
+	u8 stat_offsets_loaded;
 
 	/* queue information */
 	u8 tx_mapping_mode;		/* ICE_MAP_MODE_[CONTIG|SCATTER] */
@@ -262,7 +269,7 @@ struct ice_pf {
 	struct ice_hw_port_stats stats;
 	struct ice_hw_port_stats stats_prev;
 	struct ice_hw hw;
-	bool stat_prev_loaded;	/* has previous stats been loaded */
+	u8 stat_prev_loaded;	/* has previous stats been loaded */
 	char int_name[ICE_INT_NAME_STR_LEN];
 };
......
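On the bool-to-u8 conversions in this series: checkpatch flags bool structure members because sizeof(_Bool) is implementation-defined in C, while u8 is always exactly one byte, so structure layout stays predictable across ABIs. A standalone userspace sketch of the sizing argument (hypothetical struct names, not driver code; the printed sizes can vary by ABI, which is the point):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint8_t u8;

/* hypothetical stand-ins mirroring the ice_vsi members above */
struct with_bool { bool irqs_ready; bool current_isup; bool stat_offsets_loaded; };
struct with_u8   { u8 irqs_ready; u8 current_isup; u8 stat_offsets_loaded; };

int main(void)
{
	/* on common Linux ABIs both print 3, but only u8 guarantees it */
	printf("bool members: %zu bytes\n", sizeof(struct with_bool));
	printf("u8 members:   %zu bytes\n", sizeof(struct with_u8));
	return 0;
}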
@@ -329,19 +329,19 @@ struct ice_aqc_vsi_props {
 	/* VLAN section */
 	__le16 pvid; /* VLANS include priority bits */
 	u8 pvlan_reserved[2];
-	u8 port_vlan_flags;
-#define ICE_AQ_VSI_PVLAN_MODE_S	0
-#define ICE_AQ_VSI_PVLAN_MODE_M	(0x3 << ICE_AQ_VSI_PVLAN_MODE_S)
-#define ICE_AQ_VSI_PVLAN_MODE_UNTAGGED	0x1
-#define ICE_AQ_VSI_PVLAN_MODE_TAGGED	0x2
-#define ICE_AQ_VSI_PVLAN_MODE_ALL	0x3
+	u8 vlan_flags;
+#define ICE_AQ_VSI_VLAN_MODE_S	0
+#define ICE_AQ_VSI_VLAN_MODE_M	(0x3 << ICE_AQ_VSI_VLAN_MODE_S)
+#define ICE_AQ_VSI_VLAN_MODE_UNTAGGED	0x1
+#define ICE_AQ_VSI_VLAN_MODE_TAGGED	0x2
+#define ICE_AQ_VSI_VLAN_MODE_ALL	0x3
 #define ICE_AQ_VSI_PVLAN_INSERT_PVID	BIT(2)
-#define ICE_AQ_VSI_PVLAN_EMOD_S	3
-#define ICE_AQ_VSI_PVLAN_EMOD_M	(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_PVLAN_EMOD_S)
-#define ICE_AQ_VSI_PVLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_PVLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_S	3
+#define ICE_AQ_VSI_VLAN_EMOD_M	(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_BOTH	(0x0 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR_UP	(0x1 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_STR	(0x2 << ICE_AQ_VSI_VLAN_EMOD_S)
+#define ICE_AQ_VSI_VLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
 	u8 pvlan_reserved2[3];
 	/* ingress egress up sections */
 	__le32 ingress_table; /* bitmap, 3 bits per up */
@@ -594,6 +594,7 @@ struct ice_sw_rule_lg_act {
 #define ICE_LG_ACT_GENERIC_OFFSET_M	(0x7 << ICE_LG_ACT_GENERIC_OFFSET_S)
 #define ICE_LG_ACT_GENERIC_PRIORITY_S	22
 #define ICE_LG_ACT_GENERIC_PRIORITY_M	(0x7 << ICE_LG_ACT_GENERIC_PRIORITY_S)
+#define ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX	7
 
 /* Action = 7 - Set Stat count */
 #define ICE_LG_ACT_STAT_COUNT		0x7
......
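The renamed VLAN defines above keep the driver's _S (shift) / _M (mask) convention: a field value is moved into position and masked to its width. A userspace sketch, reusing the define values from the hunk above (one common packing direction; the driver also uses the equivalent "(val & M) >> S" form elsewhere), showing how the two fields combine in the one-byte vlan_flags:

#include <assert.h>
#include <stdint.h>

#define ICE_AQ_VSI_VLAN_MODE_S		0
#define ICE_AQ_VSI_VLAN_MODE_M		(0x3 << ICE_AQ_VSI_VLAN_MODE_S)
#define ICE_AQ_VSI_VLAN_MODE_ALL	0x3
#define ICE_AQ_VSI_VLAN_EMOD_S		3
#define ICE_AQ_VSI_VLAN_EMOD_M		(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)
#define ICE_AQ_VSI_VLAN_EMOD_NOTHING	(0x3 << ICE_AQ_VSI_VLAN_EMOD_S)

int main(void)
{
	uint8_t vlan_flags = 0;

	/* VLAN mode occupies bits 1:0 */
	vlan_flags |= (ICE_AQ_VSI_VLAN_MODE_ALL << ICE_AQ_VSI_VLAN_MODE_S) &
		      ICE_AQ_VSI_VLAN_MODE_M;
	/* stripping emulation mode occupies bits 4:3 */
	vlan_flags |= ICE_AQ_VSI_VLAN_EMOD_NOTHING & ICE_AQ_VSI_VLAN_EMOD_M;

	assert(vlan_flags == 0x1b);	/* 0b0001_1011 */
	return 0;
}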
@@ -45,6 +45,9 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
 /**
  * ice_clear_pf_cfg - Clear PF configuration
  * @hw: pointer to the hardware structure
+ *
+ * Clears any existing PF configuration (VSIs, VSI lists, switch rules, port
+ * configuration, flow director filters, etc.).
  */
 enum ice_status ice_clear_pf_cfg(struct ice_hw *hw)
 {
@@ -1483,7 +1486,7 @@ enum ice_status ice_get_link_status(struct ice_port_info *pi, bool *link_up)
 	struct ice_phy_info *phy_info;
 	enum ice_status status = 0;
 
-	if (!pi)
+	if (!pi || !link_up)
 		return ICE_ERR_PARAM;
 
 	phy_info = &pi->phy;
@@ -1619,20 +1622,23 @@ __ice_aq_get_set_rss_lut(struct ice_hw *hw, u16 vsi_id, u8 lut_type, u8 *lut,
 	}
 
 	/* LUT size is only valid for Global and PF table types */
-	if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if (lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512) {
+	switch (lut_size) {
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_128:
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512:
 		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_512_FLAG <<
 			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
 			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else if ((lut_size == ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K) &&
-		   (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF)) {
-		flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
-			  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
-			 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
-	} else {
+		break;
+	case ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K:
+		if (lut_type == ICE_AQC_GSET_RSS_LUT_TABLE_TYPE_PF) {
+			flags |= (ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_2K_FLAG <<
+				  ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_S) &
+				 ICE_AQC_GSET_RSS_LUT_TABLE_SIZE_M;
+			break;
+		}
+		/* fall-through */
+	default:
 		status = ICE_ERR_PARAM;
 		goto ice_aq_get_set_rss_lut_exit;
 	}
......
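The control-flow rework above replaces an if/else-if chain with a switch whose 2K case conditionally falls through to the default error path. A compilable stand-in (simplified names, not the driver function) showing the same pattern:

#include <assert.h>

/* hypothetical enumerators for illustration */
enum { SIZE_128, SIZE_512, SIZE_2K, TYPE_PF, TYPE_VSI };

static int check_lut(int lut_size, int lut_type)
{
	switch (lut_size) {
	case SIZE_128:
	case SIZE_512:
		return 0;
	case SIZE_2K:
		if (lut_type == TYPE_PF)
			return 0;	/* 2K LUTs are valid only for the PF table */
		/* fall through */
	default:
		return -1;		/* ICE_ERR_PARAM in the driver */
	}
}

int main(void)
{
	assert(check_lut(SIZE_2K, TYPE_PF) == 0);
	assert(check_lut(SIZE_2K, TYPE_VSI) == -1);
	return 0;
}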
@@ -597,10 +597,14 @@ static enum ice_status ice_init_check_adminq(struct ice_hw *hw)
 	return 0;
 
 init_ctrlq_free_rq:
-	ice_shutdown_rq(hw, cq);
-	ice_shutdown_sq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
 	return status;
 }
@@ -706,10 +710,14 @@ static void ice_shutdown_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type)
 		return;
 	}
 
-	ice_shutdown_sq(hw, cq);
-	ice_shutdown_rq(hw, cq);
-	mutex_destroy(&cq->sq_lock);
-	mutex_destroy(&cq->rq_lock);
+	if (cq->sq.head) {
+		ice_shutdown_sq(hw, cq);
+		mutex_destroy(&cq->sq_lock);
+	}
+	if (cq->rq.head) {
+		ice_shutdown_rq(hw, cq);
+		mutex_destroy(&cq->rq_lock);
+	}
 }
 
 /**
@@ -1057,8 +1065,11 @@ ice_clean_rq_elem(struct ice_hw *hw, struct ice_ctl_q_info *cq,
 
 clean_rq_elem_out:
 	/* Set pending if needed, unlock and return */
-	if (pending)
+	if (pending) {
+		/* re-read HW head to calculate actual pending messages */
+		ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
 		*pending = (u16)((ntc > ntu ? cq->rq.count : 0) + (ntu - ntc));
+	}
+
clean_rq_elem_err:
 	mutex_unlock(&cq->rq_lock);
......
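The "pending" arithmetic above has to handle ring wraparound: ntc (next to clean) may be numerically ahead of the re-read hardware head ntu once the queue has wrapped. A minimal userspace sketch of the same formula, with a hypothetical helper name and a few spot checks:

#include <assert.h>
#include <stdint.h>

/* pending entries between ntc (next to clean) and ntu (HW head) in a
 * circular queue of 'count' descriptors
 */
static uint16_t pending(uint16_t ntc, uint16_t ntu, uint16_t count)
{
	return (uint16_t)((ntc > ntu ? count : 0) + (ntu - ntc));
}

int main(void)
{
	assert(pending(2, 5, 16) == 3);		/* no wrap */
	assert(pending(14, 1, 16) == 3);	/* head wrapped past the end */
	assert(pending(7, 7, 16) == 0);		/* empty queue */
	return 0;
}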
@@ -26,7 +26,7 @@ static int ice_q_stats_len(struct net_device *netdev)
 {
 	struct ice_netdev_priv *np = netdev_priv(netdev);
 
-	return ((np->vsi->num_txq + np->vsi->num_rxq) *
+	return ((np->vsi->alloc_txq + np->vsi->alloc_rxq) *
 		(sizeof(struct ice_q_stats) / sizeof(u64)));
 }
@@ -218,7 +218,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_txq(vsi, i) {
+		ice_for_each_alloc_txq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "tx-queue-%u.tx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -226,7 +226,7 @@ static void ice_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
 			p += ETH_GSTRING_LEN;
 		}
 
-		ice_for_each_rxq(vsi, i) {
+		ice_for_each_alloc_rxq(vsi, i) {
 			snprintf(p, ETH_GSTRING_LEN,
 				 "rx-queue-%u.rx_packets", i);
 			p += ETH_GSTRING_LEN;
@@ -253,6 +253,24 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 {
 	switch (sset) {
 	case ETH_SS_STATS:
+		/* The number (and order) of strings reported *must* remain
+		 * constant for a given netdevice. This function must not
+		 * report a different number based on run time parameters
+		 * (such as the number of queues in use, or the setting of
+		 * a private ethtool flag). This is due to the nature of the
+		 * ethtool stats API.
+		 *
+		 * User space programs such as ethtool must make 3 separate
+		 * ioctl requests, one for size, one for the strings, and
+		 * finally one for the stats. Since these cross into
+		 * user space, changes to the number or size could result in
+		 * undefined memory access or incorrect string<->value
+		 * correlations for statistics.
+		 *
+		 * Even if it appears to be safe, changes to the size or
+		 * order of strings will suffer from race conditions and are
+		 * not safe.
+		 */
 		return ICE_ALL_STATS_LEN(netdev);
 	default:
 		return -EOPNOTSUPP;
@@ -280,18 +298,26 @@ ice_get_ethtool_stats(struct net_device *netdev,
 	/* populate per queue stats */
 	rcu_read_lock();
 
-	ice_for_each_txq(vsi, j) {
+	ice_for_each_alloc_txq(vsi, j) {
 		ring = READ_ONCE(vsi->tx_rings[j]);
-		if (!ring)
-			continue;
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
-	ice_for_each_rxq(vsi, j) {
+	ice_for_each_alloc_rxq(vsi, j) {
 		ring = READ_ONCE(vsi->rx_rings[j]);
-		data[i++] = ring->stats.pkts;
-		data[i++] = ring->stats.bytes;
+		if (ring) {
+			data[i++] = ring->stats.pkts;
+			data[i++] = ring->stats.bytes;
+		} else {
+			data[i++] = 0;
+			data[i++] = 0;
+		}
 	}
 
 	rcu_read_unlock();
@@ -519,7 +545,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_txq; i++) {
+	for (i = 0; i < vsi->alloc_txq; i++) {
 		/* clone ring and setup updated count */
 		tx_rings[i] = *vsi->tx_rings[i];
 		tx_rings[i].count = new_tx_cnt;
@@ -551,7 +577,7 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
 		goto done;
 	}
 
-	for (i = 0; i < vsi->num_rxq; i++) {
+	for (i = 0; i < vsi->alloc_rxq; i++) {
 		/* clone ring and setup updated count */
 		rx_rings[i] = *vsi->rx_rings[i];
 		rx_rings[i].count = new_rx_cnt;
......
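The ethtool fix above follows directly from the invariant documented in ice_get_sset_count(): the stats array must line up one-to-one with the reported strings, so unallocated rings contribute zeros instead of being skipped. A userspace sketch (hypothetical types, not the driver code) of the fixed loop shape:

#include <stdint.h>
#include <stdio.h>

struct ring { uint64_t pkts, bytes; };

static void fill_stats(struct ring **rings, int alloc_q, uint64_t *data)
{
	int i = 0, j;

	for (j = 0; j < alloc_q; j++) {
		const struct ring *r = rings[j];

		/* never 'continue' here: skipping a slot would shift every
		 * later value against its string
		 */
		data[i++] = r ? r->pkts : 0;
		data[i++] = r ? r->bytes : 0;
	}
}

int main(void)
{
	struct ring r0 = { 10, 1000 };
	struct ring *rings[3] = { &r0, NULL, NULL };	/* 1 of 3 rings active */
	uint64_t data[6];

	fill_stats(rings, 3, data);
	printf("q0: %llu pkts\n", (unsigned long long)data[0]);
	return 0;
}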
@@ -121,10 +121,6 @@
 #define PFINT_FW_CTL_CAUSE_ENA_S	30
 #define PFINT_FW_CTL_CAUSE_ENA_M	BIT(PFINT_FW_CTL_CAUSE_ENA_S)
 #define PFINT_OICR			0x0016CA00
-#define PFINT_OICR_HLP_RDY_S		14
-#define PFINT_OICR_HLP_RDY_M		BIT(PFINT_OICR_HLP_RDY_S)
-#define PFINT_OICR_CPM_RDY_S		15
-#define PFINT_OICR_CPM_RDY_M		BIT(PFINT_OICR_CPM_RDY_S)
 #define PFINT_OICR_ECC_ERR_S		16
 #define PFINT_OICR_ECC_ERR_M		BIT(PFINT_OICR_ECC_ERR_S)
 #define PFINT_OICR_MAL_DETECT_S		19
@@ -133,10 +129,6 @@
 #define PFINT_OICR_GRST_M		BIT(PFINT_OICR_GRST_S)
 #define PFINT_OICR_PCI_EXCEPTION_S	21
 #define PFINT_OICR_PCI_EXCEPTION_M	BIT(PFINT_OICR_PCI_EXCEPTION_S)
-#define PFINT_OICR_GPIO_S		22
-#define PFINT_OICR_GPIO_M		BIT(PFINT_OICR_GPIO_S)
-#define PFINT_OICR_STORM_DETECT_S	24
-#define PFINT_OICR_STORM_DETECT_M	BIT(PFINT_OICR_STORM_DETECT_S)
 #define PFINT_OICR_HMC_ERR_S		26
 #define PFINT_OICR_HMC_ERR_M		BIT(PFINT_OICR_HMC_ERR_S)
 #define PFINT_OICR_PE_CRITERR_S		28
......
@@ -265,6 +265,7 @@ enum ice_rx_flex_desc_status_error_0_bits {
 struct ice_rlan_ctx {
 	u16 head;
 	u16 cpuid; /* bigger than needed, see above for reason */
+#define ICE_RLAN_BASE_S 7
 	u64 base;
 	u16 qlen;
 #define ICE_RLAN_CTX_DBUF_S 7
......
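ICE_RLAN_BASE_S documents that the Rx ring base address is programmed in 128-byte units, which is why ice_setup_rx_ctx() (later in this series) can replace a raw ">> 7" with the named shift. A small standalone check, for illustration only, of the shift round-trip:

#include <assert.h>
#include <stdint.h>

#define ICE_RLAN_BASE_S	7	/* base address is kept in 128-byte granules */

int main(void)
{
	uint64_t dma = 0x12345680ULL;	/* a 128-byte-aligned DMA address */
	uint64_t base = dma >> ICE_RLAN_BASE_S;

	/* the hardware effectively multiplies back by 128 */
	assert((base << ICE_RLAN_BASE_S) == dma);
	return 0;
}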
@@ -901,7 +901,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 		case ice_aqc_opc_get_link_status:
 			if (ice_handle_link_event(pf))
 				dev_err(&pf->pdev->dev,
-					"Could not handle link event");
+					"Could not handle link event\n");
 			break;
 		default:
 			dev_dbg(&pf->pdev->dev,
@@ -916,6 +916,21 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 	return pending && (i == ICE_DFLT_IRQ_WORK);
 }
 
+/**
+ * ice_ctrlq_pending - check if there is a difference between ntc and ntu
+ * @hw: pointer to hardware info
+ * @cq: control queue information
+ *
+ * returns true if there are pending messages in a queue, false if there aren't
+ */
+static bool ice_ctrlq_pending(struct ice_hw *hw, struct ice_ctl_q_info *cq)
+{
+	u16 ntu;
+
+	ntu = (u16)(rd32(hw, cq->rq.head) & cq->rq.head_mask);
+	return cq->rq.next_to_clean != ntu;
+}
+
 /**
  * ice_clean_adminq_subtask - clean the AdminQ rings
  * @pf: board private structure
@@ -923,7 +938,6 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
 static void ice_clean_adminq_subtask(struct ice_pf *pf)
 {
 	struct ice_hw *hw = &pf->hw;
-	u32 val;
 
 	if (!test_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state))
 		return;
@@ -933,9 +947,13 @@ static void ice_clean_adminq_subtask(struct ice_pf *pf)
 
 	clear_bit(__ICE_ADMINQ_EVENT_PENDING, pf->state);
 
-	/* re-enable Admin queue interrupt causes */
-	val = rd32(hw, PFINT_FW_CTL);
-	wr32(hw, PFINT_FW_CTL, (val | PFINT_FW_CTL_CAUSE_ENA_M));
+	/* There might be a situation where new messages arrive to a control
+	 * queue between processing the last message and clearing the
+	 * EVENT_PENDING bit. So before exiting, check queue head again (using
+	 * ice_ctrlq_pending) and process new messages if any.
+	 */
+	if (ice_ctrlq_pending(hw, &hw->adminq))
+		__ice_clean_ctrlq(pf, ICE_CTL_Q_ADMIN);
 
 	ice_flush(hw);
 }
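The reordering above closes a window: a message can arrive between processing the last event and clearing __ICE_ADMINQ_EVENT_PENDING, so the queue head is re-read before returning. A sequential userspace sketch (hypothetical stand-ins for the driver state, not concurrent code) of the clear-then-recheck pattern:

#include <stdbool.h>
#include <stdio.h>

/* hypothetical stand-ins for pf->state and the queue head register */
static bool event_pending;
static unsigned int hw_head, next_to_clean;

static bool ctrlq_pending(void)
{
	/* the driver re-reads the head register via rd32() here */
	return next_to_clean != hw_head;
}

static void clean_adminq_subtask(void)
{
	if (!event_pending)
		return;

	next_to_clean = hw_head;	/* process everything seen so far */
	event_pending = false;		/* clear_bit(__ICE_ADMINQ_EVENT_PENDING) */

	hw_head++;	/* simulate a message landing in the race window */

	/* without this re-check the late message would sit unprocessed
	 * until the next interrupt fires
	 */
	if (ctrlq_pending())
		printf("draining message that arrived in the window\n");
}

int main(void)
{
	event_pending = true;
	clean_adminq_subtask();
	return 0;
}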
@@ -1295,11 +1313,8 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
 		qcount = numq_tc;
 	}
 
-	/* find higher power-of-2 of qcount */
-	pow = ilog2(qcount);
-
-	if (!is_power_of_2(qcount))
-		pow++;
+	/* find the (rounded up) power-of-2 of qcount */
+	pow = order_base_2(qcount);
 
 	for (i = 0; i < ICE_MAX_TRAFFIC_CLASS; i++) {
 		if (!(vsi->tc_cfg.ena_tc & BIT(i))) {
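order_base_2(n), from the kernel's <linux/log2.h>, yields the exponent of the smallest power of two >= n, which is exactly what the removed ilog2()-then-bump code computed by hand. A userspace re-derivation (assumption: a simplified reimplementation for illustration, not the kernel source) with a few spot checks:

#include <assert.h>

static unsigned int ilog2_u(unsigned int n)
{
	unsigned int l = 0;

	while (n >>= 1)		/* floor(log2(n)) for n >= 1 */
		l++;
	return l;
}

static unsigned int order_base_2_u(unsigned int n)
{
	return n <= 1 ? 0 : ilog2_u(n - 1) + 1;
}

int main(void)
{
	assert(order_base_2_u(4) == 2);	/* already a power of two */
	assert(order_base_2_u(5) == 3);	/* rounds up to 8 */
	assert(order_base_2_u(8) == 3);
	return 0;
}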
@@ -1352,14 +1367,15 @@ static void ice_set_dflt_vsi_ctx(struct ice_vsi_ctx *ctxt)
 	ctxt->info.sw_flags = ICE_AQ_VSI_SW_FLAG_SRC_PRUNE;
 	/* Traffic from VSI can be sent to LAN */
 	ctxt->info.sw_flags2 = ICE_AQ_VSI_SW_FLAG_LAN_ENA;
-	/* Allow all packets untagged/tagged */
-	ctxt->info.port_vlan_flags = ((ICE_AQ_VSI_PVLAN_MODE_ALL &
-				       ICE_AQ_VSI_PVLAN_MODE_M) >>
-				      ICE_AQ_VSI_PVLAN_MODE_S);
-	/* Show VLAN/UP from packets in Rx descriptors */
-	ctxt->info.port_vlan_flags |= ((ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH &
-					ICE_AQ_VSI_PVLAN_EMOD_M) >>
-				       ICE_AQ_VSI_PVLAN_EMOD_S);
+
+	/* By default bits 3 and 4 in vlan_flags are 0's which results in legacy
+	 * behavior (show VLAN, DEI, and UP) in descriptor. Also, allow all
+	 * packets untagged/tagged.
+	 */
+	ctxt->info.vlan_flags = ((ICE_AQ_VSI_VLAN_MODE_ALL &
+				  ICE_AQ_VSI_VLAN_MODE_M) >>
+				 ICE_AQ_VSI_VLAN_MODE_S);
+
 	/* Have 1:1 UP mapping for both ingress/egress tables */
 	table |= ICE_UP_TABLE_TRANSLATE(0, 0);
 	table |= ICE_UP_TABLE_TRANSLATE(1, 1);
@@ -2058,15 +2071,13 @@ static int ice_req_irq_msix_misc(struct ice_pf *pf)
 skip_req_irq:
 	ice_ena_misc_vector(pf);
 
-	val = (pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_OICR_CTL_ITR_INDX_M) |
-	      PFINT_OICR_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_OICR_CTL_MSIX_INDX_M) |
+	       PFINT_OICR_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_OICR_CTL, val);
 
 	/* This enables Admin queue Interrupt causes */
-	val = (pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
-	      (ICE_RX_ITR & PFINT_FW_CTL_ITR_INDX_M) |
-	      PFINT_FW_CTL_CAUSE_ENA_M;
+	val = ((pf->oicr_idx & PFINT_FW_CTL_MSIX_INDX_M) |
+	       PFINT_FW_CTL_CAUSE_ENA_M);
 	wr32(hw, PFINT_FW_CTL, val);
 
 	itr_gran = hw->itr_gran_200;
@@ -3246,8 +3257,10 @@ static void ice_clear_interrupt_scheme(struct ice_pf *pf)
 	if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
 		ice_dis_msix(pf);
 
-	devm_kfree(&pf->pdev->dev, pf->irq_tracker);
-	pf->irq_tracker = NULL;
+	if (pf->irq_tracker) {
+		devm_kfree(&pf->pdev->dev, pf->irq_tracker);
+		pf->irq_tracker = NULL;
+	}
 }
 
 /**
@@ -3271,7 +3284,7 @@ static int ice_probe(struct pci_dev *pdev,
 	err = pcim_iomap_regions(pdev, BIT(ICE_BAR0), pci_name(pdev));
 	if (err) {
-		dev_err(&pdev->dev, "I/O map error %d\n", err);
+		dev_err(&pdev->dev, "BAR0 I/O map error %d\n", err);
 		return err;
 	}
@@ -3720,10 +3733,10 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 	enum ice_status status;
 
 	/* Here we are configuring the VSI to let the driver add VLAN tags by
-	 * setting port_vlan_flags to ICE_AQ_VSI_PVLAN_MODE_ALL. The actual VLAN
-	 * tag insertion happens in the Tx hot path, in ice_tx_map.
+	 * setting vlan_flags to ICE_AQ_VSI_VLAN_MODE_ALL. The actual VLAN tag
+	 * insertion happens in the Tx hot path, in ice_tx_map.
 	 */
-	ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_MODE_ALL;
+	ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_MODE_ALL;
 
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
@@ -3735,7 +3748,7 @@ static int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
@@ -3757,12 +3770,15 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 	 */
 	if (ena) {
 		/* Strip VLAN tag from Rx packet and put it in the desc */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_STR_BOTH;
 	} else {
 		/* Disable stripping. Leave tag in packet */
-		ctxt.info.port_vlan_flags = ICE_AQ_VSI_PVLAN_EMOD_NOTHING;
+		ctxt.info.vlan_flags = ICE_AQ_VSI_VLAN_EMOD_NOTHING;
 	}
 
+	/* Allow all packets untagged/tagged */
+	ctxt.info.vlan_flags |= ICE_AQ_VSI_VLAN_MODE_ALL;
+
 	ctxt.info.valid_sections = cpu_to_le16(ICE_AQ_VSI_PROP_VLAN_VALID);
 	ctxt.vsi_num = vsi->vsi_num;
@@ -3773,7 +3789,7 @@ static int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
 		return -EIO;
 	}
 
-	vsi->info.port_vlan_flags = ctxt.info.port_vlan_flags;
+	vsi->info.vlan_flags = ctxt.info.vlan_flags;
 	return 0;
 }
@@ -3986,7 +4002,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	/* clear the context structure first */
 	memset(&rlan_ctx, 0, sizeof(rlan_ctx));
 
-	rlan_ctx.base = ring->dma >> 7;
+	rlan_ctx.base = ring->dma >> ICE_RLAN_BASE_S;
 	rlan_ctx.qlen = ring->count;
@@ -4098,11 +4114,12 @@ static int ice_vsi_cfg(struct ice_vsi *vsi)
 {
 	int err;
 
-	ice_set_rx_mode(vsi->netdev);
-
-	err = ice_restore_vlan(vsi);
-	if (err)
-		return err;
+	if (vsi->netdev) {
+		ice_set_rx_mode(vsi->netdev);
+		err = ice_restore_vlan(vsi);
+		if (err)
+			return err;
+	}
 
 	err = ice_vsi_cfg_txqs(vsi);
 	if (!err)
@@ -4868,7 +4885,7 @@ int ice_down(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_txq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
@@ -4893,7 +4910,7 @@ static int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
  */
 static int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
 {
-	int i, err;
+	int i, err = 0;
 
 	if (!vsi->num_rxq) {
 		dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
@@ -5235,7 +5252,7 @@ static int ice_change_mtu(struct net_device *netdev, int new_mtu)
 	u8 count = 0;
 
 	if (new_mtu == netdev->mtu) {
-		netdev_warn(netdev, "mtu is already %d\n", netdev->mtu);
+		netdev_warn(netdev, "mtu is already %u\n", netdev->mtu);
 		return 0;
 	}
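On the "int i, err = 0;" changes in ice_vsi_setup_tx_rings()/ice_vsi_setup_rx_rings() above (Jesse's fix from the cover letter): if the setup loop runs zero iterations, an uninitialized err would be returned to the caller. A minimal userspace sketch (hypothetical stand-in, not the driver function) of why the initialization matters:

#include <stdio.h>

static int setup_rings(int nrings)
{
	int i, err = 0;	/* was: int i, err; (garbage if the loop is skipped) */

	for (i = 0; i < nrings; i++) {
		err = 0;	/* stand-in for a per-ring setup call */
		if (err)
			break;
	}
	return err;	/* well-defined even when nrings == 0 */
}

int main(void)
{
	printf("%d\n", setup_rings(0));	/* prints 0, not garbage */
	return 0;
}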
......
@@ -131,9 +131,8 @@ ice_read_sr_word_aq(struct ice_hw *hw, u16 offset, u16 *data)
  *
  * This function will request NVM ownership.
  */
-static enum
-ice_status ice_acquire_nvm(struct ice_hw *hw,
-			   enum ice_aq_res_access_type access)
+static enum ice_status
+ice_acquire_nvm(struct ice_hw *hw, enum ice_aq_res_access_type access)
 {
 	if (hw->nvm.blank_nvm_mode)
 		return 0;
......
@@ -1576,8 +1576,7 @@ ice_sched_update_vsi_child_nodes(struct ice_port_info *pi, u16 vsi_id, u8 tc,
 			return status;
 	}
 
-	if (owner == ICE_SCHED_NODE_OWNER_LAN)
-		vsi->max_lanq[tc] = new_numqs;
+	vsi->max_lanq[tc] = new_numqs;
 
 	return status;
 }
......
@@ -645,14 +645,14 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	act |= (1 << ICE_LG_ACT_GENERIC_VALUE_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 	lg_act->pdata.lg_act.act[1] = cpu_to_le32(act);
 
-	act = (7 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
+	act = (ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX <<
+	       ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_OFFSET_M;
 
 	/* Third action Marker value */
 	act |= ICE_LG_ACT_GENERIC;
 	act |= (sw_marker << ICE_LG_ACT_GENERIC_VALUE_S) &
 		ICE_LG_ACT_GENERIC_VALUE_M;
-	act |= (0 << ICE_LG_ACT_GENERIC_OFFSET_S) & ICE_LG_ACT_GENERIC_VALUE_M;
 
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
 	/* call the fill switch rule to fill the lookup tx rx structure */
......
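The one-liner fixed above had two problems: a magic 7 (now named ICE_LG_ACT_GENERIC_OFF_RX_DESC_PROF_IDX) and the wrong mask, which silently cleared the offset field. A standalone check of why masking an OFFSET field with a VALUE mask yields zero; the shift/mask values here are hypothetical, chosen only so the two fields do not overlap:

#include <assert.h>
#include <stdint.h>

/* hypothetical field layout for illustration only */
#define VAL_S	3
#define VAL_M	(0xFFFF << VAL_S)	/* bits 18:3 */
#define OFF_S	19
#define OFF_M	(0x7 << OFF_S)		/* bits 21:19 */

int main(void)
{
	uint32_t act;

	/* buggy: the offset bits fall outside VAL_M and are masked away */
	act = (7 << OFF_S) & VAL_M;
	assert(act == 0);

	/* fixed: the offset field survives under its own mask */
	act = (7 << OFF_S) & OFF_M;
	assert(act == (uint32_t)(7 << OFF_S));
	return 0;
}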
@@ -17,7 +17,7 @@ struct ice_vsi_ctx {
 	u16 vsis_unallocated;
 	u16 flags;
 	struct ice_aqc_vsi_props info;
-	bool alloc_from_pool;
+	u8 alloc_from_pool;
 };
 
 enum ice_sw_fwd_act_type {
@@ -94,8 +94,8 @@ struct ice_fltr_info {
 	u8 qgrp_size;
 
 	/* Rule creations populate these indicators basing on the switch type */
-	bool lb_en;	/* Indicate if packet can be looped back */
-	bool lan_en;	/* Indicate if packet can be forwarded to the uplink */
+	u8 lb_en;	/* Indicate if packet can be looped back */
+	u8 lan_en;	/* Indicate if packet can be forwarded to the uplink */
 };
 
 /* Bookkeeping structure to hold bitmap of VSIs corresponding to VSI list id */
......
@@ -143,7 +143,7 @@ struct ice_ring {
 	u16 next_to_use;
 	u16 next_to_clean;
 
-	bool ring_active;	/* is ring online or not */
+	u8 ring_active;		/* is ring online or not */
 
 	/* stats structs */
 	struct ice_q_stats stats;
......
@@ -83,7 +83,7 @@ struct ice_link_status {
 	u64 phy_type_low;
 	u16 max_frame_size;
 	u16 link_speed;
-	bool lse_ena;	/* Link Status Event notification */
+	u8 lse_ena;	/* Link Status Event notification */
 	u8 link_info;
 	u8 an_info;
 	u8 ext_info;
@@ -101,7 +101,7 @@ struct ice_phy_info {
 	struct ice_link_status link_info_old;
 	u64 phy_type_low;
 	enum ice_media_type media_type;
-	bool get_link_info;
+	u8 get_link_info;
 };
 
 /* Common HW capabilities for SW use */
@@ -167,7 +167,7 @@ struct ice_nvm_info {
 	u32 oem_ver;	/* OEM version info */
 	u16 sr_words;	/* Shadow RAM size in words */
 	u16 ver;	/* NVM package version */
-	bool blank_nvm_mode;	/* is NVM empty (no FW present) */
+	u8 blank_nvm_mode;	/* is NVM empty (no FW present) */
 };
 
 /* Max number of port to queue branches w.r.t topology */
@@ -181,7 +181,7 @@ struct ice_sched_node {
 	struct ice_aqc_txsched_elem_data info;
 	u32 agg_id;	/* aggregator group id */
 	u16 vsi_id;
-	bool in_use;	/* suspended or in use */
+	u8 in_use;	/* suspended or in use */
 	u8 tx_sched_layer;	/* Logical Layer (1-9) */
 	u8 num_children;
 	u8 tc_num;
@@ -218,7 +218,7 @@ struct ice_sched_vsi_info {
 struct ice_sched_tx_policy {
 	u16 max_num_vsis;
 	u8 max_num_lan_qs_per_tc[ICE_MAX_TRAFFIC_CLASS];
-	bool rdma_ena;
+	u8 rdma_ena;
 };
 
 struct ice_port_info {
@@ -243,7 +243,7 @@ struct ice_port_info {
 	struct list_head agg_list;	/* lists all aggregator */
 	u8 lport;
 #define ICE_LPORT_MASK		0xff
-	bool is_vf;
+	u8 is_vf;
 };
 
 struct ice_switch_info {
@@ -287,7 +287,7 @@ struct ice_hw {
 	u8 max_cgds;
 	u8 sw_entry_point_layer;
 
-	bool evb_veb;	/* true for VEB, false for VEPA */
+	u8 evb_veb;	/* true for VEB, false for VEPA */
 	struct ice_bus_info bus;
 	struct ice_nvm_info nvm;
 	struct ice_hw_dev_caps dev_caps;	/* device capabilities */
@@ -318,7 +318,7 @@ struct ice_hw {
 	u8 itr_gran_100;
 	u8 itr_gran_50;
 	u8 itr_gran_25;
-	bool ucast_shared;	/* true if VSIs can share unicast addr */
+	u8 ucast_shared;	/* true if VSIs can share unicast addr */
 };
......