Commit 11c6c0c2 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-11-20

This series contains updates to the ice driver only.

Akeem updates the driver to determine whether to restart
auto-negotiation with link up or down based on the VSI state.
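
In outline, the restart decision moves from sampling the current PHY
link status to testing the VSI state (a minimal sketch of the logic;
the full hunk is in the ethtool code below):

	/* restart autoneg with link up unless the interface is down */
	if (!test_bit(__ICE_DOWN, vsi->back->state))
		status = ice_aq_set_link_restart_an(pi, true, NULL);
	else
		status = ice_aq_set_link_restart_an(pi, false, NULL);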

Bruce cleans up the control queue code to remove duplicated code, takes
advantage of compiler optimizations by making some structures constant
and noting that they cannot be modified, cleans up formatting issues and
a code comment that needed clarification, and fixes a potential NULL
pointer dereference by adding a check.
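
The constification follows the usual pattern: a lookup table that is
never written after build time can be declared const so it lands in
read-only memory and accidental writes fail to compile. A representative
sketch (the tables actually touched appear in the ethtool hunks below):

	/* const places the table in .rodata; writers get a build error */
	static const u32 ice_regs_dump_list[] = {
		PFGEN_STATE,
		PRTGEN_STATUS,
		QRX_CTRL(0),
	};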

Jaroslaw adds a check to verify whether memory was successfully
allocated.

Yashaswini Raghuram fixes the driver to ensure the LAN_EN flag is not
set when the MAC in a MAC-VLAN filter is a unicast MAC, so that unicast
packets are not forwarded to the wire.
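
Sketched from the switch-rule hunk below, the resulting predicate leaves
lan_en false on a VEB whenever a MAC or MAC-VLAN lookup carries a
unicast address, and forwards to the wire in every other case:

	if (!(hw->evb_veb &&
	      ((fi->lkup_type == ICE_SW_LKUP_MAC &&
		is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
	       (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
		is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
		fi->lan_en = true;	/* all other cases go to the wire */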

Dave fixes the return value of ice_napi_poll() so that it reports the
amount of work done and returns 0 only when no work was done.
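
This follows the NAPI contract: a poller that completed its work and
called napi_complete_done() must report strictly less than its budget.
The fix (see the Tx/Rx hunk below) reports the real count, clamped:

	/* report work done, but stay below budget once NAPI is complete */
	return min(work_done, budget - 1);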

Anirudh cleans up code comments to make them more consistent.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 51428fd6 f25dad19
@@ -96,14 +96,14 @@ extern const char ice_drv_ver[];
 #define ice_for_each_vsi(pf, i) \
 	for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
 
-/* Macros for each tx/rx ring in a VSI */
+/* Macros for each Tx/Rx ring in a VSI */
 #define ice_for_each_txq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
 
 #define ice_for_each_rxq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
 
-/* Macros for each allocated tx/rx ring whether used or not in a VSI */
+/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
 #define ice_for_each_alloc_txq(vsi, i) \
 	for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
@@ -183,8 +183,8 @@ struct ice_vsi {
 	struct ice_sw *vsw;		 /* switch this VSI is on */
 	struct ice_pf *back;		 /* back pointer to PF */
 	struct ice_port_info *port_info; /* back pointer to port_info */
-	struct ice_ring **rx_rings;	 /* rx ring array */
-	struct ice_ring **tx_rings;	 /* tx ring array */
+	struct ice_ring **rx_rings;	 /* Rx ring array */
+	struct ice_ring **tx_rings;	 /* Tx ring array */
 	struct ice_q_vector **q_vectors; /* q_vector array */
 	irqreturn_t (*irq_handler)(int irq, void *data);
@@ -255,8 +255,8 @@ struct ice_q_vector {
 	struct ice_ring_container tx;
 	struct irq_affinity_notify affinity_notify;
 	u16 v_idx;		/* index in the vsi->q_vector array. */
-	u8 num_ring_tx;		/* total number of tx rings in vector */
-	u8 num_ring_rx;		/* total number of rx rings in vector */
+	u8 num_ring_tx;		/* total number of Tx rings in vector */
+	u8 num_ring_rx;		/* total number of Rx rings in vector */
 	char name[ICE_INT_NAME_STR_LEN];
 	/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
 	 * value to the device
@@ -308,10 +308,10 @@ struct ice_pf {
 	u32 hw_oicr_idx;	/* Other interrupt cause vector HW index */
 	u32 num_avail_hw_msix;	/* remaining HW MSIX vectors left unclaimed */
 	u32 num_lan_msix;	/* Total MSIX vectors for base driver */
-	u16 num_lan_tx;		/* num lan tx queues setup */
-	u16 num_lan_rx;		/* num lan rx queues setup */
-	u16 q_left_tx;		/* remaining num tx queues left unclaimed */
-	u16 q_left_rx;		/* remaining num rx queues left unclaimed */
+	u16 num_lan_tx;		/* num lan Tx queues setup */
+	u16 num_lan_rx;		/* num lan Rx queues setup */
+	u16 q_left_tx;		/* remaining num Tx queues left unclaimed */
+	u16 q_left_rx;		/* remaining num Rx queues left unclaimed */
 	u16 next_vsi;		/* Next free slot in pf->vsi[] - 0-based! */
 	u16 num_alloc_vsi;
 	u16 corer_count;	/* Core reset count */
......
@@ -1380,10 +1380,10 @@ struct ice_aq_desc {
 
 /* error codes */
 enum ice_aq_err {
-	ICE_AQ_RC_OK		= 0,  /* success */
+	ICE_AQ_RC_OK		= 0,  /* Success */
 	ICE_AQ_RC_ENOMEM	= 9,  /* Out of memory */
 	ICE_AQ_RC_EBUSY		= 12, /* Device or resource busy */
-	ICE_AQ_RC_EEXIST	= 13, /* object already exists */
+	ICE_AQ_RC_EEXIST	= 13, /* Object already exists */
 	ICE_AQ_RC_ENOSPC	= 16, /* No space left or allocation failure */
 };
......
@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
 	INIT_LIST_HEAD(&sw->vsi_list_map_head);
 
-	ice_init_def_sw_recp(hw);
-
-	return 0;
+	return ice_init_def_sw_recp(hw);
 }
 
 /**
@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
 	hw->evb_veb = true;
 
-	/* Query the allocated resources for tx scheduler */
+	/* Query the allocated resources for Tx scheduler */
 	status = ice_sched_query_res_alloc(hw);
 	if (status) {
 		ice_debug(hw, ICE_DBG_SCHED,
@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
  * ice_copy_rxq_ctx_to_hw
  * @hw: pointer to the hardware structure
  * @ice_rxq_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Copies rxq context from dense structure to hw register space
  */
@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
  * ice_write_rxq_ctx
  * @hw: pointer to the hardware structure
  * @rlan_ctx: pointer to the rxq context
- * @rxq_index: the index of the rx queue
+ * @rxq_index: the index of the Rx queue
  *
  * Converts rxq context from sparse to dense structure and then writes
  * it to hw register space
@@ -1715,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
  * If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  * If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
  */
-static u16
-ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
+static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
 {
 	u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
......
@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
  * The PF_STATs are appended to the netdev stats only when ethtool -S
  * is queried on the base PF netdev.
  */
-static struct ice_stats ice_gstrings_pf_stats[] = {
+static const struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
 	ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
 	ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
 	ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
 };
 
-static u32 ice_regs_dump_list[] = {
+static const u32 ice_regs_dump_list[] = {
 	PFGEN_STATE,
 	PRTGEN_STATUS,
 	QRX_CTRL(0),
@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
 	 * a private ethtool flag). This is due to the nature of the
 	 * ethtool stats API.
 	 *
-	 * User space programs such as ethtool must make 3 separate
+	 * Userspace programs such as ethtool must make 3 separate
 	 * ioctl requests, one for size, one for the strings, and
 	 * finally one for the stats. Since these cross into
-	 * user space, changes to the number or size could result in
+	 * userspace, changes to the number or size could result in
 	 * undefined memory access or incorrect string<->value
 	 * correlations for statistics.
 	 *
@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
 {
 	/* restart autonegotiation */
 	struct ice_netdev_priv *np = netdev_priv(netdev);
-	struct ice_link_status *hw_link_info;
 	struct ice_vsi *vsi = np->vsi;
 	struct ice_port_info *pi;
 	enum ice_status status;
-	bool link_up;
 
 	pi = vsi->port_info;
-	hw_link_info = &pi->phy.link_info;
-	link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
-
-	status = ice_aq_set_link_restart_an(pi, link_up, NULL);
+	/* If VSI state is up, then restart autoneg with link up */
+	if (!test_bit(__ICE_DOWN, vsi->back->state))
+		status = ice_aq_set_link_restart_an(pi, true, NULL);
+	else
+		status = ice_aq_set_link_restart_an(pi, false, NULL);
+
 	if (status) {
 		netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
 			    status, pi->hw->adminq.sq_last_status);
@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
 /**
  * ice_set_pauseparam - Set Flow Control parameter
  * @netdev: network interface device structure
- * @pause: return tx/rx flow control status
+ * @pause: return Tx/Rx flow control status
  */
 static int
 ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
 }
 
 /**
- * ice_get_rxfh_indir_size - get the rx flow hash indirection table size
+ * ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
  * @netdev: network interface device structure
  *
  * Returns the table size.
@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
 }
 
 /**
- * ice_get_rxfh - get the rx flow hash indirection table
+ * ice_get_rxfh - get the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
@@ -1603,7 +1603,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
 }
 
 /**
- * ice_set_rxfh - set the rx flow hash indirection table
+ * ice_set_rxfh - set the Rx flow hash indirection table
  * @netdev: network interface device structure
  * @indir: indirection table
  * @key: hash key
......
@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
 #define ICE_RXQ_CTX_SIZE_DWORDS		8
 #define ICE_RXQ_CTX_SZ			(ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
+#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS	22
+#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS	5
+#define GLTCLAN_CQ_CNTX(i, CQ)		(GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
 
 /* RLAN Rx queue context data
  *
......
@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
 	u16 pf_q;
 	int err;
 
-	/* what is RX queue number in global space of 2K Rx queues */
+	/* what is Rx queue number in global space of 2K Rx queues */
 	pf_q = vsi->rxq_map[ring->q_index];
 
 	/* clear the context structure first */
@@ -1200,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 	struct ice_pf *pf = vsi->back;
 	int i;
 
-	/* Allocate tx_rings */
+	/* Allocate Tx rings */
 	for (i = 0; i < vsi->alloc_txq; i++) {
 		struct ice_ring *ring;
@@ -1219,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
 		vsi->tx_rings[i] = ring;
 	}
 
-	/* Allocate rx_rings */
+	/* Allocate Rx rings */
 	for (i = 0; i < vsi->alloc_rxq; i++) {
 		struct ice_ring *ring;
@@ -2136,9 +2136,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
 		pf->q_left_rx -= vsi->alloc_rxq;
 		break;
 	default:
-		/* if VSI type is not recognized, clean up the resources and
-		 * exit
-		 */
+		/* clean up the resources and exit */
 		goto unroll_vsi_init;
 	}
......
@@ -1786,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
 	pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
 
-	/* only 1 rx queue unless RSS is enabled */
+	/* only 1 Rx queue unless RSS is enabled */
 	if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
 		pf->num_lan_rx = 1;
 	else
@@ -3674,7 +3674,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
 	 */
 	status = ice_update_sw_rule_bridge_mode(hw);
 	if (status) {
-		netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
+		netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
 			   mode, status, hw->adminq.sq_last_status);
 		/* revert hw->evb_veb */
 		hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
@@ -3702,35 +3702,31 @@ static void ice_tx_timeout(struct net_device *netdev)
 	pf->tx_timeout_count++;
 
-	/* find the stopped queue the same way the stack does */
+	/* find the stopped queue the same way dev_watchdog() does */
 	for (i = 0; i < netdev->num_tx_queues; i++) {
-		struct netdev_queue *q;
 		unsigned long trans_start;
+		struct netdev_queue *q;
 
 		q = netdev_get_tx_queue(netdev, i);
 		trans_start = q->trans_start;
 		if (netif_xmit_stopped(q) &&
 		    time_after(jiffies,
-			       (trans_start + netdev->watchdog_timeo))) {
+			       trans_start + netdev->watchdog_timeo)) {
 			hung_queue = i;
 			break;
 		}
 	}
 
-	if (i == netdev->num_tx_queues) {
+	if (i == netdev->num_tx_queues)
 		netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
-	} else {
+	else
 		/* now that we have an index, find the tx_ring struct */
-		for (i = 0; i < vsi->num_txq; i++) {
-			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
-				if (hung_queue ==
-				    vsi->tx_rings[i]->q_index) {
+		for (i = 0; i < vsi->num_txq; i++)
+			if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
+				if (hung_queue == vsi->tx_rings[i]->q_index) {
					tx_ring = vsi->tx_rings[i];
					break;
				}
-			}
-		}
-	}
 
 	/* Reset recovery level if enough time has elapsed after last timeout.
 	 * Also ensure no new reset action happens before next timeout period.
......
@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
  * This function removes the leaf node that was created by the FW
  * during initialization
  */
-static void
-ice_rm_dflt_leaf_node(struct ice_port_info *pi)
+static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
 {
 	struct ice_sched_node *node;
@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
  * This function frees all the nodes except root and TC that were created by
  * the FW during initialization
  */
-static void
-ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
+static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
 {
 	struct ice_sched_node *node;
@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
  * @num_nodes: pointer to num nodes array
  *
  * This function calculates the number of supported nodes needed to add this
- * VSI into tx tree including the VSI, parent and intermediate nodes in below
+ * VSI into Tx tree including the VSI, parent and intermediate nodes in below
  * layers
  */
 static void
@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
 }
 
 /**
- * ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
+ * ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
  * @pi: port information structure
  * @vsi_handle: software VSI handle
 * @tc_node: pointer to TC node
 * @num_nodes: pointer to num nodes array
 *
- * This function adds the VSI supported nodes into tx tree including the
+ * This function adds the VSI supported nodes into Tx tree including the
 * VSI, its parent and intermediate nodes in below layers
 */
 static enum ice_status
......
@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
  * Allocate memory for the entire recipe table and initialize the structures/
  * entries corresponding to basic recipes.
  */
-enum ice_status
-ice_init_def_sw_recp(struct ice_hw *hw)
+enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
 {
 	struct ice_sw_recipe *recps;
 	u8 i;
@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
 /**
  * ice_fill_sw_info - Helper function to populate lb_en and lan_en
  * @hw: pointer to the hardware structure
- * @f_info: filter info structure to fill/update
+ * @fi: filter info structure to fill/update
  *
  * This helper function populates the lb_en and lan_en elements of the provided
 * ice_fltr_info struct using the switch's type and characteristics of the
 * switch rule being configured.
 */
-static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
+static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
 {
-	f_info->lb_en = false;
-	f_info->lan_en = false;
-	if ((f_info->flag & ICE_FLTR_TX) &&
-	    (f_info->fltr_act == ICE_FWD_TO_VSI ||
-	     f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
-	     f_info->fltr_act == ICE_FWD_TO_Q ||
-	     f_info->fltr_act == ICE_FWD_TO_QGRP)) {
-		f_info->lb_en = true;
-		if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
-		      is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
-			f_info->lan_en = true;
+	fi->lb_en = false;
+	fi->lan_en = false;
+	if ((fi->flag & ICE_FLTR_TX) &&
+	    (fi->fltr_act == ICE_FWD_TO_VSI ||
+	     fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
+	     fi->fltr_act == ICE_FWD_TO_Q ||
+	     fi->fltr_act == ICE_FWD_TO_QGRP)) {
+		fi->lb_en = true;
+		/* Do not set lan_en to TRUE if
+		 * 1. The switch is a VEB AND
+		 * 2
+		 * 2.1 The lookup is MAC with unicast addr for MAC, OR
+		 * 2.2 The lookup is MAC_VLAN with unicast addr for MAC
+		 *
+		 * In all other cases, the LAN enable has to be set to true.
+		 */
+		if (!(hw->evb_veb &&
+		      ((fi->lkup_type == ICE_SW_LKUP_MAC &&
+			is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
+		       (fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
+			is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
+			fi->lan_en = true;
 	}
 }
@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	/* Create two back-to-back switch rules and submit them to the HW using
 	 * one memory buffer:
 	 *    1. Large Action
-	 *    2. Look up tx rx
+	 *    2. Look up Tx Rx
 	 */
 	lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
 	rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
 	lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
 
-	/* call the fill switch rule to fill the lookup tx rx structure */
+	/* call the fill switch rule to fill the lookup Tx Rx structure */
 	ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
 			 ice_aqc_opc_update_sw_rules);
@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
  * Call AQ command to add or update previously created VSI list with new VSI.
  *
 * Helper function to do book keeping associated with adding filter information
- * The algorithm to do the booking keeping is described below :
- * When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
+ * The algorithm to do the book keeping is described below :
+ * When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
 *	if only one VSI has been added till now
 *		Allocate a new VSI list and add two VSIs
 *		to this list using switch rule command
@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
 	u16 vsi_handle = new_fltr->vsi_handle;
 	enum ice_adminq_opc opcode;
 
+	if (!m_entry->vsi_list_info)
+		return ICE_ERR_CFG;
+
 	/* A rule already exists with the new VSI being added */
 	if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
 		return 0;
@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
 		tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
 		tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
 		/* Update the previous switch rule to a new VSI list which
-		 * includes current VSI thats requested
+		 * includes current VSI that is requested
 		 */
 		status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
 		if (status)
......
@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
 /**
  * ice_setup_tx_ring - Allocate the Tx descriptors
- * @tx_ring: the tx ring to set up
+ * @tx_ring: the Tx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
 /**
  * ice_setup_rx_ring - Allocate the Rx descriptors
- * @rx_ring: the rx ring to set up
+ * @rx_ring: the Rx ring to set up
  *
  * Return 0 on success, negative on error
  */
@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
 /**
  * ice_reuse_rx_page - page flip buffer and store it back on the ring
- * @rx_ring: rx descriptor ring to store buffers on
+ * @rx_ring: Rx descriptor ring to store buffers on
  * @old_buf: donor buffer to have page reused
  *
  * Synchronizes page for reuse by the adapter
@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
 /**
  * ice_fetch_rx_buf - Allocate skb and populate it
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @rx_desc: descriptor containing info written by hardware
  *
  * This function allocates an skb on the fly, and populates it with the page
@@ -904,7 +904,7 @@ static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
 /**
  * ice_process_skb_fields - Populate skb header fields from Rx descriptor
- * @rx_ring: rx descriptor ring packet is being transacted on
+ * @rx_ring: Rx descriptor ring packet is being transacted on
  * @rx_desc: pointer to the EOP Rx descriptor
  * @skb: pointer to current skb being populated
  * @ptype: the packet type decoded by hardware
@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
 /**
  * ice_receive_skb - Send a completed packet up the stack
- * @rx_ring: rx ring in play
+ * @rx_ring: Rx ring in play
  * @skb: packet to send up
  * @vlan_tag: vlan tag for packet
  *
@@ -946,7 +946,7 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
 /**
  * ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
- * @rx_ring: rx descriptor ring to transact packets on
+ * @rx_ring: Rx descriptor ring to transact packets on
  * @budget: Total limit on number of packets to process
 *
 * This function provides a "bounce buffer" approach to Rx interrupt
@@ -1107,7 +1107,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
 		napi_complete_done(napi, work_done);
 		if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
 			ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
-	return 0;
+
+	return min(work_done, budget - 1);
 }
 
 /* helper function for building cmd/type/offset */
@@ -1122,7 +1123,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
 }
 
 /**
- * __ice_maybe_stop_tx - 2nd level check for tx stop conditions
+ * __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
 *
@@ -1145,7 +1146,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 }
 
 /**
- * ice_maybe_stop_tx - 1st level check for tx stop conditions
+ * ice_maybe_stop_tx - 1st level check for Tx stop conditions
  * @tx_ring: the ring to be checked
  * @size: the size buffer we want to assure is available
 *
@@ -1155,6 +1156,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
 {
 	if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
 		return 0;
+
 	return __ice_maybe_stop_tx(tx_ring, size);
 }
@@ -1568,7 +1570,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
 }
 
 /**
- * ice_xmit_desc_count - calculate number of tx descriptors needed
+ * ice_xmit_desc_count - calculate number of Tx descriptors needed
  * @skb: send buffer
  *
  * Returns number of data descriptors needed for this skb.
......
@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
 	clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
 }
 
-/***********************enable_vf routines*****************************/
-
 /**
  * ice_dis_vf_mappings
  * @vf: pointer to the VF structure
@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
 	/* Clear this bit after VF initialization since we shouldn't reclaim
 	 * and reassign interrupts for synchronous or asynchronous VFR events.
-	 * We don't want to reconfigure interrupts since AVF driver doesn't
+	 * We dont want to reconfigure interrupts since AVF driver doesn't
 	 * expect vector assignment to be changed unless there is a request for
 	 * more vectors.
 	 */
@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
  * ice_process_vflr_event - Free VF resources via IRQ calls
  * @pf: pointer to the PF structure
  *
- * called from the VLFR IRQ handler to
+ * called from the VFLR IRQ handler to
 * free up VF resources and state variables
 */
 void ice_process_vflr_event(struct ice_pf *pf)
@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
 		/* copy Tx queue info from VF into VSI */
 		vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
 		vsi->tx_rings[i]->count = qpi->txq.ring_len;
-		/* copy Rx queue info from VF into vsi */
+		/* copy Rx queue info from VF into VSI */
 		vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
 		vsi->rx_rings[i]->count = qpi->rxq.ring_len;
 		if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
  * @msg: pointer to the msg buffer
  * @set: true if mac filters are being set, false otherwise
 *
- * add guest mac address filter
+ * add guest MAC address filter
 */
 static int
 ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
@@ -1970,7 +1968,7 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
  * VFs get a default number of queues but can use this message to request a
  * different number. If the request is successful, PF will reset the VF and
 * return 0. If unsuccessful, PF will send message informing VF of number of
- * available queue pairs via virtchnl message response to VF.
+ * available queue pairs via virtchnl message response to vf.
 */
 static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
 {
......