Commit 11c6c0c2 authored by David S. Miller

Merge branch '100GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
100GbE Intel Wired LAN Driver Updates 2018-11-20

This series contains updates to the ice driver only.

Akeem updates the driver to determine whether or not to do
auto-negotiation based on the VSI state.

Bruce cleans up the control queue code to remove duplicated code, takes
advantage of compiler optimizations by making some structures constant
(noting that they must not be modified), cleans up formatting issues and
a code comment that needed clarification, and fixes a potential NULL
pointer dereference by adding a check.

Jaroslaw adds a check to verify whether memory was allocated.

Yashaswini Raghuram fixes the driver to ensure the LAN_EN flag is not
set when the MAC in a MAC-VLAN filter is a unicast MAC, so that unicast
packets are not forwarded to the wire.
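
A rough sketch of the resulting rule, with the driver structures reduced
to plain booleans (the actual change is in the ice_fill_sw_info() hunk in
ice_switch.c later in this diff):

	#include <linux/types.h>

	/* Illustrative only: mirrors the LAN_EN decision, not driver code */
	static bool want_lan_en(bool is_veb, bool lkup_is_mac,
				bool lkup_is_mac_vlan, bool mac_is_unicast)
	{
		/* On a VEB, a unicast MAC or MAC-VLAN filter must not set
		 * LAN_EN, so the frame stays on the internal switch instead
		 * of going out to the wire.
		 */
		if (is_veb && (lkup_is_mac || lkup_is_mac_vlan) &&
		    mac_is_unicast)
			return false;
		return true;
	}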

Dave fixes the return value of ice_napi_poll() so that it reports the
work that was actually done, and returns 0 only when no work was done.
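
For reference, the NAPI contract is that a poll handler returns how much
of its budget it consumed; returning the full budget keeps the instance
in polling mode, while completing with napi_complete_done() requires
returning strictly less than the budget. A minimal sketch of that
pattern (not the driver's exact code):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	static int example_napi_poll(struct napi_struct *napi, int budget)
	{
		int work_done = 0;

		/* ... clean the Tx/Rx rings here, counting packets ... */

		if (work_done == budget)
			return budget;	/* budget exhausted: keep polling */

		if (napi_complete_done(napi, work_done)) {
			/* ... re-arm the device interrupt here ... */
		}

		return min(work_done, budget - 1);
	}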

Anirudh cleans up code comments to make them more consistent.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 51428fd6 f25dad19
......@@ -96,14 +96,14 @@ extern const char ice_drv_ver[];
#define ice_for_each_vsi(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vsi; (i)++)
/* Macros for each tx/rx ring in a VSI */
/* Macros for each Tx/Rx ring in a VSI */
#define ice_for_each_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_txq; (i)++)
#define ice_for_each_rxq(vsi, i) \
for ((i) = 0; (i) < (vsi)->num_rxq; (i)++)
/* Macros for each allocated tx/rx ring whether used or not in a VSI */
/* Macros for each allocated Tx/Rx ring whether used or not in a VSI */
#define ice_for_each_alloc_txq(vsi, i) \
for ((i) = 0; (i) < (vsi)->alloc_txq; (i)++)
......@@ -150,10 +150,10 @@ enum ice_state {
__ICE_RESET_FAILED, /* set by reset/rebuild */
/* When checking for the PF to be in a nominal operating state, the
* bits that are grouped at the beginning of the list need to be
* checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
* be checked. If you need to add a bit into consideration for nominal
* checked. Bits occurring before __ICE_STATE_NOMINAL_CHECK_BITS will
* be checked. If you need to add a bit into consideration for nominal
* operating state, it must be added before
* __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* __ICE_STATE_NOMINAL_CHECK_BITS. Do not move this entry's position
* without appropriate consideration.
*/
__ICE_STATE_NOMINAL_CHECK_BITS,
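
A sketch of how a nominal-state check can rely on that ordering
(illustrative only; the helper below is hypothetical and assumes the
enum is terminated by __ICE_STATE_NBITS and that pf->state is a bitmap
of that size):

	#include <linux/bitmap.h>

	static bool example_pf_state_is_nominal(struct ice_pf *pf)
	{
		DECLARE_BITMAP(check_bits, __ICE_STATE_NBITS) = { 0 };

		/* only bits placed before __ICE_STATE_NOMINAL_CHECK_BITS
		 * count toward the nominal operating state
		 */
		bitmap_set(check_bits, 0, __ICE_STATE_NOMINAL_CHECK_BITS);

		return !bitmap_intersects(pf->state, check_bits,
					  __ICE_STATE_NBITS);
	}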
......@@ -183,8 +183,8 @@ struct ice_vsi {
struct ice_sw *vsw; /* switch this VSI is on */
struct ice_pf *back; /* back pointer to PF */
struct ice_port_info *port_info; /* back pointer to port_info */
struct ice_ring **rx_rings; /* rx ring array */
struct ice_ring **tx_rings; /* tx ring array */
struct ice_ring **rx_rings; /* Rx ring array */
struct ice_ring **tx_rings; /* Tx ring array */
struct ice_q_vector **q_vectors; /* q_vector array */
irqreturn_t (*irq_handler)(int irq, void *data);
......@@ -201,8 +201,8 @@ struct ice_vsi {
int sw_base_vector; /* Irq base for OS reserved vectors */
int hw_base_vector; /* HW (absolute) index of a vector */
enum ice_vsi_type type;
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
u16 vsi_num; /* HW (absolute) index of this VSI */
u16 idx; /* software index in pf->vsi[] */
/* Interrupt thresholds */
u16 work_lmt;
......@@ -255,8 +255,8 @@ struct ice_q_vector {
struct ice_ring_container tx;
struct irq_affinity_notify affinity_notify;
u16 v_idx; /* index in the vsi->q_vector array. */
u8 num_ring_tx; /* total number of tx rings in vector */
u8 num_ring_rx; /* total number of rx rings in vector */
u8 num_ring_tx; /* total number of Tx rings in vector */
u8 num_ring_rx; /* total number of Rx rings in vector */
char name[ICE_INT_NAME_STR_LEN];
/* in usecs, need to use ice_intrl_to_usecs_reg() before writing this
* value to the device
......@@ -308,10 +308,10 @@ struct ice_pf {
u32 hw_oicr_idx; /* Other interrupt cause vector HW index */
u32 num_avail_hw_msix; /* remaining HW MSIX vectors left unclaimed */
u32 num_lan_msix; /* Total MSIX vectors for base driver */
u16 num_lan_tx; /* num lan tx queues setup */
u16 num_lan_rx; /* num lan rx queues setup */
u16 q_left_tx; /* remaining num tx queues left unclaimed */
u16 q_left_rx; /* remaining num rx queues left unclaimed */
u16 num_lan_tx; /* num lan Tx queues setup */
u16 num_lan_rx; /* num lan Rx queues setup */
u16 q_left_tx; /* remaining num Tx queues left unclaimed */
u16 q_left_rx; /* remaining num Rx queues left unclaimed */
u16 next_vsi; /* Next free slot in pf->vsi[] - 0-based! */
u16 num_alloc_vsi;
u16 corer_count; /* Core reset count */
......
......@@ -5,7 +5,7 @@
#define _ICE_ADMINQ_CMD_H_
/* This header file defines the Admin Queue commands, error codes and
* descriptor format. It is shared between Firmware and Software.
* descriptor format. It is shared between Firmware and Software.
*/
#define ICE_MAX_VSI 768
......@@ -463,7 +463,7 @@ struct ice_aqc_sw_rules {
};
/* Add/Update/Get/Remove lookup Rx/Tx command/response entry
* This structures describes the lookup rules and associated actions. "index"
* This structures describes the lookup rules and associated actions. "index"
* is returned as part of a response to a successful Add command, and can be
* used to identify the rule for Update/Get/Remove commands.
*/
......@@ -1111,7 +1111,7 @@ struct ice_aqc_get_set_rss_keys {
};
/* Get/Set RSS LUT (indirect 0x0B05/0x0B03) */
struct ice_aqc_get_set_rss_lut {
struct ice_aqc_get_set_rss_lut {
#define ICE_AQC_GSET_RSS_LUT_VSI_VALID BIT(15)
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_S 0
#define ICE_AQC_GSET_RSS_LUT_VSI_ID_M (0x1FF << ICE_AQC_GSET_RSS_LUT_VSI_ID_S)
......@@ -1315,10 +1315,10 @@ struct ice_aqc_get_clear_fw_log {
* @params: command-specific parameters
*
* Descriptor format for commands the driver posts on the Admin Transmit Queue
* (ATQ). The firmware writes back onto the command descriptor and returns
* the result of the command. Asynchronous events that are not an immediate
* (ATQ). The firmware writes back onto the command descriptor and returns
* the result of the command. Asynchronous events that are not an immediate
* result of the command are written to the Admin Receive Queue (ARQ) using
* the same descriptor format. Descriptors are in little-endian notation with
* the same descriptor format. Descriptors are in little-endian notation with
* 32-bit words.
*/
struct ice_aq_desc {
......@@ -1380,10 +1380,10 @@ struct ice_aq_desc {
/* error codes */
enum ice_aq_err {
ICE_AQ_RC_OK = 0, /* success */
ICE_AQ_RC_OK = 0, /* Success */
ICE_AQ_RC_ENOMEM = 9, /* Out of memory */
ICE_AQ_RC_EBUSY = 12, /* Device or resource busy */
ICE_AQ_RC_EEXIST = 13, /* object already exists */
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
};
......
......@@ -405,9 +405,7 @@ static enum ice_status ice_init_fltr_mgmt_struct(struct ice_hw *hw)
INIT_LIST_HEAD(&sw->vsi_list_map_head);
ice_init_def_sw_recp(hw);
return 0;
return ice_init_def_sw_recp(hw);
}
/**
......@@ -715,7 +713,7 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
hw->evb_veb = true;
/* Query the allocated resources for tx scheduler */
/* Query the allocated resources for Tx scheduler */
status = ice_sched_query_res_alloc(hw);
if (status) {
ice_debug(hw, ICE_DBG_SCHED,
......@@ -958,7 +956,7 @@ enum ice_status ice_reset(struct ice_hw *hw, enum ice_reset_req req)
* ice_copy_rxq_ctx_to_hw
* @hw: pointer to the hardware structure
* @ice_rxq_ctx: pointer to the rxq context
* @rxq_index: the index of the rx queue
* @rxq_index: the index of the Rx queue
*
* Copies rxq context from dense structure to hw register space
*/
......@@ -1014,7 +1012,7 @@ static const struct ice_ctx_ele ice_rlan_ctx_info[] = {
* ice_write_rxq_ctx
* @hw: pointer to the hardware structure
* @rlan_ctx: pointer to the rxq context
* @rxq_index: the index of the rx queue
* @rxq_index: the index of the Rx queue
*
* Converts rxq context from sparse to dense structure and then writes
* it to hw register space
......@@ -1715,8 +1713,7 @@ void ice_clear_pxe_mode(struct ice_hw *hw)
* If no bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
* If more than one bit gets set, ICE_LINK_SPEED_UNKNOWN will be returned
*/
static u16
ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
static u16 ice_get_link_speed_based_on_phy_type(u64 phy_type_low)
{
u16 speed_phy_type_low = ICE_AQ_LINK_SPEED_UNKNOWN;
......
......@@ -62,7 +62,7 @@ static const struct ice_stats ice_gstrings_vsi_stats[] = {
* The PF_STATs are appended to the netdev stats only when ethtool -S
* is queried on the base PF netdev.
*/
static struct ice_stats ice_gstrings_pf_stats[] = {
static const struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("tx_bytes", stats.eth.tx_bytes),
ICE_PF_STAT("rx_bytes", stats.eth.rx_bytes),
ICE_PF_STAT("tx_unicast", stats.eth.tx_unicast),
......@@ -104,7 +104,7 @@ static struct ice_stats ice_gstrings_pf_stats[] = {
ICE_PF_STAT("mac_remote_faults", stats.mac_remote_faults),
};
static u32 ice_regs_dump_list[] = {
static const u32 ice_regs_dump_list[] = {
PFGEN_STATE,
PRTGEN_STATUS,
QRX_CTRL(0),
......@@ -260,10 +260,10 @@ static int ice_get_sset_count(struct net_device *netdev, int sset)
* a private ethtool flag). This is due to the nature of the
* ethtool stats API.
*
* User space programs such as ethtool must make 3 separate
* Userspace programs such as ethtool must make 3 separate
* ioctl requests, one for size, one for the strings, and
* finally one for the stats. Since these cross into
* user space, changes to the number or size could result in
* userspace, changes to the number or size could result in
* undefined memory access or incorrect string<->value
* correlations for statistics.
*
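
Those three requests map onto three ethtool_ops callbacks, which is why
the count, the strings, and the values must stay in lockstep. A generic
sketch of the wiring (placeholder names, not the ice handlers):

	#include <linux/ethtool.h>

	static int example_get_sset_count(struct net_device *netdev, int sset);
	static void example_get_strings(struct net_device *netdev, u32 sset,
					u8 *data);
	static void example_get_ethtool_stats(struct net_device *netdev,
					      struct ethtool_stats *stats,
					      u64 *data);

	static const struct ethtool_ops example_ethtool_ops = {
		.get_sset_count	   = example_get_sset_count,   /* ioctl 1: size */
		.get_strings	   = example_get_strings,      /* ioctl 2: names */
		.get_ethtool_stats = example_get_ethtool_stats,/* ioctl 3: values */
	};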
......@@ -1392,17 +1392,17 @@ static int ice_nway_reset(struct net_device *netdev)
{
/* restart autonegotiation */
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_link_status *hw_link_info;
struct ice_vsi *vsi = np->vsi;
struct ice_port_info *pi;
enum ice_status status;
bool link_up;
pi = vsi->port_info;
hw_link_info = &pi->phy.link_info;
link_up = hw_link_info->link_info & ICE_AQ_LINK_UP;
/* If VSI state is up, then restart autoneg with link up */
if (!test_bit(__ICE_DOWN, vsi->back->state))
status = ice_aq_set_link_restart_an(pi, true, NULL);
else
status = ice_aq_set_link_restart_an(pi, false, NULL);
status = ice_aq_set_link_restart_an(pi, link_up, NULL);
if (status) {
netdev_info(netdev, "link restart failed, err %d aq_err %d\n",
status, pi->hw->adminq.sq_last_status);
......@@ -1441,7 +1441,7 @@ ice_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
/**
* ice_set_pauseparam - Set Flow Control parameter
* @netdev: network interface device structure
* @pause: return tx/rx flow control status
* @pause: return Tx/Rx flow control status
*/
static int
ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
......@@ -1543,7 +1543,7 @@ static u32 ice_get_rxfh_key_size(struct net_device __always_unused *netdev)
}
/**
* ice_get_rxfh_indir_size - get the rx flow hash indirection table size
* ice_get_rxfh_indir_size - get the Rx flow hash indirection table size
* @netdev: network interface device structure
*
* Returns the table size.
......@@ -1556,7 +1556,7 @@ static u32 ice_get_rxfh_indir_size(struct net_device *netdev)
}
/**
* ice_get_rxfh - get the rx flow hash indirection table
* ice_get_rxfh - get the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
......@@ -1603,7 +1603,7 @@ ice_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, u8 *hfunc)
}
/**
* ice_set_rxfh - set the rx flow hash indirection table
* ice_set_rxfh - set the Rx flow hash indirection table
* @netdev: network interface device structure
* @indir: indirection table
* @key: hash key
......
......@@ -6,11 +6,11 @@
union ice_32byte_rx_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
struct {
......@@ -105,11 +105,11 @@ enum ice_rx_ptype_payload_layer {
*/
union ice_32b_rx_flex_desc {
struct {
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
__le64 pkt_addr; /* Packet buffer address */
__le64 hdr_addr; /* Header buffer address */
/* bit 0 of hdr_addr is DD bit */
__le64 rsvd1;
__le64 rsvd2;
} read;
struct {
/* Qword 0 */
......@@ -256,6 +256,9 @@ enum ice_rx_flex_desc_status_error_0_bits {
#define ICE_RXQ_CTX_SIZE_DWORDS 8
#define ICE_RXQ_CTX_SZ (ICE_RXQ_CTX_SIZE_DWORDS * sizeof(u32))
#define ICE_TX_CMPLTNQ_CTX_SIZE_DWORDS 22
#define ICE_TX_DRBELL_Q_CTX_SIZE_DWORDS 5
#define GLTCLAN_CQ_CNTX(i, CQ) (GLTCLAN_CQ_CNTX0(CQ) + ((i) * 0x0800))
/* RLAN Rx queue context data
*
......@@ -274,18 +277,18 @@ struct ice_rlan_ctx {
u16 dbuf; /* bigger than needed, see above for reason */
#define ICE_RLAN_CTX_HBUF_S 6
u16 hbuf; /* bigger than needed, see above for reason */
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u8 dtype;
u8 dsize;
u8 crcstrip;
u8 l2tsel;
u8 hsplit_0;
u8 hsplit_1;
u8 showiv;
u32 rxmax; /* bigger than needed, see above for reason */
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u8 tphrdesc_ena;
u8 tphwdesc_ena;
u8 tphdata_ena;
u8 tphhead_ena;
u16 lrxqthresh; /* bigger than needed, see above for reason */
};
......@@ -413,35 +416,35 @@ enum ice_tx_ctx_desc_cmd_bits {
struct ice_tlan_ctx {
#define ICE_TLAN_CTX_BASE_S 7
u64 base; /* base is defined in 128-byte units */
u8 port_num;
u8 port_num;
u16 cgd_num; /* bigger than needed, see above for reason */
u8 pf_num;
u8 pf_num;
u16 vmvf_num;
u8 vmvf_type;
u8 vmvf_type;
#define ICE_TLAN_CTX_VMVF_TYPE_VF 0
#define ICE_TLAN_CTX_VMVF_TYPE_VMQ 1
#define ICE_TLAN_CTX_VMVF_TYPE_PF 2
u16 src_vsi;
u8 tsyn_ena;
u8 alt_vlan;
u8 tsyn_ena;
u8 alt_vlan;
u16 cpuid; /* bigger than needed, see above for reason */
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
u8 tphwr_desc;
u8 wb_mode;
u8 tphrd_desc;
u8 tphrd;
u8 tphwr_desc;
u16 cmpq_id;
u16 qnum_in_func;
u8 itr_notification_mode;
u8 adjust_prof_id;
u8 itr_notification_mode;
u8 adjust_prof_id;
u32 qlen; /* bigger than needed, see above for reason */
u8 quanta_prof_idx;
u8 tso_ena;
u8 quanta_prof_idx;
u8 tso_ena;
u16 tso_qnum;
u8 legacy_int;
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
u8 int_q_state; /* width not needed - internal do not write */
u8 legacy_int;
u8 drop_ena;
u8 cache_prof_idx;
u8 pkt_shaper_prof_idx;
u8 int_q_state; /* width not needed - internal do not write */
};
/* macro to make the table lines short */
......
......@@ -20,7 +20,7 @@ static int ice_setup_rx_ctx(struct ice_ring *ring)
u16 pf_q;
int err;
/* what is RX queue number in global space of 2K Rx queues */
/* what is Rx queue number in global space of 2K Rx queues */
pf_q = vsi->rxq_map[ring->q_index];
/* clear the context structure first */
......@@ -1012,7 +1012,7 @@ void ice_vsi_free_q_vectors(struct ice_vsi *vsi)
* @vsi: the VSI being configured
* @v_idx: index of the vector in the VSI struct
*
* We allocate one q_vector. If allocation fails we return -ENOMEM.
* We allocate one q_vector. If allocation fails we return -ENOMEM.
*/
static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
{
......@@ -1051,7 +1051,7 @@ static int ice_vsi_alloc_q_vector(struct ice_vsi *vsi, int v_idx)
* ice_vsi_alloc_q_vectors - Allocate memory for interrupt vectors
* @vsi: the VSI being configured
*
* We allocate one q_vector per queue interrupt. If allocation fails we
* We allocate one q_vector per queue interrupt. If allocation fails we
* return -ENOMEM.
*/
static int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
......@@ -1200,7 +1200,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
struct ice_pf *pf = vsi->back;
int i;
/* Allocate tx_rings */
/* Allocate Tx rings */
for (i = 0; i < vsi->alloc_txq; i++) {
struct ice_ring *ring;
......@@ -1219,7 +1219,7 @@ static int ice_vsi_alloc_rings(struct ice_vsi *vsi)
vsi->tx_rings[i] = ring;
}
/* Allocate rx_rings */
/* Allocate Rx rings */
for (i = 0; i < vsi->alloc_rxq; i++) {
struct ice_ring *ring;
......@@ -2136,9 +2136,7 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
pf->q_left_rx -= vsi->alloc_rxq;
break;
default:
/* if VSI type is not recognized, clean up the resources and
* exit
*/
/* clean up the resources and exit */
goto unroll_vsi_init;
}
......@@ -2335,7 +2333,7 @@ static int ice_search_res(struct ice_res_tracker *res, u16 needed, u16 id)
int start = res->search_hint;
int end = start;
if ((start + needed) > res->num_entries)
if ((start + needed) > res->num_entries)
return -ENOMEM;
id |= ICE_RES_VALID_BIT;
......
......@@ -408,7 +408,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
/* When a CORER/GLOBR/EMPR is about to happen, the hardware triggers an
* OICR interrupt. The OICR handler (ice_misc_intr) determines what type
* of reset is pending and sets bits in pf->state indicating the reset
* type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* type and __ICE_RESET_OICR_RECV. So, if the latter bit is set
* prepare for pending reset if not already (for PF software-initiated
* global resets the software should already be prepared for it as
* indicated by __ICE_PREPARED_FOR_RESET; for global resets initiated
......@@ -1382,7 +1382,7 @@ static void ice_free_irq_msix_misc(struct ice_pf *pf)
* @pf: board private structure
*
* This sets up the handler for MSIX 0, which is used to manage the
* non-queue interrupts, e.g. AdminQ and errors. This is not used
* non-queue interrupts, e.g. AdminQ and errors. This is not used
* when in MSI or Legacy interrupt mode.
*/
static int ice_req_irq_msix_misc(struct ice_pf *pf)
......@@ -1786,7 +1786,7 @@ static void ice_determine_q_usage(struct ice_pf *pf)
pf->num_lan_tx = min_t(int, q_left_tx, num_online_cpus());
/* only 1 rx queue unless RSS is enabled */
/* only 1 Rx queue unless RSS is enabled */
if (!test_bit(ICE_FLAG_RSS_ENA, pf->flags))
pf->num_lan_rx = 1;
else
......@@ -3674,7 +3674,7 @@ ice_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh,
*/
status = ice_update_sw_rule_bridge_mode(hw);
if (status) {
netdev_err(dev, "update SW_RULE for bridge mode failed, = %d err %d aq_err %d\n",
netdev_err(dev, "switch rule update failed, mode = %d err %d aq_err %d\n",
mode, status, hw->adminq.sq_last_status);
/* revert hw->evb_veb */
hw->evb_veb = (pf_sw->bridge_mode == BRIDGE_MODE_VEB);
......@@ -3702,35 +3702,31 @@ static void ice_tx_timeout(struct net_device *netdev)
pf->tx_timeout_count++;
/* find the stopped queue the same way the stack does */
/* find the stopped queue the same way dev_watchdog() does */
for (i = 0; i < netdev->num_tx_queues; i++) {
struct netdev_queue *q;
unsigned long trans_start;
struct netdev_queue *q;
q = netdev_get_tx_queue(netdev, i);
trans_start = q->trans_start;
if (netif_xmit_stopped(q) &&
time_after(jiffies,
(trans_start + netdev->watchdog_timeo))) {
trans_start + netdev->watchdog_timeo)) {
hung_queue = i;
break;
}
}
if (i == netdev->num_tx_queues) {
if (i == netdev->num_tx_queues)
netdev_info(netdev, "tx_timeout: no netdev hung queue found\n");
} else {
else
/* now that we have an index, find the tx_ring struct */
for (i = 0; i < vsi->num_txq; i++) {
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc) {
if (hung_queue ==
vsi->tx_rings[i]->q_index) {
for (i = 0; i < vsi->num_txq; i++)
if (vsi->tx_rings[i] && vsi->tx_rings[i]->desc)
if (hung_queue == vsi->tx_rings[i]->q_index) {
tx_ring = vsi->tx_rings[i];
break;
}
}
}
}
/* Reset recovery level if enough time has elapsed after last timeout.
* Also ensure no new reset action happens before next timeout period.
......@@ -3751,7 +3747,7 @@ static void ice_tx_timeout(struct net_device *netdev)
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
val = rd32(hw,
GLINT_DYN_CTL(tx_ring->q_vector->v_idx +
tx_ring->vsi->hw_base_vector));
tx_ring->vsi->hw_base_vector));
netdev_info(netdev, "tx_timeout: VSI_num: %d, Q %d, NTC: 0x%x, HW_HEAD: 0x%x, NTU: 0x%x, INT: 0x%x\n",
vsi->vsi_num, hung_queue, tx_ring->next_to_clean,
......@@ -3789,7 +3785,7 @@ static void ice_tx_timeout(struct net_device *netdev)
* @netdev: network interface device structure
*
* The open entry point is called when a network interface is made
* active by the system (IFF_UP). At this point all resources needed
* active by the system (IFF_UP). At this point all resources needed
* for transmit and receive operations are allocated, the interrupt
* handler is registered with the OS, the netdev watchdog is enabled,
* and the stack is notified that the interface is ready.
......@@ -3822,7 +3818,7 @@ static int ice_open(struct net_device *netdev)
* @netdev: network interface device structure
*
* The stop entry point is called when an interface is de-activated by the OS,
* and the netdevice enters the DOWN state. The hardware is still under the
* and the netdevice enters the DOWN state. The hardware is still under the
* driver's control, but the netdev interface is disabled.
*
* Returns success only - not allowed to fail
......@@ -3851,14 +3847,14 @@ ice_features_check(struct sk_buff *skb,
size_t len;
/* No point in doing any of this if neither checksum nor GSO are
* being requested for this frame. We can rule out both by just
* being requested for this frame. We can rule out both by just
* checking for CHECKSUM_PARTIAL
*/
if (skb->ip_summed != CHECKSUM_PARTIAL)
return features;
/* We cannot support GSO if the MSS is going to be less than
* 64 bytes. If it is then we need to drop support for GSO.
* 64 bytes. If it is then we need to drop support for GSO.
*/
if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_size < 64))
features &= ~NETIF_F_GSO_MASK;
......
......@@ -894,8 +894,7 @@ static u8 ice_sched_get_vsi_layer(struct ice_hw *hw)
* This function removes the leaf node that was created by the FW
* during initialization
*/
static void
ice_rm_dflt_leaf_node(struct ice_port_info *pi)
static void ice_rm_dflt_leaf_node(struct ice_port_info *pi)
{
struct ice_sched_node *node;
......@@ -923,8 +922,7 @@ ice_rm_dflt_leaf_node(struct ice_port_info *pi)
* This function frees all the nodes except root and TC that were created by
* the FW during initialization
*/
static void
ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
static void ice_sched_rm_dflt_nodes(struct ice_port_info *pi)
{
struct ice_sched_node *node;
......@@ -1339,7 +1337,7 @@ ice_sched_rm_vsi_child_nodes(struct ice_port_info *pi,
* @num_nodes: pointer to num nodes array
*
* This function calculates the number of supported nodes needed to add this
* VSI into tx tree including the VSI, parent and intermediate nodes in below
* VSI into Tx tree including the VSI, parent and intermediate nodes in below
* layers
*/
static void
......@@ -1376,13 +1374,13 @@ ice_sched_calc_vsi_support_nodes(struct ice_hw *hw,
}
/**
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into tx tree
* ice_sched_add_vsi_support_nodes - add VSI supported nodes into Tx tree
* @pi: port information structure
* @vsi_handle: software VSI handle
* @tc_node: pointer to TC node
* @num_nodes: pointer to num nodes array
*
* This function adds the VSI supported nodes into tx tree including the
* This function adds the VSI supported nodes into Tx tree including the
* VSI, its parent and intermediate nodes in below layers
*/
static enum ice_status
......
......@@ -46,7 +46,7 @@ ice_aq_send_msg_to_vf(struct ice_hw *hw, u16 vfid, u32 v_opcode, u32 v_retval,
* @link_speed: variable containing the link_speed to be converted
*
* Convert link speed supported by HW to link speed supported by virtchnl.
* If adv_link_support is true, then return link speed in Mbps. Else return
* If adv_link_support is true, then return link speed in Mbps. Else return
* link speed as a VIRTCHNL_LINK_SPEED_* casted to a u32. Note that the caller
* needs to cast back to an enum virtchnl_link_speed in the case where
* adv_link_support is false, but when adv_link_support is true the caller can
......
......@@ -92,8 +92,7 @@ ice_aq_alloc_free_res(struct ice_hw *hw, u16 num_entries,
* Allocate memory for the entire recipe table and initialize the structures/
* entries corresponding to basic recipes.
*/
enum ice_status
ice_init_def_sw_recp(struct ice_hw *hw)
enum ice_status ice_init_def_sw_recp(struct ice_hw *hw)
{
struct ice_sw_recipe *recps;
u8 i;
......@@ -130,7 +129,7 @@ ice_init_def_sw_recp(struct ice_hw *hw)
*
* NOTE: *req_desc is both an input/output parameter.
* The caller of this function first calls this function with *request_desc set
* to 0. If the response from f/w has *req_desc set to 0, all the switch
* to 0. If the response from f/w has *req_desc set to 0, all the switch
* configuration information has been returned; if non-zero (meaning not all
* the information was returned), the caller should call this function again
* with *req_desc set to the previous value returned by f/w to get the
......@@ -629,25 +628,36 @@ enum ice_status ice_get_initial_sw_cfg(struct ice_hw *hw)
/**
* ice_fill_sw_info - Helper function to populate lb_en and lan_en
* @hw: pointer to the hardware structure
* @f_info: filter info structure to fill/update
* @fi: filter info structure to fill/update
*
* This helper function populates the lb_en and lan_en elements of the provided
* ice_fltr_info struct using the switch's type and characteristics of the
* switch rule being configured.
*/
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *f_info)
static void ice_fill_sw_info(struct ice_hw *hw, struct ice_fltr_info *fi)
{
f_info->lb_en = false;
f_info->lan_en = false;
if ((f_info->flag & ICE_FLTR_TX) &&
(f_info->fltr_act == ICE_FWD_TO_VSI ||
f_info->fltr_act == ICE_FWD_TO_VSI_LIST ||
f_info->fltr_act == ICE_FWD_TO_Q ||
f_info->fltr_act == ICE_FWD_TO_QGRP)) {
f_info->lb_en = true;
if (!(hw->evb_veb && f_info->lkup_type == ICE_SW_LKUP_MAC &&
is_unicast_ether_addr(f_info->l_data.mac.mac_addr)))
f_info->lan_en = true;
fi->lb_en = false;
fi->lan_en = false;
if ((fi->flag & ICE_FLTR_TX) &&
(fi->fltr_act == ICE_FWD_TO_VSI ||
fi->fltr_act == ICE_FWD_TO_VSI_LIST ||
fi->fltr_act == ICE_FWD_TO_Q ||
fi->fltr_act == ICE_FWD_TO_QGRP)) {
fi->lb_en = true;
/* Do not set lan_en to TRUE if
* 1. The switch is a VEB AND
* 2
* 2.1 The lookup is MAC with unicast addr for MAC, OR
* 2.2 The lookup is MAC_VLAN with unicast addr for MAC
*
* In all other cases, the LAN enable has to be set to true.
*/
if (!(hw->evb_veb &&
((fi->lkup_type == ICE_SW_LKUP_MAC &&
is_unicast_ether_addr(fi->l_data.mac.mac_addr)) ||
(fi->lkup_type == ICE_SW_LKUP_MAC_VLAN &&
is_unicast_ether_addr(fi->l_data.mac_vlan.mac_addr)))))
fi->lan_en = true;
}
}
......@@ -817,7 +827,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
/* Create two back-to-back switch rules and submit them to the HW using
* one memory buffer:
* 1. Large Action
* 2. Look up tx rx
* 2. Look up Tx Rx
*/
lg_act_size = (u16)ICE_SW_RULE_LG_ACT_SIZE(num_lg_acts);
rules_size = lg_act_size + ICE_SW_RULE_RX_TX_ETH_HDR_SIZE;
......@@ -861,7 +871,7 @@ ice_add_marker_act(struct ice_hw *hw, struct ice_fltr_mgmt_list_entry *m_ent,
lg_act->pdata.lg_act.act[2] = cpu_to_le32(act);
/* call the fill switch rule to fill the lookup tx rx structure */
/* call the fill switch rule to fill the lookup Tx Rx structure */
ice_fill_sw_rule(hw, &m_ent->fltr_info, rx_tx,
ice_aqc_opc_update_sw_rules);
......@@ -1158,8 +1168,8 @@ enum ice_status ice_update_sw_rule_bridge_mode(struct ice_hw *hw)
* Call AQ command to add or update previously created VSI list with new VSI.
*
* Helper function to do book keeping associated with adding filter information
* The algorithm to do the booking keeping is described below :
* When a VSI needs to subscribe to a given filter( MAC/VLAN/Ethtype etc.)
* The algorithm to do the book keeping is described below :
* When a VSI needs to subscribe to a given filter (MAC/VLAN/Ethtype etc.)
* if only one VSI has been added till now
* Allocate a new VSI list and add two VSIs
* to this list using switch rule command
......@@ -1237,6 +1247,9 @@ ice_add_update_vsi_list(struct ice_hw *hw,
u16 vsi_handle = new_fltr->vsi_handle;
enum ice_adminq_opc opcode;
if (!m_entry->vsi_list_info)
return ICE_ERR_CFG;
/* A rule already exists with the new VSI being added */
if (test_bit(vsi_handle, m_entry->vsi_list_info->vsi_map))
return 0;
......@@ -1853,7 +1866,7 @@ ice_add_vlan_internal(struct ice_hw *hw, struct ice_fltr_list_entry *f_entry)
tmp_fltr.fwd_id.vsi_list_id = vsi_list_id;
tmp_fltr.fltr_act = ICE_FWD_TO_VSI_LIST;
/* Update the previous switch rule to a new VSI list which
* includes current VSI thats requested
* includes current VSI that is requested
*/
status = ice_update_pkt_fwd_rule(hw, &tmp_fltr);
if (status)
......
......@@ -219,7 +219,7 @@ static bool ice_clean_tx_irq(struct ice_vsi *vsi, struct ice_ring *tx_ring,
/**
* ice_setup_tx_ring - Allocate the Tx descriptors
* @tx_ring: the tx ring to set up
* @tx_ring: the Tx ring to set up
*
* Return 0 on success, negative on error
*/
......@@ -324,7 +324,7 @@ void ice_free_rx_ring(struct ice_ring *rx_ring)
/**
* ice_setup_rx_ring - Allocate the Rx descriptors
* @rx_ring: the rx ring to set up
* @rx_ring: the Rx ring to set up
*
* Return 0 on success, negative on error
*/
......@@ -377,7 +377,7 @@ static void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
rx_ring->next_to_alloc = val;
/* Force memory writes to complete before letting h/w
* know there are new descriptors to fetch. (Only
* know there are new descriptors to fetch. (Only
* applicable for weak-ordered memory model archs,
* such as IA-64).
*/
......@@ -586,7 +586,7 @@ static bool ice_add_rx_frag(struct ice_rx_buf *rx_buf,
/**
* ice_reuse_rx_page - page flip buffer and store it back on the ring
* @rx_ring: rx descriptor ring to store buffers on
* @rx_ring: Rx descriptor ring to store buffers on
* @old_buf: donor buffer to have page reused
*
* Synchronizes page for reuse by the adapter
......@@ -609,7 +609,7 @@ static void ice_reuse_rx_page(struct ice_ring *rx_ring,
/**
* ice_fetch_rx_buf - Allocate skb and populate it
* @rx_ring: rx descriptor ring to transact packets on
* @rx_ring: Rx descriptor ring to transact packets on
* @rx_desc: descriptor containing info written by hardware
*
* This function allocates an skb on the fly, and populates it with the page
......@@ -686,7 +686,7 @@ static struct sk_buff *ice_fetch_rx_buf(struct ice_ring *rx_ring,
* ice_pull_tail - ice specific version of skb_pull_tail
* @skb: pointer to current skb being adjusted
*
* This function is an ice specific version of __pskb_pull_tail. The
* This function is an ice specific version of __pskb_pull_tail. The
* main difference between this version and the original function is that
* this function can make several assumptions about the state of things
* that allow for significant optimizations versus the standard function.
......@@ -768,7 +768,7 @@ static bool ice_test_staterr(union ice_32b_rx_flex_desc *rx_desc,
* @rx_desc: Rx descriptor for current buffer
* @skb: Current socket buffer containing buffer in progress
*
* This function updates next to clean. If the buffer is an EOP buffer
* This function updates next to clean. If the buffer is an EOP buffer
* this function exits returning false, otherwise it will place the
* sk_buff in the next buffer to be chained and return true indicating
* that this is in fact a non-EOP buffer.
......@@ -904,7 +904,7 @@ static void ice_rx_csum(struct ice_vsi *vsi, struct sk_buff *skb,
/**
* ice_process_skb_fields - Populate skb header fields from Rx descriptor
* @rx_ring: rx descriptor ring packet is being transacted on
* @rx_ring: Rx descriptor ring packet is being transacted on
* @rx_desc: pointer to the EOP Rx descriptor
* @skb: pointer to current skb being populated
* @ptype: the packet type decoded by hardware
......@@ -927,7 +927,7 @@ static void ice_process_skb_fields(struct ice_ring *rx_ring,
/**
* ice_receive_skb - Send a completed packet up the stack
* @rx_ring: rx ring in play
* @rx_ring: Rx ring in play
* @skb: packet to send up
* @vlan_tag: vlan tag for packet
*
......@@ -946,11 +946,11 @@ static void ice_receive_skb(struct ice_ring *rx_ring, struct sk_buff *skb,
/**
* ice_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf
* @rx_ring: rx descriptor ring to transact packets on
* @rx_ring: Rx descriptor ring to transact packets on
* @budget: Total limit on number of packets to process
*
* This function provides a "bounce buffer" approach to Rx interrupt
* processing. The advantage to this is that on systems that have
* processing. The advantage to this is that on systems that have
* expensive overhead for IOMMU access this provides a means of avoiding
* it by maintaining the mapping of the page to the system.
*
......@@ -1107,7 +1107,8 @@ int ice_napi_poll(struct napi_struct *napi, int budget)
napi_complete_done(napi, work_done);
if (test_bit(ICE_FLAG_MSIX_ENA, pf->flags))
ice_irq_dynamic_ena(&vsi->back->hw, vsi, q_vector);
return 0;
return min(work_done, budget - 1);
}
/* helper function for building cmd/type/offset */
......@@ -1122,7 +1123,7 @@ build_ctob(u64 td_cmd, u64 td_offset, unsigned int size, u64 td_tag)
}
/**
* __ice_maybe_stop_tx - 2nd level check for tx stop conditions
* __ice_maybe_stop_tx - 2nd level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
......@@ -1145,7 +1146,7 @@ static int __ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
}
/**
* ice_maybe_stop_tx - 1st level check for tx stop conditions
* ice_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
......@@ -1155,6 +1156,7 @@ static int ice_maybe_stop_tx(struct ice_ring *tx_ring, unsigned int size)
{
if (likely(ICE_DESC_UNUSED(tx_ring) >= size))
return 0;
return __ice_maybe_stop_tx(tx_ring, size);
}
......@@ -1552,7 +1554,7 @@ int ice_tso(struct ice_tx_buf *first, struct ice_tx_offload_params *off)
* Finally, we add one to round up. Because 256 isn't an exact multiple of
* 3, we'll underestimate near each multiple of 12K. This is actually more
* accurate as we have 4K - 1 of wiggle room that we can fit into the last
* segment. For our purposes this is accurate out to 1M which is orders of
* segment. For our purposes this is accurate out to 1M which is orders of
* magnitude greater than our largest possible GSO size.
*
* This would then be implemented as:
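
A standalone sketch of that approximation (illustrative; placeholder
name rather than the driver's ice_txd_use_count(), assuming the 12K max
data per descriptor described above):

	/* divide by ~12K via multiply-by-85 and shift-by-20, then add one
	 * descriptor to round the estimate up
	 */
	static unsigned int example_txd_use_count(unsigned int size)
	{
		return ((size * 85) >> 20) + 1;
	}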
......@@ -1568,7 +1570,7 @@ static unsigned int ice_txd_use_count(unsigned int size)
}
/**
* ice_xmit_desc_count - calculate number of tx descriptors needed
* ice_xmit_desc_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns number of data descriptors needed for this skb.
......@@ -1620,7 +1622,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
nr_frags -= ICE_MAX_BUF_TXD - 2;
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
/* Initialize size to the negative value of gso_size minus 1. We
* use this as the worst case scenerio in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
......
......@@ -156,8 +156,6 @@ static void ice_free_vf_res(struct ice_vf *vf)
clear_bit(ICE_VF_STATE_UC_PROMISC, vf->vf_states);
}
/***********************enable_vf routines*****************************/
/**
* ice_dis_vf_mappings
* @vf: pointer to the VF structure
......@@ -454,7 +452,7 @@ static int ice_alloc_vsi_res(struct ice_vf *vf)
/* Clear this bit after VF initialization since we shouldn't reclaim
* and reassign interrupts for synchronous or asynchronous VFR events.
* We don't want to reconfigure interrupts since AVF driver doesn't
* We dont want to reconfigure interrupts since AVF driver doesn't
* expect vector assignment to be changed unless there is a request for
* more vectors.
*/
......@@ -1105,7 +1103,7 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
* ice_process_vflr_event - Free VF resources via IRQ calls
* @pf: pointer to the PF structure
*
* called from the VLFR IRQ handler to
* called from the VFLR IRQ handler to
* free up VF resources and state variables
*/
void ice_process_vflr_event(struct ice_pf *pf)
......@@ -1764,7 +1762,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
/* copy Tx queue info from VF into VSI */
vsi->tx_rings[i]->dma = qpi->txq.dma_ring_addr;
vsi->tx_rings[i]->count = qpi->txq.ring_len;
/* copy Rx queue info from VF into vsi */
/* copy Rx queue info from VF into VSI */
vsi->rx_rings[i]->dma = qpi->rxq.dma_ring_addr;
vsi->rx_rings[i]->count = qpi->rxq.ring_len;
if (qpi->rxq.databuffer_size > ((16 * 1024) - 128)) {
......@@ -1830,7 +1828,7 @@ static bool ice_can_vf_change_mac(struct ice_vf *vf)
* @msg: pointer to the msg buffer
* @set: true if mac filters are being set, false otherwise
*
* add guest mac address filter
* add guest MAC address filter
*/
static int
ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
......@@ -1968,9 +1966,9 @@ static int ice_vc_del_mac_addr_msg(struct ice_vf *vf, u8 *msg)
* @msg: pointer to the msg buffer
*
* VFs get a default number of queues but can use this message to request a
* different number. If the request is successful, PF will reset the VF and
* different number. If the request is successful, PF will reset the VF and
* return 0. If unsuccessful, PF will send message informing VF of number of
* available queue pairs via virtchnl message response to VF.
* available queue pairs via virtchnl message response to vf.
*/
static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
{
......@@ -1991,7 +1989,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
tx_rx_queue_left = min_t(int, pf->q_left_tx, pf->q_left_rx);
if (req_queues <= 0) {
dev_err(&pf->pdev->dev,
"VF %d tried to request %d queues. Ignoring.\n",
"VF %d tried to request %d queues. Ignoring.\n",
vf->vf_id, req_queues);
} else if (req_queues > ICE_MAX_QS_PER_VF) {
dev_err(&pf->pdev->dev,
......
......@@ -70,7 +70,7 @@ struct ice_vf {
u8 spoofchk;
u16 num_mac;
u16 num_vlan;
u8 num_req_qs; /* num of queue pairs requested by VF */
u8 num_req_qs; /* num of queue pairs requested by VF */
};
#ifdef CONFIG_PCI_IOV
......