Commit d8ef0347 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-02-17

This series contains updates to i40e/i40evf only (again).

Jesse moves sync_vsi_filters() up in the service_task because it may need
to request a reset, and we do not want to wait another round of service
task time.  He also refactors enable_icr0() so that the caller decides
whether the CLEARPBA (clear pending events) bit is set while re-enabling
the interrupt.  Also provides the "Don't Give Up" patch, where the driver
keeps polling and trying to allocate receive buffers until it succeeds.
This should keep all receive queues running even in the face of memory
pressure.  Cleans up the debugging helpers by printing everything in hex
to be consistent.
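
A condensed sketch of the re-enable path as refactored in the i40e_main.c
hunk below; the caller now decides whether pending events are cleared:

    void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
    {
            struct i40e_hw *hw = &pf->hw;
            u32 val;

            /* clear pending events only when the caller asks for it */
            val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
                  (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
                  (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);

            wr32(hw, I40E_PFINT_DYN_CTL0, val);
    }

Hot-path callers such as the interrupt handler pass false so that events
arriving mid-poll are not lost; setup paths keep passing true.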

Neerav updates the DCB firmware version related checks to apply to X710
and XL710 only, since the checks are not required for X722 devices.
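
The gate reduces to a small MAC-type helper; a minimal sketch condensed
from the i40e.h and i40e_sw_init() hunks below:

    static inline bool i40e_is_mac_710(struct i40e_hw *hw)
    {
            return (hw->mac.type == I40E_MAC_X710) ||
                   (hw->mac.type == I40E_MAC_XL710);
    }

    /* the DCB workaround applies only to X710/XL710 on old firmware */
    if (i40e_is_mac_710(&pf->hw) &&
        (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
         (pf->hw.aq.fw_maj_ver < 4)))
            pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;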

Shannon adds the use of the new shared MAC filter bit for multicast and
broadcast filters in order to make better use of the filters available
from the device.  Adds a parameter that lets the driver enable or disable
statistics gathering in the hardware switch.  Also removes the L2 cloud
filtering parameter, since it was never used.
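
In short, the add-macvlan path now tags multicast (and therefore
broadcast) entries with the shared-MAC flag before the AQ command goes
out, exactly as in the i40e_aq_add_macvlan() hunk below:

    for (i = 0; i < count; i++)
            if (is_multicast_ether_addr(mv_list[i].mac_addr))
                    mv_list[i].flags |=
                           cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);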

Anjali refactors the force_wb and WB_ON_ITR functionality: since the
Force-WriteBack functionality in X710/XL710 devices has been moved out of
the clean routine and into the service task, WriteBack-On-ITR needs to be
separated out, since it is still called from clean.
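
The split comes down to two register writes, sketched here from the
i40e_txrx.c hunk below (the trailing SW_ITR_INDX_ENA mask in the second
write is completed from the matching i40evf code later in this diff, so
treat its PF spelling as an assumption):

    /* i40e_enable_wb_on_itr(): ask for a writeback, leave INTENA off;
     * safe to call from the clean routine
     */
    val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
          I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */

    /* i40e_force_wb(): fire a SW interrupt from the service task so
     * the hardware does a writeback
     */
    val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
          I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
          I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
          I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK;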

Catherine changes the VF driver string to reflect all the products that
are supported.

Mitch refactors the packet split receive code to properly use half-pages
for receives.  Also changes the use of bitwise operators to logical
operators on the clean_complete variable, while making a witty reference
to Mr. Spock.  Cleans up (i.e. removes) the hsplit field in the ring
structure and uses the existing macro to detect packet split enablement,
which allows debugfs dumps of the VSI to properly show which receive
routine is in use.
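
Two lines from the i40e_txrx.c changes below capture the gist:
clean_complete is a bool, so it wants a logical rather than bitwise AND,
and the half-page scheme simply flips between the two halves of each
receive page on refill:

    clean_complete = clean_complete &&
                     i40e_clean_tx_irq(ring, vsi->work_limit);

    /* use the other half of the page next time; the page itself is
     * freed only once both halves are in flight (page_count > 2)
     */
    bi->page_offset ^= PAGE_SIZE / 2;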
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2bc6b4f4 c24215c0
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -138,6 +138,19 @@
 /* default to trying for four seconds */
 #define I40E_TRY_LINK_TIMEOUT (4 * HZ)
 
+/**
+ * i40e_is_mac_710 - Return true if MAC is X710/XL710
+ * @hw: ptr to the hardware info
+ **/
+static inline bool i40e_is_mac_710(struct i40e_hw *hw)
+{
+    if ((hw->mac.type == I40E_MAC_X710) ||
+        (hw->mac.type == I40E_MAC_XL710))
+        return true;
+
+    return false;
+}
+
 /* driver state flags */
 enum i40e_state_t {
     __I40E_TESTING,
@@ -342,6 +355,9 @@ struct i40e_pf {
 #define I40E_FLAG_NO_PCI_LINK_CHECK    BIT_ULL(42)
 #define I40E_FLAG_100M_SGMII_CAPABLE   BIT_ULL(43)
 #define I40E_FLAG_RESTART_AUTONEG      BIT_ULL(44)
+#define I40E_FLAG_NO_DCB_SUPPORT       BIT_ULL(45)
+#define I40E_FLAG_USE_SET_LLDP_MIB     BIT_ULL(46)
+#define I40E_FLAG_STOP_FW_LLDP         BIT_ULL(47)
 #define I40E_FLAG_PF_MAC               BIT_ULL(50)
 
     /* tracks features that get auto disabled by errors */
@@ -751,6 +767,9 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
     struct i40e_hw *hw = &pf->hw;
     u32 val;
 
+    /* definitely clear the PBA here, as this function is meant to
+     * clean out all previous interrupts AND enable the interrupt
+     */
     val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
           I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
           (I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
@@ -759,7 +778,7 @@ static inline void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
 }
 
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba);
 #ifdef I40E_FCOE
 struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                              struct net_device *netdev,
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2308,8 +2308,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
  * @downlink_seid: the VSI SEID
  * @enabled_tc: bitmap of TCs to be enabled
  * @default_port: true for default port VSI, false for control port
- * @enable_l2_filtering: true to add L2 filter table rules to regular forwarding rules for cloud support
  * @veb_seid: pointer to where to put the resulting VEB SEID
+ * @enable_stats: true to turn on VEB stats
  * @cmd_details: pointer to command details structure or NULL
  *
  * This asks the FW to add a VEB between the uplink and downlink
@@ -2317,8 +2317,8 @@ i40e_status i40e_update_link_info(struct i40e_hw *hw)
  **/
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                             u16 downlink_seid, u8 enabled_tc,
-                            bool default_port, bool enable_l2_filtering,
-                            u16 *veb_seid,
+                            bool default_port, u16 *veb_seid,
+                            bool enable_stats,
                             struct i40e_asq_cmd_details *cmd_details)
 {
     struct i40e_aq_desc desc;
@@ -2345,8 +2345,9 @@ i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
     else
         veb_flags |= I40E_AQC_ADD_VEB_PORT_TYPE_DATA;
 
-    if (enable_l2_filtering)
-        veb_flags |= I40E_AQC_ADD_VEB_ENABLE_L2_FILTER;
+    /* reverse logic here: set the bitflag to disable the stats */
+    if (!enable_stats)
+        veb_flags |= I40E_AQC_ADD_VEB_ENABLE_DISABLE_STATS;
 
     cmd->veb_flags = cpu_to_le16(veb_flags);
 
@@ -2435,6 +2436,7 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
         (struct i40e_aqc_macvlan *)&desc.params.raw;
     i40e_status status;
     u16 buf_size;
+    int i;
 
     if (count == 0 || !mv_list || !hw)
         return I40E_ERR_PARAM;
@@ -2448,12 +2450,17 @@ i40e_status i40e_aq_add_macvlan(struct i40e_hw *hw, u16 seid,
     cmd->seid[1] = 0;
     cmd->seid[2] = 0;
 
+    for (i = 0; i < count; i++)
+        if (is_multicast_ether_addr(mv_list[i].mac_addr))
+            mv_list[i].flags |=
+                cpu_to_le16(I40E_AQC_MACVLAN_ADD_USE_SHARED_MAC);
+
     desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
     if (buf_size > I40E_AQ_LARGE_BUF)
         desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
 
     status = i40e_asq_send_command(hw, &desc, mv_list, buf_size,
                                    cmd_details);
 
     return status;
 }
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -521,7 +521,7 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
              rx_ring->dtype);
         dev_info(&pf->pdev->dev,
              " rx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-             i, rx_ring->hsplit,
+             i, ring_is_ps_enabled(rx_ring),
              rx_ring->next_to_use,
              rx_ring->next_to_clean,
              rx_ring->ring_active);
@@ -535,6 +535,11 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
              i,
              rx_ring->rx_stats.alloc_page_failed,
              rx_ring->rx_stats.alloc_buff_failed);
+        dev_info(&pf->pdev->dev,
+             " rx_rings[%i]: rx_stats: realloc_count = %lld, page_reuse_count = %lld\n",
+             i,
+             rx_ring->rx_stats.realloc_count,
+             rx_ring->rx_stats.page_reuse_count);
         dev_info(&pf->pdev->dev,
              " rx_rings[%i]: size = %i, dma = 0x%08lx\n",
              i, rx_ring->size,
@@ -567,8 +572,8 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
              " tx_rings[%i]: dtype = %d\n",
              i, tx_ring->dtype);
         dev_info(&pf->pdev->dev,
-             " tx_rings[%i]: hsplit = %d, next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
-             i, tx_ring->hsplit,
+             " tx_rings[%i]: next_to_use = %d, next_to_clean = %d, ring_active = %i\n",
+             i,
              tx_ring->next_to_use,
              tx_ring->next_to_clean,
              tx_ring->ring_active);
@@ -825,20 +830,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
             if (!is_rx_ring) {
                 txd = I40E_TX_DESC(ring, i);
                 dev_info(&pf->pdev->dev,
-                     " d[%03i] = 0x%016llx 0x%016llx\n",
+                     " d[%03x] = 0x%016llx 0x%016llx\n",
                      i, txd->buffer_addr,
                      txd->cmd_type_offset_bsz);
             } else if (sizeof(union i40e_rx_desc) ==
                        sizeof(union i40e_16byte_rx_desc)) {
                 rxd = I40E_RX_DESC(ring, i);
                 dev_info(&pf->pdev->dev,
-                     " d[%03i] = 0x%016llx 0x%016llx\n",
+                     " d[%03x] = 0x%016llx 0x%016llx\n",
                      i, rxd->read.pkt_addr,
                      rxd->read.hdr_addr);
             } else {
                 rxd = I40E_RX_DESC(ring, i);
                 dev_info(&pf->pdev->dev,
-                     " d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                     " d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
                      i, rxd->read.pkt_addr,
                      rxd->read.hdr_addr,
                      rxd->read.rsvd1, rxd->read.rsvd2);
@@ -853,20 +858,20 @@ static void i40e_dbg_dump_desc(int cnt, int vsi_seid, int ring_id, int desc_n,
         if (!is_rx_ring) {
             txd = I40E_TX_DESC(ring, desc_n);
             dev_info(&pf->pdev->dev,
-                 "vsi = %02i tx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+                 "vsi = %02i tx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
                  vsi_seid, ring_id, desc_n,
                  txd->buffer_addr, txd->cmd_type_offset_bsz);
         } else if (sizeof(union i40e_rx_desc) ==
                    sizeof(union i40e_16byte_rx_desc)) {
             rxd = I40E_RX_DESC(ring, desc_n);
             dev_info(&pf->pdev->dev,
-                 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx\n",
+                 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx\n",
                  vsi_seid, ring_id, desc_n,
                  rxd->read.pkt_addr, rxd->read.hdr_addr);
         } else {
             rxd = I40E_RX_DESC(ring, desc_n);
             dev_info(&pf->pdev->dev,
-                 "vsi = %02i rx ring = %02i d[%03i] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
+                 "vsi = %02i rx ring = %02i d[%03x] = 0x%016llx 0x%016llx 0x%016llx 0x%016llx\n",
                  vsi_seid, ring_id, desc_n,
                  rxd->read.pkt_addr, rxd->read.hdr_addr,
                  rxd->read.rsvd1, rxd->read.rsvd2);
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -2785,10 +2785,15 @@ static int i40e_set_priv_flags(struct net_device *dev, u32 flags)
         pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;
     }
 
-    if (flags & I40E_PRIV_FLAGS_VEB_STATS)
+    if ((flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+        !(pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
         pf->flags |= I40E_FLAG_VEB_STATS_ENABLED;
-    else
+        reset_required = true;
+    } else if (!(flags & I40E_PRIV_FLAGS_VEB_STATS) &&
+           (pf->flags & I40E_FLAG_VEB_STATS_ENABLED)) {
         pf->flags &= ~I40E_FLAG_VEB_STATS_ENABLED;
+        reset_required = true;
+    }
 
     if ((flags & I40E_PRIV_FLAGS_HW_ATR_EVICT) &&
         (pf->flags & I40E_FLAG_HW_ATR_EVICT_CAPABLE))
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -46,7 +46,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 12
+#define DRV_VERSION_BUILD 13
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
         __stringify(DRV_VERSION_MINOR) "." \
         __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -2168,6 +2168,10 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
         }
     }
 out:
+    /* if something went wrong then set the changed flag so we try again */
+    if (retval)
+        vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
+
     clear_bit(__I40E_CONFIG_BUSY, &vsi->state);
     return retval;
 }
@@ -3253,14 +3257,15 @@ void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf)
 /**
  * i40e_irq_dynamic_enable_icr0 - Enable default interrupt generation for icr0
  * @pf: board private structure
+ * @clearpba: true when all pending interrupt events should be cleared
  **/
-void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf)
+void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf, bool clearpba)
 {
     struct i40e_hw *hw = &pf->hw;
     u32 val;
 
     val = I40E_PFINT_DYN_CTL0_INTENA_MASK |
-          I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+          (clearpba ? I40E_PFINT_DYN_CTL0_CLEARPBA_MASK : 0) |
           (I40E_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
 
     wr32(hw, I40E_PFINT_DYN_CTL0, val);
@@ -3392,7 +3397,7 @@ static int i40e_vsi_enable_irq(struct i40e_vsi *vsi)
         for (i = 0; i < vsi->num_q_vectors; i++)
             i40e_irq_dynamic_enable(vsi, i);
     } else {
-        i40e_irq_dynamic_enable_icr0(pf);
+        i40e_irq_dynamic_enable_icr0(pf, true);
     }
 
     i40e_flush(&pf->hw);
@@ -3538,7 +3543,7 @@ static irqreturn_t i40e_intr(int irq, void *data)
     wr32(hw, I40E_PFINT_ICR0_ENA, ena_mask);
     if (!test_bit(__I40E_DOWN, &pf->state)) {
         i40e_service_event_schedule(pf);
-        i40e_irq_dynamic_enable_icr0(pf);
+        i40e_irq_dynamic_enable_icr0(pf, false);
     }
 
     return ret;
@@ -5008,8 +5013,7 @@ static int i40e_init_pf_dcb(struct i40e_pf *pf)
     int err = 0;
 
     /* Do not enable DCB for SW1 and SW2 images even if the FW is capable */
-    if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-        (pf->hw.aq.fw_maj_ver < 4))
+    if (pf->flags & I40E_FLAG_NO_DCB_SUPPORT)
         goto out;
 
     /* Get the initial DCB configuration */
@@ -7113,6 +7117,7 @@ static void i40e_service_task(struct work_struct *work)
     }
 
     i40e_detect_recover_hung(pf);
+    i40e_sync_filters_subtask(pf);
     i40e_reset_subtask(pf);
     i40e_handle_mdd_event(pf);
     i40e_vc_process_vflr_event(pf);
@@ -7854,7 +7859,7 @@ static int i40e_setup_misc_vector(struct i40e_pf *pf)
 
     i40e_flush(hw);
 
-    i40e_irq_dynamic_enable_icr0(pf);
+    i40e_irq_dynamic_enable_icr0(pf, true);
 
     return err;
 }
@@ -8420,11 +8425,25 @@ static int i40e_sw_init(struct i40e_pf *pf)
                  pf->hw.func_caps.fd_filters_best_effort;
     }
 
-    if (((pf->hw.mac.type == I40E_MAC_X710) ||
-         (pf->hw.mac.type == I40E_MAC_XL710)) &&
+    if (i40e_is_mac_710(&pf->hw) &&
         (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 33)) ||
-        (pf->hw.aq.fw_maj_ver < 4)))
+        (pf->hw.aq.fw_maj_ver < 4))) {
         pf->flags |= I40E_FLAG_RESTART_AUTONEG;
+        /* No DCB support for FW < v4.33 */
+        pf->flags |= I40E_FLAG_NO_DCB_SUPPORT;
+    }
+
+    /* Disable FW LLDP if FW < v4.3 */
+    if (i40e_is_mac_710(&pf->hw) &&
+        (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
+        (pf->hw.aq.fw_maj_ver < 4)))
+        pf->flags |= I40E_FLAG_STOP_FW_LLDP;
+
+    /* Use the FW Set LLDP MIB API if FW > v4.40 */
+    if (i40e_is_mac_710(&pf->hw) &&
+        (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver >= 40)) ||
+        (pf->hw.aq.fw_maj_ver >= 5)))
+        pf->flags |= I40E_FLAG_USE_SET_LLDP_MIB;
 
     if (pf->hw.func_caps.vmdq) {
         pf->num_vmdq_vsis = I40E_DEFAULT_NUM_VMDQ_VSI;
@@ -8453,6 +8472,7 @@ static int i40e_sw_init(struct i40e_pf *pf)
                  I40E_FLAG_WB_ON_ITR_CAPABLE |
                  I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE |
                  I40E_FLAG_100M_SGMII_CAPABLE |
+                 I40E_FLAG_USE_SET_LLDP_MIB |
                  I40E_FLAG_GENEVE_OFFLOAD_CAPABLE;
     } else if ((pf->hw.aq.api_maj_ver > 1) ||
                ((pf->hw.aq.api_maj_ver == 1) &&
@@ -10050,13 +10070,13 @@ static int i40e_add_veb(struct i40e_veb *veb, struct i40e_vsi *vsi)
 {
     struct i40e_pf *pf = veb->pf;
     bool is_default = veb->pf->cur_promisc;
-    bool is_cloud = false;
+    bool enable_stats = !!(pf->flags & I40E_FLAG_VEB_STATS_ENABLED);
     int ret;
 
     /* get a VEB from the hardware */
     ret = i40e_aq_add_veb(&pf->hw, veb->uplink_seid, vsi->seid,
                           veb->enabled_tc, is_default,
-                          is_cloud, &veb->seid, NULL);
+                          &veb->seid, enable_stats, NULL);
     if (ret) {
         dev_info(&pf->pdev->dev,
              "couldn't add VEB, err %s aq_err %s\n",
@@ -10820,8 +10840,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
      * Ignore error return codes because if it was already disabled via
      * hardware settings this will fail
      */
-    if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
-        (pf->hw.aq.fw_maj_ver < 4)) {
+    if (pf->flags & I40E_FLAG_STOP_FW_LLDP) {
         dev_info(&pdev->dev, "Stopping firmware LLDP agent.\n");
         i40e_aq_stop_lldp(hw, true, NULL);
     }
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -138,8 +138,8 @@ i40e_status i40e_aq_update_vsi_params(struct i40e_hw *hw,
                 struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_add_veb(struct i40e_hw *hw, u16 uplink_seid,
                 u16 downlink_seid, u8 enabled_tc,
-                bool default_port, bool enable_l2_filtering,
-                u16 *pveb_seid,
+                bool default_port, u16 *pveb_seid,
+                bool enable_stats,
                 struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_veb_parameters(struct i40e_hw *hw,
                 u16 veb_seid, u16 *switch_id, bool *floating,
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -774,37 +774,48 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
 }
 
 /**
- * i40e_force_wb - Arm hardware to do a wb on noncache aligned descriptors
+ * i40e_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
  * @vsi: the VSI we care about
- * @q_vector: the vector on which to force writeback
+ * @q_vector: the vector on which to enable writeback
  *
  **/
-void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
+                                  struct i40e_q_vector *q_vector)
 {
     u16 flags = q_vector->tx.ring[0].flags;
+    u32 val;
 
-    if (flags & I40E_TXR_FLAGS_WB_ON_ITR) {
-        u32 val;
+    if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
+        return;
 
-        if (q_vector->arm_wb_state)
-            return;
+    if (q_vector->arm_wb_state)
+        return;
 
-        if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
-            val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
-                  I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
+    if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+        val = I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK |
+              I40E_PFINT_DYN_CTLN_ITR_INDX_MASK; /* set noitr */
 
-            wr32(&vsi->back->hw,
-                 I40E_PFINT_DYN_CTLN(q_vector->v_idx +
-                                     vsi->base_vector - 1),
-                 val);
-        } else {
-            val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
-                  I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
+        wr32(&vsi->back->hw,
+             I40E_PFINT_DYN_CTLN(q_vector->v_idx + vsi->base_vector - 1),
+             val);
+    } else {
+        val = I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK |
+              I40E_PFINT_DYN_CTL0_ITR_INDX_MASK; /* set noitr */
 
-            wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
-        }
-        q_vector->arm_wb_state = true;
-    } else if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
+        wr32(&vsi->back->hw, I40E_PFINT_DYN_CTL0, val);
+    }
+    q_vector->arm_wb_state = true;
+}
+
+/**
+ * i40e_force_wb - Issue SW Interrupt so HW does a wb
+ * @vsi: the VSI we care about
+ * @q_vector: the vector on which to force writeback
+ *
+ **/
+void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
+{
+    if (vsi->back->flags & I40E_FLAG_MSIX_ENABLED) {
         u32 val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
                   I40E_PFINT_DYN_CTLN_ITR_INDX_MASK | /* set noitr */
                   I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK |
@@ -1049,7 +1060,7 @@ void i40e_clean_rx_ring(struct i40e_ring *rx_ring)
         if (rx_bi->page_dma) {
             dma_unmap_page(dev,
                            rx_bi->page_dma,
-                           PAGE_SIZE / 2,
+                           PAGE_SIZE,
                            DMA_FROM_DEVICE);
             rx_bi->page_dma = 0;
         }
@@ -1184,16 +1195,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
  * i40e_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
     u16 i = rx_ring->next_to_use;
     union i40e_rx_desc *rx_desc;
     struct i40e_rx_buffer *bi;
+    const int current_node = numa_node_id();
 
     /* do nothing if no valid netdev defined */
     if (!rx_ring->netdev || !cleaned_count)
-        return;
+        return false;
 
     while (cleaned_count--) {
         rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -1201,56 +1215,79 @@ void i40e_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
         if (bi->skb) /* desc is in use */
             goto no_buffers;
 
+        /* If we've been moved to a different NUMA node, release the
+         * page so we can get a new one on the current node.
+         */
+        if (bi->page && page_to_nid(bi->page) != current_node) {
+            dma_unmap_page(rx_ring->dev,
+                           bi->page_dma,
+                           PAGE_SIZE,
+                           DMA_FROM_DEVICE);
+            __free_page(bi->page);
+            bi->page = NULL;
+            bi->page_dma = 0;
+            rx_ring->rx_stats.realloc_count++;
+        } else if (bi->page) {
+            rx_ring->rx_stats.page_reuse_count++;
+        }
+
         if (!bi->page) {
             bi->page = alloc_page(GFP_ATOMIC);
             if (!bi->page) {
                 rx_ring->rx_stats.alloc_page_failed++;
                 goto no_buffers;
             }
+        }
 
+        if (!bi->page_dma) {
+            /* use a half page if we're re-using */
+            bi->page_offset ^= PAGE_SIZE / 2;
             bi->page_dma = dma_map_page(rx_ring->dev,
                                         bi->page,
-                                        bi->page_offset,
-                                        PAGE_SIZE / 2,
+                                        0,
+                                        PAGE_SIZE,
                                         DMA_FROM_DEVICE);
-            if (dma_mapping_error(rx_ring->dev,
-                                  bi->page_dma)) {
+            if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
                 rx_ring->rx_stats.alloc_page_failed++;
+                __free_page(bi->page);
+                bi->page = NULL;
                 bi->page_dma = 0;
+                bi->page_offset = 0;
                 goto no_buffers;
             }
-            bi->page_offset = 0;
         }
 
+        dma_sync_single_range_for_device(rx_ring->dev,
+                                         rx_ring->rx_bi[0].dma,
+                                         i * rx_ring->rx_hdr_len,
+                                         rx_ring->rx_hdr_len,
+                                         DMA_FROM_DEVICE);
         /* Refresh the desc even if buffer_addrs didn't change
          * because each write-back erases this info.
          */
-        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+        rx_desc->read.pkt_addr =
+                cpu_to_le64(bi->page_dma + bi->page_offset);
         rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
         i++;
         if (i == rx_ring->count)
             i = 0;
     }
 
+    if (rx_ring->next_to_use != i)
+        i40e_release_rx_desc(rx_ring, i);
+
+    return false;
+
 no_buffers:
     if (rx_ring->next_to_use != i)
         i40e_release_rx_desc(rx_ring, i);
+
+    /* make sure to come back via polling to try again after
+     * allocation failure
+     */
+    return true;
 }
 
 /**
  * i40e_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
     u16 i = rx_ring->next_to_use;
     union i40e_rx_desc *rx_desc;
@@ -1259,7 +1296,7 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 
     /* do nothing if no valid netdev defined */
     if (!rx_ring->netdev || !cleaned_count)
-        return;
+        return false;
 
     while (cleaned_count--) {
         rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -1267,8 +1304,10 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
         skb = bi->skb;
 
         if (!skb) {
-            skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                            rx_ring->rx_buf_len);
+            skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                              rx_ring->rx_buf_len,
+                                              GFP_ATOMIC |
+                                              __GFP_NOWARN);
             if (!skb) {
                 rx_ring->rx_stats.alloc_buff_failed++;
                 goto no_buffers;
@@ -1286,6 +1325,8 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
             if (dma_mapping_error(rx_ring->dev, bi->dma)) {
                 rx_ring->rx_stats.alloc_buff_failed++;
                 bi->dma = 0;
+                dev_kfree_skb(bi->skb);
+                bi->skb = NULL;
                 goto no_buffers;
             }
         }
@@ -1297,9 +1338,19 @@ void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
             i = 0;
     }
 
+    if (rx_ring->next_to_use != i)
+        i40e_release_rx_desc(rx_ring, i);
+
+    return false;
+
 no_buffers:
     if (rx_ring->next_to_use != i)
         i40e_release_rx_desc(rx_ring, i);
+
+    /* make sure to come back via polling to try again after
+     * allocation failure
+     */
+    return true;
 }
 
 /**
@@ -1483,18 +1534,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 {
     unsigned int total_rx_bytes = 0, total_rx_packets = 0;
     u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
     u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-    const int current_node = numa_mem_id();
     struct i40e_vsi *vsi = rx_ring->vsi;
     u16 i = rx_ring->next_to_clean;
     union i40e_rx_desc *rx_desc;
     u32 rx_error, rx_status;
+    bool failure = false;
     u8 rx_ptype;
     u64 qword;
+    u32 copysize;
 
     if (budget <= 0)
         return 0;
@@ -1505,7 +1557,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
         u16 vlan_tag;
         /* return some buffers to hardware, one at a time is too slow */
         if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-            i40e_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+            failure = failure ||
+                      i40e_alloc_rx_buffers_ps(rx_ring,
+                                               cleaned_count);
             cleaned_count = 0;
         }
@@ -1523,6 +1577,12 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
          * DD bit is set.
          */
         dma_rmb();
+        /* sync header buffer for reading */
+        dma_sync_single_range_for_cpu(rx_ring->dev,
+                                      rx_ring->rx_bi[0].dma,
+                                      i * rx_ring->rx_hdr_len,
+                                      rx_ring->rx_hdr_len,
+                                      DMA_FROM_DEVICE);
         if (i40e_rx_is_programming_status(qword)) {
             i40e_clean_programming_status(rx_ring, rx_desc);
             I40E_RX_INCREMENT(rx_ring, i);
@@ -1531,10 +1591,13 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
         rx_bi = &rx_ring->rx_bi[i];
         skb = rx_bi->skb;
         if (likely(!skb)) {
-            skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-                                            rx_ring->rx_hdr_len);
+            skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                              rx_ring->rx_hdr_len,
+                                              GFP_ATOMIC |
+                                              __GFP_NOWARN);
             if (!skb) {
                 rx_ring->rx_stats.alloc_buff_failed++;
+                failure = true;
                 break;
             }
 
@@ -1561,9 +1624,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
         rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
                    I40E_RXD_QW1_PTYPE_SHIFT;
 
-        prefetch(rx_bi->page);
+        /* sync half-page for reading */
+        dma_sync_single_range_for_cpu(rx_ring->dev,
+                                      rx_bi->page_dma,
+                                      rx_bi->page_offset,
+                                      PAGE_SIZE / 2,
+                                      DMA_FROM_DEVICE);
+        prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
         rx_bi->skb = NULL;
         cleaned_count++;
+        copysize = 0;
         if (rx_hbo || rx_sph) {
             int len;
 
@@ -1574,38 +1644,45 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
             memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
         } else if (skb->len == 0) {
             int len;
+            unsigned char *va = page_address(rx_bi->page) +
+                                rx_bi->page_offset;
 
-            len = (rx_packet_len > skb_headlen(skb) ?
-                skb_headlen(skb) : rx_packet_len);
-            memcpy(__skb_put(skb, len),
-                   rx_bi->page + rx_bi->page_offset,
-                   len);
-            rx_bi->page_offset += len;
+            len = min(rx_packet_len, rx_ring->rx_hdr_len);
+            memcpy(__skb_put(skb, len), va, len);
+            copysize = len;
             rx_packet_len -= len;
         }
         /* Get the rest of the data if this was a header split */
         if (rx_packet_len) {
-            skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-                               rx_bi->page,
-                               rx_bi->page_offset,
-                               rx_packet_len);
-
-            skb->len += rx_packet_len;
-            skb->data_len += rx_packet_len;
-            skb->truesize += rx_packet_len;
-
-            if ((page_count(rx_bi->page) == 1) &&
-                (page_to_nid(rx_bi->page) == current_node))
-                get_page(rx_bi->page);
-            else
+            skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+                            rx_bi->page,
+                            rx_bi->page_offset + copysize,
+                            rx_packet_len, I40E_RXBUFFER_2048);
+
+            get_page(rx_bi->page);
+            /* switch to the other half-page here; the allocation
+             * code programs the right addr into HW. If we haven't
+             * used this half-page, the address won't be changed,
+             * and HW can just use it next time through.
+             */
+            rx_bi->page_offset ^= PAGE_SIZE / 2;
+            /* If the page count is more than 2, then both halves
+             * of the page are used and we need to free it. Do it
+             * here instead of in the alloc code. Otherwise one
+             * of the half-pages might be released between now and
+             * then, and we wouldn't know which one to use.
+             */
+            if (page_count(rx_bi->page) > 2) {
+                dma_unmap_page(rx_ring->dev,
+                               rx_bi->page_dma,
+                               PAGE_SIZE,
+                               DMA_FROM_DEVICE);
+                __free_page(rx_bi->page);
                 rx_bi->page = NULL;
-
-            dma_unmap_page(rx_ring->dev,
-                           rx_bi->page_dma,
-                           PAGE_SIZE / 2,
-                           DMA_FROM_DEVICE);
-            rx_bi->page_dma = 0;
+                rx_bi->page_dma = 0;
+                rx_ring->rx_stats.realloc_count++;
+            }
         }
         I40E_RX_INCREMENT(rx_ring, i);
@@ -1664,7 +1741,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
     rx_ring->q_vector->rx.total_packets += total_rx_packets;
     rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-    return total_rx_packets;
+    return failure ? budget : total_rx_packets;
 }
 
 /**
@@ -1682,6 +1759,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
     union i40e_rx_desc *rx_desc;
     u32 rx_error, rx_status;
     u16 rx_packet_len;
+    bool failure = false;
     u8 rx_ptype;
     u64 qword;
     u16 i;
@@ -1692,7 +1770,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
         u16 vlan_tag;
         /* return some buffers to hardware, one at a time is too slow */
         if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-            i40e_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+            failure = failure ||
+                      i40e_alloc_rx_buffers_1buf(rx_ring,
+                                                 cleaned_count);
             cleaned_count = 0;
         }
@@ -1791,7 +1871,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
     rx_ring->q_vector->rx.total_packets += total_rx_packets;
     rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-    return total_rx_packets;
+    return failure ? budget : total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
@@ -1799,7 +1879,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
     u32 val;
 
     val = I40E_PFINT_DYN_CTLN_INTENA_MASK |
-          I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+          /* Don't clear PBA because that can cause lost interrupts that
+           * came in while we were cleaning/polling
+           */
           (type << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT) |
           (itr << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT);
 
@@ -1914,7 +1996,8 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
      * budget and be more aggressive about cleaning up the Tx descriptors.
      */
    i40e_for_each_ring(ring, q_vector->tx) {
-        clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+        clean_complete = clean_complete &&
+                         i40e_clean_tx_irq(ring, vsi->work_limit);
         arm_wb = arm_wb || ring->arm_wb;
         ring->arm_wb = false;
     }
@@ -1938,7 +2021,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         work_done += cleaned;
         /* if we didn't clean as many as budgeted, we must be done */
-        clean_complete &= (budget_per_ring != cleaned);
+        clean_complete = clean_complete && (budget_per_ring > cleaned);
     }
 
     /* If work not completed, return budget and polling will return */
@@ -1946,7 +2029,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
 tx_only:
         if (arm_wb) {
             q_vector->tx.ring[0].tx_stats.tx_force_wb++;
-            i40e_force_wb(vsi, q_vector);
+            i40e_enable_wb_on_itr(vsi, q_vector);
         }
         return budget;
     }
@@ -1972,7 +2055,7 @@ int i40e_napi_poll(struct napi_struct *napi, int budget)
         qval = rd32(hw, I40E_QINT_TQCTL(0)) |
                I40E_QINT_TQCTL_CAUSE_ENA_MASK;
         wr32(hw, I40E_QINT_TQCTL(0), qval);
-        i40e_irq_dynamic_enable_icr0(vsi->back);
+        i40e_irq_dynamic_enable_icr0(vsi->back, false);
     }
     return 0;
 }
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -209,6 +209,8 @@ struct i40e_rx_queue_stats {
     u64 non_eop_descs;
     u64 alloc_page_failed;
     u64 alloc_buff_failed;
+    u64 page_reuse_count;
+    u64 realloc_count;
 };
 
 enum i40e_ring_state_t {
@@ -254,7 +256,6 @@ struct i40e_ring {
 #define I40E_RX_DTYPE_NO_SPLIT      0
 #define I40E_RX_DTYPE_HEADER_SPLIT  1
 #define I40E_RX_DTYPE_SPLIT_ALWAYS  2
-    u8 hsplit;
 #define I40E_RX_SPLIT_L2      0x1
 #define I40E_RX_SPLIT_IP      0x2
 #define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -316,8 +317,8 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
     for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40e_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
 void i40e_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40e_lan_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40e_clean_tx_ring(struct i40e_ring *tx_ring);
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -980,7 +980,7 @@ int i40e_alloc_vfs(struct i40e_pf *pf, u16 num_alloc_vfs)
     i40e_free_vfs(pf);
 err_iov:
     /* Re-enable interrupt 0. */
-    i40e_irq_dynamic_enable_icr0(pf);
+    i40e_irq_dynamic_enable_icr0(pf, false);
     return ret;
 }
...
/******************************************************************************* /*******************************************************************************
* *
* Intel Ethernet Controller XL710 Family Linux Virtual Function Driver * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
* Copyright(c) 2013 - 2014 Intel Corporation. * Copyright(c) 2013 - 2016 Intel Corporation.
* *
* This program is free software; you can redistribute it and/or modify it * This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License, * under the terms and conditions of the GNU General Public License,
...@@ -292,40 +292,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget) ...@@ -292,40 +292,49 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
} }
/** /**
* i40evf_force_wb -Arm hardware to do a wb on noncache aligned descriptors * i40evf_enable_wb_on_itr - Arm hardware to do a wb, interrupts are not enabled
* @vsi: the VSI we care about * @vsi: the VSI we care about
* @q_vector: the vector on which to force writeback * @q_vector: the vector on which to enable writeback
* *
**/ **/
static void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector) static void i40e_enable_wb_on_itr(struct i40e_vsi *vsi,
struct i40e_q_vector *q_vector)
{ {
u16 flags = q_vector->tx.ring[0].flags; u16 flags = q_vector->tx.ring[0].flags;
u32 val;
if (flags & I40E_TXR_FLAGS_WB_ON_ITR) { if (!(flags & I40E_TXR_FLAGS_WB_ON_ITR))
u32 val; return;
if (q_vector->arm_wb_state) if (q_vector->arm_wb_state)
return; return;
val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK | val = I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK |
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK; /* set noitr */
wr32(&vsi->back->hw, wr32(&vsi->back->hw,
I40E_VFINT_DYN_CTLN1(q_vector->v_idx + I40E_VFINT_DYN_CTLN1(q_vector->v_idx +
vsi->base_vector - 1), vsi->base_vector - 1), val);
val); q_vector->arm_wb_state = true;
q_vector->arm_wb_state = true; }
} else {
u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK | /**
I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */ * i40evf_force_wb - Issue SW Interrupt so HW does a wb
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK | * @vsi: the VSI we care about
I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK; * @q_vector: the vector on which to force writeback
/* allow 00 to be written to the index */ *
**/
wr32(&vsi->back->hw, void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector)
I40E_VFINT_DYN_CTLN1(q_vector->v_idx + {
vsi->base_vector - 1), val); u32 val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
} I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK | /* set noitr */
I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK
/* allow 00 to be written to the index */;
wr32(&vsi->back->hw,
I40E_VFINT_DYN_CTLN1(q_vector->v_idx + vsi->base_vector - 1),
val);
} }
/** /**
...@@ -523,7 +532,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring) ...@@ -523,7 +532,7 @@ void i40evf_clean_rx_ring(struct i40e_ring *rx_ring)
if (rx_bi->page_dma) { if (rx_bi->page_dma) {
dma_unmap_page(dev, dma_unmap_page(dev,
rx_bi->page_dma, rx_bi->page_dma,
PAGE_SIZE / 2, PAGE_SIZE,
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
rx_bi->page_dma = 0; rx_bi->page_dma = 0;
} }
@@ -658,16 +667,19 @@ static inline void i40e_release_rx_desc(struct i40e_ring *rx_ring, u32 val)
  * i40evf_alloc_rx_buffers_ps - Replace used receive buffers; packet split
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 	u16 i = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
 	struct i40e_rx_buffer *bi;
+	const int current_node = numa_node_id();
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
-		return;
+		return false;
 
 	while (cleaned_count--) {
 		rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -675,56 +687,79 @@ void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rx_ring, u16 cleaned_count)
 		if (bi->skb) /* desc is in use */
 			goto no_buffers;
 
+		/* If we've been moved to a different NUMA node, release the
+		 * page so we can get a new one on the current node.
+		 */
+		if (bi->page && page_to_nid(bi->page) != current_node) {
+			dma_unmap_page(rx_ring->dev,
+				       bi->page_dma,
+				       PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+			__free_page(bi->page);
+			bi->page = NULL;
+			bi->page_dma = 0;
+			rx_ring->rx_stats.realloc_count++;
+		} else if (bi->page) {
+			rx_ring->rx_stats.page_reuse_count++;
+		}
+
 		if (!bi->page) {
 			bi->page = alloc_page(GFP_ATOMIC);
 			if (!bi->page) {
 				rx_ring->rx_stats.alloc_page_failed++;
 				goto no_buffers;
 			}
-		}
-
-		if (!bi->page_dma) {
-			/* use a half page if we're re-using */
-			bi->page_offset ^= PAGE_SIZE / 2;
 			bi->page_dma = dma_map_page(rx_ring->dev,
 						    bi->page,
-						    bi->page_offset,
-						    PAGE_SIZE / 2,
+						    0,
+						    PAGE_SIZE,
 						    DMA_FROM_DEVICE);
-			if (dma_mapping_error(rx_ring->dev,
-					      bi->page_dma)) {
+			if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
 				rx_ring->rx_stats.alloc_page_failed++;
+				__free_page(bi->page);
+				bi->page = NULL;
 				bi->page_dma = 0;
+				bi->page_offset = 0;
 				goto no_buffers;
 			}
+			bi->page_offset = 0;
 		}
 
-		dma_sync_single_range_for_device(rx_ring->dev,
-						 rx_ring->rx_bi[0].dma,
-						 i * rx_ring->rx_hdr_len,
-						 rx_ring->rx_hdr_len,
-						 DMA_FROM_DEVICE);
-
 		/* Refresh the desc even if buffer_addrs didn't change
 		 * because each write-back erases this info.
 		 */
-		rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
+		rx_desc->read.pkt_addr =
+			cpu_to_le64(bi->page_dma + bi->page_offset);
 		rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
 		i++;
 		if (i == rx_ring->count)
 			i = 0;
 	}
 
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+
+	return false;
+
 no_buffers:
 	if (rx_ring->next_to_use != i)
 		i40e_release_rx_desc(rx_ring, i);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
 }
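Note: the refill now maps each page once, at full size, and steers the NIC at either half through bi->page_offset; the XOR in the clean routine further down flips between offset 0 and PAGE_SIZE/2 on every use. A toy model of that half-page ping-pong, outside the kernel (DEMO_PAGE_SIZE assumed to be 4 KiB):

	#include <stdio.h>

	#define DEMO_PAGE_SIZE 4096u

	int main(void)
	{
		unsigned int offset = 0;
		int refill;

		/* Each pass hands one half-page to the stack and flips to
		 * the other half for the next receive, so a single mapped
		 * page keeps being reused.
		 */
		for (refill = 0; refill < 4; refill++) {
			printf("refill %d uses bytes [%u, %u)\n",
			       refill, offset, offset + DEMO_PAGE_SIZE / 2);
			offset ^= DEMO_PAGE_SIZE / 2;	/* 0 <-> 2048 */
		}
		return 0;
	}

The page is only unmapped and freed once page_count() exceeds two, i.e. both halves have been handed up while the driver still holds its own reference; that check lives in the clean routine below.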
 
 /**
  * i40evf_alloc_rx_buffers_1buf - Replace used receive buffers; single buffer
  * @rx_ring: ring to place buffers on
  * @cleaned_count: number of buffers to replace
+ *
+ * Returns true if any errors on allocation
  **/
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 {
 	u16 i = rx_ring->next_to_use;
 	union i40e_rx_desc *rx_desc;
@@ -733,7 +768,7 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 
 	/* do nothing if no valid netdev defined */
 	if (!rx_ring->netdev || !cleaned_count)
-		return;
+		return false;
 
 	while (cleaned_count--) {
 		rx_desc = I40E_RX_DESC(rx_ring, i);
@@ -741,8 +776,10 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 		skb = bi->skb;
 
 		if (!skb) {
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							rx_ring->rx_buf_len);
+			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+							  rx_ring->rx_buf_len,
+							  GFP_ATOMIC |
+							  __GFP_NOWARN);
 			if (!skb) {
 				rx_ring->rx_stats.alloc_buff_failed++;
 				goto no_buffers;
@@ -760,6 +797,8 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 			if (dma_mapping_error(rx_ring->dev, bi->dma)) {
 				rx_ring->rx_stats.alloc_buff_failed++;
 				bi->dma = 0;
+				dev_kfree_skb(bi->skb);
+				bi->skb = NULL;
 				goto no_buffers;
 			}
 		}
@@ -771,9 +810,19 @@ void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rx_ring, u16 cleaned_count)
 			i = 0;
 	}
 
+	if (rx_ring->next_to_use != i)
+		i40e_release_rx_desc(rx_ring, i);
+
+	return false;
+
 no_buffers:
 	if (rx_ring->next_to_use != i)
 		i40e_release_rx_desc(rx_ring, i);
+
+	/* make sure to come back via polling to try again after
+	 * allocation failure
+	 */
+	return true;
 }
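Note: the added dev_kfree_skb()/bi->skb = NULL unwind on a DMA-mapping failure matters because the buffer-info array persists across refill attempts; without it a later pass would see a non-NULL skb with no valid mapping and program a bogus descriptor. A compact userspace sketch of the same allocate-then-map unwind pattern; every demo_* name here is an invented stand-in, not driver code:

	#include <stdbool.h>
	#include <stdlib.h>
	#include <stdio.h>

	struct demo_buf {
		void *skb;		/* stands in for bi->skb */
		unsigned long dma;	/* stands in for bi->dma */
	};

	/* Stand-in for dma_map_single(); fails when told to. */
	static unsigned long demo_map(void *p, bool fail)
	{
		return fail ? 0 : (unsigned long)p;
	}

	/* Returns true on failure, mirroring the new driver convention. */
	static bool demo_refill(struct demo_buf *bi, bool inject_map_failure)
	{
		if (!bi->skb) {
			bi->skb = malloc(2048);
			if (!bi->skb)
				return true;
		}
		if (!bi->dma) {
			bi->dma = demo_map(bi->skb, inject_map_failure);
			if (!bi->dma) {
				free(bi->skb); /* unwind so retry starts clean */
				bi->skb = NULL;
				return true;
			}
		}
		return false;
	}

	int main(void)
	{
		struct demo_buf bi = { 0 };

		printf("first try (map fails): %s\n",
		       demo_refill(&bi, true) ? "retry later" : "ok");
		printf("second try: %s\n",
		       demo_refill(&bi, false) ? "retry later" : "ok");
		free(bi.skb);
		return 0;
	}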
 
 /**
@@ -956,18 +1005,19 @@ static inline void i40e_rx_hash(struct i40e_ring *ring,
  *
  * Returns true if there's any budget left (e.g. the clean is finished)
  **/
-static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
+static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, const int budget)
 {
 	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
 	u16 rx_packet_len, rx_header_len, rx_sph, rx_hbo;
 	u16 cleaned_count = I40E_DESC_UNUSED(rx_ring);
-	const int current_node = numa_mem_id();
 	struct i40e_vsi *vsi = rx_ring->vsi;
 	u16 i = rx_ring->next_to_clean;
 	union i40e_rx_desc *rx_desc;
 	u32 rx_error, rx_status;
+	bool failure = false;
 	u8 rx_ptype;
 	u64 qword;
+	u32 copysize;
 
 	do {
 		struct i40e_rx_buffer *rx_bi;
@@ -975,7 +1025,9 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		u16 vlan_tag;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			i40evf_alloc_rx_buffers_ps(rx_ring, cleaned_count);
+			failure = failure ||
+				  i40evf_alloc_rx_buffers_ps(rx_ring,
+							     cleaned_count);
 			cleaned_count = 0;
 		}
@@ -993,13 +1045,22 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		 * DD bit is set.
 		 */
 		dma_rmb();
+		/* sync header buffer for reading */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_ring->rx_bi[0].dma,
+					      i * rx_ring->rx_hdr_len,
+					      rx_ring->rx_hdr_len,
+					      DMA_FROM_DEVICE);
 		rx_bi = &rx_ring->rx_bi[i];
 		skb = rx_bi->skb;
 		if (likely(!skb)) {
-			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
-							rx_ring->rx_hdr_len);
+			skb = __netdev_alloc_skb_ip_align(rx_ring->netdev,
+							  rx_ring->rx_hdr_len,
+							  GFP_ATOMIC |
+							  __GFP_NOWARN);
 			if (!skb) {
 				rx_ring->rx_stats.alloc_buff_failed++;
+				failure = true;
 				break;
 			}
@@ -1026,9 +1087,16 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 		rx_ptype = (qword & I40E_RXD_QW1_PTYPE_MASK) >>
 			   I40E_RXD_QW1_PTYPE_SHIFT;
 
-		prefetch(rx_bi->page);
+		/* sync half-page for reading */
+		dma_sync_single_range_for_cpu(rx_ring->dev,
+					      rx_bi->page_dma,
+					      rx_bi->page_offset,
+					      PAGE_SIZE / 2,
+					      DMA_FROM_DEVICE);
+		prefetch(page_address(rx_bi->page) + rx_bi->page_offset);
 		rx_bi->skb = NULL;
 		cleaned_count++;
+		copysize = 0;
 		if (rx_hbo || rx_sph) {
 			int len;
@@ -1039,38 +1107,45 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 			memcpy(__skb_put(skb, len), rx_bi->hdr_buf, len);
 		} else if (skb->len == 0) {
 			int len;
+			unsigned char *va = page_address(rx_bi->page) +
+					    rx_bi->page_offset;
 
-			len = (rx_packet_len > skb_headlen(skb) ?
-			       skb_headlen(skb) : rx_packet_len);
-			memcpy(__skb_put(skb, len),
-			       rx_bi->page + rx_bi->page_offset,
-			       len);
-			rx_bi->page_offset += len;
+			len = min(rx_packet_len, rx_ring->rx_hdr_len);
+			memcpy(__skb_put(skb, len), va, len);
+			copysize = len;
 			rx_packet_len -= len;
 		}
 		/* Get the rest of the data if this was a header split */
 		if (rx_packet_len) {
-			skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
-					   rx_bi->page,
-					   rx_bi->page_offset,
-					   rx_packet_len);
-
-			skb->len += rx_packet_len;
-			skb->data_len += rx_packet_len;
-			skb->truesize += rx_packet_len;
-
-			if ((page_count(rx_bi->page) == 1) &&
-			    (page_to_nid(rx_bi->page) == current_node))
-				get_page(rx_bi->page);
-			else
-				rx_bi->page = NULL;
-
-			dma_unmap_page(rx_ring->dev,
-				       rx_bi->page_dma,
-				       PAGE_SIZE / 2,
-				       DMA_FROM_DEVICE);
-			rx_bi->page_dma = 0;
+			skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
+					rx_bi->page,
+					rx_bi->page_offset + copysize,
+					rx_packet_len, I40E_RXBUFFER_2048);
+
+			get_page(rx_bi->page);
+
+			/* switch to the other half-page here; the allocation
+			 * code programs the right addr into HW. If we haven't
+			 * used this half-page, the address won't be changed,
+			 * and HW can just use it next time through.
+			 */
+			rx_bi->page_offset ^= PAGE_SIZE / 2;
+
+			/* If the page count is more than 2, then both halves
+			 * of the page are used and we need to free it. Do it
+			 * here instead of in the alloc code. Otherwise one
+			 * of the half-pages might be released between now and
+			 * then, and we wouldn't know which one to use.
+			 */
+			if (page_count(rx_bi->page) > 2) {
+				dma_unmap_page(rx_ring->dev,
					       rx_bi->page_dma,
					       PAGE_SIZE,
					       DMA_FROM_DEVICE);
+				__free_page(rx_bi->page);
+				rx_bi->page = NULL;
+				rx_bi->page_dma = 0;
+				rx_ring->rx_stats.realloc_count++;
+			}
 		}
 		I40E_RX_INCREMENT(rx_ring, i);
@@ -1122,7 +1197,7 @@ static int i40e_clean_rx_irq_ps(struct i40e_ring *rx_ring, int budget)
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-	return total_rx_packets;
+	return failure ? budget : total_rx_packets;
 }
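Note: returning the full budget on failure exploits the NAPI contract: a poll routine that consumes its whole budget stays on the poll list and is called again, so a failed buffer refill is retried on the next softirq pass even if no new packets arrive in the meantime. Sketched as a standalone helper, a model of the contract rather than driver code:

	#include <stdbool.h>

	/* Contract with napi_poll: a return equal to the full budget means
	 * "not done, poll me again"; anything smaller completes the poll.
	 * Reporting budget on allocation failure keeps the queue alive
	 * under memory pressure.
	 */
	static int demo_clean_rx(int budget, int packets_cleaned,
				 bool alloc_failed)
	{
		return alloc_failed ? budget : packets_cleaned;
	}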
 
 /**
@@ -1140,6 +1215,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	union i40e_rx_desc *rx_desc;
 	u32 rx_error, rx_status;
 	u16 rx_packet_len;
+	bool failure = false;
 	u8 rx_ptype;
 	u64 qword;
 	u16 i;
@@ -1150,7 +1226,9 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 		u16 vlan_tag;
 		/* return some buffers to hardware, one at a time is too slow */
 		if (cleaned_count >= I40E_RX_BUFFER_WRITE) {
-			i40evf_alloc_rx_buffers_1buf(rx_ring, cleaned_count);
+			failure = failure ||
+				  i40evf_alloc_rx_buffers_1buf(rx_ring,
+							       cleaned_count);
 			cleaned_count = 0;
 		}
@@ -1231,7 +1309,7 @@ static int i40e_clean_rx_irq_1buf(struct i40e_ring *rx_ring, int budget)
 	rx_ring->q_vector->rx.total_packets += total_rx_packets;
 	rx_ring->q_vector->rx.total_bytes += total_rx_bytes;
 
-	return total_rx_packets;
+	return failure ? budget : total_rx_packets;
 }
 
 static u32 i40e_buildreg_itr(const int type, const u16 itr)
@@ -1239,7 +1317,9 @@ static u32 i40e_buildreg_itr(const int type, const u16 itr)
 	u32 val;
 
 	val = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
-	      I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+	      /* Don't clear PBA because that can cause lost interrupts that
+	       * came in while we were cleaning/polling
+	       */
 	      (type << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT) |
 	      (itr << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT);
@@ -1352,7 +1432,8 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 	 * budget and be more aggressive about cleaning up the Tx descriptors.
 	 */
 	i40e_for_each_ring(ring, q_vector->tx) {
-		clean_complete &= i40e_clean_tx_irq(ring, vsi->work_limit);
+		clean_complete = clean_complete &&
+				 i40e_clean_tx_irq(ring, vsi->work_limit);
 		arm_wb = arm_wb || ring->arm_wb;
 		ring->arm_wb = false;
 	}
@@ -1376,7 +1457,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 		work_done += cleaned;
 		/* if we didn't clean as many as budgeted, we must be done */
-		clean_complete &= (budget_per_ring != cleaned);
+		clean_complete = clean_complete && (budget_per_ring > cleaned);
 	}
 
 	/* If work not completed, return budget and polling will return */
@@ -1384,7 +1465,7 @@ int i40evf_napi_poll(struct napi_struct *napi, int budget)
 tx_only:
 	if (arm_wb) {
 		q_vector->tx.ring[0].tx_stats.tx_force_wb++;
-		i40evf_force_wb(vsi, q_vector);
+		i40e_enable_wb_on_itr(vsi, q_vector);
 	}
 	return budget;
 }
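Note: the &=-to-&& conversions above are not cosmetic. clean_complete is a truth value, and a bitwise AND preserves truth only while every operand is exactly 0 or 1; a routine that reports success with any other non-zero value would silently flip the accumulated result to false. A short demonstration of the trap:

	#include <stdio.h>

	int main(void)
	{
		int clean_complete = 1;	/* logically true */

		clean_complete &= 2;	/* bitwise: 1 & 2 == 0, truth lost */
		printf("bitwise: %d\n", clean_complete);

		clean_complete = 1;
		clean_complete = clean_complete && 2;	/* logical: stays 1 */
		printf("logical: %d\n", clean_complete);
		return 0;
	}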
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2014 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -208,6 +208,8 @@ struct i40e_rx_queue_stats {
 	u64 non_eop_descs;
 	u64 alloc_page_failed;
 	u64 alloc_buff_failed;
+	u64 page_reuse_count;
+	u64 realloc_count;
 };
 
 enum i40e_ring_state_t {
@@ -253,7 +255,6 @@ struct i40e_ring {
 #define I40E_RX_DTYPE_NO_SPLIT 0
 #define I40E_RX_DTYPE_HEADER_SPLIT 1
 #define I40E_RX_DTYPE_SPLIT_ALWAYS 2
-	u8 hsplit;
 #define I40E_RX_SPLIT_L2 0x1
 #define I40E_RX_SPLIT_IP 0x2
 #define I40E_RX_SPLIT_TCP_UDP 0x4
@@ -313,8 +314,8 @@ struct i40e_ring_container {
 #define i40e_for_each_ring(pos, head) \
 	for (pos = (head).ring; pos != NULL; pos = pos->next)
 
-void i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
-void i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_ps(struct i40e_ring *rxr, u16 cleaned_count);
+bool i40evf_alloc_rx_buffers_1buf(struct i40e_ring *rxr, u16 cleaned_count);
 void i40evf_alloc_rx_headers(struct i40e_ring *rxr);
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
 void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
@@ -324,6 +325,7 @@ int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
 void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
 int i40evf_napi_poll(struct napi_struct *napi, int budget);
+void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
 u32 i40evf_get_tx_pending(struct i40e_ring *ring);
 
 /**
...
 /*******************************************************************************
  *
  * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
- * Copyright(c) 2013 - 2015 Intel Corporation.
+ * Copyright(c) 2013 - 2016 Intel Corporation.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -32,13 +32,13 @@ static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
-	"Intel(R) XL710/X710 Virtual Function Network Driver";
+	"Intel(R) 40-10 Gigabit Virtual Function Network Driver";
 
 #define DRV_KERN "-k"
 
 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 8
+#define DRV_VERSION_BUILD 9
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
 	     __stringify(DRV_VERSION_MINOR) "." \
 	     __stringify(DRV_VERSION_BUILD) \
...