Commit dba7bf03 authored by David S. Miller

Merge branch '1GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
1GbE Intel Wired LAN Driver Updates 2019-10-29

This series contains updates to e1000e, igb, ixgbe and i40e drivers.

Sasha adds support for the Intel client platforms Comet Lake and Tiger Lake
to the e1000e driver.  He also fixes a recently introduced compiler warning,
seen when CONFIG_PM_SLEEP is not defined, by wrapping the code that requires
that kernel configuration option in #ifdef CONFIG_PM_SLEEP (a sketch of the
pattern follows the commit header below).

Alex fixes a potential race condition between network configuration and
power management in e1000e, similar to a past issue in the igb driver
(also sketched below).  He also provides a bit of code cleanup, since the
driver no longer checks for __E1000_DOWN.

Josh Hunt adds UDP segmentation offload (USO) support for igb, ixgbe and i40e (sketched below).
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 84e93d99 203bddfd
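[Editor's note] The CONFIG_PM_SLEEP fix in the e1000e hunks below amounts to compiling the S0ix helpers out together with their only callers. A minimal, hedged sketch of the pattern with hypothetical example_* names (not the e1000e code itself):

#include <linux/device.h>
#include <linux/pm.h>

#ifdef CONFIG_PM_SLEEP
/* Only referenced from the sleep callbacks below, so it must live inside
 * the same #ifdef or builds without CONFIG_PM_SLEEP warn that it is
 * defined but not used.
 */
static void example_low_power_entry(struct device *dev)
{
	/* device-specific low-power programming would go here */
	dev_dbg(dev, "entering low-power state\n");
}

static int example_pm_suspend(struct device *dev)
{
	example_low_power_entry(dev);
	return 0;
}

static int example_pm_resume(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_PM_SLEEP */

/* SET_SYSTEM_SLEEP_PM_OPS() expands to nothing without CONFIG_PM_SLEEP,
 * which is exactly why the callbacks above would otherwise be "unused".
 */
static const struct dev_pm_ops example_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(example_pm_suspend, example_pm_resume)
};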
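[Editor's note] The suspend-vs-close race fix keys teardown off the RTNL lock and netif_device_present() rather than a driver-private __E1000_DOWN bit. A hedged sketch of that pattern, again with hypothetical example_* names and netif_tx_disable() standing in for the driver-specific teardown:

#include <linux/netdevice.h>
#include <linux/rtnetlink.h>

/* Suspend-side half: detach under RTNL and remember whether the device was
 * still attached, so a concurrent ndo_stop cannot tear the interface down
 * a second time.
 */
static int example_pm_freeze(struct net_device *netdev)
{
	bool present;

	rtnl_lock();
	present = netif_device_present(netdev);
	netif_device_detach(netdev);		/* close() now sees !present */

	if (present && netif_running(netdev)) {
		/* driver-specific teardown would go here */
		netif_tx_disable(netdev);
	}
	rtnl_unlock();

	return 0;
}

/* ndo_stop half: it already runs under RTNL, so netif_device_present()
 * tells it whether the PM path has taken ownership of the teardown.
 */
static int example_close(struct net_device *netdev)
{
	if (netif_device_present(netdev)) {
		/* normal down path */
		netif_tx_disable(netdev);
	}

	return 0;
}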
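[Editor's note] The UDP segmentation offload changes follow one pattern in all three drivers: advertise NETIF_F_GSO_UDP_L4 and, in the TSO path, branch on SKB_GSO_UDP_L4 to size the segment header from the fixed UDP header instead of the TCP data offset. A minimal sketch with a hypothetical helper (not the drivers' actual code):

#include <linux/skbuff.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <net/checksum.h>

/* Called from a hypothetical TSO/USO descriptor-setup path after probe has
 * advertised the feature (netdev->features |= NETIF_F_GSO_UDP_L4).
 * Returns the length of the headers preceding the segmented payload and
 * folds the payload length out of the inner L4 checksum, leaving the
 * header-only pseudo checksum the hardware expects.
 */
static u32 example_gso_hdr_len(struct sk_buff *skb)
{
	u32 l4_offset = skb_checksum_start(skb) - skb->data;
	__wsum paylen = (__force __wsum)htonl(skb->len - l4_offset);

	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		/* UDP has a fixed 8-byte header */
		csum_replace_by_diff(&udp_hdr(skb)->check, paylen);
		return sizeof(struct udphdr) + l4_offset;
	}

	/* TCP header length comes from the data-offset field */
	csum_replace_by_diff(&tcp_hdr(skb)->check, paylen);
	return (tcp_hdr(skb)->doff * 4) + l4_offset;
}

In the real drivers the same branch also selects the UDP vs. TCP L4T type for the advanced context descriptor, as the igb and ixgbe hunks below show.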
@@ -894,8 +894,9 @@ static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
 	case e1000_pch2lan:
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
-		/* fall through */
 	case e1000_pch_cnp:
+		/* fall through */
+	case e1000_pch_tgp:
 		mask |= BIT(18);
 		break;
 	default:
@@ -1559,6 +1560,7 @@ static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
 	switch (hw->mac.type) {
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		fext_nvm11 = er32(FEXTNVM11);
 		fext_nvm11 &= ~E1000_FEXTNVM11_DISABLE_MULR_FIX;
 		ew32(FEXTNVM11, fext_nvm11);
......
@@ -86,6 +86,17 @@ struct e1000_hw;
 #define E1000_DEV_ID_PCH_ICP_I219_V8		0x15E0
 #define E1000_DEV_ID_PCH_ICP_I219_LM9		0x15E1
 #define E1000_DEV_ID_PCH_ICP_I219_V9		0x15E2
+#define E1000_DEV_ID_PCH_CMP_I219_LM10		0x0D4E
+#define E1000_DEV_ID_PCH_CMP_I219_V10		0x0D4F
+#define E1000_DEV_ID_PCH_CMP_I219_LM11		0x0D4C
+#define E1000_DEV_ID_PCH_CMP_I219_V11		0x0D4D
+#define E1000_DEV_ID_PCH_CMP_I219_LM12		0x0D53
+#define E1000_DEV_ID_PCH_CMP_I219_V12		0x0D55
+#define E1000_DEV_ID_PCH_TGP_I219_LM13		0x15FB
+#define E1000_DEV_ID_PCH_TGP_I219_V13		0x15FC
+#define E1000_DEV_ID_PCH_TGP_I219_LM14		0x15F9
+#define E1000_DEV_ID_PCH_TGP_I219_V14		0x15FA
+#define E1000_DEV_ID_PCH_TGP_I219_LM15		0x15F4
 #define E1000_REVISION_4	4
@@ -109,6 +120,7 @@ enum e1000_mac_type {
 	e1000_pch_lpt,
 	e1000_pch_spt,
 	e1000_pch_cnp,
+	e1000_pch_tgp,
 };

 enum e1000_media_type {
......
@@ -316,6 +316,7 @@ static s32 e1000_init_phy_workarounds_pchlan(struct e1000_hw *hw)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		if (e1000_phy_is_accessible_pchlan(hw))
 			break;
@@ -458,6 +459,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		/* In case the PHY needs to be in mdio slow mode,
 		 * set slow mode and try to get the PHY id again.
 		 */
@@ -700,6 +702,7 @@ static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 	case e1000_pchlan:
 		/* check management mode */
 		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
@@ -1638,6 +1641,7 @@ static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		rc = e1000_init_phy_params_pchlan(hw);
 		break;
 	default:
@@ -2090,6 +2094,7 @@ static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
 		break;
 	default:
@@ -3127,6 +3132,7 @@ static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
 	switch (hw->mac.type) {
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		bank1_offset = nvm->flash_bank_size;
 		act_offset = E1000_ICH_NVM_SIG_WORD;
@@ -4070,6 +4076,7 @@ static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		word = NVM_COMPAT;
 		valid_csum_mask = NVM_COMPAT_VALID_CSUM;
 		break;
......
@@ -3538,6 +3538,7 @@ s32 e1000e_get_base_timinca(struct e1000_adapter *adapter, u32 *timinca)
 		adapter->cc.shift = shift;
 		break;
 	case e1000_pch_cnp:
+	case e1000_pch_tgp:
 		if (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI) {
 			/* Stable 24MHz frequency */
 			incperiod = INCPERIOD_24MHZ;
@@ -4049,6 +4050,8 @@ void e1000e_reset(struct e1000_adapter *adapter)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+		/* fall-through */
+	case e1000_pch_tgp:
 		fc->refresh_time = 0xFFFF;
 		fc->pause_time = 0xFFFF;
@@ -4715,12 +4718,12 @@ int e1000e_close(struct net_device *netdev)
 	pm_runtime_get_sync(&pdev->dev);

-	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+	if (netif_device_present(netdev)) {
 		e1000e_down(adapter, true);
 		e1000_free_irq(adapter);

 		/* Link status message must follow this format */
-		pr_info("%s NIC Link is Down\n", adapter->netdev->name);
+		pr_info("%s NIC Link is Down\n", netdev->name);
 	}

 	napi_disable(&adapter->napi);
@@ -6294,6 +6297,7 @@ static void e1000e_flush_lpic(struct pci_dev *pdev)
 	pm_runtime_put_sync(netdev->dev.parent);
 }

+#ifdef CONFIG_PM_SLEEP
 /* S0ix implementation */
 static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter)
 {
@@ -6461,15 +6465,20 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter)
 	mac_data &= ~E1000_CTRL_EXT_FORCE_SMBUS;
 	ew32(CTRL_EXT, mac_data);
 }
+#endif /* CONFIG_PM_SLEEP */

 static int e1000e_pm_freeze(struct device *dev)
 {
 	struct net_device *netdev = dev_get_drvdata(dev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
+	bool present;
+
+	rtnl_lock();
+	present = netif_device_present(netdev);
 	netif_device_detach(netdev);

-	if (netif_running(netdev)) {
+	if (present && netif_running(netdev)) {
 		int count = E1000_CHECK_RESET_COUNT;

 		while (test_bit(__E1000_RESETTING, &adapter->state) && count--)
@@ -6481,6 +6490,8 @@ static int e1000e_pm_freeze(struct device *dev)
 		e1000e_down(adapter, false);
 		e1000_free_irq(adapter);
 	}
+	rtnl_unlock();
+
 	e1000e_reset_interrupt_capability(adapter);

 	/* Allow time for pending master requests to run */
@@ -6728,6 +6739,30 @@ static void e1000e_disable_aspm_locked(struct pci_dev *pdev, u16 state)
 	__e1000e_disable_aspm(pdev, state, 1);
 }

+static int e1000e_pm_thaw(struct device *dev)
+{
+	struct net_device *netdev = dev_get_drvdata(dev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int rc = 0;
+
+	e1000e_set_interrupt_capability(adapter);
+
+	rtnl_lock();
+	if (netif_running(netdev)) {
+		rc = e1000_request_irq(adapter);
+		if (rc)
+			goto err_irq;
+
+		e1000e_up(adapter);
+	}
+
+	netif_device_attach(netdev);
+err_irq:
+	rtnl_unlock();
+
+	return rc;
+}
+
 #ifdef CONFIG_PM
 static int __e1000_resume(struct pci_dev *pdev)
 {
@@ -6795,26 +6830,6 @@ static int __e1000_resume(struct pci_dev *pdev)
 }

 #ifdef CONFIG_PM_SLEEP
-static int e1000e_pm_thaw(struct device *dev)
-{
-	struct net_device *netdev = dev_get_drvdata(dev);
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-
-	e1000e_set_interrupt_capability(adapter);
-	if (netif_running(netdev)) {
-		u32 err = e1000_request_irq(adapter);
-
-		if (err)
-			return err;
-
-		e1000e_up(adapter);
-	}
-
-	netif_device_attach(netdev);
-
-	return 0;
-}
-
 static int e1000e_pm_suspend(struct device *dev)
 {
 	struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
@@ -7000,16 +7015,11 @@ static void e1000_netpoll(struct net_device *netdev)
 static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
 						pci_channel_state_t state)
 {
-	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct e1000_adapter *adapter = netdev_priv(netdev);
-
-	netif_device_detach(netdev);
+	e1000e_pm_freeze(&pdev->dev);

 	if (state == pci_channel_io_perm_failure)
 		return PCI_ERS_RESULT_DISCONNECT;

-	if (netif_running(netdev))
-		e1000e_down(adapter, true);
 	pci_disable_device(pdev);

 	/* Request a slot slot reset. */
@@ -7075,10 +7085,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
 	e1000_init_manageability_pt(adapter);

-	if (netif_running(netdev))
-		e1000e_up(adapter);
-
-	netif_device_attach(netdev);
+	e1000e_pm_thaw(&pdev->dev);

 	/* If the controller has AMT, do not set DRV_LOAD until the interface
 	 * is up.  For all other cases, let the f/w know that the h/w is now
@@ -7589,14 +7596,12 @@ static void e1000_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
 	struct e1000_adapter *adapter = netdev_priv(netdev);
-	bool down = test_bit(__E1000_DOWN, &adapter->state);

 	e1000e_ptp_remove(adapter);

 	/* The timers may be rescheduled, so explicitly disable them
 	 * from being rescheduled.
 	 */
-	if (!down)
 	set_bit(__E1000_DOWN, &adapter->state);
 	del_timer_sync(&adapter->phy_info_timer);
@@ -7617,9 +7622,6 @@
 		}
 	}

-	/* Don't lie to e1000_close() down the road. */
-	if (!down)
-		clear_bit(__E1000_DOWN, &adapter->state);
 	unregister_netdev(netdev);

 	if (pci_dev_run_wake(pdev))
@@ -7749,6 +7751,17 @@ static const struct pci_device_id e1000_pci_tbl[] = {
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_LM9), board_pch_cnp },
 	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ICP_I219_V9), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM10), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V10), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM11), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V11), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_LM12), board_pch_spt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_CMP_I219_V12), board_pch_spt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM13), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V13), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM14), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_cnp },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_cnp },

 	{ 0, 0, 0, 0, 0, 0, 0 }	/* terminate list */
 };
......
@@ -295,6 +295,8 @@ void e1000e_ptp_init(struct e1000_adapter *adapter)
 	case e1000_pch_lpt:
 	case e1000_pch_spt:
 	case e1000_pch_cnp:
+		/* fall-through */
+	case e1000_pch_tgp:
 		if ((hw->mac.type < e1000_pch_lpt) ||
 		    (er32(TSYNCRXCTL) & E1000_TSYNCRXCTL_SYSCFI)) {
 			adapter->ptp_clock_info.max_adj = 24000000 - 1;
......
@@ -12931,6 +12931,7 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
 			  NETIF_F_GSO_IPXIP6		|
 			  NETIF_F_GSO_UDP_TUNNEL	|
 			  NETIF_F_GSO_UDP_TUNNEL_CSUM	|
+			  NETIF_F_GSO_UDP_L4		|
 			  NETIF_F_SCTP_CRC		|
 			  NETIF_F_RXHASH		|
 			  NETIF_F_RXCSUM		|
......
@@ -2960,10 +2960,16 @@ static int i40e_tso(struct i40e_tx_buffer *first, u8 *hdr_len,
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
+		csum_replace_by_diff(&l4.udp->check, (__force __wsum)htonl(paylen));
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+	} else {
+		csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
 		/* compute length of segmentation header */
 		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+	}

 	/* pull values out of skb_shinfo */
 	gso_size = skb_shinfo(skb)->gso_size;
......
@@ -127,6 +127,7 @@ struct e1000_adv_tx_context_desc {
 };

 #define E1000_ADVTXD_MACLEN_SHIFT	9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_L4T_UDP	0x00000000  /* L4 Packet TYPE of UDP */
 #define E1000_ADVTXD_TUCMD_IPV4	0x00000400  /* IP Packet Type: 1=IPv4 */
 #define E1000_ADVTXD_TUCMD_L4T_TCP	0x00000800  /* L4 Packet TYPE of TCP */
 #define E1000_ADVTXD_TUCMD_L4T_SCTP	0x00001000  /* L4 packet TYPE of SCTP */
......
@@ -2516,6 +2516,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(mac_hdr_len > IGB_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -2524,6 +2525,7 @@ igb_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(network_hdr_len > IGB_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -3120,7 +3122,7 @@ static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			    NETIF_F_HW_CSUM;

 	if (hw->mac.type >= e1000_82576)
-		netdev->features |= NETIF_F_SCTP_CRC;
+		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;

 	if (hw->mac.type >= e1000_i350)
 		netdev->features |= NETIF_F_HW_TC;
@@ -5694,6 +5696,7 @@ static int igb_tso(struct igb_ring *tx_ring,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -5713,7 +5716,8 @@ static int igb_tso(struct igb_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);

 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = E1000_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+		      E1000_ADVTXD_TUCMD_L4T_UDP : E1000_ADVTXD_TUCMD_L4T_TCP;

 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
@@ -5741,12 +5745,19 @@ static int igb_tso(struct igb_ring *tx_ring,
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;

-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, htonl(paylen));
+
+	if (type_tucmd & E1000_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}

 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
......
@@ -7946,6 +7946,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	} ip;
 	union {
 		struct tcphdr *tcp;
+		struct udphdr *udp;
 		unsigned char *hdr;
 	} l4;
 	u32 paylen, l4_offset;
@@ -7969,7 +7970,8 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	l4.hdr = skb_checksum_start(skb);

 	/* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */
-	type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP;
+	type_tucmd = (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) ?
+		      IXGBE_ADVTXD_TUCMD_L4T_UDP : IXGBE_ADVTXD_TUCMD_L4T_TCP;

 	/* initialize outer IP header fields */
 	if (ip.v4->version == 4) {
@@ -7999,12 +8001,20 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring,
 	/* determine offset of inner transport header */
 	l4_offset = l4.hdr - skb->data;

-	/* compute length of segmentation header */
-	*hdr_len = (l4.tcp->doff * 4) + l4_offset;
-
 	/* remove payload length from inner checksum */
 	paylen = skb->len - l4_offset;
-	csum_replace_by_diff(&l4.tcp->check, (__force __wsum)htonl(paylen));
+
+	if (type_tucmd & IXGBE_ADVTXD_TUCMD_L4T_TCP) {
+		/* compute length of segmentation header */
+		*hdr_len = (l4.tcp->doff * 4) + l4_offset;
+		csum_replace_by_diff(&l4.tcp->check,
+				     (__force __wsum)htonl(paylen));
+	} else {
+		/* compute length of segmentation header */
+		*hdr_len = sizeof(*l4.udp) + l4_offset;
+		csum_replace_by_diff(&l4.udp->check,
+				     (__force __wsum)htonl(paylen));
+	}

 	/* update gso size and bytecount with header size */
 	first->gso_segs = skb_shinfo(skb)->gso_segs;
@@ -10190,6 +10200,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_HW_VLAN_CTAG_TX |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -10198,6 +10209,7 @@ ixgbe_features_check(struct sk_buff *skb, struct net_device *dev,
 	if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN))
 		return features & ~(NETIF_F_HW_CSUM |
 				    NETIF_F_SCTP_CRC |
+				    NETIF_F_GSO_UDP_L4 |
 				    NETIF_F_TSO |
 				    NETIF_F_TSO6);
@@ -10907,7 +10919,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 			     IXGBE_GSO_PARTIAL_FEATURES;

 	if (hw->mac.type >= ixgbe_mac_82599EB)
-		netdev->features |= NETIF_F_SCTP_CRC;
+		netdev->features |= NETIF_F_SCTP_CRC | NETIF_F_GSO_UDP_L4;

 #ifdef CONFIG_IXGBE_IPSEC
 #define IXGBE_ESP_FEATURES	(NETIF_F_HW_ESP | \
......