Commit 5237b9dd authored by David S. Miller

Merge branch '10GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
10GbE Intel Wired LAN Driver Updates 2017-02-16

This series contains updates to ixgbe only.

Tony updates the driver to advertise 2.5Gb and 5.0Gb if the adapter
supports it.

Stephen Hemminger renames our global dcbnl_ops to ixgbe_dcbnl_ops to
avoid namespace collisions.

Mark updates the driver version based on the recent changes.

Alex has the remainder of the changes, starting with consolidating the
functions that represent logical steps in the receive process so we can
update them more easily later (and align with igb).  He modifies the
receive path to synchronize only the length of the frame rather than the
entire buffer, and improves performance both by adding support for
DMA_ATTR_SKIP_CPU_SYNC and DMA_ATTR_WEAK_ORDERING and by batching the
page count updates instead of doing them one at a time.  He adjusts the
receive path to use 3K buffers backed by 8K pages in order to support
build_skb with jumbo frames, and uses the length of the packet instead
of the DD status bit to determine whether a new descriptor is ready to
be processed, which cuts down on descriptor reads.  To reduce code
duplication he pulls the receive path apart into separate functions, and
he adds support for providing a buffer with headroom and tailroom to
make room for shared info, NET_SKB_PAD, and NET_IP_ALIGN.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 3f64116a ffed21bc
@@ -91,6 +91,14 @@
 #define IXGBE_RXBUFFER_4K	4096
 #define IXGBE_MAX_RXBUFFER	16384	/* largest size for a single descriptor */
+#define IXGBE_SKB_PAD		(NET_SKB_PAD + NET_IP_ALIGN)
+#if (PAGE_SIZE < 8192)
+#define IXGBE_MAX_FRAME_BUILD_SKB \
+	(SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD)
+#else
+#define IXGBE_MAX_FRAME_BUILD_SKB	IXGBE_RXBUFFER_2K
+#endif
+
 /*
  * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
  * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
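IXGBE_MAX_FRAME_BUILD_SKB above is the largest frame that still fits in
a 2K half-page once the build_skb overhead is carved out.  A minimal
sketch of how such a buffer is typically consumed, assuming PAGE_SIZE <
8192 and that va points at the start of the half-page (the helper name
is the editor's illustration, not a function from this diff):

	/* kernel context assumed: build_skb(), skb_reserve() and
	 * __skb_put() come from <linux/skbuff.h> */
	static struct sk_buff *rx_buffer_to_skb(void *va, unsigned int size)
	{
		/* hand build_skb() the whole 2K half-page; it places
		 * struct skb_shared_info at the tail for us */
		struct sk_buff *skb = build_skb(va, IXGBE_RXBUFFER_2K);

		if (unlikely(!skb))
			return NULL;

		/* headroom for NET_SKB_PAD + NET_IP_ALIGN ... */
		skb_reserve(skb, IXGBE_SKB_PAD);

		/* ... which is why the payload may be at most
		 * SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K) - IXGBE_SKB_PAD */
		__skb_put(skb, size);

		return skb;
	}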
@@ -104,6 +112,9 @@
 /* How many Rx Buffers do we bundle into one write to the hardware ? */
 #define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+#define IXGBE_RX_DMA_ATTR \
+	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)
+
 enum ixgbe_tx_flags {
 	/* cmd_type flags */
 	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
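IXGBE_RX_DMA_ATTR is meant to be passed to the *_attrs variants of the
DMA mapping API, so that mapping and unmapping skip the implicit CPU
sync (the driver syncs manually, and only as much as it needs) and the
IOMMU may use weak ordering.  A hedged sketch of the intended call
pattern (variable names here are illustrative, not lifted from the
diff):

	/* map once, at page allocation time */
	dma_addr_t dma = dma_map_page_attrs(rx_ring->dev, page, 0,
					    PAGE_SIZE, DMA_FROM_DEVICE,
					    IXGBE_RX_DMA_ATTR);

	/* per frame: sync only the bytes the NIC wrote, not the buffer */
	dma_sync_single_range_for_cpu(rx_ring->dev, dma, page_offset,
				      frame_len, DMA_FROM_DEVICE);

	/* teardown: the attrs again suppress a full-buffer CPU sync */
	dma_unmap_page_attrs(rx_ring->dev, dma, PAGE_SIZE,
			     DMA_FROM_DEVICE, IXGBE_RX_DMA_ATTR);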
@@ -192,7 +203,12 @@ struct ixgbe_rx_buffer {
 	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	unsigned int page_offset;
+#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
+	__u32 page_offset;
+#else
+	__u16 page_offset;
+#endif
+	__u16 pagecnt_bias;
 };
 
 struct ixgbe_queue_stats {
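pagecnt_bias supports the batched page-count updates mentioned in the
commit message: rather than a get_page()/put_page() pair per frame, the
driver takes a large page reference once and tracks what it has handed
out in a cheap local counter.  A rough sketch of the idea (the helper
below is the editor's illustration, and the driver's actual reuse test
differs in detail):

	static bool rx_page_is_reusable(struct ixgbe_rx_buffer *bi)
	{
		/* frames still held elsewhere in the stack? then the
		 * page cannot be recycled into the ring yet */
		if (page_ref_count(bi->page) - bi->pagecnt_bias > 1)
			return false;

		/* refill the bias with a single atomic add, amortized
		 * over up to ~64K frames */
		if (unlikely(bi->pagecnt_bias == 1)) {
			page_ref_add(bi->page, USHRT_MAX - 1);
			bi->pagecnt_bias = USHRT_MAX;
		}

		return true;
	}

Each frame passed up the stack then costs only bi->pagecnt_bias--, with
no atomic operation on the page itself.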
@@ -218,15 +234,20 @@ struct ixgbe_rx_queue_stats {
 #define IXGBE_TS_HDR_LEN 8
 
 enum ixgbe_ring_state_t {
+	__IXGBE_RX_3K_BUFFER,
+	__IXGBE_RX_BUILD_SKB_ENABLED,
+	__IXGBE_RX_RSC_ENABLED,
+	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
+	__IXGBE_RX_FCOE,
 	__IXGBE_TX_FDIR_INIT_DONE,
 	__IXGBE_TX_XPS_INIT_DONE,
 	__IXGBE_TX_DETECT_HANG,
 	__IXGBE_HANG_CHECK_ARMED,
-	__IXGBE_RX_RSC_ENABLED,
-	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
-	__IXGBE_RX_FCOE,
 };
 
+#define ring_uses_build_skb(ring) \
+	test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state)
+
 struct ixgbe_fwd_adapter {
 	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
 	struct net_device *netdev;
@@ -336,19 +357,20 @@ struct ixgbe_ring_feature {
  */
 static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
-					    IXGBE_RXBUFFER_3K;
+	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+		return IXGBE_RXBUFFER_3K;
+#if (PAGE_SIZE < 8192)
+	if (ring_uses_build_skb(ring))
+		return IXGBE_MAX_FRAME_BUILD_SKB;
 #endif
 	return IXGBE_RXBUFFER_2K;
 }
 
 static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
 {
-#ifdef IXGBE_FCOE
-	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
-		return (PAGE_SIZE < 8192) ? 1 : 0;
+#if (PAGE_SIZE < 8192)
+	if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state))
+		return 1;
 #endif
 	return 0;
 }
@@ -539,6 +561,7 @@ struct ixgbe_adapter {
 #define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
 #define IXGBE_FLAG2_EEE_CAPABLE			BIT(14)
 #define IXGBE_FLAG2_EEE_ENABLED			BIT(15)
+#define IXGBE_FLAG2_RX_LEGACY			BIT(16)
 
 	/* Tx fast path data */
 	int num_tx_queues;
...@@ -751,7 +774,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info; ...@@ -751,7 +774,7 @@ extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_a_info; extern const struct ixgbe_info ixgbe_x550em_a_info;
extern const struct ixgbe_info ixgbe_x550em_a_fw_info; extern const struct ixgbe_info ixgbe_x550em_a_fw_info;
#ifdef CONFIG_IXGBE_DCB #ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops; extern const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops;
#endif #endif
extern char ixgbe_driver_name[]; extern char ixgbe_driver_name[];
......
@@ -777,7 +777,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode)
 	return err ? 1 : 0;
 }
 
-const struct dcbnl_rtnl_ops dcbnl_ops = {
+const struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = {
 	.ieee_getets	= ixgbe_dcbnl_ieee_getets,
 	.ieee_setets	= ixgbe_dcbnl_ieee_setets,
 	.ieee_getpfc	= ixgbe_dcbnl_ieee_getpfc,
...
@@ -151,6 +151,13 @@ static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
 };
 
 #define IXGBE_TEST_LEN	sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
 
+static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
+#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
+	"legacy-rx",
+};
+
+#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
+
 /* currently supported speeds for 10G */
 #define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
 			 SUPPORTED_10000baseKX4_Full | \
@@ -340,6 +347,9 @@ static int ixgbe_get_settings(struct net_device *netdev,
 	case IXGBE_LINK_SPEED_10GB_FULL:
 		ethtool_cmd_speed_set(ecmd, SPEED_10000);
 		break;
+	case IXGBE_LINK_SPEED_5GB_FULL:
+		ethtool_cmd_speed_set(ecmd, SPEED_5000);
+		break;
 	case IXGBE_LINK_SPEED_2_5GB_FULL:
 		ethtool_cmd_speed_set(ecmd, SPEED_2500);
 		break;
@@ -998,6 +1008,8 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
 	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
 		sizeof(drvinfo->bus_info));
+
+	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
 }
 
 static void ixgbe_get_ringparam(struct net_device *netdev,
@@ -1137,6 +1149,8 @@ static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
 		return IXGBE_TEST_LEN;
 	case ETH_SS_STATS:
 		return IXGBE_STATS_LEN;
+	case ETH_SS_PRIV_FLAGS:
+		return IXGBE_PRIV_FLAGS_STR_LEN;
 	default:
 		return -EOPNOTSUPP;
 	}
@@ -1261,6 +1275,9 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
 		}
 		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
 		break;
+	case ETH_SS_PRIV_FLAGS:
+		memcpy(data, ixgbe_priv_flags_strings,
+		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
 	}
 }
@@ -1865,7 +1882,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 	tx_ntc = tx_ring->next_to_clean;
 	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
 
-	while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) {
+	while (rx_desc->wb.upper.length) {
 		/* check Rx buffer */
 		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];
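The loop-condition change above relies on the descriptor rings being
zeroed at configuration time: a non-zero write-back length is then
sufficient proof that hardware is done with the descriptor, saving the
read of the DD status bits.  A hedged sketch of the resulting polling
pattern (the loop body is illustrative; note the barrier):

	while (true) {
		unsigned int size = le16_to_cpu(rx_desc->wb.upper.length);

		if (!size)	/* hardware has not written this one back */
			break;

		/* the length check must not be reordered with reads of
		 * the rest of the descriptor */
		dma_rmb();

		/* ... consume the 'size'-byte frame ... */

		if (++rx_ntc == rx_ring->count)
			rx_ntc = 0;
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}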
@@ -1887,7 +1904,16 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
 		/* unmap buffer on Tx side */
 		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];
-		ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer);
+
+		/* Free all the Tx ring sk_buffs */
+		dev_kfree_skb_any(tx_buffer->skb);
+
+		/* unmap skb header data */
+		dma_unmap_single(tx_ring->dev,
+				 dma_unmap_addr(tx_buffer, dma),
+				 dma_unmap_len(tx_buffer, len),
+				 DMA_TO_DEVICE);
+		dma_unmap_len_set(tx_buffer, len, 0);
 
 		/* increment Rx/Tx next to clean counters */
 		rx_ntc++;
@@ -3342,6 +3368,37 @@ static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
 	return 0;
 }
 
+static u32 ixgbe_get_priv_flags(struct net_device *netdev)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	u32 priv_flags = 0;
+
+	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
+		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;
+
+	return priv_flags;
+}
+
+static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
+{
+	struct ixgbe_adapter *adapter = netdev_priv(netdev);
+	unsigned int flags2 = adapter->flags2;
+
+	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
+	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
+		flags2 |= IXGBE_FLAG2_RX_LEGACY;
+
+	if (flags2 != adapter->flags2) {
+		adapter->flags2 = flags2;
+
+		/* reset interface to repopulate queues */
+		if (netif_running(netdev))
+			ixgbe_reinit_locked(adapter);
+	}
+
+	return 0;
+}
+
 static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.get_settings		= ixgbe_get_settings,
 	.set_settings		= ixgbe_set_settings,
@@ -3378,6 +3435,8 @@ static const struct ethtool_ops ixgbe_ethtool_ops = {
 	.set_eee		= ixgbe_set_eee,
 	.get_channels		= ixgbe_get_channels,
 	.set_channels		= ixgbe_set_channels,
+	.get_priv_flags		= ixgbe_get_priv_flags,
+	.set_priv_flags		= ixgbe_set_priv_flags,
 	.get_ts_info		= ixgbe_get_ts_info,
 	.get_module_info	= ixgbe_get_module_info,
 	.get_module_eeprom	= ixgbe_get_module_eeprom,
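Once these two ops are wired up, the new flag is visible to userspace
via ethtool --show-priv-flags and can be toggled with
ethtool --set-priv-flags <dev> legacy-rx on (or off); if the interface
is running, ixgbe_set_priv_flags() above calls ixgbe_reinit_locked() so
the queues are rebuilt under the newly selected Rx scheme.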
...
@@ -768,9 +768,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 					       ixgbe_link_speed speed,
 					       bool autoneg_wait_to_complete)
 {
-	/*
-	 * Clear autoneg_advertised and set new values based on input link
+	/* Clear autoneg_advertised and set new values based on input link
 	 * speed.
 	 */
 	hw->phy.autoneg_advertised = 0;
@@ -778,6 +776,12 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
 	if (speed & IXGBE_LINK_SPEED_10GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
 
+	if (speed & IXGBE_LINK_SPEED_5GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL;
+
+	if (speed & IXGBE_LINK_SPEED_2_5GB_FULL)
+		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
+
 	if (speed & IXGBE_LINK_SPEED_1GB_FULL)
 		hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
...