Commit 8ab190fb authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2017-10-26

This series contains fixes to e1000, igb, ixgbe and i40e.

Vincenzo Maffione fixes a potential race condition which would result in
the interface being up while transmits remain disabled in the hardware.

Colin Ian King fixes a possible NULL pointer dereference in e1000, which
was found by Coverity.

Jean-Philippe Brucker fixes a possible kernel panic when a driver cannot
map a transmit buffer, which is caused by an erroneous test.

Alex provides a fix for ixgbe, which is a partial revert of the commit
ffed21bc ("ixgbe: Don't bother clearing buffer memory for descriptor rings")
because the previous commit messed up the exception handling path by
adding the count back in when we did not need to.  Also fixed a typo,
where the transmit ITR setting was being used to determine if we were
using adaptive receive interrupt moderation or not.  Lastly, fixed a
memory leak by including programming descriptors in the cleaned count.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8aec4959 62b4c669
...@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, ...@@ -1824,11 +1824,12 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
{ {
struct e1000_adapter *adapter = netdev_priv(netdev); struct e1000_adapter *adapter = netdev_priv(netdev);
int i; int i;
char *p = NULL;
const struct e1000_stats *stat = e1000_gstrings_stats; const struct e1000_stats *stat = e1000_gstrings_stats;
e1000_update_stats(adapter); e1000_update_stats(adapter);
for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++, stat++) {
char *p;
switch (stat->type) { switch (stat->type) {
case NETDEV_STATS: case NETDEV_STATS:
p = (char *)netdev + stat->stat_offset; p = (char *)netdev + stat->stat_offset;
...@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev, ...@@ -1839,15 +1840,13 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
default: default:
WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n", WARN_ONCE(1, "Invalid E1000 stat type: %u index %d\n",
stat->type, i); stat->type, i);
break; continue;
} }
if (stat->sizeof_stat == sizeof(u64)) if (stat->sizeof_stat == sizeof(u64))
data[i] = *(u64 *)p; data[i] = *(u64 *)p;
else else
data[i] = *(u32 *)p; data[i] = *(u32 *)p;
stat++;
} }
/* BUG_ON(i != E1000_STATS_LEN); */ /* BUG_ON(i != E1000_STATS_LEN); */
} }
......
...@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter) ...@@ -520,8 +520,6 @@ void e1000_down(struct e1000_adapter *adapter)
struct net_device *netdev = adapter->netdev; struct net_device *netdev = adapter->netdev;
u32 rctl, tctl; u32 rctl, tctl;
netif_carrier_off(netdev);
/* disable receives in the hardware */ /* disable receives in the hardware */
rctl = er32(RCTL); rctl = er32(RCTL);
ew32(RCTL, rctl & ~E1000_RCTL_EN); ew32(RCTL, rctl & ~E1000_RCTL_EN);
...@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter) ...@@ -537,6 +535,15 @@ void e1000_down(struct e1000_adapter *adapter)
E1000_WRITE_FLUSH(); E1000_WRITE_FLUSH();
msleep(10); msleep(10);
/* Set the carrier off after transmits have been disabled in the
* hardware, to avoid race conditions with e1000_watchdog() (which
* may be running concurrently to us, checking for the carrier
* bit to decide whether it should enable transmits again). Such
* a race condition would result into transmission being disabled
* in the hardware until the next IFF_DOWN+IFF_UP cycle.
*/
netif_carrier_off(netdev);
napi_disable(&adapter->napi); napi_disable(&adapter->napi);
e1000_irq_disable(adapter); e1000_irq_disable(adapter);
......
...@@ -2102,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget) ...@@ -2102,6 +2102,7 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
if (unlikely(i40e_rx_is_programming_status(qword))) { if (unlikely(i40e_rx_is_programming_status(qword))) {
i40e_clean_programming_status(rx_ring, rx_desc, qword); i40e_clean_programming_status(rx_ring, rx_desc, qword);
cleaned_count++;
continue; continue;
} }
size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >> size = (qword & I40E_RXD_QW1_LENGTH_PBUF_MASK) >>
...@@ -2269,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi, ...@@ -2269,7 +2270,7 @@ static inline void i40e_update_enable_itr(struct i40e_vsi *vsi,
goto enable_int; goto enable_int;
} }
if (ITR_IS_DYNAMIC(tx_itr_setting)) { if (ITR_IS_DYNAMIC(rx_itr_setting)) {
rx = i40e_set_new_dynamic_itr(&q_vector->rx); rx = i40e_set_new_dynamic_itr(&q_vector->rx);
rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr); rxval = i40e_buildreg_itr(I40E_RX_ITR, q_vector->rx.itr);
} }
......
...@@ -5326,7 +5326,7 @@ static int igb_tx_map(struct igb_ring *tx_ring, ...@@ -5326,7 +5326,7 @@ static int igb_tx_map(struct igb_ring *tx_ring,
DMA_TO_DEVICE); DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); dma_unmap_len_set(tx_buffer, len, 0);
if (i--) if (i-- == 0)
i += tx_ring->count; i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; tx_buffer = &tx_ring->tx_buffer_info[i];
} }
......
...@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, ...@@ -8020,29 +8020,23 @@ static int ixgbe_tx_map(struct ixgbe_ring *tx_ring,
return 0; return 0;
dma_error: dma_error:
dev_err(tx_ring->dev, "TX DMA map failed\n"); dev_err(tx_ring->dev, "TX DMA map failed\n");
tx_buffer = &tx_ring->tx_buffer_info[i];
/* clear dma mappings for failed tx_buffer_info map */ /* clear dma mappings for failed tx_buffer_info map */
while (tx_buffer != first) { for (;;) {
tx_buffer = &tx_ring->tx_buffer_info[i];
if (dma_unmap_len(tx_buffer, len)) if (dma_unmap_len(tx_buffer, len))
dma_unmap_page(tx_ring->dev, dma_unmap_page(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma), dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len), dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE); DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0); dma_unmap_len_set(tx_buffer, len, 0);
if (tx_buffer == first)
if (i--) break;
if (i == 0)
i += tx_ring->count; i += tx_ring->count;
tx_buffer = &tx_ring->tx_buffer_info[i]; i--;
} }
if (dma_unmap_len(tx_buffer, len))
dma_unmap_single(tx_ring->dev,
dma_unmap_addr(tx_buffer, dma),
dma_unmap_len(tx_buffer, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buffer, len, 0);
dev_kfree_skb_any(first->skb); dev_kfree_skb_any(first->skb);
first->skb = NULL; first->skb = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment