Commit 3647d345 authored by David S. Miller

Merge branch 'sfc-3.9' of git://git.kernel.org/pub/scm/linux/kernel/git/bwh/sfc

Ben Hutchings says:

====================
Some fixes that should go into 3.9:

1. Fix packet corruption when using non-coherent RX DMA buffers.
2. Fix occasional watchdog misfiring when changing MTU or ring size.

These are longstanding bugs and should be fixed in stable as well, but
I'd like to review other recent fixes first and send a separate request
for stable inclusion.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1cef9350 29c69a48
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -779,6 +779,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
                            tx_queue->txd.entries);
         }
 
+        efx_device_detach_sync(efx);
         efx_stop_all(efx);
         efx_stop_interrupts(efx, true);
 
@@ -832,6 +833,7 @@ efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
 
         efx_start_interrupts(efx, true);
         efx_start_all(efx);
+        netif_device_attach(efx->net_dev);
         return rc;
 
 rollback:
@@ -1641,8 +1643,12 @@ static void efx_stop_all(struct efx_nic *efx)
         /* Flush efx_mac_work(), refill_workqueue, monitor_work */
         efx_flush_all(efx);
 
-        /* Stop the kernel transmit interface late, so the watchdog
-         * timer isn't ticking over the flush */
+        /* Stop the kernel transmit interface.  This is only valid if
+         * the device is stopped or detached; otherwise the watchdog
+         * may fire immediately.
+         */
+        WARN_ON(netif_running(efx->net_dev) &&
+                netif_device_present(efx->net_dev));
         netif_tx_disable(efx->net_dev);
 
         efx_stop_datapath(efx);
@@ -1963,16 +1969,18 @@ static int efx_change_mtu(struct net_device *net_dev, int new_mtu)
         if (new_mtu > EFX_MAX_MTU)
                 return -EINVAL;
 
-        efx_stop_all(efx);
-
         netif_dbg(efx, drv, efx->net_dev, "changing MTU to %d\n", new_mtu);
 
+        efx_device_detach_sync(efx);
+        efx_stop_all(efx);
+
         mutex_lock(&efx->mac_lock);
         net_dev->mtu = new_mtu;
         efx->type->reconfigure_mac(efx);
         mutex_unlock(&efx->mac_lock);
 
         efx_start_all(efx);
+        netif_device_attach(efx->net_dev);
         return 0;
 }
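For reference, the watchdog fix works by detaching the net device before the TX queues are stopped and reattaching it only once the datapath is running again. A minimal sketch of that pattern follows (hypothetical example_* names, not the driver's code; the driver's own helper is efx_device_detach_sync(), called in the hunks above):

#include <linux/netdevice.h>

/* Sketch only: detach under the TX lock so that no CPU can still be inside
 * ndo_start_xmit once netif_device_present() reads false.  With the device
 * marked "not present", the stack stops queuing packets and the TX watchdog
 * will not fire while the datapath is torn down and rebuilt.
 */
static void example_detach_sync(struct net_device *dev)
{
        netif_tx_lock_bh(dev);          /* freeze all TX queues */
        netif_device_detach(dev);       /* mark the device not present */
        netif_tx_unlock_bh(dev);
}

static int example_reconfigure(struct net_device *dev, int new_mtu)
{
        example_detach_sync(dev);       /* 1. stack stops submitting packets */
        /* 2. stop and reconfigure the datapath here (device-specific) */
        dev->mtu = new_mtu;
        /* 3. restart the datapath, then hand the device back to the stack */
        netif_device_attach(dev);
        return 0;
}

The WARN_ON() added to efx_stop_all() enforces exactly this ordering: disabling TX is only safe once the device is either stopped or detached.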
--- a/drivers/net/ethernet/sfc/net_driver.h
+++ b/drivers/net/ethernet/sfc/net_driver.h
@@ -210,6 +210,7 @@ struct efx_tx_queue {
  *	Will be %NULL if the buffer slot is currently free.
  * @page: The associated page buffer. Valif iff @flags & %EFX_RX_BUF_PAGE.
  *	Will be %NULL if the buffer slot is currently free.
+ * @page_offset: Offset within page. Valid iff @flags & %EFX_RX_BUF_PAGE.
  * @len: Buffer length, in bytes.
  * @flags: Flags for buffer and packet state.
  */
@@ -219,7 +220,8 @@ struct efx_rx_buffer {
                 struct sk_buff *skb;
                 struct page *page;
         } u;
-        unsigned int len;
+        u16 page_offset;
+        u16 len;
         u16 flags;
 };
 #define EFX_RX_BUF_PAGE		0x0001
--- a/drivers/net/ethernet/sfc/rx.c
+++ b/drivers/net/ethernet/sfc/rx.c
@@ -90,11 +90,7 @@ static unsigned int rx_refill_threshold;
 static inline unsigned int efx_rx_buf_offset(struct efx_nic *efx,
                                              struct efx_rx_buffer *buf)
 {
-        /* Offset is always within one page, so we don't need to consider
-         * the page order.
-         */
-        return ((unsigned int) buf->dma_addr & (PAGE_SIZE - 1)) +
-                efx->type->rx_buffer_hash_size;
+        return buf->page_offset + efx->type->rx_buffer_hash_size;
 }
 static inline unsigned int efx_rx_buf_size(struct efx_nic *efx)
 {
@@ -187,6 +183,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
         struct efx_nic *efx = rx_queue->efx;
         struct efx_rx_buffer *rx_buf;
         struct page *page;
+        unsigned int page_offset;
         struct efx_rx_page_state *state;
         dma_addr_t dma_addr;
         unsigned index, count;
@@ -211,12 +208,14 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                 state->dma_addr = dma_addr;
 
                 dma_addr += sizeof(struct efx_rx_page_state);
+                page_offset = sizeof(struct efx_rx_page_state);
 
         split:
                 index = rx_queue->added_count & rx_queue->ptr_mask;
                 rx_buf = efx_rx_buffer(rx_queue, index);
                 rx_buf->dma_addr = dma_addr + EFX_PAGE_IP_ALIGN;
                 rx_buf->u.page = page;
+                rx_buf->page_offset = page_offset;
                 rx_buf->len = efx->rx_buffer_len - EFX_PAGE_IP_ALIGN;
                 rx_buf->flags = EFX_RX_BUF_PAGE;
                 ++rx_queue->added_count;
@@ -227,6 +226,7 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
                 /* Use the second half of the page */
                 get_page(page);
                 dma_addr += (PAGE_SIZE >> 1);
+                page_offset += (PAGE_SIZE >> 1);
                 ++count;
                 goto split;
         }
@@ -236,7 +236,8 @@ static int efx_init_rx_buffers_page(struct efx_rx_queue *rx_queue)
 }
 
 static void efx_unmap_rx_buffer(struct efx_nic *efx,
-                                struct efx_rx_buffer *rx_buf)
+                                struct efx_rx_buffer *rx_buf,
+                                unsigned int used_len)
 {
         if ((rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.page) {
                 struct efx_rx_page_state *state;
@@ -247,6 +248,10 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
                                        state->dma_addr,
                                        efx_rx_buf_size(efx),
                                        DMA_FROM_DEVICE);
+                } else if (used_len) {
+                        dma_sync_single_for_cpu(&efx->pci_dev->dev,
+                                                rx_buf->dma_addr, used_len,
+                                                DMA_FROM_DEVICE);
                 }
         } else if (!(rx_buf->flags & EFX_RX_BUF_PAGE) && rx_buf->u.skb) {
                 dma_unmap_single(&efx->pci_dev->dev, rx_buf->dma_addr,
@@ -269,7 +274,7 @@ static void efx_free_rx_buffer(struct efx_nic *efx,
 static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
                                struct efx_rx_buffer *rx_buf)
 {
-        efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
+        efx_unmap_rx_buffer(rx_queue->efx, rx_buf, 0);
         efx_free_rx_buffer(rx_queue->efx, rx_buf);
 }
 
@@ -535,10 +540,10 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
                 goto out;
         }
 
-        /* Release card resources - assumes all RX buffers consumed in-order
-         * per RX queue
+        /* Release and/or sync DMA mapping - assumes all RX buffers
+         * consumed in-order per RX queue
         */
-        efx_unmap_rx_buffer(efx, rx_buf);
+        efx_unmap_rx_buffer(efx, rx_buf, len);
 
         /* Prefetch nice and early so data will (hopefully) be in cache by
          * the time we look at it.
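For reference, the corruption fix stems from two half-page RX buffers sharing one streaming DMA mapping: a completed buffer that is not the last user of the page cannot be unmapped yet, so on non-coherent systems the CPU must still be synced against the device before the packet is read. A minimal sketch of that DMA pattern (hypothetical example_* names, not the driver's code):

#include <linux/types.h>
#include <linux/dma-mapping.h>

/* Sketch only: RX completion for a buffer carved out of a shared, page-sized
 * streaming mapping.  If other buffers still reference the page, keep the
 * mapping alive and sync just the bytes the NIC wrote; skipping the sync lets
 * a non-coherent CPU read stale cache lines, i.e. corrupted packet data.
 */
static void example_rx_complete(struct device *dev, dma_addr_t page_dma,
                                size_t mapping_len, dma_addr_t buf_dma,
                                unsigned int used_len, bool last_ref_on_page)
{
        if (last_ref_on_page)
                /* Last user of the page: tear down the whole mapping. */
                dma_unmap_page(dev, page_dma, mapping_len, DMA_FROM_DEVICE);
        else if (used_len)
                /* Mapping stays live: make the NIC's writes visible to us. */
                dma_sync_single_for_cpu(dev, buf_dma, used_len,
                                        DMA_FROM_DEVICE);

        /* The packet data behind buf_dma is now safe to read on the CPU. */
}

In the driver this corresponds to the new used_len argument of efx_unmap_rx_buffer(): efx_rx_packet() passes the received length so only the consumed bytes are synced, while efx_fini_rx_buffer() passes 0 because the buffer is being freed without reading its data.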