Commit 00423969 authored by Thierry Reding, committed by David S. Miller

Revert "net: stmmac: re-init rx buffers when mac resume back"

This reverts commit 9c63faaa, which introduces a suspend/resume
regression on Jetson TX2 boards that can be reproduced every time. Given
that the issue this was supposed to fix occurs only very sporadically,
the safest course of action is to revert before v5.12; we can then have
another go at fixing the rarer issue in the next release (and perhaps
backport the fix if necessary).

The root cause of the observed problem seems to be that when the system
is suspended, some packets are still in transit. When the descriptors
for these buffers are cleared on resume, the descriptors become invalid
and cause a fatal bus error.
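
To make the failure mode concrete, the sketch below models the ownership
protocol in plain userspace C. It is an illustration only, not stmmac
code: the types and names (toy_desc, DESC_OWN, reinit_ring) are invented,
and the OWN bit stands in for the hardware's descriptor ownership flag.
A descriptor whose OWN bit is set belongs to the DMA engine, so rewriting
it from the CPU, as an unconditional re-init on resume effectively does,
leaves the device and the CPU with disagreeing views of the ring.

/* Toy model of an RX descriptor ring with an OWN bit (illustration only,
 * ordinary userspace C; nothing here is real driver code). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RING_SIZE 4
#define DESC_OWN  (1u << 31)	/* device owns this descriptor */

struct toy_desc {
	uint64_t buf_addr;	/* "DMA address" of the receive buffer */
	uint32_t flags;		/* OWN bit lives here */
};

static struct toy_desc ring[RING_SIZE];

static void reinit_ring(bool check_ownership)
{
	for (int i = 0; i < RING_SIZE; i++) {
		if (ring[i].flags & DESC_OWN) {
			if (check_ownership) {
				printf("desc %d: in flight, skipped\n", i);
				continue;
			}
			printf("desc %d: rewritten while device owns it (hazard)\n", i);
		}
		/* Recycle the old buffer, install a fresh one and hand the
		 * descriptor (back) to the device. */
		ring[i].buf_addr = 0x1000 + 0x800 * (uint64_t)i;
		ring[i].flags = DESC_OWN;
	}
}

int main(void)
{
	/* Suspend arrives while descriptor 2 still has a packet in transit:
	 * the device owns it and may still be writing into its buffer. */
	ring[2].flags = DESC_OWN;

	/* Unconditional re-init, the hazard described above: the CPU's and
	 * the device's view of descriptor 2 now disagree, which on real
	 * hardware can surface as a fatal bus error. */
	reinit_ring(false);

	/* A safe variant must respect the OWN bit instead. */
	memset(ring, 0, sizeof(ring));
	ring[2].flags = DESC_OWN;
	reinit_ring(true);	/* prints "desc 2: in flight, skipped" */

	return 0;
}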

Link: https://lore.kernel.org/r/708edb92-a5df-ecc4-3126-5ab36707e275@nvidia.com/
Reported-by: Jonathan Hunter <jonathanh@nvidia.com>
Signed-off-by: Thierry Reding <treding@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 416dcc5c
@@ -1379,88 +1379,6 @@ static void stmmac_free_tx_buffer(struct stmmac_priv *priv, u32 queue, int i)
 	}
 }
 
-/**
- * stmmac_reinit_rx_buffers - reinit the RX descriptor buffer.
- * @priv: driver private structure
- * Description: this function is called to re-allocate a receive buffer, perform
- * the DMA mapping and init the descriptor.
- */
-static void stmmac_reinit_rx_buffers(struct stmmac_priv *priv)
-{
-	u32 rx_count = priv->plat->rx_queues_to_use;
-	u32 queue;
-	int i;
-
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-		for (i = 0; i < priv->dma_rx_size; i++) {
-			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-
-			if (buf->page) {
-				page_pool_recycle_direct(rx_q->page_pool, buf->page);
-				buf->page = NULL;
-			}
-
-			if (priv->sph && buf->sec_page) {
-				page_pool_recycle_direct(rx_q->page_pool, buf->sec_page);
-				buf->sec_page = NULL;
-			}
-		}
-	}
-
-	for (queue = 0; queue < rx_count; queue++) {
-		struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue];
-
-		for (i = 0; i < priv->dma_rx_size; i++) {
-			struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
-			struct dma_desc *p;
-
-			if (priv->extend_desc)
-				p = &((rx_q->dma_erx + i)->basic);
-			else
-				p = rx_q->dma_rx + i;
-
-			if (!buf->page) {
-				buf->page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->page)
-					goto err_reinit_rx_buffers;
-
-				buf->addr = page_pool_get_dma_addr(buf->page);
-			}
-
-			if (priv->sph && !buf->sec_page) {
-				buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool);
-				if (!buf->sec_page)
-					goto err_reinit_rx_buffers;
-
-				buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
-			}
-
-			stmmac_set_desc_addr(priv, p, buf->addr);
-			if (priv->sph)
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
-			else
-				stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
-			if (priv->dma_buf_sz == BUF_SIZE_16KiB)
-				stmmac_init_desc3(priv, p);
-		}
-	}
-
-	return;
-
-err_reinit_rx_buffers:
-	do {
-		while (--i >= 0)
-			stmmac_free_rx_buffer(priv, queue, i);
-
-		if (queue == 0)
-			break;
-
-		i = priv->dma_rx_size;
-	} while (queue-- > 0);
-}
-
 /**
  * init_dma_rx_desc_rings - init the RX descriptor rings
  * @dev: net device structure
@@ -5428,7 +5346,7 @@ int stmmac_resume(struct device *dev)
 	mutex_lock(&priv->lock);
 
 	stmmac_reset_queues_param(priv);
-	stmmac_reinit_rx_buffers(priv);
+
 	stmmac_free_tx_skbufs(priv);
 	stmmac_clear_descriptors(priv);
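
A side note on the removed function's error path: the
err_reinit_rx_buffers label uses a common kernel unwind idiom for
partially initialized two-dimensional resources: free what was allocated
so far in the failing queue, then walk back over every earlier, fully
populated queue. The standalone sketch below reproduces just that control
flow; alloc_one() and free_one() are made-up stand-ins for the per-buffer
page_pool calls, not real driver functions.

/* Standalone sketch of the nested-loop unwind idiom used by the removed
 * err_reinit_rx_buffers label (illustration only). */
#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 3
#define RING_SIZE  4

static bool alloc_one(int queue, int i)
{
	/* Simulate an allocation failure partway through queue 1. */
	return !(queue == 1 && i == 2);
}

static void free_one(int queue, int i)
{
	printf("freeing queue %d, desc %d\n", queue, i);
}

int main(void)
{
	int queue, i;

	for (queue = 0; queue < NUM_QUEUES; queue++) {
		for (i = 0; i < RING_SIZE; i++) {
			if (!alloc_one(queue, i))
				goto err;
		}
	}
	return 0;

err:
	/* Mirror of the removed code's unwind: i is the index that failed,
	 * so --i steps back to the last successful allocation in the current
	 * queue; after each queue is emptied, reset i to a full ring and
	 * repeat for every earlier queue until queue 0 has been released. */
	do {
		while (--i >= 0)
			free_one(queue, i);

		if (queue == 0)
			break;

		i = RING_SIZE;
	} while (queue-- > 0);

	return 1;
}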