Commit 4688f4f4 authored by Justin Chen, committed by David S. Miller

net: bcmasp: Keep buffers through power management

There is no advantage to freeing and re-allocating buffers across
suspend and resume. Doing so wastes cycles and makes suspend/resume
take longer. It also opens us up to failed allocations on systems
with heavy memory fragmentation.
Signed-off-by: Justin Chen <justin.chen@broadcom.com>
Acked-by: Florian Fainelli <florian.fainelli@broadcom.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9112fc01
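
In outline: buffer allocation moves out of the ring-init paths and into open(), so the init helpers become cheap register/index resets that a power-management cycle can re-run. A minimal sketch of the resume-side consequence, assuming the driver's suspend/resume callbacks reuse the netif init/deinit paths as the commit message implies (those callbacks are not part of this diff, and the helper name below is hypothetical):

    /* Hypothetical resume-side sketch, not part of this patch: buffers from
     * bcmasp_alloc_buffers() survive suspend, so resume only resets ring state.
     */
    static void bcmasp_resume_rings_sketch(struct bcmasp_intf *intf)
    {
            /* No alloc_pages()/dma_alloc_coherent() here anymore: both init
             * helpers are void after this patch and just rewind to index 0.
             */
            bcmasp_init_tx(intf);
            bcmasp_init_rx(intf);
    }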
@@ -315,6 +315,7 @@ struct bcmasp_intf {
 	struct bcmasp_desc *rx_edpkt_cpu;
 	dma_addr_t rx_edpkt_dma_addr;
 	dma_addr_t rx_edpkt_dma_read;
+	dma_addr_t rx_edpkt_dma_valid;
 
 	/* RX buffer prefetcher ring*/
 	void *rx_ring_cpu;
...
@@ -674,40 +674,78 @@ static void bcmasp_adj_link(struct net_device *dev)
 	phy_print_status(phydev);
 }
 
-static int bcmasp_init_rx(struct bcmasp_intf *intf)
+static int bcmasp_alloc_buffers(struct bcmasp_intf *intf)
 {
 	struct device *kdev = &intf->parent->pdev->dev;
 	struct page *buffer_pg;
-	dma_addr_t dma;
-	void *p;
-	u32 reg;
-	int ret;
 
+	/* Alloc RX */
 	intf->rx_buf_order = get_order(RING_BUFFER_SIZE);
 	buffer_pg = alloc_pages(GFP_KERNEL, intf->rx_buf_order);
 	if (!buffer_pg)
 		return -ENOMEM;
 
-	dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
-			   DMA_FROM_DEVICE);
-	if (dma_mapping_error(kdev, dma)) {
-		__free_pages(buffer_pg, intf->rx_buf_order);
-		return -ENOMEM;
-	}
-
 	intf->rx_ring_cpu = page_to_virt(buffer_pg);
-	intf->rx_ring_dma = dma;
-	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+	intf->rx_ring_dma = dma_map_page(kdev, buffer_pg, 0, RING_BUFFER_SIZE,
+					 DMA_FROM_DEVICE);
+	if (dma_mapping_error(kdev, intf->rx_ring_dma))
+		goto free_rx_buffer;
+
+	intf->rx_edpkt_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+						&intf->rx_edpkt_dma_addr, GFP_KERNEL);
+	if (!intf->rx_edpkt_cpu)
+		goto free_rx_buffer_dma;
+
+	/* Alloc TX */
+	intf->tx_spb_cpu = dma_alloc_coherent(kdev, DESC_RING_SIZE,
+					      &intf->tx_spb_dma_addr, GFP_KERNEL);
+	if (!intf->tx_spb_cpu)
+		goto free_rx_edpkt_dma;
 
-	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->rx_edpkt_dma_addr,
-			       GFP_KERNEL);
-	if (!p) {
-		ret = -ENOMEM;
-		goto free_rx_ring;
-	}
-	intf->rx_edpkt_cpu = p;
+	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
+			       GFP_KERNEL);
+	if (!intf->tx_cbs)
+		goto free_tx_spb_dma;
 
-	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
+	return 0;
+
+free_tx_spb_dma:
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+			  intf->tx_spb_dma_addr);
+free_rx_edpkt_dma:
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+			  intf->rx_edpkt_dma_addr);
+free_rx_buffer_dma:
+	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+		       DMA_FROM_DEVICE);
+free_rx_buffer:
+	__free_pages(buffer_pg, intf->rx_buf_order);
+
+	return -ENOMEM;
+}
+
+static void bcmasp_reclaim_free_buffers(struct bcmasp_intf *intf)
+{
+	struct device *kdev = &intf->parent->pdev->dev;
+
+	/* RX buffers */
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
+			  intf->rx_edpkt_dma_addr);
+	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
+		       DMA_FROM_DEVICE);
+	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
+
+	/* TX buffers */
+	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
+			  intf->tx_spb_dma_addr);
+	kfree(intf->tx_cbs);
+}
+
+static void bcmasp_init_rx(struct bcmasp_intf *intf)
+{
+	/* Restart from index 0 */
+	intf->rx_ring_dma_valid = intf->rx_ring_dma + RING_BUFFER_SIZE - 1;
+	intf->rx_edpkt_dma_valid = intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1);
 	intf->rx_edpkt_dma_read = intf->rx_edpkt_dma_addr;
 	intf->rx_edpkt_index = 0;
@@ -733,64 +771,23 @@ static int bcmasp_init_rx(struct bcmasp_intf *intf)
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_WRITE);
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_READ);
 	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr, RX_EDPKT_DMA_BASE);
-	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
-			RX_EDPKT_DMA_END);
-	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_addr + (DESC_RING_SIZE - 1),
-			RX_EDPKT_DMA_VALID);
-
-	reg = UMAC2FB_CFG_DEFAULT_EN |
-	      ((intf->channel + 11) << UMAC2FB_CFG_CHID_SHIFT);
-	reg |= (0xd << UMAC2FB_CFG_OK_SEND_SHIFT);
-	umac2fb_wl(intf, reg, UMAC2FB_CFG);
-
-	return 0;
+	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_END);
+	rx_edpkt_dma_wq(intf, intf->rx_edpkt_dma_valid, RX_EDPKT_DMA_VALID);
 
-free_rx_ring:
-	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
-		       DMA_FROM_DEVICE);
-	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-
-	return ret;
+	umac2fb_wl(intf, UMAC2FB_CFG_DEFAULT_EN | ((intf->channel + 11) <<
+		   UMAC2FB_CFG_CHID_SHIFT) | (0xd << UMAC2FB_CFG_OK_SEND_SHIFT),
+		   UMAC2FB_CFG);
 }
 
-static void bcmasp_reclaim_free_all_rx(struct bcmasp_intf *intf)
-{
-	struct device *kdev = &intf->parent->pdev->dev;
-
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->rx_edpkt_cpu,
-			  intf->rx_edpkt_dma_addr);
-	dma_unmap_page(kdev, intf->rx_ring_dma, RING_BUFFER_SIZE,
-		       DMA_FROM_DEVICE);
-	__free_pages(virt_to_page(intf->rx_ring_cpu), intf->rx_buf_order);
-}
-
-static int bcmasp_init_tx(struct bcmasp_intf *intf)
+static void bcmasp_init_tx(struct bcmasp_intf *intf)
 {
-	struct device *kdev = &intf->parent->pdev->dev;
-	void *p;
-	int ret;
-
-	p = dma_alloc_coherent(kdev, DESC_RING_SIZE, &intf->tx_spb_dma_addr,
-			       GFP_KERNEL);
-	if (!p)
-		return -ENOMEM;
-
-	intf->tx_spb_cpu = p;
+	/* Restart from index 0 */
 	intf->tx_spb_dma_valid = intf->tx_spb_dma_addr + DESC_RING_SIZE - 1;
 	intf->tx_spb_dma_read = intf->tx_spb_dma_addr;
-
-	intf->tx_cbs = kcalloc(DESC_RING_COUNT, sizeof(struct bcmasp_tx_cb),
-			       GFP_KERNEL);
-	if (!intf->tx_cbs) {
-		ret = -ENOMEM;
-		goto free_tx_spb;
-	}
-
 	intf->tx_spb_index = 0;
 	intf->tx_spb_clean_index = 0;
 
-	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
-
 	/* Make sure channels are disabled */
 	tx_spb_ctrl_wl(intf, 0x0, TX_SPB_CTRL_ENABLE);
 	tx_epkt_core_wl(intf, 0x0, TX_EPKT_C_CFG_MISC);
@@ -806,26 +803,6 @@ static int bcmasp_init_tx(struct bcmasp_intf *intf)
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_addr, TX_SPB_DMA_BASE);
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_END);
 	tx_spb_dma_wq(intf, intf->tx_spb_dma_valid, TX_SPB_DMA_VALID);
-
-	return 0;
-
-free_tx_spb:
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
-			  intf->tx_spb_dma_addr);
-
-	return ret;
-}
-
-static void bcmasp_reclaim_free_all_tx(struct bcmasp_intf *intf)
-{
-	struct device *kdev = &intf->parent->pdev->dev;
-
-	/* Free descriptors */
-	dma_free_coherent(kdev, DESC_RING_SIZE, intf->tx_spb_cpu,
-			  intf->tx_spb_dma_addr);
-
-	/* Free cbs */
-	kfree(intf->tx_cbs);
 }
 
 static void bcmasp_ephy_enable_set(struct bcmasp_intf *intf, bool enable)
@@ -915,10 +892,7 @@ static void bcmasp_netif_deinit(struct net_device *dev)
 	bcmasp_enable_rx_irq(intf, 0);
 
 	netif_napi_del(&intf->tx_napi);
-	bcmasp_reclaim_free_all_tx(intf);
-
 	netif_napi_del(&intf->rx_napi);
-	bcmasp_reclaim_free_all_rx(intf);
 }
 
 static int bcmasp_stop(struct net_device *dev)
@@ -932,6 +906,8 @@ static int bcmasp_stop(struct net_device *dev)
 	bcmasp_netif_deinit(dev);
 
+	bcmasp_reclaim_free_buffers(intf);
+
 	phy_disconnect(dev->phydev);
 
 	/* Disable internal EPHY or external PHY */
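
The power-management payoff is visible in the two hunks above: bcmasp_netif_deinit(), which a suspend path can share, no longer frees anything; reclaiming moves to bcmasp_stop() alone. A condensed sketch of the resulting stop path (simplified from the hunks above, with error handling and PHY teardown omitted; the helper name is hypothetical):

    /* Condensed from the diff: quiesce on deinit, free only on full stop. */
    static int bcmasp_stop_sketch(struct net_device *dev)
    {
            struct bcmasp_intf *intf = netdev_priv(dev);

            bcmasp_netif_deinit(dev);               /* NAPI del, IRQs off - no freeing */
            bcmasp_reclaim_free_buffers(intf);      /* buffers live until here */
            return 0;
    }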
@@ -1073,17 +1049,12 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 	intf->old_link = -1;
 	intf->old_pause = -1;
 
-	ret = bcmasp_init_tx(intf);
-	if (ret)
-		goto err_phy_disconnect;
-
-	/* Turn on asp */
+	bcmasp_init_tx(intf);
+	netif_napi_add_tx(intf->ndev, &intf->tx_napi, bcmasp_tx_poll);
 	bcmasp_enable_tx(intf, 1);
 
-	ret = bcmasp_init_rx(intf);
-	if (ret)
-		goto err_reclaim_tx;
-
+	bcmasp_init_rx(intf);
+	netif_napi_add(intf->ndev, &intf->rx_napi, bcmasp_rx_poll);
 	bcmasp_enable_rx(intf, 1);
 
 	/* Turn on UniMAC TX/RX */
@@ -1097,12 +1068,6 @@ static int bcmasp_netif_init(struct net_device *dev, bool phy_connect)
 	return 0;
 
-err_reclaim_tx:
-	netif_napi_del(&intf->tx_napi);
-	bcmasp_reclaim_free_all_tx(intf);
-err_phy_disconnect:
-	if (phydev)
-		phy_disconnect(phydev);
 err_phy_disable:
 	if (intf->internal_phy)
 		bcmasp_ephy_enable_set(intf, false);
@@ -1118,13 +1083,24 @@ static int bcmasp_open(struct net_device *dev)
 	netif_dbg(intf, ifup, dev, "bcmasp open\n");
 
-	ret = clk_prepare_enable(intf->parent->clk);
+	ret = bcmasp_alloc_buffers(intf);
 	if (ret)
 		return ret;
 
-	ret = bcmasp_netif_init(dev, true);
+	ret = clk_prepare_enable(intf->parent->clk);
 	if (ret)
+		goto err_free_mem;
+
+	ret = bcmasp_netif_init(dev, true);
+	if (ret) {
 		clk_disable_unprepare(intf->parent->clk);
+		goto err_free_mem;
+	}
+
+	return ret;
+
+err_free_mem:
+	bcmasp_reclaim_free_buffers(intf);
 
 	return ret;
 }
...
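
One pattern worth calling out in bcmasp_alloc_buffers() is the unwind ordering: each goto label frees exactly the resources allocated before the failing step, in reverse order. A self-contained userspace sketch of the same idiom (hypothetical names, for illustration only):

    #include <stdlib.h>

    struct rings { void *rx; void *cbs; };

    /* Reverse-order unwind, mirroring bcmasp_alloc_buffers(): a failure at
     * step N jumps to a label that frees steps N-1..1 and nothing else.
     */
    static int rings_alloc(struct rings *r)
    {
            r->rx = malloc(4096);
            if (!r->rx)
                    goto err;

            r->cbs = calloc(64, sizeof(void *));
            if (!r->cbs)
                    goto free_rx;   /* only r->rx exists at this point */

            return 0;

    free_rx:
            free(r->rx);
    err:
            return -1;
    }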