Commit 84a3f375 authored by Dmitry Osipenko, committed by Vinod Koul

dmaengine: tegra-apb: Keep clock enabled only during DMA transfer

It's a bit impractical to enable hardware's clock at the time of DMA
channel's allocation because most of DMA client drivers allocate DMA
channel at the time of the driver's probing, and thus, DMA clock is kept
always-enabled in practice, defeating the whole purpose of runtime PM.
Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
Acked-by: Jon Hunter <jonathanh@nvidia.com>
Link: https://lore.kernel.org/r/20200209163356.6439-13-digetx@gmail.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent d8396c05
...@@ -569,6 +569,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc, ...@@ -569,6 +569,7 @@ static bool handle_continuous_head_request(struct tegra_dma_channel *tdc,
hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node); hsgreq = list_first_entry(&tdc->pending_sg_req, typeof(*hsgreq), node);
if (!hsgreq->configured) { if (!hsgreq->configured) {
tegra_dma_stop(tdc); tegra_dma_stop(tdc);
pm_runtime_put(tdc->tdma->dev);
dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n"); dev_err(tdc2dev(tdc), "Error in DMA transfer, aborting DMA\n");
tegra_dma_abort_all(tdc); tegra_dma_abort_all(tdc);
return false; return false;
...@@ -604,9 +605,14 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc, ...@@ -604,9 +605,14 @@ static void handle_once_dma_done(struct tegra_dma_channel *tdc,
list_add_tail(&sgreq->node, &tdc->free_sg_req); list_add_tail(&sgreq->node, &tdc->free_sg_req);
/* Do not start DMA if it is going to be terminate */ /* Do not start DMA if it is going to be terminate */
if (to_terminate || list_empty(&tdc->pending_sg_req)) if (to_terminate)
return; return;
if (list_empty(&tdc->pending_sg_req)) {
pm_runtime_put(tdc->tdma->dev);
return;
}
tdc_start_head_req(tdc); tdc_start_head_req(tdc);
} }
...@@ -712,6 +718,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) ...@@ -712,6 +718,7 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
{ {
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
unsigned long flags; unsigned long flags;
int err;
spin_lock_irqsave(&tdc->lock, flags); spin_lock_irqsave(&tdc->lock, flags);
if (list_empty(&tdc->pending_sg_req)) { if (list_empty(&tdc->pending_sg_req)) {
...@@ -719,6 +726,12 @@ static void tegra_dma_issue_pending(struct dma_chan *dc) ...@@ -719,6 +726,12 @@ static void tegra_dma_issue_pending(struct dma_chan *dc)
goto end; goto end;
} }
if (!tdc->busy) { if (!tdc->busy) {
err = pm_runtime_get_sync(tdc->tdma->dev);
if (err < 0) {
dev_err(tdc2dev(tdc), "Failed to enable DMA\n");
goto end;
}
tdc_start_head_req(tdc); tdc_start_head_req(tdc);
/* Continuous single mode: Configure next req */ /* Continuous single mode: Configure next req */
...@@ -778,6 +791,8 @@ static int tegra_dma_terminate_all(struct dma_chan *dc) ...@@ -778,6 +791,8 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
} }
tegra_dma_resume(tdc); tegra_dma_resume(tdc);
pm_runtime_put(tdc->tdma->dev);
skip_dma_stop: skip_dma_stop:
tegra_dma_abort_all(tdc); tegra_dma_abort_all(tdc);
...@@ -1272,22 +1287,15 @@ tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr, ...@@ -1272,22 +1287,15 @@ tegra_dma_prep_dma_cyclic(struct dma_chan *dc, dma_addr_t buf_addr,
static int tegra_dma_alloc_chan_resources(struct dma_chan *dc) static int tegra_dma_alloc_chan_resources(struct dma_chan *dc)
{ {
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma *tdma = tdc->tdma;
int ret;
dma_cookie_init(&tdc->dma_chan); dma_cookie_init(&tdc->dma_chan);
ret = pm_runtime_get_sync(tdma->dev);
if (ret < 0)
return ret;
return 0; return 0;
} }
static void tegra_dma_free_chan_resources(struct dma_chan *dc) static void tegra_dma_free_chan_resources(struct dma_chan *dc)
{ {
struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc); struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
struct tegra_dma *tdma = tdc->tdma;
struct tegra_dma_desc *dma_desc; struct tegra_dma_desc *dma_desc;
struct tegra_dma_sg_req *sg_req; struct tegra_dma_sg_req *sg_req;
struct list_head dma_desc_list; struct list_head dma_desc_list;
...@@ -1320,7 +1328,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc) ...@@ -1320,7 +1328,6 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
list_del(&sg_req->node); list_del(&sg_req->node);
kfree(sg_req); kfree(sg_req);
} }
pm_runtime_put(tdma->dev);
tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID; tdc->slave_id = TEGRA_APBDMA_SLAVE_ID_INVALID;
} }
...@@ -1420,6 +1427,11 @@ static int tegra_dma_probe(struct platform_device *pdev) ...@@ -1420,6 +1427,11 @@ static int tegra_dma_probe(struct platform_device *pdev)
spin_lock_init(&tdma->global_lock); spin_lock_init(&tdma->global_lock);
ret = clk_prepare(tdma->dma_clk);
if (ret)
return ret;
pm_runtime_irq_safe(&pdev->dev);
pm_runtime_enable(&pdev->dev); pm_runtime_enable(&pdev->dev);
ret = pm_runtime_get_sync(&pdev->dev); ret = pm_runtime_get_sync(&pdev->dev);
...@@ -1535,6 +1547,7 @@ static int tegra_dma_probe(struct platform_device *pdev) ...@@ -1535,6 +1547,7 @@ static int tegra_dma_probe(struct platform_device *pdev)
err_pm_disable: err_pm_disable:
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
clk_unprepare(tdma->dma_clk);
return ret; return ret;
} }
...@@ -1545,6 +1558,7 @@ static int tegra_dma_remove(struct platform_device *pdev) ...@@ -1545,6 +1558,7 @@ static int tegra_dma_remove(struct platform_device *pdev)
dma_async_device_unregister(&tdma->dma_dev); dma_async_device_unregister(&tdma->dma_dev);
pm_runtime_disable(&pdev->dev); pm_runtime_disable(&pdev->dev);
clk_unprepare(tdma->dma_clk);
return 0; return 0;
} }
...@@ -1573,7 +1587,7 @@ static int tegra_dma_runtime_suspend(struct device *dev) ...@@ -1573,7 +1587,7 @@ static int tegra_dma_runtime_suspend(struct device *dev)
TEGRA_APBDMA_CHAN_WCOUNT); TEGRA_APBDMA_CHAN_WCOUNT);
} }
clk_disable_unprepare(tdma->dma_clk); clk_disable(tdma->dma_clk);
return 0; return 0;
} }
...@@ -1584,7 +1598,7 @@ static int tegra_dma_runtime_resume(struct device *dev) ...@@ -1584,7 +1598,7 @@ static int tegra_dma_runtime_resume(struct device *dev)
unsigned int i; unsigned int i;
int ret; int ret;
ret = clk_prepare_enable(tdma->dma_clk); ret = clk_enable(tdma->dma_clk);
if (ret < 0) { if (ret < 0) {
dev_err(dev, "clk_enable failed: %d\n", ret); dev_err(dev, "clk_enable failed: %d\n", ret);
return ret; return ret;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment