Commit b8603d2a authored by Lucas Stach, committed by Vinod Koul

dmaengine: imx-sdma: implement channel termination via worker

The dmaengine documentation states that device_terminate_all may be
asynchronous and need not wait for the active transfers to stop.

This allows us to move most of the functionality currently implemented
in the sdma channel termination function into a worker that runs outside
of any atomic context. Moving this out of atomic context has two
benefits: we can now sleep while waiting for the channel to terminate
instead of busy waiting, and the DMA descriptors are freed with IRQs
enabled, getting rid of a warning in the DMA mapping code.

As the termination is now asynchronous, we need to implement the
device_synchronize dmaengine callback, which simply waits for the
worker to finish its execution.
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
Signed-off-by: Robin Gong <yibin.gong@nxp.com>
Cc: stable <stable@vger.kernel.org>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent ebb853b1
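
For context, here is a minimal client-side sketch of the contract this patch relies on, using the existing generic dmaengine wrappers: dmaengine_terminate_async() invokes the device_terminate_all callback and dmaengine_synchronize() invokes device_synchronize. The example_stop_channel() helper and the chan variable are hypothetical and not part of this patch.

#include <linux/dmaengine.h>

/* Hypothetical helper; chan is a previously requested DMA channel. */
static void example_stop_channel(struct dma_chan *chan)
{
	/* Maps to device_terminate_all; may return before the transfer has stopped. */
	dmaengine_terminate_async(chan);

	/*
	 * Maps to the new device_synchronize callback; waits for the
	 * driver's terminate worker to finish. Must be called from a
	 * sleepable context before freeing or reusing DMA buffers.
	 */
	dmaengine_synchronize(chan);

	/* dmaengine_terminate_sync(chan) combines both steps. */
}
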
@@ -32,6 +32,7 @@
 #include <linux/of_address.h>
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
+#include <linux/workqueue.h>
 
 #include <asm/irq.h>
 #include <linux/platform_data/dma-imx-sdma.h>
@@ -375,6 +376,7 @@ struct sdma_channel {
 	u32				shp_addr, per_addr;
 	enum dma_status			status;
 	struct imx_dma_data		data;
+	struct work_struct		terminate_worker;
 };
 
 #define IMX_DMA_SG_LOOP		BIT(0)
@@ -1025,31 +1027,49 @@ static int sdma_disable_channel(struct dma_chan *chan)
 	return 0;
 }
 
-static int sdma_disable_channel_with_delay(struct dma_chan *chan)
+static void sdma_channel_terminate_work(struct work_struct *work)
 {
-	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_channel *sdmac = container_of(work, struct sdma_channel,
+						  terminate_worker);
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	sdma_disable_channel(chan);
-
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
-	vchan_get_all_descriptors(&sdmac->vc, &head);
-	sdmac->desc = NULL;
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
-	vchan_dma_desc_free_list(&sdmac->vc, &head);
-
 	/*
 	 * According to NXP R&D team a delay of one BD SDMA cost time
 	 * (maximum is 1ms) should be added after disable of the channel
 	 * bit, to ensure SDMA core has really been stopped after SDMA
 	 * clients call .device_terminate_all.
 	 */
-	mdelay(1);
+	usleep_range(1000, 2000);
+
+	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_get_all_descriptors(&sdmac->vc, &head);
+	sdmac->desc = NULL;
+	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_dma_desc_free_list(&sdmac->vc, &head);
+}
+
+static int sdma_disable_channel_async(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+	sdma_disable_channel(chan);
+
+	if (sdmac->desc)
+		schedule_work(&sdmac->terminate_worker);
 
 	return 0;
 }
 
+static void sdma_channel_synchronize(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+
+	vchan_synchronize(&sdmac->vc);
+
+	flush_work(&sdmac->terminate_worker);
+}
+
 static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
 {
 	struct sdma_engine *sdma = sdmac->sdma;
@@ -1287,7 +1307,9 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	struct sdma_engine *sdma = sdmac->sdma;
 
-	sdma_disable_channel_with_delay(chan);
+	sdma_disable_channel_async(chan);
+
+	sdma_channel_synchronize(chan);
 
 	if (sdmac->event_id0)
 		sdma_event_disable(sdmac, sdmac->event_id0);
@@ -1993,6 +2015,8 @@ static int sdma_probe(struct platform_device *pdev)
 
 		sdmac->channel = i;
 		sdmac->vc.desc_free = sdma_desc_free;
+		INIT_WORK(&sdmac->terminate_worker,
+			  sdma_channel_terminate_work);
 		/*
 		 * Add the channel to the DMAC list. Do not add channel 0 though
 		 * because we need it internally in the SDMA driver. This also means
@@ -2044,7 +2068,8 @@ static int sdma_probe(struct platform_device *pdev)
 	sdma->dma_device.device_prep_slave_sg = sdma_prep_slave_sg;
 	sdma->dma_device.device_prep_dma_cyclic = sdma_prep_dma_cyclic;
 	sdma->dma_device.device_config = sdma_config;
-	sdma->dma_device.device_terminate_all = sdma_disable_channel_with_delay;
+	sdma->dma_device.device_terminate_all = sdma_disable_channel_async;
+	sdma->dma_device.device_synchronize = sdma_channel_synchronize;
 	sdma->dma_device.src_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.dst_addr_widths = SDMA_DMA_BUSWIDTHS;
 	sdma->dma_device.directions = SDMA_DMA_DIRECTIONS;