Commit c3635c78 authored by Linus Walleij, committed by Dan Williams

DMAENGINE: generic slave control v2

Convert the device_terminate_all() operation on the
DMA engine to a generic device_control() operation,
which can now optionally also support pausing and
resuming DMA on a given channel. Implemented for the
COH 901 318 DMAC as an example.

[dan.j.williams@intel.com: update for timberdale]
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Acked-by: Mark Brown <broonie@opensource.wolfsonmicro.com>
Cc: Maciej Sosnowski <maciej.sosnowski@intel.com>
Cc: Nicolas Ferre <nicolas.ferre@atmel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Li Yang <leoli@freescale.com>
Cc: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Haavard Skinnemoen <haavard.skinnemoen@atmel.com>
Cc: Magnus Damm <damm@opensource.se>
Cc: Liam Girdwood <lrg@slimlogic.co.uk>
Cc: Joe Perches <joe@perches.com>
Cc: Roland Dreier <rdreier@cisco.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 0f65169b
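
To make the new interface concrete, here is a minimal, illustrative sketch (not part of the commit) of how a slave DMA client could drive the generic control operation. The function name example_pause_resume and the surrounding setup are hypothetical; the sketch assumes a channel already obtained via dma_request_channel() and the <linux/dmaengine.h> definitions introduced below. Drivers that only implement DMA_TERMINATE_ALL return -ENXIO for the pause and resume commands, as the per-driver hunks show.

static int example_pause_resume(struct dma_chan *chan)
{
	int ret;

	/* Pause the ongoing transfer; -ENXIO means the driver cannot pause */
	ret = chan->device->device_control(chan, DMA_PAUSE);
	if (ret)
		return ret;

	/* ... inspect or reprogram the peripheral while DMA is held ... */

	/* Resume the paused transfer */
	ret = chan->device->device_control(chan, DMA_RESUME);
	if (ret)
		return ret;

	/* Abort everything still queued or running on the channel */
	return chan->device->device_control(chan, DMA_TERMINATE_ALL);
}
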
@@ -109,20 +109,6 @@ struct coh901318_platform {
  */
 u32 coh901318_get_bytes_left(struct dma_chan *chan);
-/**
- * coh901318_stop() - Stops dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_stop(struct dma_chan *chan);
-/**
- * coh901318_continue() - Resumes a stopped dma transfer
- * @chan: dma channel handle
- * return 0 on success otherwise negative value
- */
-void coh901318_continue(struct dma_chan *chan);
 /**
  * coh901318_filter_id() - DMA channel filter function
  * @chan: dma channel handle
...
@@ -759,13 +759,17 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
-static void atc_terminate_all(struct dma_chan *chan)
+static int atc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
 	struct at_dma *atdma = to_at_dma(chan->device);
 	struct at_desc *desc, *_desc;
 	LIST_HEAD(list);
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -789,6 +793,8 @@ static void atc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
+	return 0;
 }
 /**
@@ -1091,7 +1097,7 @@ static int __init at_dma_probe(struct platform_device *pdev)
 	if (dma_has_cap(DMA_SLAVE, atdma->dma_common.cap_mask)) {
 		atdma->dma_common.device_prep_slave_sg = atc_prep_slave_sg;
-		atdma->dma_common.device_terminate_all = atc_terminate_all;
+		atdma->dma_common.device_control = atc_control;
 	}
 	dma_writel(atdma, EN, AT_DMA_ENABLE);
...
@@ -506,10 +506,11 @@ u32 coh901318_get_bytes_left(struct dma_chan *chan)
 EXPORT_SYMBOL(coh901318_get_bytes_left);
-/* Stops a transfer without losing data. Enables power save.
-   Use this function in conjunction with coh901318_continue(..)
- */
-void coh901318_stop(struct dma_chan *chan)
+/*
+ * Pauses a transfer without losing data. Enables power save.
+ * Use this function in conjunction with coh901318_resume.
+ */
+static void coh901318_pause(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -550,12 +551,11 @@ void coh901318_stop(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_stop);
-/* Continues a transfer that has been stopped via 300_dma_stop(..).
+/* Resumes a transfer that has been stopped via 300_dma_stop(..).
    Power save is handled.
  */
-void coh901318_continue(struct dma_chan *chan)
+static void coh901318_resume(struct dma_chan *chan)
 {
 	u32 val;
 	unsigned long flags;
@@ -581,7 +581,6 @@ void coh901318_continue(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
-EXPORT_SYMBOL(coh901318_continue);
 bool coh901318_filter_id(struct dma_chan *chan, void *chan_id)
 {
@@ -945,7 +944,7 @@ coh901318_free_chan_resources(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL);
 }
@@ -1179,16 +1178,29 @@ coh901318_issue_pending(struct dma_chan *chan)
 	spin_unlock_irqrestore(&cohc->lock, flags);
 }
-static void
-coh901318_terminate_all(struct dma_chan *chan)
+static int
+coh901318_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	unsigned long flags;
 	struct coh901318_chan *cohc = to_coh901318_chan(chan);
 	struct coh901318_desc *cohd;
 	void __iomem *virtbase = cohc->base->virtbase;
-	coh901318_stop(chan);
+	if (cmd == DMA_PAUSE) {
+		coh901318_pause(chan);
+		return 0;
+	}
+	if (cmd == DMA_RESUME) {
+		coh901318_resume(chan);
+		return 0;
+	}
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
+	/* The remainder of this function terminates the transfer */
+	coh901318_pause(chan);
 	spin_lock_irqsave(&cohc->lock, flags);
 	/* Clear any pending BE or TC interrupt */
@@ -1227,6 +1239,8 @@ coh901318_terminate_all(struct dma_chan *chan)
 	cohc->busy = 0;
 	spin_unlock_irqrestore(&cohc->lock, flags);
+	return 0;
 }
 void coh901318_base_init(struct dma_device *dma, const int *pick_chans,
 			 struct coh901318_base *base)
@@ -1344,7 +1358,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_slave.device_prep_slave_sg = coh901318_prep_slave_sg;
 	base->dma_slave.device_is_tx_complete = coh901318_is_tx_complete;
 	base->dma_slave.device_issue_pending = coh901318_issue_pending;
-	base->dma_slave.device_terminate_all = coh901318_terminate_all;
+	base->dma_slave.device_control = coh901318_control;
 	base->dma_slave.dev = &pdev->dev;
 	err = dma_async_device_register(&base->dma_slave);
@@ -1364,7 +1378,7 @@ static int __init coh901318_probe(struct platform_device *pdev)
 	base->dma_memcpy.device_prep_dma_memcpy = coh901318_prep_memcpy;
 	base->dma_memcpy.device_is_tx_complete = coh901318_is_tx_complete;
 	base->dma_memcpy.device_issue_pending = coh901318_issue_pending;
-	base->dma_memcpy.device_terminate_all = coh901318_terminate_all;
+	base->dma_memcpy.device_control = coh901318_control;
 	base->dma_memcpy.dev = &pdev->dev;
 	/*
 	 * This controller can only access address at even 32bit boundaries,
...
@@ -694,7 +694,7 @@ int dma_async_device_register(struct dma_device *device)
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 		!device->device_prep_slave_sg);
 	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
-		!device->device_terminate_all);
+		!device->device_control);
 	BUG_ON(!device->device_alloc_chan_resources);
 	BUG_ON(!device->device_free_chan_resources);
...
@@ -781,13 +781,17 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return NULL;
 }
-static void dwc_terminate_all(struct dma_chan *chan)
+static int dwc_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
 	struct dw_dma *dw = to_dw_dma(chan->device);
 	struct dw_desc *desc, *_desc;
 	LIST_HEAD(list);
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	/*
 	 * This is only called when something went wrong elsewhere, so
 	 * we don't really care about the data. Just disable the
@@ -810,6 +814,8 @@ static void dwc_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		dwc_descriptor_complete(dwc, desc);
+	return 0;
 }
 static enum dma_status
@@ -1338,7 +1344,7 @@ static int __init dw_probe(struct platform_device *pdev)
 	dw->dma.device_prep_dma_memcpy = dwc_prep_dma_memcpy;
 	dw->dma.device_prep_slave_sg = dwc_prep_slave_sg;
-	dw->dma.device_terminate_all = dwc_terminate_all;
+	dw->dma.device_control = dwc_control;
 	dw->dma.device_is_tx_complete = dwc_is_tx_complete;
 	dw->dma.device_issue_pending = dwc_issue_pending;
...
@@ -774,13 +774,18 @@ static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
 	return NULL;
 }
-static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
+static int fsl_dma_device_control(struct dma_chan *dchan,
+				  enum dma_ctrl_cmd cmd)
 {
 	struct fsldma_chan *chan;
 	unsigned long flags;
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	if (!dchan)
-		return;
+		return -EINVAL;
 	chan = to_fsl_chan(dchan);
@@ -794,6 +799,8 @@ static void fsl_dma_device_terminate_all(struct dma_chan *dchan)
 	fsldma_free_desc_list(chan, &chan->ld_running);
 	spin_unlock_irqrestore(&chan->desc_lock, flags);
+	return 0;
 }
 /**
@@ -1332,7 +1339,7 @@ static int __devinit fsldma_of_probe(struct of_device *op,
 	fdev->common.device_is_tx_complete = fsl_dma_is_complete;
 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
-	fdev->common.device_terminate_all = fsl_dma_device_terminate_all;
+	fdev->common.device_control = fsl_dma_device_control;
 	fdev->common.dev = &op->dev;
 	dev_set_drvdata(&op->dev, fdev);
...
@@ -1472,13 +1472,17 @@ static void idmac_issue_pending(struct dma_chan *chan)
 	 */
 }
-static void __idmac_terminate_all(struct dma_chan *chan)
+static int __idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
 	struct idmac *idmac = to_idmac(chan->device);
 	unsigned long flags;
 	int i;
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	ipu_disable_channel(idmac, ichan,
 			    ichan->status >= IPU_CHANNEL_ENABLED);
@@ -1505,17 +1509,22 @@ static void __idmac_terminate_all(struct dma_chan *chan)
 	tasklet_enable(&to_ipu(idmac)->tasklet);
 	ichan->status = IPU_CHANNEL_INITIALIZED;
+	return 0;
 }
-static void idmac_terminate_all(struct dma_chan *chan)
+static int idmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct idmac_channel *ichan = to_idmac_chan(chan);
+	int ret;
 	mutex_lock(&ichan->chan_mutex);
-	__idmac_terminate_all(chan);
+	ret = __idmac_control(chan, cmd);
 	mutex_unlock(&ichan->chan_mutex);
+	return ret;
 }
 #ifdef DEBUG
@@ -1607,7 +1616,7 @@ static void idmac_free_chan_resources(struct dma_chan *chan)
 	mutex_lock(&ichan->chan_mutex);
-	__idmac_terminate_all(chan);
+	__idmac_control(chan, DMA_TERMINATE_ALL);
 	if (ichan->status > IPU_CHANNEL_FREE) {
 #ifdef DEBUG
@@ -1669,7 +1678,7 @@ static int __init ipu_idmac_init(struct ipu *ipu)
 	/* Compulsory for DMA_SLAVE fields */
 	dma->device_prep_slave_sg = idmac_prep_slave_sg;
-	dma->device_terminate_all = idmac_terminate_all;
+	dma->device_control = idmac_control;
 	INIT_LIST_HEAD(&dma->channels);
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
@@ -1703,7 +1712,7 @@ static void __exit ipu_idmac_exit(struct ipu *ipu)
 	for (i = 0; i < IPU_CHANNELS_NUM; i++) {
 		struct idmac_channel *ichan = ipu->channel + i;
-		idmac_terminate_all(&ichan->dma_chan);
+		idmac_control(&ichan->dma_chan, DMA_TERMINATE_ALL);
 		idmac_prep_slave_sg(&ichan->dma_chan, NULL, 0, DMA_NONE, 0);
 	}
...
@@ -580,12 +580,16 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_slave_sg(
 			       direction, flags);
 }
-static void sh_dmae_terminate_all(struct dma_chan *chan)
+static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct sh_dmae_chan *sh_chan = to_sh_chan(chan);
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	if (!chan)
-		return;
+		return -EINVAL;
 	dmae_halt(sh_chan);
@@ -601,6 +605,8 @@ static void sh_dmae_terminate_all(struct dma_chan *chan)
 	spin_unlock_bh(&sh_chan->desc_lock);
 	sh_dmae_chan_ld_cleanup(sh_chan, true);
+	return 0;
 }
 static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
@@ -1029,7 +1035,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	/* Compulsory for DMA_SLAVE fields */
 	shdev->common.device_prep_slave_sg = sh_dmae_prep_slave_sg;
-	shdev->common.device_terminate_all = sh_dmae_terminate_all;
+	shdev->common.device_control = sh_dmae_control;
 	shdev->common.dev = &pdev->dev;
 	/* Default transfer size of 32 bytes requires 32-byte alignment */
...
@@ -613,7 +613,7 @@ static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan,
 	return &td_desc->txd;
 }
-static void td_terminate_all(struct dma_chan *chan)
+static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct timb_dma_chan *td_chan =
 		container_of(chan, struct timb_dma_chan, chan);
@@ -621,6 +621,9 @@ static void td_terminate_all(struct dma_chan *chan)
 	dev_dbg(chan2dev(chan), "%s: Entry\n", __func__);
+	if (cmd != DMA_TERMINATE_ALL)
+		return -ENXIO;
 	/* first the easy part, put the queue into the free list */
 	spin_lock_bh(&td_chan->lock);
 	list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue,
@@ -630,6 +633,8 @@ static void td_terminate_all(struct dma_chan *chan)
 	/* now tear down the runnning */
 	__td_finish(td_chan);
 	spin_unlock_bh(&td_chan->lock);
+	return 0;
 }
 static void td_tasklet(unsigned long data)
@@ -743,7 +748,7 @@ static int __devinit td_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, td->dma.cap_mask);
 	dma_cap_set(DMA_PRIVATE, td->dma.cap_mask);
 	td->dma.device_prep_slave_sg = td_prep_slave_sg;
-	td->dma.device_terminate_all = td_terminate_all;
+	td->dma.device_control = td_control;
 	td->dma.dev = &pdev->dev;
...
@@ -938,12 +938,16 @@ txx9dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 	return &first->txd;
 }
-static void txx9dmac_terminate_all(struct dma_chan *chan)
+static int txx9dmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd)
 {
 	struct txx9dmac_chan *dc = to_txx9dmac_chan(chan);
 	struct txx9dmac_desc *desc, *_desc;
 	LIST_HEAD(list);
+	/* Only supports DMA_TERMINATE_ALL */
+	if (cmd != DMA_TERMINATE_ALL)
+		return -EINVAL;
 	dev_vdbg(chan2dev(chan), "terminate_all\n");
 	spin_lock_bh(&dc->lock);
@@ -958,6 +962,8 @@ static void txx9dmac_terminate_all(struct dma_chan *chan)
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		txx9dmac_descriptor_complete(dc, desc);
+	return 0;
 }
 static enum dma_status
@@ -1153,7 +1159,7 @@ static int __init txx9dmac_chan_probe(struct platform_device *pdev)
 	dc->dma.dev = &pdev->dev;
 	dc->dma.device_alloc_chan_resources = txx9dmac_alloc_chan_resources;
 	dc->dma.device_free_chan_resources = txx9dmac_free_chan_resources;
-	dc->dma.device_terminate_all = txx9dmac_terminate_all;
+	dc->dma.device_control = txx9dmac_control;
 	dc->dma.device_is_tx_complete = txx9dmac_is_tx_complete;
 	dc->dma.device_issue_pending = txx9dmac_issue_pending;
 	if (pdata && pdata->memcpy_chan == ch) {
...
@@ -578,7 +578,7 @@ static void atmci_stop_dma(struct atmel_mci *host)
 	struct dma_chan *chan = host->data_chan;
 	if (chan) {
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL);
 		atmci_dma_cleanup(host);
 	} else {
 		/* Data transfer was stopped by the interrupt handler */
...
@@ -1087,7 +1087,7 @@ static void work_fn_rx(struct work_struct *work)
 		unsigned long flags;
 		int count;
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL);
 		dev_dbg(port->dev, "Read %u bytes with cookie %d\n",
 			sh_desc->partial, sh_desc->cookie);
...
@@ -387,7 +387,8 @@ static void sdc_disable_channel(struct mx3fb_info *mx3_fbi)
 	spin_unlock_irqrestore(&mx3fb->lock, flags);
-	mx3_fbi->txd->chan->device->device_terminate_all(mx3_fbi->txd->chan);
+	mx3_fbi->txd->chan->device->device_control(mx3_fbi->txd->chan,
+						   DMA_TERMINATE_ALL);
 	mx3_fbi->txd = NULL;
 	mx3_fbi->cookie = -EINVAL;
 }
...
@@ -106,6 +106,19 @@ enum dma_ctrl_flags {
 	DMA_PREP_FENCE = (1 << 9),
 };
+/**
+ * enum dma_ctrl_cmd - DMA operations that can optionally be exercised
+ * on a running channel.
+ * @DMA_TERMINATE_ALL: terminate all ongoing transfers
+ * @DMA_PAUSE: pause ongoing transfers
+ * @DMA_RESUME: resume paused transfer
+ */
+enum dma_ctrl_cmd {
+	DMA_TERMINATE_ALL,
+	DMA_PAUSE,
+	DMA_RESUME,
+};
 /**
  * enum sum_check_bits - bit position of pq_check_flags
  */
@@ -261,7 +274,8 @@ struct dma_async_tx_descriptor {
  * @device_prep_dma_memset: prepares a memset operation
  * @device_prep_dma_interrupt: prepares an end of chain interrupt operation
  * @device_prep_slave_sg: prepares a slave dma operation
- * @device_terminate_all: terminate all pending operations
+ * @device_control: manipulate all pending operations on a channel, returns
+ *	zero or error code
  * @device_is_tx_complete: poll for transaction completion
  * @device_issue_pending: push pending transactions to hardware
  */
@@ -313,7 +327,7 @@ struct dma_device {
 			struct dma_chan *chan, struct scatterlist *sgl,
 			unsigned int sg_len, enum dma_data_direction direction,
 			unsigned long flags);
-	void (*device_terminate_all)(struct dma_chan *chan);
+	int (*device_control)(struct dma_chan *chan, enum dma_ctrl_cmd cmd);
 	enum dma_status (*device_is_tx_complete)(struct dma_chan *chan,
 			dma_cookie_t cookie, dma_cookie_t *last,
...
@@ -159,7 +159,7 @@ static void txx9aclc_dma_tasklet(unsigned long data)
 		void __iomem *base = drvdata->base;
 		spin_unlock_irqrestore(&dmadata->dma_lock, flags);
-		chan->device->device_terminate_all(chan);
+		chan->device->device_control(chan, DMA_TERMINATE_ALL);
 		/* first time */
 		for (i = 0; i < NR_DMA_CHAIN; i++) {
 			desc = txx9aclc_dma_submit(dmadata,
@@ -267,7 +267,7 @@ static int txx9aclc_pcm_close(struct snd_pcm_substream *substream)
 	struct dma_chan *chan = dmadata->dma_chan;
 	dmadata->frag_count = -1;
-	chan->device->device_terminate_all(chan);
+	chan->device->device_control(chan, DMA_TERMINATE_ALL);
 	return 0;
 }
@@ -396,7 +396,7 @@ static int txx9aclc_pcm_remove(struct platform_device *pdev)
 		struct dma_chan *chan = dmadata->dma_chan;
 		if (chan) {
 			dmadata->frag_count = -1;
-			chan->device->device_terminate_all(chan);
+			chan->device->device_control(chan, DMA_TERMINATE_ALL);
 			dma_release_channel(chan);
 		}
 		dev->dmadata[i].dma_chan = NULL;
...