Commit be60f940 authored by Maxime Ripard, committed by Vinod Koul

dmaengine: sh: Split device_control

Split the device_control callback of the Super-H DMA driver to make use of the
newly introduced callbacks, which will eventually be used to retrieve slave
capabilities.
Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com>
Acked-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 4a533218
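
As context for the change: dmaengine clients do not call device_config or device_terminate_all directly; they go through the generic wrappers dmaengine_slave_config() and dmaengine_terminate_all(), which after this series are dispatched to the new per-operation callbacks instead of device_control. Below is a minimal, hypothetical client-side sketch (not part of this commit) showing the paths that end up in shdma_config() and shdma_terminate_all(); the device, the "rx" channel name, and the FIFO address are made up for illustration, and error handling is trimmed.

#include <linux/dmaengine.h>

/* Hypothetical client: configure an RX slave channel, then tear it down. */
static int example_rx_setup(struct device *dev, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_DEV_TO_MEM,
		.src_addr	= fifo_addr,
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_chan *chan;
	int ret;

	chan = dma_request_slave_channel(dev, "rx");	/* "rx" is illustrative */
	if (!chan)
		return -ENODEV;

	/* Reaches the driver's shdma_config() via the device_config callback. */
	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto out;

	/* ... prepare, submit and issue descriptors here ... */

	/* Reaches the driver's shdma_terminate_all() via device_terminate_all. */
	dmaengine_terminate_all(chan);
out:
	dma_release_channel(chan);
	return ret;
}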
@@ -729,18 +729,13 @@ static struct dma_async_tx_descriptor *shdma_prep_dma_cyclic(
 	return desc;
 }
 
-static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
-			 unsigned long arg)
+static int shdma_terminate_all(struct dma_chan *chan)
 {
 	struct shdma_chan *schan = to_shdma_chan(chan);
 	struct shdma_dev *sdev = to_shdma_dev(chan->device);
 	const struct shdma_ops *ops = sdev->ops;
-	struct dma_slave_config *config;
 	unsigned long flags;
-	int ret;
 
-	switch (cmd) {
-	case DMA_TERMINATE_ALL:
 	spin_lock_irqsave(&schan->chan_lock, flags);
 	ops->halt_channel(schan);
 
@@ -754,30 +749,28 @@ static int shdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	spin_unlock_irqrestore(&schan->chan_lock, flags);
 
 	shdma_chan_ld_cleanup(schan, true);
-	break;
-	case DMA_SLAVE_CONFIG:
+
+	return 0;
+}
+
+static int shdma_config(struct dma_chan *chan,
+			struct dma_slave_config *config)
+{
+	struct shdma_chan *schan = to_shdma_chan(chan);
+
 	/*
 	 * So far only .slave_id is used, but the slave drivers are
 	 * encouraged to also set a transfer direction and an address.
 	 */
-	if (!arg)
+	if (!config)
 		return -EINVAL;
 	/*
 	 * We could lock this, but you shouldn't be configuring the
 	 * channel, while using it...
 	 */
-	config = (struct dma_slave_config *)arg;
-	ret = shdma_setup_slave(schan, config->slave_id,
+	return shdma_setup_slave(schan, config->slave_id,
 				 config->direction == DMA_DEV_TO_MEM ?
 				 config->src_addr : config->dst_addr);
-	if (ret < 0)
-		return ret;
-	break;
-	default:
-		return -ENXIO;
-	}
-
-	return 0;
 }
 
 static void shdma_issue_pending(struct dma_chan *chan)
@@ -1002,7 +995,8 @@ int shdma_init(struct device *dev, struct shdma_dev *sdev,
 	/* Compulsory for DMA_SLAVE fields */
 	dma_dev->device_prep_slave_sg = shdma_prep_slave_sg;
 	dma_dev->device_prep_dma_cyclic = shdma_prep_dma_cyclic;
-	dma_dev->device_control = shdma_control;
+	dma_dev->device_config = shdma_config;
+	dma_dev->device_terminate_all = shdma_terminate_all;
 
 	dma_dev->dev = dev;