Commit 5f81158f authored by Rabin Vincent, committed by Dan Williams

dma40: combine desc init functions

The desc init code can be shared between the mem and slave prep routines.
Acked-by: Per Forlin <per.forlin@stericsson.com>
Acked-by: Jonas Aaberg <jonas.aberg@stericsson.com>
Signed-off-by: Rabin Vincent <rabin.vincent@stericsson.com>
Signed-off-by: Linus Walleij <linus.walleij@stericsson.com>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 95944c6e
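
The shape of the change, independent of the DMA40 specifics: the duplicated get/validate/init sequence from stedma40_memcpy_sg() and d40_prep_slave_sg() moves into one helper, d40_prep_desc(), which also owns cleanup on its own failure path, so each caller reduces to a single call plus a NULL check. A self-contained toy sketch of that pattern (hypothetical toy_* names in plain userspace C, not the driver code):

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the driver's channel and descriptor types. */
struct toy_chan { int data_width; };
struct toy_desc { int lli_len; unsigned long flags; };

/*
 * Shared prep helper: allocates, validates, and initializes a descriptor,
 * and frees it itself on the validation error path, so every caller
 * needs only a NULL check (mirroring d40_prep_desc()'s contract).
 */
static struct toy_desc *toy_prep_desc(struct toy_chan *chan, size_t len,
                                      unsigned long flags)
{
        struct toy_desc *desc = malloc(sizeof(*desc));

        if (!desc)
                return NULL;

        if (len % chan->data_width) {   /* "unaligned size" check */
                fprintf(stderr, "Unaligned size\n");
                free(desc);             /* helper owns the error path */
                return NULL;
        }

        desc->lli_len = (int)(len / chan->data_width);
        desc->flags = flags;
        return desc;
}

int main(void)
{
        struct toy_chan chan = { .data_width = 4 };
        struct toy_desc *desc = toy_prep_desc(&chan, 16, 0);

        if (!desc)
                return 1;
        printf("lli_len = %d\n", desc->lli_len);
        free(desc);
        return 0;
}

Centralizing the error handling in the helper is what lets the call sites in the patch below drop their per-site chan_err()/cleanup blocks.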
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -1617,6 +1617,35 @@ static u32 stedma40_residue(struct dma_chan *chan)
 	return bytes_left;
 }
 
+static struct d40_desc *
+d40_prep_desc(struct d40_chan *chan, struct scatterlist *sg,
+	      unsigned int sg_len, unsigned long dma_flags)
+{
+	struct stedma40_chan_cfg *cfg = &chan->dma_cfg;
+	struct d40_desc *desc;
+
+	desc = d40_desc_get(chan);
+	if (!desc)
+		return NULL;
+
+	desc->lli_len = d40_sg_2_dmalen(sg, sg_len, cfg->src_info.data_width,
+					cfg->dst_info.data_width);
+	if (desc->lli_len < 0) {
+		chan_err(chan, "Unaligned size\n");
+		d40_desc_free(chan, desc);
+		return NULL;
+	}
+
+	desc->lli_current = 0;
+	desc->txd.flags = dma_flags;
+	desc->txd.tx_submit = d40_tx_submit;
+
+	dma_async_tx_descriptor_init(&desc->txd, &chan->chan);
+
+	return desc;
+}
+
 struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 						   struct scatterlist *sgl_dst,
 						   struct scatterlist *sgl_src,
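A contract note on the helper above, for the call sites that follow: on the unaligned-size error, d40_prep_desc() frees the descriptor itself before returning NULL, so each prep routine shrinks to the shape below (lifted from the next hunk; locking elided):

	d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags);
	if (!d40d)
		goto err;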
@@ -1635,21 +1664,10 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 	}
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
 
-	if (d40d == NULL)
+	d40d = d40_prep_desc(d40c, sgl_dst, sgl_len, dma_flags);
+	if (!d40d)
 		goto err;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl_dst, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		goto err;
-	}
-
-	d40d->lli_current = 0;
-	d40d->txd.flags = dma_flags;
-
 	if (chan_is_logical(d40c)) {
@@ -1708,10 +1726,6 @@ struct dma_async_tx_descriptor *stedma40_memcpy_sg(struct dma_chan *chan,
 			       d40d->lli_pool.size, DMA_TO_DEVICE);
 	}
 
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
 	return &d40d->txd;
@@ -1900,21 +1914,11 @@ static int d40_prep_slave_sg_log(struct d40_desc *d40d,
 	dma_addr_t dev_addr = 0;
 	int total_size;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sg_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		return -EINVAL;
-	}
-
 	if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, true) < 0) {
 		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
-	d40d->lli_current = 0;
-
 	if (direction == DMA_FROM_DEVICE)
 		if (d40c->runtime_addr)
 			dev_addr = d40c->runtime_addr;
@@ -1954,21 +1958,11 @@ static int d40_prep_slave_sg_phy(struct d40_desc *d40d,
 	dma_addr_t dst_dev_addr;
 	int res;
 
-	d40d->lli_len = d40_sg_2_dmalen(sgl, sgl_len,
-					d40c->dma_cfg.src_info.data_width,
-					d40c->dma_cfg.dst_info.data_width);
-	if (d40d->lli_len < 0) {
-		chan_err(d40c, "Unaligned size\n");
-		return -EINVAL;
-	}
-
 	if (d40_pool_lli_alloc(d40c, d40d, d40d->lli_len, false) < 0) {
 		chan_err(d40c, "Out of memory\n");
 		return -ENOMEM;
 	}
 
-	d40d->lli_current = 0;
-
 	if (direction == DMA_FROM_DEVICE) {
 		dst_dev_addr = 0;
 		if (d40c->runtime_addr)
@@ -2031,8 +2025,8 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 	}
 
 	spin_lock_irqsave(&d40c->lock, flags);
-	d40d = d40_desc_get(d40c);
 
+	d40d = d40_prep_desc(d40c, sgl, sg_len, dma_flags);
 	if (d40d == NULL)
 		goto err;
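One ordering point implied by this hunk (my reading of the patch, not stated in the commit message): d40_prep_slave_sg_log()/_phy() above still pass d40d->lli_len to d40_pool_lli_alloc(), so they now depend on d40_prep_desc() having computed lli_len first. That holds because d40_prep_slave_sg() calls d40_prep_desc() here before dispatching to either variant:

	d40_prep_slave_sg()
	  -> d40_prep_desc()                  /* sets d40d->lli_len */
	  -> d40_prep_slave_sg_log() or _phy()
	       -> d40_pool_lli_alloc(..., d40d->lli_len, ...)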
@@ -2048,12 +2042,6 @@ static struct dma_async_tx_descriptor *d40_prep_slave_sg(struct dma_chan *chan,
 		goto err;
 	}
 
-	d40d->txd.flags = dma_flags;
-
-	dma_async_tx_descriptor_init(&d40d->txd, chan);
-
-	d40d->txd.tx_submit = d40_tx_submit;
-
 	spin_unlock_irqrestore(&d40c->lock, flags);
 
 	return &d40d->txd;