Commit 193a750d authored by Michael Tretter, committed by Vinod Koul

dmaengine: zynqmp_dma: refine dma descriptor locking

The descriptor lists are locked for the entire tasklet that completes
the descriptors. This is not necessary, because the lock only needs to
protect the descriptor lists.

Make the spin lock more fine-grained and take it only in the functions
that actually operate on the descriptor lists. This reduces the time
during which the lock is held.

Signed-off-by: Michael Tretter <m.tretter@pengutronix.de>
Link: https://lore.kernel.org/r/20210826094742.1302009-7-m.tretter@pengutronix.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 16ed0ef3
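
The hunks below all apply the same pattern: take chan->lock only around code that actually manipulates the descriptor lists, and release it before longer-running work such as completion callbacks or starting the next transfer. As a rough, self-contained illustration of that idea (not the driver's code: a pthread mutex stands in for the kernel spinlock, and struct desc, done_list, chan_desc_cleanup, and print_done are invented names), one common variant detaches the done list under the lock and runs the callbacks unlocked:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for the driver's descriptor and channel state. */
struct desc {
	int id;
	void (*callback)(int id);	/* completion callback */
	struct desc *next;
};

struct chan {
	pthread_mutex_t lock;		/* stands in for chan->lock */
	struct desc *done_list;		/* completed descriptors */
};

/*
 * Keep the critical section to the list itself: detach the whole done
 * list under the lock, then run the callbacks with the lock dropped.
 */
static void chan_desc_cleanup(struct chan *c)
{
	struct desc *d;

	pthread_mutex_lock(&c->lock);
	d = c->done_list;		/* take ownership of the list */
	c->done_list = NULL;
	pthread_mutex_unlock(&c->lock);

	while (d) {			/* callbacks run unlocked */
		struct desc *next = d->next;

		if (d->callback)
			d->callback(d->id);
		free(d);
		d = next;
	}
}

static void print_done(int id)
{
	printf("descriptor %d completed\n", id);
}

int main(void)
{
	struct chan c = { PTHREAD_MUTEX_INITIALIZER, NULL };

	for (int i = 0; i < 3; i++) {
		struct desc *d = malloc(sizeof(*d));

		d->id = i;
		d->callback = print_done;
		pthread_mutex_lock(&c.lock);
		d->next = c.done_list;	/* list insertion is the locked part */
		c.done_list = d;
		pthread_mutex_unlock(&c.lock);
	}

	chan_desc_cleanup(&c);
	return 0;
}

Detach-then-process is just one way to keep callbacks out of the critical section; the patch itself simply brackets each list-walking helper with lock/unlock pairs, as the hunks show.
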
@@ -600,6 +600,9 @@ static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
 {
 	struct zynqmp_dma_desc_sw *desc, *next;
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 
 	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
 		dma_async_tx_callback callback;
@@ -616,6 +619,8 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
 		/* Run any dependencies, then free the descriptor */
 		zynqmp_dma_free_descriptor(chan, desc);
 	}
+
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -655,9 +660,13 @@ static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
  */
 static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_desc_list(chan, &chan->active_list);
 	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
 	zynqmp_dma_free_desc_list(chan, &chan->done_list);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 }
 
 /**
@@ -667,11 +676,8 @@ static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
 static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 	dma_free_coherent(chan->dev,
 			  (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
 			  chan->desc_pool_v, chan->desc_pool_p);
@@ -686,11 +692,16 @@ static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
  */
 static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
 {
+	unsigned long irqflags;
+
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	zynqmp_dma_complete_descriptor(chan);
+	spin_unlock_irqrestore(&chan->lock, irqflags);
+
 	zynqmp_dma_chan_desc_cleanup(chan);
 	zynqmp_dma_free_descriptors(chan);
 	zynqmp_dma_init(chan);
 }
 
@@ -746,28 +757,27 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 	u32 count;
 	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
-
 	if (chan->err) {
 		zynqmp_dma_reset(chan);
 		chan->err = false;
-		goto unlock;
+		return;
 	}
 
+	spin_lock_irqsave(&chan->lock, irqflags);
 	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
-
 	while (count) {
 		zynqmp_dma_complete_descriptor(chan);
 		count--;
 	}
+	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	zynqmp_dma_chan_desc_cleanup(chan);
 
-	if (chan->idle)
+	if (chan->idle) {
+		spin_lock_irqsave(&chan->lock, irqflags);
 		zynqmp_dma_start_transfer(chan);
-
-unlock:
-	spin_unlock_irqrestore(&chan->lock, irqflags);
+		spin_unlock_irqrestore(&chan->lock, irqflags);
+	}
 }
 
 /**
@@ -779,12 +789,9 @@ static void zynqmp_dma_do_tasklet(struct tasklet_struct *t)
 static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
 {
 	struct zynqmp_dma_chan *chan = to_chan(dchan);
-	unsigned long irqflags;
 
-	spin_lock_irqsave(&chan->lock, irqflags);
 	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
 	zynqmp_dma_free_descriptors(chan);
-	spin_unlock_irqrestore(&chan->lock, irqflags);
 
 	return 0;
 }