Commit 4bd19c83 authored by Paul Cercueil, committed by Greg Kroah-Hartman

dmaengine: jz4780: Fix transfers being ACKed too soon

[ Upstream commit 4e4106f5 ]

When a multi-descriptor DMA transfer is in progress, the "IRQ pending"
flag will apparently be set for that channel as soon as the last
descriptor loads, well before the IRQ actually happens. This behaviour
has been observed on the JZ4725B, but other SoCs may be affected as
well.

In the case where another DMA transfer then runs to completion on a
separate channel, the IRQ handler would run the completion handler for
the first channel as well, even though its transfer didn't actually
finish.

Fix this by checking in the completion handler that the transfer is
indeed done; if not, the interrupted DMA transfer is simply resumed.
Signed-off-by: Paul Cercueil <paul@crapouillou.net>
Signed-off-by: Vinod Koul <vkoul@kernel.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
parent f23dc2c3
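
As an aside for readers new to this pattern, below is a minimal, self-contained user-space C sketch of the idea applied here: a per-channel handler reports whether the interrupt corresponds to a genuine completion, and the top-level handler clears the pending bits only for channels that were actually acknowledged. All names in the sketch (sim_dma, sim_chan_irq, STATUS_TT) and the register model are hypothetical stand-ins, not the driver's API; in the real driver the decision is based on the channel's DCS register and the DIRQP write, as the diff below shows.

/*
 * Minimal simulation of the "only acknowledge real completions" pattern.
 * The register model is hypothetical: it only mimics a pending flag that
 * can be raised before the channel's transfer-terminated (TT) bit is set.
 */
#include <inttypes.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NB_CHANNELS 6
#define STATUS_TT (1u << 3) /* simulated "transfer terminated" bit */

struct sim_dma {
        uint32_t pending;             /* one bit per channel, like DIRQP */
        uint32_t status[NB_CHANNELS]; /* per-channel status, like DCS */
};

/*
 * Per-channel handler: returns true when the interrupt should be
 * acknowledged (the transfer really finished), false for a false
 * positive whose transfer must keep running.
 */
static bool sim_chan_irq(struct sim_dma *dma, unsigned int chan)
{
        if (dma->status[chan] & STATUS_TT) {
                printf("chan %u: transfer complete, ack\n", chan);
                return true;
        }

        printf("chan %u: pending raised early, resume transfer\n", chan);
        return false;
}

/* Top-level handler: clear pending bits only for acknowledged channels. */
static void sim_irq_handler(struct sim_dma *dma)
{
        uint32_t pending = dma->pending;
        unsigned int i;

        for (i = 0; i < NB_CHANNELS; i++) {
                if (!(pending & (1u << i)))
                        continue;
                if (sim_chan_irq(dma, i))
                        pending &= ~(1u << i);
        }

        /* Bits still set stay pending; cleared bits are acknowledged. */
        dma->pending = pending;
}

int main(void)
{
        struct sim_dma dma = { 0 };

        /* Channel 1 finished for real; channel 4 raised pending too soon. */
        dma.pending = (1u << 1) | (1u << 4);
        dma.status[1] = STATUS_TT;

        sim_irq_handler(&dma);
        printf("pending after handler: 0x%02" PRIx32 "\n", dma.pending);
        return 0;
}

Running the sketch acknowledges channel 1 and leaves channel 4 marked pending, mirroring the behaviour the patch introduces for false positives.
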
@@ -666,10 +666,11 @@ static enum dma_status jz4780_dma_tx_status(struct dma_chan *chan,
         return status;
 }
 
-static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
+static bool jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
         struct jz4780_dma_chan *jzchan)
 {
         uint32_t dcs;
+        bool ack = true;
 
         spin_lock(&jzchan->vchan.lock);
 
@@ -692,12 +693,20 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
                 if ((dcs & (JZ_DMA_DCS_AR | JZ_DMA_DCS_HLT)) == 0) {
                         if (jzchan->desc->type == DMA_CYCLIC) {
                                 vchan_cyclic_callback(&jzchan->desc->vdesc);
-                        } else {
+
+                                jz4780_dma_begin(jzchan);
+                        } else if (dcs & JZ_DMA_DCS_TT) {
                                 vchan_cookie_complete(&jzchan->desc->vdesc);
                                 jzchan->desc = NULL;
-                        }
 
-                        jz4780_dma_begin(jzchan);
+                                jz4780_dma_begin(jzchan);
+                        } else {
+                                /* False positive - continue the transfer */
+                                ack = false;
+                                jz4780_dma_chn_writel(jzdma, jzchan->id,
+                                                      JZ_DMA_REG_DCS,
+                                                      JZ_DMA_DCS_CTE);
+                        }
                 }
         } else {
                 dev_err(&jzchan->vchan.chan.dev->device,
@@ -705,21 +714,22 @@ static void jz4780_dma_chan_irq(struct jz4780_dma_dev *jzdma,
         }
 
         spin_unlock(&jzchan->vchan.lock);
+
+        return ack;
 }
 
 static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
 {
         struct jz4780_dma_dev *jzdma = data;
+        unsigned int nb_channels = jzdma->soc_data->nb_channels;
         uint32_t pending, dmac;
         int i;
 
         pending = jz4780_dma_ctrl_readl(jzdma, JZ_DMA_REG_DIRQP);
 
-        for (i = 0; i < jzdma->soc_data->nb_channels; i++) {
-                if (!(pending & (1<<i)))
-                        continue;
-
-                jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]);
+        for_each_set_bit(i, (unsigned long *)&pending, nb_channels) {
+                if (jz4780_dma_chan_irq(jzdma, &jzdma->chan[i]))
+                        pending &= ~BIT(i);
         }
 
         /* Clear halt and address error status of all channels. */
@@ -728,7 +738,7 @@ static irqreturn_t jz4780_dma_irq_handler(int irq, void *data)
         jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DMAC, dmac);
 
         /* Clear interrupt pending status. */
-        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, 0);
+        jz4780_dma_ctrl_writel(jzdma, JZ_DMA_REG_DIRQP, pending);
 
         return IRQ_HANDLED;
 }