Commit 556dfd8d authored by Andy Shevchenko, committed by Greg Kroah-Hartman

dmaengine: dw: disable BLOCK IRQs for non-cyclic xfer

commit ee1cdcda upstream.

The commit 2895b2ca ("dmaengine: dw: fix cyclic transfer callbacks")
re-enabled BLOCK interrupts in order to make cyclic transfers work. However,
this change becomes a regression for non-cyclic transfers, as interrupt counters
under stress testing had grown enormously (approximately one interrupt per 4-5
bytes in the UART loopback test).

Taking the above into consideration, enable BLOCK interrupts if and only if the
channel is programmed to perform a cyclic transfer.

Fixes: 2895b2ca ("dmaengine: dw: fix cyclic transfer callbacks")
Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Acked-by: Mans Rullgard <mans@mansr.com>
Tested-by: Mans Rullgard <mans@mansr.com>
Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent f4fa3d6f
...@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc) ...@@ -156,7 +156,6 @@ static void dwc_initialize(struct dw_dma_chan *dwc)
/* Enable interrupts */ /* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask); channel_set_bit(dw, MASK.XFER, dwc->mask);
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask); channel_set_bit(dw, MASK.ERROR, dwc->mask);
dwc->initialized = true; dwc->initialized = true;
...@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc, ...@@ -588,6 +587,9 @@ static void dwc_handle_cyclic(struct dw_dma *dw, struct dw_dma_chan *dwc,
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
} }
/* Re-enable interrupts */
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
} }
/* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */
...@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data) ...@@ -618,11 +620,8 @@ static void dw_dma_tasklet(unsigned long data)
dwc_scan_descriptors(dw, dwc); dwc_scan_descriptors(dw, dwc);
} }
/* /* Re-enable interrupts */
* Re-enable interrupts.
*/
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask); channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask); channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
} }
...@@ -1256,6 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan) ...@@ -1256,6 +1255,7 @@ static void dwc_free_chan_resources(struct dma_chan *chan)
int dw_dma_cyclic_start(struct dma_chan *chan) int dw_dma_cyclic_start(struct dma_chan *chan)
{ {
struct dw_dma_chan *dwc = to_dw_dma_chan(chan); struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags; unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) { if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
...@@ -1264,7 +1264,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan) ...@@ -1264,7 +1264,12 @@ int dw_dma_cyclic_start(struct dma_chan *chan)
} }
spin_lock_irqsave(&dwc->lock, flags); spin_lock_irqsave(&dwc->lock, flags);
/* Enable interrupts to perform cyclic transfer */
channel_set_bit(dw, MASK.BLOCK, dwc->mask);
dwc_dostart(dwc, dwc->cdesc->desc[0]); dwc_dostart(dwc, dwc->cdesc->desc[0]);
spin_unlock_irqrestore(&dwc->lock, flags); spin_unlock_irqrestore(&dwc->lock, flags);
return 0; return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment