Commit 078a6506 authored by Tudor Ambarus, committed by Vinod Koul

dmaengine: at_hdmac: Fix deadlocks

Fix the following deadlocks:
1/ atc_handle_cyclic() and atc_chain_complete() called
dmaengine_desc_get_callback_invoke() while wrongly holding the
atchan->lock. Clients can set the callback to dmaengine_terminate_sync(),
which ends up trying to take the same lock, hence a deadlock (see the
sketch after this list).
2/ dma_run_dependencies() was called with the atchan->lock held, but the
method calls device_issue_pending(), which tries to take the same lock,
again resulting in a deadlock.
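
A minimal sketch of deadlock 1/, for illustration only: client_callback()
and completion_path() are hypothetical stand-ins for a client's callback
and the driver's completion path, while the locking and dmaengine calls
are the real ones.

static void client_callback(void *param)
{
	struct dma_chan *chan = param;

	/* Re-enters the driver: terminating the channel takes atchan->lock. */
	dmaengine_terminate_sync(chan);
}

static void completion_path(struct at_dma_chan *atchan,
			    struct dma_async_tx_descriptor *txd)
{
	unsigned long flags;

	spin_lock_irqsave(&atchan->lock, flags);
	/* Runs client_callback() while atchan->lock is already held, so the
	 * nested attempt to take the same lock can never succeed. */
	dmaengine_desc_get_callback_invoke(txd, NULL);
	spin_unlock_irqrestore(&atchan->lock, flags);	/* never reached */
}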

The driver must not hold the lock when invoking the callback or when
running dependencies. Releasing the spinlock from within a called
function just before invoking the callback is not a nice solution
either: the called functions would silently become non-atomic even when
entered from an atomic region. Thus the lock is now taken inside the
child routines, wherever it is actually needed, as sketched below.
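
In sketch form, the resulting pattern (a simplified, hypothetical
child_complete() condensed from atc_chain_complete() in the diff below):
the child routine takes atchan->lock itself, only around the channel
state it touches, and drops it before anything that may re-enter the
driver.

static void child_complete(struct at_dma_chan *atchan, struct at_desc *desc)
{
	unsigned long flags;

	/* Channel lists are only manipulated under the lock... */
	spin_lock_irqsave(&atchan->lock, flags);
	list_move(&desc->desc_node, &atchan->free_list);
	spin_unlock_irqrestore(&atchan->lock, flags);

	/* ...and the lock is dropped before the client callback and the
	 * dependencies (which may call device_issue_pending()) run. */
	dmaengine_desc_get_callback_invoke(&desc->txd, NULL);
	dma_run_dependencies(&desc->txd);
}

This mirrors what the hunks below do for atc_chain_complete(),
atc_complete_all(), atc_advance_work() and atc_handle_error().
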
Signed-off-by: Tudor Ambarus <tudor.ambarus@microchip.com>
Acked-by: Ludovic Desroches <ludovic.desroches@microchip.com>
Link: https://lore.kernel.org/r/20200123140237.125799-6-tudor.ambarus@microchip.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 247b4d83
@@ -426,17 +426,19 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
  * atc_chain_complete - finish work for one transaction chain
  * @atchan: channel we work on
  * @desc: descriptor at the head of the chain we want do complete
- *
- * Called with atchan->lock held and bh disabled */
+ */
 static void
 atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 {
 	struct dma_async_tx_descriptor *txd = &desc->txd;
 	struct at_dma *atdma = to_at_dma(atchan->chan_common.device);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common),
 		"descriptor %u complete\n", txd->cookie);
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/* mark the descriptor as complete for non cyclic cases only */
 	if (!atc_chan_is_cyclic(atchan))
 		dma_cookie_complete(txd);
@@ -453,16 +455,13 @@ atc_chain_complete(struct at_dma_chan *atchan, struct at_desc *desc)
 	/* move myself to free_list */
 	list_move(&desc->desc_node, &atchan->free_list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	dma_descriptor_unmap(txd);
 	/* for cyclic transfers,
 	 * no need to replay callback function while stopping */
-	if (!atc_chan_is_cyclic(atchan)) {
-		/*
-		 * The API requires that no submissions are done from a
-		 * callback, so we don't need to drop the lock here
-		 */
+	if (!atc_chan_is_cyclic(atchan))
 		dmaengine_desc_get_callback_invoke(txd, NULL);
-	}
 
 	dma_run_dependencies(txd);
 }
@@ -480,9 +479,12 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 {
 	struct at_desc *desc, *_desc;
 	LIST_HEAD(list);
+	unsigned long flags;
 
 	dev_vdbg(chan2dev(&atchan->chan_common), "complete all\n");
 
+	spin_lock_irqsave(&atchan->lock, flags);
+
 	/*
 	 * Submit queued descriptors ASAP, i.e. before we go through
 	 * the completed ones.
@@ -494,6 +496,8 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 	/* empty queue list by moving descriptors (if any) to active_list */
 	list_splice_init(&atchan->queue, &atchan->active_list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
 }
@@ -501,38 +505,44 @@ static void atc_complete_all(struct at_dma_chan *atchan)
 /**
  * atc_advance_work - at the end of a transaction, move forward
  * @atchan: channel where the transaction ended
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_advance_work(struct at_dma_chan *atchan)
 {
+	unsigned long flags;
+	int ret;
+
 	dev_vdbg(chan2dev(&atchan->chan_common), "advance_work\n");
 
-	if (atc_chan_is_enabled(atchan))
+	spin_lock_irqsave(&atchan->lock, flags);
+	ret = atc_chan_is_enabled(atchan);
+	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (ret)
 		return;
 
 	if (list_empty(&atchan->active_list) ||
-	    list_is_singular(&atchan->active_list)) {
-		atc_complete_all(atchan);
-	} else {
-		atc_chain_complete(atchan, atc_first_active(atchan));
-		/* advance work */
-		atc_dostart(atchan, atc_first_active(atchan));
-	}
+	    list_is_singular(&atchan->active_list))
+		return atc_complete_all(atchan);
+
+	atc_chain_complete(atchan, atc_first_active(atchan));
+
+	/* advance work */
+	spin_lock_irqsave(&atchan->lock, flags);
+	atc_dostart(atchan, atc_first_active(atchan));
+	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**
  * atc_handle_error - handle errors reported by DMA controller
  * @atchan: channel where error occurs
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_error(struct at_dma_chan *atchan)
 {
 	struct at_desc *bad_desc;
 	struct at_desc *child;
+	unsigned long flags;
+
+	spin_lock_irqsave(&atchan->lock, flags);
 
 	/*
 	 * The descriptor currently at the head of the active list is
 	 * broked. Since we don't have any way to report errors, we'll
@@ -564,6 +574,8 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 	list_for_each_entry(child, &bad_desc->tx_list, desc_node)
 		atc_dump_lli(atchan, &child->lli);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Pretend the descriptor completed successfully */
 	atc_chain_complete(atchan, bad_desc);
 }
@@ -571,8 +583,6 @@ static void atc_handle_error(struct at_dma_chan *atchan)
 /**
  * atc_handle_cyclic - at the end of a period, run callback function
  * @atchan: channel used for cyclic operations
- *
- * Called with atchan->lock held and bh disabled
  */
 static void atc_handle_cyclic(struct at_dma_chan *atchan)
 {
@@ -591,17 +601,14 @@ static void atc_handle_cyclic(struct at_dma_chan *atchan)
 static void atc_tasklet(unsigned long data)
 {
 	struct at_dma_chan *atchan = (struct at_dma_chan *)data;
-	unsigned long flags;
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	if (test_and_clear_bit(ATC_IS_ERROR, &atchan->status))
-		atc_handle_error(atchan);
-	else if (atc_chan_is_cyclic(atchan))
-		atc_handle_cyclic(atchan);
-	else
-		atc_advance_work(atchan);
+		return atc_handle_error(atchan);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
+	if (atc_chan_is_cyclic(atchan))
+		return atc_handle_cyclic(atchan);
+
+	atc_advance_work(atchan);
 }
 
 static irqreturn_t at_dma_interrupt(int irq, void *dev_id)
@@ -1437,6 +1444,8 @@ static int atc_terminate_all(struct dma_chan *chan)
 	list_splice_init(&atchan->queue, &list);
 	list_splice_init(&atchan->active_list, &list);
 
+	spin_unlock_irqrestore(&atchan->lock, flags);
+
 	/* Flush all pending and queued descriptors */
 	list_for_each_entry_safe(desc, _desc, &list, desc_node)
 		atc_chain_complete(atchan, desc);
@@ -1445,8 +1454,6 @@ static int atc_terminate_all(struct dma_chan *chan)
 	/* if channel dedicated to cyclic operations, free it */
 	clear_bit(ATC_IS_CYCLIC, &atchan->status);
 
-	spin_unlock_irqrestore(&atchan->lock, flags);
-
 	return 0;
 }
@@ -1507,7 +1514,6 @@ atc_tx_status(struct dma_chan *chan,
 static void atc_issue_pending(struct dma_chan *chan)
 {
 	struct at_dma_chan *atchan = to_at_dma_chan(chan);
-	unsigned long flags;
 
 	dev_vdbg(chan2dev(chan), "issue_pending\n");
@@ -1515,9 +1521,7 @@ static void atc_issue_pending(struct dma_chan *chan)
 	if (atc_chan_is_cyclic(atchan))
 		return;
 
-	spin_lock_irqsave(&atchan->lock, flags);
 	atc_advance_work(atchan);
-	spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 /**