Commit 51fe9cd2 authored by Sascha Hauer, committed by Vinod Koul

dmaengine: virt-dma: Add missing locking

Originally freeing descriptors was split into a locked and an unlocked
part. The locked part in vchan_get_all_descriptors() collected all
descriptors on a separate list_head. This was done to allow iterating
over that new list in vchan_dma_desc_free_list() without a lock held.

This became broken in 13bb26ae ("dmaengine: virt-dma: don't always
free descriptor upon completion"). With this commit
vchan_dma_desc_free_list() no longer exclusively operates on the
separate list, but starts to put descriptors which can be reused back on
&vc->desc_allocated. This list operation should have been locked, but
wasn't.
In the meantime, drivers started to call vchan_dma_desc_free_list() with
their lock held, so that we now have the situation that
vchan_dma_desc_free_list() is called locked from some drivers and
unlocked from others.
To clean this up we have to do two things:

1. Add missing locking in vchan_dma_desc_free_list()
2. Make sure drivers call vchan_dma_desc_free_list() unlocked

This needs to be done atomically, so in this patch the locking is added
and all drivers are fixed.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Reviewed-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Green Wan <green.wan@sifive.com>
Tested-by: Green Wan <green.wan@sifive.com>
Link: https://lore.kernel.org/r/20191216105328.15198-3-s.hauer@pengutronix.de
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 5c8aacbb
...@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan) ...@@ -636,14 +636,10 @@ static int dma_chan_terminate_all(struct dma_chan *dchan)
vchan_get_all_descriptors(&chan->vc, &head); vchan_get_all_descriptors(&chan->vc, &head);
/*
* As vchan_dma_desc_free_list can access to desc_allocated list
* we need to call it in vc.lock context.
*/
vchan_dma_desc_free_list(&chan->vc, &head);
spin_unlock_irqrestore(&chan->vc.lock, flags); spin_unlock_irqrestore(&chan->vc.lock, flags);
vchan_dma_desc_free_list(&chan->vc, &head);
dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan)); dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
return 0; return 0;
......
...@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan) ...@@ -430,9 +430,10 @@ static int mtk_uart_apdma_terminate_all(struct dma_chan *chan)
spin_lock_irqsave(&c->vc.lock, flags); spin_lock_irqsave(&c->vc.lock, flags);
vchan_get_all_descriptors(&c->vc, &head); vchan_get_all_descriptors(&c->vc, &head);
vchan_dma_desc_free_list(&c->vc, &head);
spin_unlock_irqrestore(&c->vc.lock, flags); spin_unlock_irqrestore(&c->vc.lock, flags);
vchan_dma_desc_free_list(&c->vc, &head);
return 0; return 0;
} }
......
...@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan) ...@@ -674,10 +674,11 @@ static int owl_dma_terminate_all(struct dma_chan *chan)
} }
vchan_get_all_descriptors(&vchan->vc, &head); vchan_get_all_descriptors(&vchan->vc, &head);
vchan_dma_desc_free_list(&vchan->vc, &head);
spin_unlock_irqrestore(&vchan->vc.lock, flags); spin_unlock_irqrestore(&vchan->vc.lock, flags);
vchan_dma_desc_free_list(&vchan->vc, &head);
return 0; return 0;
} }
......
...@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan) ...@@ -519,15 +519,6 @@ static void s3c24xx_dma_start_next_txd(struct s3c24xx_dma_chan *s3cchan)
s3c24xx_dma_start_next_sg(s3cchan, txd); s3c24xx_dma_start_next_sg(s3cchan, txd);
} }
static void s3c24xx_dma_free_txd_list(struct s3c24xx_dma_engine *s3cdma,
struct s3c24xx_dma_chan *s3cchan)
{
LIST_HEAD(head);
vchan_get_all_descriptors(&s3cchan->vc, &head);
vchan_dma_desc_free_list(&s3cchan->vc, &head);
}
/* /*
* Try to allocate a physical channel. When successful, assign it to * Try to allocate a physical channel. When successful, assign it to
* this virtual channel, and initiate the next descriptor. The * this virtual channel, and initiate the next descriptor. The
...@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) ...@@ -709,8 +700,9 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
{ {
struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan); struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
struct s3c24xx_dma_engine *s3cdma = s3cchan->host; struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
LIST_HEAD(head);
unsigned long flags; unsigned long flags;
int ret = 0; int ret;
spin_lock_irqsave(&s3cchan->vc.lock, flags); spin_lock_irqsave(&s3cchan->vc.lock, flags);
...@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan) ...@@ -734,7 +726,15 @@ static int s3c24xx_dma_terminate_all(struct dma_chan *chan)
} }
/* Dequeue jobs not yet fired as well */ /* Dequeue jobs not yet fired as well */
s3c24xx_dma_free_txd_list(s3cdma, s3cchan);
vchan_get_all_descriptors(&s3cchan->vc, &head);
spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
vchan_dma_desc_free_list(&s3cchan->vc, &head);
return 0;
unlock: unlock:
spin_unlock_irqrestore(&s3cchan->vc.lock, flags); spin_unlock_irqrestore(&s3cchan->vc.lock, flags);
......
...@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan) ...@@ -155,9 +155,9 @@ static void sf_pdma_free_chan_resources(struct dma_chan *dchan)
kfree(chan->desc); kfree(chan->desc);
chan->desc = NULL; chan->desc = NULL;
vchan_get_all_descriptors(&chan->vchan, &head); vchan_get_all_descriptors(&chan->vchan, &head);
vchan_dma_desc_free_list(&chan->vchan, &head);
sf_pdma_disclaim_chan(chan); sf_pdma_disclaim_chan(chan);
spin_unlock_irqrestore(&chan->vchan.lock, flags); spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
} }
static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan, static size_t sf_pdma_desc_residue(struct sf_pdma_chan *chan,
...@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan) ...@@ -220,8 +220,8 @@ static int sf_pdma_terminate_all(struct dma_chan *dchan)
chan->desc = NULL; chan->desc = NULL;
chan->xfer_err = false; chan->xfer_err = false;
vchan_get_all_descriptors(&chan->vchan, &head); vchan_get_all_descriptors(&chan->vchan, &head);
vchan_dma_desc_free_list(&chan->vchan, &head);
spin_unlock_irqrestore(&chan->vchan.lock, flags); spin_unlock_irqrestore(&chan->vchan.lock, flags);
vchan_dma_desc_free_list(&chan->vchan, &head);
return 0; return 0;
} }
......
...@@ -885,12 +885,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan) ...@@ -885,12 +885,13 @@ static int sun4i_dma_terminate_all(struct dma_chan *chan)
} }
spin_lock_irqsave(&vchan->vc.lock, flags); spin_lock_irqsave(&vchan->vc.lock, flags);
vchan_dma_desc_free_list(&vchan->vc, &head);
/* Clear these so the vchan is usable again */ /* Clear these so the vchan is usable again */
vchan->processing = NULL; vchan->processing = NULL;
vchan->pchan = NULL; vchan->pchan = NULL;
spin_unlock_irqrestore(&vchan->vc.lock, flags); spin_unlock_irqrestore(&vchan->vc.lock, flags);
vchan_dma_desc_free_list(&vchan->vc, &head);
return 0; return 0;
} }
......
...@@ -116,7 +116,11 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head) ...@@ -116,7 +116,11 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
list_for_each_entry_safe(vd, _vd, head, node) { list_for_each_entry_safe(vd, _vd, head, node) {
if (dmaengine_desc_test_reuse(&vd->tx)) { if (dmaengine_desc_test_reuse(&vd->tx)) {
unsigned long flags;
spin_lock_irqsave(&vc->lock, flags);
list_move_tail(&vd->node, &vc->desc_allocated); list_move_tail(&vd->node, &vc->desc_allocated);
spin_unlock_irqrestore(&vc->lock, flags);
} else { } else {
dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd); dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
list_del(&vd->node); list_del(&vd->node);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment