Commit 13bb26ae authored by Robert Jarzmik, committed by Vinod Koul

dmaengine: virt-dma: don't always free descriptor upon completion

This patch improves the case of a transfer that is submitted multiple
times and where the cost of creating the descriptor chain is not
negligible.

This happens with big video buffers (several megabytes, i.e. several
thousand linked descriptors in one scatter-gather list). In this case, a
video driver would want to do:
 - tx = dmaengine_prep_slave_sg()
 - dmaengine_submit(tx)
 - dma_async_issue_pending()
 - wait for video completion
 - read the video data (or not; skipping a frame is also possible)
 - dmaengine_submit(tx)
   => here, the descriptor chain recalculation takes time
   => the repeated DMA-coherent allocations may create holes in the
      DMA pool, which is counter-productive
 - dma_async_issue_pending()
 - etc.

To cope with this case, virt-dma is modified so that it does not free a
descriptor upon completion when the DMA_CTRL_REUSE flag is set in the
transfer.
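
For illustration only (not part of the patch), a client loop for this use
case could look like the sketch below. "chan", "sgl", "sg_len" and
"nframes" are placeholders for a real slave channel, the video buffer's
scatterlist and the frame count, and the sketch assumes the generic reuse
helpers dmaengine_desc_set_reuse()/dmaengine_desc_free() introduced with
DMA_CTRL_REUSE; error unwinding is kept minimal:

        #include <linux/dmaengine.h>
        #include <linux/scatterlist.h>

        /* Sketch: prepare one reusable descriptor, submit it once per frame. */
        static int capture_frames(struct dma_chan *chan, struct scatterlist *sgl,
                                  unsigned int sg_len, unsigned int nframes)
        {
                struct dma_async_tx_descriptor *tx;
                unsigned int i;

                tx = dmaengine_prep_slave_sg(chan, sgl, sg_len,
                                             DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
                if (!tx)
                        return -ENOMEM;

                /* Mark the descriptor reusable; fails if the channel does not
                 * advertise the DMA_CTRL_REUSE capability. */
                if (dmaengine_desc_set_reuse(tx))
                        return -EINVAL;

                for (i = 0; i < nframes; i++) {
                        dmaengine_submit(tx);           /* no chain recalculation */
                        dma_async_issue_pending(chan);
                        /* ... wait for the frame completion callback ... */
                }

                /* Release the reusable descriptor once the last frame is done. */
                return dmaengine_desc_free(tx);
        }

With this patch, completion moves such a reusable descriptor back to the
channel's desc_allocated list instead of freeing it, so each subsequent
dmaengine_submit() only moves it back to desc_submitted.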

This patch is a respin of the former DMA_CTRL_ACK approach, which was
reverted due to a regression in audio drivers.
Signed-off-by: Robert Jarzmik <robert.jarzmik@free.fr>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
parent 8005c49d
drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
         spin_lock_irqsave(&vc->lock, flags);
         cookie = dma_cookie_assign(tx);
 
-        list_add_tail(&vd->node, &vc->desc_submitted);
+        list_move_tail(&vd->node, &vc->desc_submitted);
         spin_unlock_irqrestore(&vc->lock, flags);
 
         dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -39,6 +39,33 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
 }
 EXPORT_SYMBOL_GPL(vchan_tx_submit);
 
+/**
+ * vchan_tx_desc_free - free a reusable descriptor
+ * @tx: the transfer
+ *
+ * This function frees a previously allocated reusable descriptor. The only
+ * other way is to clear the DMA_CTRL_REUSE flag and submit one last time the
+ * transfer.
+ *
+ * Returns 0 upon success
+ */
+int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
+{
+        struct virt_dma_chan *vc = to_virt_chan(tx->chan);
+        struct virt_dma_desc *vd = to_virt_desc(tx);
+        unsigned long flags;
+
+        spin_lock_irqsave(&vc->lock, flags);
+        list_del(&vd->node);
+        spin_unlock_irqrestore(&vc->lock, flags);
+
+        dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
+                vc, vd, vd->tx.cookie);
+        vc->desc_free(vd);
+        return 0;
+}
+EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
+
 struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
         dma_cookie_t cookie)
 {
@@ -83,8 +110,10 @@ static void vchan_complete(unsigned long arg)
                 cb_data = vd->tx.callback_param;
 
                 list_del(&vd->node);
-                vc->desc_free(vd);
-
+                if (dmaengine_desc_test_reuse(&vd->tx))
+                        list_add(&vd->node, &vc->desc_allocated);
+                else
+                        vc->desc_free(vd);
                 if (cb)
                         cb(cb_data);
         }
@@ -96,9 +125,13 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
         while (!list_empty(head)) {
                 struct virt_dma_desc *vd = list_first_entry(head,
                         struct virt_dma_desc, node);
-                list_del(&vd->node);
-                dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
-                vc->desc_free(vd);
+                if (dmaengine_desc_test_reuse(&vd->tx)) {
+                        list_move_tail(&vd->node, &vc->desc_allocated);
+                } else {
+                        dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
+                        list_del(&vd->node);
+                        vc->desc_free(vd);
+                }
         }
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -108,6 +141,7 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
         dma_cookie_init(&vc->chan);
 
         spin_lock_init(&vc->lock);
+        INIT_LIST_HEAD(&vc->desc_allocated);
         INIT_LIST_HEAD(&vc->desc_submitted);
         INIT_LIST_HEAD(&vc->desc_issued);
         INIT_LIST_HEAD(&vc->desc_completed);
drivers/dma/virt-dma.h
@@ -29,6 +29,7 @@ struct virt_dma_chan {
         spinlock_t lock;
 
         /* protected by vc.lock */
+        struct list_head desc_allocated;
         struct list_head desc_submitted;
         struct list_head desc_issued;
         struct list_head desc_completed;
@@ -55,10 +56,17 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
         struct virt_dma_desc *vd, unsigned long tx_flags)
 {
         extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
+        extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);
+        unsigned long flags;
 
         dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
         vd->tx.flags = tx_flags;
         vd->tx.tx_submit = vchan_tx_submit;
+        vd->tx.desc_free = vchan_tx_desc_free;
+
+        spin_lock_irqsave(&vc->lock, flags);
+        list_add_tail(&vd->node, &vc->desc_allocated);
+        spin_unlock_irqrestore(&vc->lock, flags);
 
         return &vd->tx;
 }
@@ -134,6 +142,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
 static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
         struct list_head *head)
 {
+        list_splice_tail_init(&vc->desc_allocated, head);
         list_splice_tail_init(&vc->desc_submitted, head);
         list_splice_tail_init(&vc->desc_issued, head);
         list_splice_tail_init(&vc->desc_completed, head);
@@ -141,11 +150,14 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
 
 static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
 {
+        struct virt_dma_desc *vd;
         unsigned long flags;
         LIST_HEAD(head);
 
         spin_lock_irqsave(&vc->lock, flags);
         vchan_get_all_descriptors(vc, &head);
+        list_for_each_entry(vd, &head, node)
+                dmaengine_desc_clear_reuse(&vd->tx);
         spin_unlock_irqrestore(&vc->lock, flags);
 
         vchan_dma_desc_free_list(vc, &head);