Commit 4f1be396 authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:
 "A bunch of driver fixes, notably:

   - idxd driver fixes for submission race, driver remove sequence,
     setup sequence for MSIXPERM, array index and updating descriptor
     vector

   - usb-dmac, pm reference leak fix

   - xilinx_dma, read-after-free fix

   - uniphier-xdmac fix for using atomic readl_poll_timeout_atomic()

   - of-dma, router_xlate to return -EPROBE_DEFER if controller is not
     yet available

   - imx-dma, generic dma fix"

* tag 'dmaengine-fix-5.14' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: imx-dma: configure the generic DMA type to make it work
  dmaengine: of-dma: router_xlate to return -EPROBE_DEFER if controller is not yet available
  dmaengine: stm32-dmamux: Fix PM usage counter unbalance in stm32 dmamux ops
  dmaengine: stm32-dma: Fix PM usage counter imbalance in stm32 dma ops
  dmaengine: uniphier-xdmac: Use readl_poll_timeout_atomic() in atomic state
  dmaengine: idxd: fix submission race window
  dmaengine: idxd: fix sequence for pci driver remove() and shutdown()
  dmaengine: idxd: fix desc->vector that isn't being updated
  dmaengine: idxd: fix setup sequence for MSIXPERM table
  dmaengine: idxd: fix array index when int_handles are being used
  dmaengine: usb-dmac: Fix PM reference leak in usb_dmac_probe()
  dmaengine: xilinx_dma: Fix read-after-free bug when terminating transfers
parents b4b927fc 7199dded
@@ -294,6 +294,14 @@ struct idxd_desc {
         struct idxd_wq *wq;
 };
 
+/*
+ * This is software defined error for the completion status. We overload the error code
+ * that will never appear in completion status and only SWERR register.
+ */
+enum idxd_completion_status {
+        IDXD_COMP_DESC_ABORT = 0xff,
+};
+
 #define confdev_to_idxd(dev) container_of(dev, struct idxd_device, conf_dev)
 #define confdev_to_wq(dev) container_of(dev, struct idxd_wq, conf_dev)
@@ -482,4 +490,10 @@ static inline void perfmon_init(void) {}
 static inline void perfmon_exit(void) {}
 #endif
 
+static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
+{
+        idxd_dma_complete_txd(desc, reason);
+        idxd_free_desc(desc->wq, desc);
+}
+
 #endif
@@ -102,6 +102,8 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
                 spin_lock_init(&idxd->irq_entries[i].list_lock);
         }
 
+        idxd_msix_perm_setup(idxd);
+
         irq_entry = &idxd->irq_entries[0];
         rc = request_threaded_irq(irq_entry->vector, NULL, idxd_misc_thread,
                                   0, "idxd-misc", irq_entry);
@@ -148,7 +150,6 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
         }
 
         idxd_unmask_error_interrupts(idxd);
-        idxd_msix_perm_setup(idxd);
         return 0;
 
  err_wq_irqs:
@@ -162,6 +163,7 @@ static int idxd_setup_interrupts(struct idxd_device *idxd)
  err_misc_irq:
         /* Disable error interrupt generation */
         idxd_mask_error_interrupts(idxd);
+        idxd_msix_perm_clear(idxd);
  err_irq_entries:
         pci_free_irq_vectors(pdev);
         dev_err(dev, "No usable interrupts\n");
@@ -758,32 +760,40 @@ static void idxd_shutdown(struct pci_dev *pdev)
         for (i = 0; i < msixcnt; i++) {
                 irq_entry = &idxd->irq_entries[i];
                 synchronize_irq(irq_entry->vector);
-                free_irq(irq_entry->vector, irq_entry);
                 if (i == 0)
                         continue;
                 idxd_flush_pending_llist(irq_entry);
                 idxd_flush_work_list(irq_entry);
         }
-
-        idxd_msix_perm_clear(idxd);
-        idxd_release_int_handles(idxd);
-        pci_free_irq_vectors(pdev);
-        pci_iounmap(pdev, idxd->reg_base);
-        pci_disable_device(pdev);
-        destroy_workqueue(idxd->wq);
+        flush_workqueue(idxd->wq);
 }
 
 static void idxd_remove(struct pci_dev *pdev)
 {
         struct idxd_device *idxd = pci_get_drvdata(pdev);
+        struct idxd_irq_entry *irq_entry;
+        int msixcnt = pci_msix_vec_count(pdev);
+        int i;
 
         dev_dbg(&pdev->dev, "%s called\n", __func__);
         idxd_shutdown(pdev);
         if (device_pasid_enabled(idxd))
                 idxd_disable_system_pasid(idxd);
         idxd_unregister_devices(idxd);
-        perfmon_pmu_remove(idxd);
+
+        for (i = 0; i < msixcnt; i++) {
+                irq_entry = &idxd->irq_entries[i];
+                free_irq(irq_entry->vector, irq_entry);
+        }
+        idxd_msix_perm_clear(idxd);
+        idxd_release_int_handles(idxd);
+        pci_free_irq_vectors(pdev);
+        pci_iounmap(pdev, idxd->reg_base);
         iommu_dev_disable_feature(&pdev->dev, IOMMU_DEV_FEAT_SVA);
+        pci_disable_device(pdev);
+        destroy_workqueue(idxd->wq);
+        perfmon_pmu_remove(idxd);
+        device_unregister(&idxd->conf_dev);
 }
 
 static struct pci_driver idxd_pci_driver = {
...
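For context, the init.c hunks above (from the idxd remove()/shutdown() sequencing fix) move free_irq() and the resource teardown out of the shutdown path: ->shutdown() only quiesces the device, while ->remove() performs the full teardown exactly once. A minimal sketch of that split for a hypothetical PCI driver; all mydrv_* names and the struct layout are illustrative, not idxd code:

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/pci.h>
#include <linux/slab.h>

struct mydrv_dev {
        void __iomem *regs;
        int irq;
};

/* Quiesce only: stop the hardware, but do not free anything. */
static void mydrv_shutdown(struct pci_dev *pdev)
{
        struct mydrv_dev *md = pci_get_drvdata(pdev);

        /* mask device interrupts and stop DMA here */
        synchronize_irq(md->irq);
}

/* Full teardown: quiesce first, then release every resource once. */
static void mydrv_remove(struct pci_dev *pdev)
{
        struct mydrv_dev *md = pci_get_drvdata(pdev);

        mydrv_shutdown(pdev);
        free_irq(md->irq, md);
        pci_free_irq_vectors(pdev);
        pci_iounmap(pdev, md->regs);
        pci_disable_device(pdev);
        kfree(md);      /* assumes md was kmalloc'ed in the (omitted) probe */
}

static struct pci_driver mydrv_pci_driver = {
        .name = "mydrv",
        /* .id_table and .probe omitted from this sketch */
        .remove = mydrv_remove,
        .shutdown = mydrv_shutdown,
};

As in the idxd change, anything that ->remove() still needs (IRQs, MMIO mappings, the workqueue) must not be torn down by ->shutdown().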
@@ -245,12 +245,6 @@ static inline bool match_fault(struct idxd_desc *desc, u64 fault_addr)
         return false;
 }
 
-static inline void complete_desc(struct idxd_desc *desc, enum idxd_complete_type reason)
-{
-        idxd_dma_complete_txd(desc, reason);
-        idxd_free_desc(desc->wq, desc);
-}
-
 static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                                      enum irq_work_type wtype,
                                      int *processed, u64 data)
@@ -272,8 +266,16 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
                 reason = IDXD_COMPLETE_DEV_FAIL;
 
         llist_for_each_entry_safe(desc, t, head, llnode) {
-                if (desc->completion->status) {
-                        if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+                u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+                if (status) {
+                        if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                                complete_desc(desc, IDXD_COMPLETE_ABORT);
+                                (*processed)++;
+                                continue;
+                        }
+
+                        if (unlikely(status != DSA_COMP_SUCCESS))
                                 match_fault(desc, data);
                         complete_desc(desc, reason);
                         (*processed)++;
@@ -329,7 +331,14 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
         spin_unlock_irqrestore(&irq_entry->list_lock, flags);
 
         list_for_each_entry(desc, &flist, list) {
-                if ((desc->completion->status & DSA_COMP_STATUS_MASK) != DSA_COMP_SUCCESS)
+                u8 status = desc->completion->status & DSA_COMP_STATUS_MASK;
+
+                if (unlikely(status == IDXD_COMP_DESC_ABORT)) {
+                        complete_desc(desc, IDXD_COMPLETE_ABORT);
+                        continue;
+                }
+
+                if (unlikely(status != DSA_COMP_SUCCESS))
                         match_fault(desc, data);
                 complete_desc(desc, reason);
         }
...
@@ -25,11 +25,10 @@ static struct idxd_desc *__get_desc(struct idxd_wq *wq, int idx, int cpu)
          * Descriptor completion vectors are 1...N for MSIX. We will round
          * robin through the N vectors.
          */
-        wq->vec_ptr = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
+        wq->vec_ptr = desc->vector = (wq->vec_ptr % idxd->num_wq_irqs) + 1;
         if (!idxd->int_handles) {
                 desc->hw->int_handle = wq->vec_ptr;
         } else {
-                desc->vector = wq->vec_ptr;
                 /*
                  * int_handles are only for descriptor completion. However for device
                  * MSIX enumeration, vec 0 is used for misc interrupts. Therefore even
@@ -88,9 +87,64 @@ void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
         sbitmap_queue_clear(&wq->sbq, desc->id, cpu);
 }
+static struct idxd_desc *list_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                                         struct idxd_desc *desc)
+{
+        struct idxd_desc *d, *n;
+
+        lockdep_assert_held(&ie->list_lock);
+        list_for_each_entry_safe(d, n, &ie->work_list, list) {
+                if (d == desc) {
+                        list_del(&d->list);
+                        return d;
+                }
+        }
+
+        /*
+         * At this point, the desc needs to be aborted is held by the completion
+         * handler where it has taken it off the pending list but has not added to the
+         * work list. It will be cleaned up by the interrupt handler when it sees the
+         * IDXD_COMP_DESC_ABORT for completion status.
+         */
+        return NULL;
+}
+
+static void llist_abort_desc(struct idxd_wq *wq, struct idxd_irq_entry *ie,
+                             struct idxd_desc *desc)
+{
+        struct idxd_desc *d, *t, *found = NULL;
+        struct llist_node *head;
+        unsigned long flags;
+
+        desc->completion->status = IDXD_COMP_DESC_ABORT;
+        /*
+         * Grab the list lock so it will block the irq thread handler. This allows the
+         * abort code to locate the descriptor need to be aborted.
+         */
+        spin_lock_irqsave(&ie->list_lock, flags);
+        head = llist_del_all(&ie->pending_llist);
+        if (head) {
+                llist_for_each_entry_safe(d, t, head, llnode) {
+                        if (d == desc) {
+                                found = desc;
+                                continue;
+                        }
+                        list_add_tail(&desc->list, &ie->work_list);
+                }
+        }
+
+        if (!found)
+                found = list_abort_desc(wq, ie, desc);
+        spin_unlock_irqrestore(&ie->list_lock, flags);
+
+        if (found)
+                complete_desc(found, IDXD_COMPLETE_ABORT);
+}
+
 int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
 {
         struct idxd_device *idxd = wq->idxd;
+        struct idxd_irq_entry *ie = NULL;
         void __iomem *portal;
         int rc;
@@ -108,6 +162,16 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
          * even on UP because the recipient is a device.
          */
         wmb();
+
+        /*
+         * Pending the descriptor to the lockless list for the irq_entry
+         * that we designated the descriptor to.
+         */
+        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
+                ie = &idxd->irq_entries[desc->vector];
+                llist_add(&desc->llnode, &ie->pending_llist);
+        }
+
         if (wq_dedicated(wq)) {
                 iosubmit_cmds512(portal, desc->hw, 1);
         } else {
@@ -118,29 +182,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
                  * device is not accepting descriptor at all.
                  */
                 rc = enqcmds(portal, desc->hw);
-                if (rc < 0)
+                if (rc < 0) {
+                        if (ie)
+                                llist_abort_desc(wq, ie, desc);
                         return rc;
+                }
         }
 
         percpu_ref_put(&wq->wq_active);
-
-        /*
-         * Pending the descriptor to the lockless list for the irq_entry
-         * that we designated the descriptor to.
-         */
-        if (desc->hw->flags & IDXD_OP_FLAG_RCI) {
-                int vec;
-
-                /*
-                 * If the driver is on host kernel, it would be the value
-                 * assigned to interrupt handle, which is index for MSIX
-                 * vector. If it's guest then can't use the int_handle since
-                 * that is the index to IMS for the entire device. The guest
-                 * device local index will be used.
-                 */
-                vec = !idxd->int_handles ? desc->hw->int_handle : desc->vector;
-
-                llist_add(&desc->llnode, &idxd->irq_entries[vec].pending_llist);
-        }
 
         return 0;
 }
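Taken together with the idxd.h and irq.c hunks, the submit path above now publishes a descriptor on the irq_entry's pending llist before ringing the device, and reclaims it with the software-only IDXD_COMP_DESC_ABORT status when enqcmds() fails. A condensed sketch of that ordering contract; submit_to_hw() is a hypothetical stand-in for iosubmit_cmds512()/enqcmds():

/* Sketch only: assumes the idxd types and helpers from the hunks above. */
static int sketch_submit(struct idxd_wq *wq, struct idxd_desc *desc,
                         struct idxd_irq_entry *ie)
{
        int rc;

        /*
         * Publish first: the completion interrupt can fire as soon as the
         * descriptor reaches the device, and the handler must already be
         * able to find it on the pending list.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI)
                llist_add(&desc->llnode, &ie->pending_llist);

        rc = submit_to_hw(wq, desc);            /* hypothetical helper */
        if (rc < 0) {
                /*
                 * The device never saw the descriptor, so no interrupt will
                 * complete it.  llist_abort_desc() stamps the software-defined
                 * IDXD_COMP_DESC_ABORT status and pulls the descriptor back
                 * off whichever list the irq thread may have moved it to.
                 */
                llist_abort_desc(wq, ie, desc);
                return rc;
        }
        return 0;
}

The irq.c hunks are the other half of the contract: both completion loops treat IDXD_COMP_DESC_ABORT as "complete with IDXD_COMPLETE_ABORT and move on", so an aborted descriptor is never misreported as a device failure.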
@@ -1744,8 +1744,6 @@ void idxd_unregister_devices(struct idxd_device *idxd)
                 device_unregister(&group->conf_dev);
         }
-
-        device_unregister(&idxd->conf_dev);
 }
 
 int idxd_register_bus_type(void)
...
@@ -812,6 +812,8 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
                 dma_length += sg_dma_len(sg);
         }
 
+        imxdma_config_write(chan, &imxdmac->config, direction);
+
         switch (imxdmac->word_size) {
         case DMA_SLAVE_BUSWIDTH_4_BYTES:
                 if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
...
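The imx-dma hunk applies the cached channel configuration inside the prep_slave_sg() path, which is where generic dmaengine clients expect their dmaengine_slave_config() settings to take effect. A hedged consumer-side sketch of that flow; the FIFO address, buswidth and burst size are placeholders:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

static int sketch_start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                           unsigned int nents, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction = DMA_DEV_TO_MEM,
                .src_addr = fifo_addr,                  /* placeholder FIFO address */
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst = 16,
        };
        struct dma_async_tx_descriptor *desc;
        int ret;

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret)
                return ret;

        /* The provider must honour cfg here, i.e. in its prep_slave_sg hook. */
        desc = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_DEV_TO_MEM,
                                       DMA_PREP_INTERRUPT);
        if (!desc)
                return -ENOMEM;

        dmaengine_submit(desc);
        dma_async_issue_pending(chan);
        return 0;
}

If the driver only wrote the configuration elsewhere, transfers prepared through the generic slave API could run with a stale or empty setup, which is the "generic dma" breakage this fix addresses.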
@@ -67,8 +67,12 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                 return NULL;
 
         ofdma_target = of_dma_find_controller(&dma_spec_target);
-        if (!ofdma_target)
-                return NULL;
+        if (!ofdma_target) {
+                ofdma->dma_router->route_free(ofdma->dma_router->dev,
+                                              route_data);
+                chan = ERR_PTR(-EPROBE_DEFER);
+                goto err;
+        }
 
         chan = ofdma_target->of_dma_xlate(&dma_spec_target, ofdma_target);
         if (IS_ERR_OR_NULL(chan)) {
@@ -89,6 +93,7 @@ static struct dma_chan *of_dma_router_xlate(struct of_phandle_args *dma_spec,
                 }
         }
 
+err:
         /*
          * Need to put the node back since the ofdma->of_dma_route_allocate
          * has taken it for generating the new, translated dma_spec
...
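Returning ERR_PTR(-EPROBE_DEFER) from the router xlate lets a client driver's probe be retried once the target DMA controller shows up; previously the NULL return was reported to clients as a hard failure. A typical consumer-side pattern, sketched for a hypothetical platform driver (the "rx" channel name is illustrative):

#include <linux/dmaengine.h>
#include <linux/platform_device.h>

static int sketch_probe(struct platform_device *pdev)
{
        struct dma_chan *chan;

        chan = dma_request_chan(&pdev->dev, "rx");
        if (IS_ERR(chan))
                /* Propagates -EPROBE_DEFER so the core re-probes us later. */
                return dev_err_probe(&pdev->dev, PTR_ERR(chan),
                                     "failed to request DMA channel\n");

        platform_set_drvdata(pdev, chan);
        return 0;
}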
@@ -855,8 +855,8 @@ static int usb_dmac_probe(struct platform_device *pdev)
 
 error:
         of_dma_controller_free(pdev->dev.of_node);
-        pm_runtime_put(&pdev->dev);
 error_pm:
+        pm_runtime_put(&pdev->dev);
         pm_runtime_disable(&pdev->dev);
         return ret;
 }
...
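The usb-dmac hunk moves pm_runtime_put() below the error_pm: label because pm_runtime_get_sync() raises the device's usage count even when it fails, so the failure branch must drop that reference as well. A hedged sketch of the resulting error-path layout; sketch_hw_init() is a made-up stand-in for the probe work that can fail after the get:

#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

static int sketch_hw_init(struct platform_device *pdev)
{
        return 0;       /* stand-in for the real controller setup */
}

static int sketch_probe(struct platform_device *pdev)
{
        int ret;

        pm_runtime_enable(&pdev->dev);
        ret = pm_runtime_get_sync(&pdev->dev);
        if (ret < 0)
                goto error_pm;  /* the usage count was raised even on failure */

        ret = sketch_hw_init(pdev);
        if (ret)
                goto error;

        pm_runtime_put(&pdev->dev);
        return 0;

error:
        /* undo anything sketch_hw_init() set up before failing */
error_pm:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        return ret;
}

Each label undoes only what succeeded before the corresponding goto, and the put now sits on the common path that both failures share.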
@@ -1200,7 +1200,7 @@ static int stm32_dma_alloc_chan_resources(struct dma_chan *c)
         chan->config_init = false;
 
-        ret = pm_runtime_get_sync(dmadev->ddev.dev);
+        ret = pm_runtime_resume_and_get(dmadev->ddev.dev);
         if (ret < 0)
                 return ret;
@@ -1470,7 +1470,7 @@ static int stm32_dma_suspend(struct device *dev)
         struct stm32_dma_device *dmadev = dev_get_drvdata(dev);
         int id, ret, scr;
 
-        ret = pm_runtime_get_sync(dev);
+        ret = pm_runtime_resume_and_get(dev);
         if (ret < 0)
                 return ret;
...
@@ -137,7 +137,7 @@ static void *stm32_dmamux_route_allocate(struct of_phandle_args *dma_spec,
         /* Set dma request */
         spin_lock_irqsave(&dmamux->lock, flags);
-        ret = pm_runtime_get_sync(&pdev->dev);
+        ret = pm_runtime_resume_and_get(&pdev->dev);
         if (ret < 0) {
                 spin_unlock_irqrestore(&dmamux->lock, flags);
                 goto error;
@@ -336,7 +336,7 @@ static int stm32_dmamux_suspend(struct device *dev)
         struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
         int i, ret;
 
-        ret = pm_runtime_get_sync(dev);
+        ret = pm_runtime_resume_and_get(dev);
         if (ret < 0)
                 return ret;
@@ -361,7 +361,7 @@ static int stm32_dmamux_resume(struct device *dev)
         if (ret < 0)
                 return ret;
 
-        ret = pm_runtime_get_sync(dev);
+        ret = pm_runtime_resume_and_get(dev);
         if (ret < 0)
                 return ret;
...
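The stm32-dma and stm32-dmamux hunks replace pm_runtime_get_sync() with pm_runtime_resume_and_get(), which drops the usage count itself when the resume fails, so callers that simply return on error no longer leave a reference behind. A minimal sketch contrasting the two patterns (plain device pointer, no driver specifics):

#include <linux/pm_runtime.h>

static int sketch_do_work(struct device *dev)
{
        int ret;

        /* Old pattern: a failed pm_runtime_get_sync() still needs a put. */
        ret = pm_runtime_get_sync(dev);
        if (ret < 0) {
                pm_runtime_put_noidle(dev);
                return ret;
        }
        pm_runtime_put(dev);

        /* New pattern: the helper balances the count on failure for us. */
        ret = pm_runtime_resume_and_get(dev);
        if (ret < 0)
                return ret;
        pm_runtime_put(dev);

        return 0;
}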
@@ -209,8 +209,8 @@ static int uniphier_xdmac_chan_stop(struct uniphier_xdmac_chan *xc)
         writel(0, xc->reg_ch_base + XDMAC_TSS);
 
         /* wait until transfer is stopped */
-        return readl_poll_timeout(xc->reg_ch_base + XDMAC_STAT, val,
-                                  !(val & XDMAC_STAT_TENF), 100, 1000);
+        return readl_poll_timeout_atomic(xc->reg_ch_base + XDMAC_STAT, val,
+                                         !(val & XDMAC_STAT_TENF), 100, 1000);
 }
 
 /* xc->vc.lock must be held by caller */
...
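uniphier_xdmac_chan_stop() runs with the channel's virt-dma lock held, so it must not sleep: readl_poll_timeout_atomic() busy-waits with udelay() where readl_poll_timeout() would call usleep_range(). A hedged sketch of polling a status bit from atomic context; the register offset and BUSY bit are invented for illustration:

#include <linux/bits.h>
#include <linux/iopoll.h>
#include <linux/spinlock.h>

#define SKETCH_STAT             0x10            /* hypothetical status register */
#define SKETCH_STAT_BUSY        BIT(0)          /* hypothetical busy flag */

static int sketch_stop_channel(void __iomem *base, spinlock_t *lock)
{
        unsigned long flags;
        u32 val;
        int ret;

        spin_lock_irqsave(lock, flags);
        /* Sleeping poll helpers are forbidden here; use the atomic variant. */
        ret = readl_poll_timeout_atomic(base + SKETCH_STAT, val,
                                        !(val & SKETCH_STAT_BUSY), 10, 1000);
        spin_unlock_irqrestore(lock, flags);

        return ret;
}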
@@ -394,6 +394,7 @@ struct xilinx_dma_tx_descriptor {
  * @genlock: Support genlock mode
  * @err: Channel has errors
  * @idle: Check for channel idle
+ * @terminating: Check for channel being synchronized by user
  * @tasklet: Cleanup work after irq
  * @config: Device configuration info
  * @flush_on_fsync: Flush on Frame sync
@@ -431,6 +432,7 @@ struct xilinx_dma_chan {
         bool genlock;
         bool err;
         bool idle;
+        bool terminating;
         struct tasklet_struct tasklet;
         struct xilinx_vdma_config config;
         bool flush_on_fsync;
@@ -1049,6 +1051,13 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
                 /* Run any dependencies, then free the descriptor */
                 dma_run_dependencies(&desc->async_tx);
                 xilinx_dma_free_tx_descriptor(chan, desc);
+
+                /*
+                 * While we ran a callback the user called a terminate function,
+                 * which takes care of cleaning up any remaining descriptors
+                 */
+                if (chan->terminating)
+                        break;
         }
 
         spin_unlock_irqrestore(&chan->lock, flags);
@@ -1965,6 +1974,8 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
         if (desc->cyclic)
                 chan->cyclic = true;
 
+        chan->terminating = false;
+
         spin_unlock_irqrestore(&chan->lock, flags);
 
         return cookie;
@@ -2436,6 +2447,7 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
                 xilinx_dma_chan_reset(chan);
 
         /* Remove and free all of the descriptors in the lists */
+        chan->terminating = true;
         xilinx_dma_free_descriptors(chan);
         chan->idle = true;
...
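The xilinx_dma hunks latch a terminating flag in terminate_all() so the descriptor-cleanup loop stops walking a list that the terminate path is about to free, closing the read-after-free. A generic sketch of the same guard; the sketch_* types are illustrative and far simpler than the real driver's:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct sketch_desc {
        struct list_head node;
        void (*callback)(void *arg);
        void *arg;
};

struct sketch_chan {
        spinlock_t lock;
        struct list_head done_list;
        bool terminating;
};

static void sketch_cleanup(struct sketch_chan *chan)
{
        struct sketch_desc *desc, *next;
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        list_for_each_entry_safe(desc, next, &chan->done_list, node) {
                list_del(&desc->node);

                /* Callbacks run unlocked and may invoke terminate_all(). */
                spin_unlock_irqrestore(&chan->lock, flags);
                if (desc->callback)
                        desc->callback(desc->arg);
                spin_lock_irqsave(&chan->lock, flags);

                kfree(desc);

                /* terminate_all() freed whatever is left; stop iterating. */
                if (chan->terminating)
                        break;
        }
        spin_unlock_irqrestore(&chan->lock, flags);
}

static void sketch_terminate_all(struct sketch_chan *chan)
{
        unsigned long flags;

        spin_lock_irqsave(&chan->lock, flags);
        chan->terminating = true;
        /* the remaining descriptors would be removed and freed here */
        spin_unlock_irqrestore(&chan->lock, flags);
}

As in the xilinx change, the flag is cleared again on the next submit (xilinx_dma_tx_submit() sets chan->terminating = false) so a channel reused after a terminate resumes normal cleanup.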