Commit cedd54f7 authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.7-rc7' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Some driver fixes:

   - dmatest restoration of defaults

   - tegra210-adma probe handling fix

   - k3-udma flags fixed for slave_sg and memcpy

   - list fix for zynqmp_dma

   - idxd interrupt completion fix

   - lock fix for owl"

* tag 'dmaengine-fix-5.7-rc7' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: tegra210-adma: Fix an error handling path in 'tegra_adma_probe()'
  dmaengine: ti: k3-udma: Fix TR mode flags for slave_sg and memcpy
  dmaengine: zynqmp_dma: Move list_del inside zynqmp_dma_free_descriptor.
  dmaengine: dmatest: Restore default for channel
  dmaengine: idxd: fix interrupt completion after unmasking
  dmaengine: owl: Use correct lock in owl_dma_get_pchan()
parents 57f1b0cf 3a5fd0db
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -1166,10 +1166,11 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp)
 		mutex_unlock(&info->lock);
 		return ret;
 	} else if (dmatest_run) {
-		if (is_threaded_test_pending(info))
-			start_threaded_tests(info);
-		else
-			pr_info("Could not start test, no channels configured\n");
+		if (!is_threaded_test_pending(info)) {
+			pr_info("No channels configured, continue with any\n");
+			add_threaded_test(info);
+		}
+		start_threaded_tests(info);
 	} else {
 		stop_threaded_test(info);
 	}
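The behavioral change: writing "run=1" with no channel configured used to log an error and do nothing; it now adds a test on any available channel and starts it. For context, dmatest_run_set() is wired up through the kernel's writable-module-parameter callback mechanism, which is what lets a sysfs write to /sys/module/dmatest/parameters/run trigger this logic at runtime. A minimal sketch of that pattern follows; the names are illustrative, not dmatest's actual code:

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    static bool demo_run;

    /* Called whenever userspace writes the "run" parameter via sysfs. */
    static int demo_run_set(const char *val, const struct kernel_param *kp)
    {
    	int ret = param_set_bool(val, kp);	/* parses "0"/"1", "y"/"n" */

    	if (ret)
    		return ret;

    	if (demo_run)
    		pr_info("demo: run requested\n");	/* start work here */
    	else
    		pr_info("demo: stop requested\n");	/* stop work here */

    	return 0;
    }

    static const struct kernel_param_ops demo_run_ops = {
    	.set = demo_run_set,
    	.get = param_get_bool,
    };
    module_param_cb(run, &demo_run_ops, &demo_run, 0644);

    MODULE_LICENSE("GPL");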
--- a/drivers/dma/idxd/device.c
+++ b/drivers/dma/idxd/device.c
@@ -62,6 +62,13 @@ int idxd_unmask_msix_vector(struct idxd_device *idxd, int vec_id)
 	perm.ignore = 0;
 	iowrite32(perm.bits, idxd->reg_base + offset);
 
+	/*
+	 * A readback from the device ensures that any previously generated
+	 * completion record writes are visible to software based on PCI
+	 * ordering rules.
+	 */
+	perm.bits = ioread32(idxd->reg_base + offset);
+
 	return 0;
 }
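The readback added here is the standard way to flush posted PCI writes: MMIO writes may be buffered ("posted") on the way to the device, while a read is non-posted and cannot complete until earlier writes to that device have. A generic sketch of the idiom, not idxd-specific code:

    #include <linux/io.h>

    /*
     * PCI memory writes are posted: iowrite32() may return before the
     * device has observed the data. A read from the device is non-posted,
     * so its completion guarantees that all earlier posted writes on that
     * path have landed first.
     */
    static void mmio_write_flushed(void __iomem *reg, u32 val)
    {
    	iowrite32(val, reg);
    	(void)ioread32(reg);	/* readback value is discarded */
    }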
--- a/drivers/dma/idxd/irq.c
+++ b/drivers/dma/idxd/irq.c
@@ -173,6 +173,7 @@ static int irq_process_pending_llist(struct idxd_irq_entry *irq_entry,
 	struct llist_node *head;
 	int queued = 0;
 
+	*processed = 0;
 	head = llist_del_all(&irq_entry->pending_llist);
 	if (!head)
 		return 0;
@@ -197,6 +198,7 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
 	struct list_head *node, *next;
 	int queued = 0;
 
+	*processed = 0;
 	if (list_empty(&irq_entry->work_list))
 		return 0;
 
@@ -218,10 +220,9 @@ static int irq_process_work_list(struct idxd_irq_entry *irq_entry,
 	return queued;
 }
 
-irqreturn_t idxd_wq_thread(int irq, void *data)
+static int idxd_desc_process(struct idxd_irq_entry *irq_entry)
 {
-	struct idxd_irq_entry *irq_entry = data;
-	int rc, processed = 0, retry = 0;
+	int rc, processed, total = 0;
 
 	/*
 	 * There are two lists we are processing. The pending_llist is where
@@ -244,15 +245,26 @@ irqreturn_t idxd_wq_thread(int irq, void *data)
 	 */
 	do {
 		rc = irq_process_work_list(irq_entry, &processed);
-		if (rc != 0) {
-			retry++;
+		total += processed;
+		if (rc != 0)
 			continue;
-		}
 
 		rc = irq_process_pending_llist(irq_entry, &processed);
-	} while (rc != 0 && retry != 10);
+		total += processed;
+	} while (rc != 0);
+
+	return total;
+}
+
+irqreturn_t idxd_wq_thread(int irq, void *data)
+{
+	struct idxd_irq_entry *irq_entry = data;
+	int processed;
+
+	processed = idxd_desc_process(irq_entry);
 	idxd_unmask_msix_vector(irq_entry->idxd, irq_entry->id);
+	/* catch anything unprocessed after unmasking */
+	processed += idxd_desc_process(irq_entry);
 
 	if (processed == 0)
 		return IRQ_NONE;
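The restructured handler follows the usual drain/unmask/drain shape for threaded interrupt handlers: a descriptor that completes in the window between the final drain and re-enabling the vector would otherwise sit unnoticed until the next interrupt fires. A generic skeleton of that pattern; the helper names are hypothetical, not real idxd functions:

    #include <linux/interrupt.h>

    struct example_ctx;
    int drain_completions(struct example_ctx *ctx);	/* stand-in helpers */
    void unmask_vector(struct example_ctx *ctx);

    static irqreturn_t example_irq_thread(int irq, void *data)
    {
    	struct example_ctx *ctx = data;
    	int n;

    	n = drain_completions(ctx);	/* process everything queued */
    	unmask_vector(ctx);		/* re-enable the interrupt */
    	n += drain_completions(ctx);	/* catch the race window */

    	return n ? IRQ_HANDLED : IRQ_NONE;
    }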
--- a/drivers/dma/owl-dma.c
+++ b/drivers/dma/owl-dma.c
@@ -175,13 +175,11 @@ struct owl_dma_txd {
  * @id: physical index to this channel
  * @base: virtual memory base for the dma channel
  * @vchan: the virtual channel currently being served by this physical channel
- * @lock: a lock to use when altering an instance of this struct
  */
 struct owl_dma_pchan {
 	u32 id;
 	void __iomem *base;
 	struct owl_dma_vchan *vchan;
-	spinlock_t lock;
 };
 
 /**
@@ -437,14 +435,14 @@ static struct owl_dma_pchan *owl_dma_get_pchan(struct owl_dma *od,
 	for (i = 0; i < od->nr_pchans; i++) {
 		pchan = &od->pchans[i];
 
-		spin_lock_irqsave(&pchan->lock, flags);
+		spin_lock_irqsave(&od->lock, flags);
 		if (!pchan->vchan) {
 			pchan->vchan = vchan;
-			spin_unlock_irqrestore(&pchan->lock, flags);
+			spin_unlock_irqrestore(&od->lock, flags);
 			break;
 		}
 
-		spin_unlock_irqrestore(&pchan->lock, flags);
+		spin_unlock_irqrestore(&od->lock, flags);
 	}
 
 	return pchan;
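Dropping the per-pchan lock works because pchan->vchan is claimed and released on different code paths, and those paths only exclude each other if they take the same device-wide od->lock; a per-slot lock cannot order two CPUs racing over a shared pool against the release side. A generic sketch of the single-pool-lock pattern, with illustrative names rather than the owl driver's structures:

    #include <linux/kernel.h>
    #include <linux/spinlock.h>

    struct slot {
    	void *owner;			/* NULL when the slot is free */
    };

    struct pool {
    	spinlock_t lock;		/* one lock covering every slot */
    	struct slot slots[8];
    };

    /* Claim the first free slot, or return NULL if all are busy. */
    static struct slot *pool_get(struct pool *p, void *owner)
    {
    	struct slot *s = NULL;
    	unsigned long flags;
    	int i;

    	spin_lock_irqsave(&p->lock, flags);
    	for (i = 0; i < ARRAY_SIZE(p->slots); i++) {
    		if (!p->slots[i].owner) {
    			p->slots[i].owner = owner;
    			s = &p->slots[i];
    			break;
    		}
    	}
    	spin_unlock_irqrestore(&p->lock, flags);

    	return s;
    }

    /* Release must take the same pool-wide lock as pool_get(). */
    static void pool_put(struct pool *p, struct slot *s)
    {
    	unsigned long flags;

    	spin_lock_irqsave(&p->lock, flags);
    	s->owner = NULL;
    	spin_unlock_irqrestore(&p->lock, flags);
    }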
--- a/drivers/dma/tegra210-adma.c
+++ b/drivers/dma/tegra210-adma.c
@@ -900,7 +900,7 @@ static int tegra_adma_probe(struct platform_device *pdev)
 	ret = dma_async_device_register(&tdma->dma_dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "ADMA registration failed: %d\n", ret);
-		goto irq_dispose;
+		goto rpm_put;
 	}
 
 	ret = of_dma_controller_register(pdev->dev.of_node,
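The one-word change relies on the kernel's cascading error-label convention: cleanup labels sit in reverse order of acquisition and fall through, so a goto targets the first undo step still owed. Jumping straight to irq_dispose skipped the runtime-PM cleanup that the earlier rpm_put label reaches first. A condensed sketch of the convention, with hypothetical names rather than the actual probe function:

    int acquire_a(void);		/* hypothetical setup/teardown pairs */
    int acquire_b(void);
    int acquire_c(void);
    void release_a(void);
    void release_b(void);

    static int example_probe(void)
    {
    	int ret;

    	ret = acquire_a();
    	if (ret)
    		return ret;

    	ret = acquire_b();
    	if (ret)
    		goto undo_a;

    	ret = acquire_c();
    	if (ret)
    		goto undo_b;	/* undoes b, then falls through to undo_a */

    	return 0;

    undo_b:
    	release_b();
    undo_a:
    	release_a();
    	return ret;
    }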
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -2156,7 +2156,8 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
 		d->residue += sg_dma_len(sgent);
 	}
 
-	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
+	cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags,
+			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
 
 	return d;
 }
@@ -2733,7 +2734,8 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 		tr_req[1].dicnt3 = 1;
 	}
 
-	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags, CPPI5_TR_CSF_EOP);
+	cppi5_tr_csf_set(&tr_req[num_tr - 1].flags,
+			 CPPI5_TR_CSF_SUPR_EVT | CPPI5_TR_CSF_EOP);
 
 	if (uc->config.metadata_size)
 		d->vd.tx.metadata_ops = &metadata_ops;
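Both hunks have the same shape: the control flags of the final TR are written in a single cppi5_tr_csf_set() call, and the fix folds CPPI5_TR_CSF_SUPR_EVT in alongside CPPI5_TR_CSF_EOP in that one write. The general rule, when a setter writes a whole flag field rather than accumulating bits, is to OR every required bit into one call; a minimal userspace sketch with hypothetical names, not the CPPI5 helper itself:

    #include <stdint.h>

    #define FLAG_SUPR_EVT	(1u << 0)	/* suppress completion event */
    #define FLAG_EOP	(1u << 1)	/* end of packet */

    /* A setter that replaces a whole sub-field: calling it twice with
     * one flag each keeps only the second flag, so callers must pass
     * all required bits in a single call. */
    static void csf_set(uint32_t *flags, uint32_t csf)
    {
    	*flags = (*flags & ~0xffu) | (csf & 0xffu);
    }

    static void finish_last_tr(uint32_t *last_tr_flags)
    {
    	/* both bits in one call, mirroring the fix above */
    	csf_set(last_tr_flags, FLAG_SUPR_EVT | FLAG_EOP);
    }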
--- a/drivers/dma/xilinx/zynqmp_dma.c
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -434,6 +434,7 @@ static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
 	struct zynqmp_dma_desc_sw *child, *next;
 
 	chan->desc_free_cnt++;
+	list_del(&sdesc->node);
 	list_add_tail(&sdesc->node, &chan->free_list);
 	list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
 		chan->desc_free_cnt++;
@@ -608,8 +609,6 @@ static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
 		dma_async_tx_callback callback;
 		void *callback_param;
 
-		list_del(&desc->node);
-
 		callback = desc->async_tx.callback;
 		callback_param = desc->async_tx.callback_param;
 
 		if (callback) {
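Moving list_del() inside zynqmp_dma_free_descriptor() guarantees the node is unlinked before list_add_tail() threads it onto free_list, for every caller. Re-inserting a node that is still linked elsewhere corrupts both lists: the old neighbours keep pointing at it while its own prev/next now belong to the new list. A small userspace demo of the underlying doubly-linked-list rule, in plain C rather than the kernel's <linux/list.h>:

    #include <stdio.h>

    struct node { struct node *prev, *next; };

    static void list_init(struct node *h) { h->prev = h->next = h; }

    static void list_add_tail(struct node *n, struct node *h)
    {
    	n->prev = h->prev;
    	n->next = h;
    	h->prev->next = n;
    	h->prev = n;
    }

    static void list_del(struct node *n)
    {
    	n->prev->next = n->next;
    	n->next->prev = n->prev;
    }

    int main(void)
    {
    	struct node busy, freed, d;

    	list_init(&busy);
    	list_init(&freed);
    	list_add_tail(&d, &busy);

    	/* Correct: unlink from "busy" before re-filing on "freed". */
    	list_del(&d);
    	list_add_tail(&d, &freed);

    	/* Without the list_del(), busy.next would still point at d,
    	 * whose prev/next now belong to "freed": both lists corrupt. */
    	printf("busy empty: %s\n", busy.next == &busy ? "yes" : "no");
    	return 0;
    }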