Commit ed6889db authored by Linus Torvalds

Merge tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine fixes from Vinod Koul:
 "Core:
   - Documentation typo fixes
   - fix the channel indexes
   - dmatest: fixes for process hang and iterations

  Drivers:
   - hisilicon: build error fix without PCI_MSI
   - ti-k3: deadlock fix
   - uniphier-xdmac: fix for reg region
   - pch: fix data race
   - tegra: fix clock state"

* tag 'dmaengine-fix-5.7-rc4' of git://git.infradead.org/users/vkoul/slave-dma:
  dmaengine: dmatest: Fix process hang when reading 'wait' parameter
  dmaengine: dmatest: Fix iteration non-stop logic
  dmaengine: tegra-apb: Ensure that clock is enabled during of DMA synchronization
  dmaengine: fix channel index enumeration
  dmaengine: mmp_tdma: Reset channel error on release
  dmaengine: mmp_tdma: Do not ignore slave config validation errors
  dmaengine: pch_dma.c: Avoid data race between probe and irq handler
  dt-bindings: dma: uniphier-xdmac: switch to single reg region
  include/linux/dmaengine: Typos fixes in API documentation
  dmaengine: xilinx_dma: Add missing check for empty list
  dmaengine: ti: k3-psil: fix deadlock on error path
  dmaengine: hisilicon: Fix build error without PCI_MSI
parents 690e2aba aa72f1d2
Documentation/devicetree/bindings/dma/socionext,uniphier-xdmac.yaml
@@ -22,9 +22,7 @@ properties:
     const: socionext,uniphier-xdmac
 
   reg:
-    items:
-      - description: XDMAC base register region (offset and length)
-      - description: XDMAC extension register region (offset and length)
+    maxItems: 1
 
   interrupts:
     maxItems: 1
@@ -49,12 +47,13 @@ required:
   - reg
   - interrupts
   - "#dma-cells"
+  - dma-channels
 
 examples:
   - |
     xdmac: dma-controller@5fc10000 {
         compatible = "socionext,uniphier-xdmac";
-        reg = <0x5fc10000 0x1000>, <0x5fc20000 0x800>;
+        reg = <0x5fc10000 0x5300>;
         interrupts = <0 188 4>;
         #dma-cells = <2>;
         dma-channels = <16>;
drivers/dma/Kconfig
@@ -241,7 +241,8 @@ config FSL_RAID
 
 config HISI_DMA
         tristate "HiSilicon DMA Engine support"
-        depends on ARM64 || (COMPILE_TEST && PCI_MSI)
+        depends on ARM64 || COMPILE_TEST
+        depends on PCI_MSI
         select DMA_ENGINE
         select DMA_VIRTUAL_CHANNELS
         help
drivers/dma/dmaengine.c
@@ -232,10 +232,6 @@ static void chan_dev_release(struct device *dev)
         struct dma_chan_dev *chan_dev;
 
         chan_dev = container_of(dev, typeof(*chan_dev), device);
-        if (atomic_dec_and_test(chan_dev->idr_ref)) {
-                ida_free(&dma_ida, chan_dev->dev_id);
-                kfree(chan_dev->idr_ref);
-        }
         kfree(chan_dev);
 }
@@ -1043,27 +1039,9 @@ static int get_dma_id(struct dma_device *device)
 }
 
 static int __dma_async_device_channel_register(struct dma_device *device,
-                                               struct dma_chan *chan,
-                                               int chan_id)
+                                               struct dma_chan *chan)
 {
         int rc = 0;
-        int chancnt = device->chancnt;
-        atomic_t *idr_ref;
-        struct dma_chan *tchan;
-
-        tchan = list_first_entry_or_null(&device->channels,
-                                         struct dma_chan, device_node);
-        if (!tchan)
-                return -ENODEV;
-
-        if (tchan->dev) {
-                idr_ref = tchan->dev->idr_ref;
-        } else {
-                idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
-                if (!idr_ref)
-                        return -ENOMEM;
-                atomic_set(idr_ref, 0);
-        }
 
         chan->local = alloc_percpu(typeof(*chan->local));
         if (!chan->local)
@@ -1079,29 +1057,36 @@ static int __dma_async_device_channel_register(struct dma_device *device,
         /*
          * When the chan_id is a negative value, we are dynamically adding
          * the channel. Otherwise we are static enumerating.
          */
-        chan->chan_id = chan_id < 0 ? chancnt : chan_id;
+        mutex_lock(&device->chan_mutex);
+        chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
+        mutex_unlock(&device->chan_mutex);
+        if (chan->chan_id < 0) {
+                pr_err("%s: unable to alloc ida for chan: %d\n",
+                       __func__, chan->chan_id);
+                goto err_out;
+        }
+
         chan->dev->device.class = &dma_devclass;
         chan->dev->device.parent = device->dev;
         chan->dev->chan = chan;
-        chan->dev->idr_ref = idr_ref;
         chan->dev->dev_id = device->dev_id;
-        atomic_inc(idr_ref);
         dev_set_name(&chan->dev->device, "dma%dchan%d",
                      device->dev_id, chan->chan_id);
 
         rc = device_register(&chan->dev->device);
         if (rc)
-                goto err_out;
+                goto err_out_ida;
         chan->client_count = 0;
-        device->chancnt = chan->chan_id + 1;
+        device->chancnt++;
 
         return 0;
 
+ err_out_ida:
+        mutex_lock(&device->chan_mutex);
+        ida_free(&device->chan_ida, chan->chan_id);
+        mutex_unlock(&device->chan_mutex);
 err_out:
         free_percpu(chan->local);
         kfree(chan->dev);
-        if (atomic_dec_return(idr_ref) == 0)
-                kfree(idr_ref);
         return rc;
 }
@@ -1110,7 +1095,7 @@ int dma_async_device_channel_register(struct dma_device *device,
 {
         int rc;
 
-        rc = __dma_async_device_channel_register(device, chan, -1);
+        rc = __dma_async_device_channel_register(device, chan);
         if (rc < 0)
                 return rc;
 
@@ -1130,6 +1115,9 @@ static void __dma_async_device_channel_unregister(struct dma_device *device,
         device->chancnt--;
         chan->dev->chan = NULL;
         mutex_unlock(&dma_list_mutex);
+        mutex_lock(&device->chan_mutex);
+        ida_free(&device->chan_ida, chan->chan_id);
+        mutex_unlock(&device->chan_mutex);
         device_unregister(&chan->dev->device);
         free_percpu(chan->local);
 }
@@ -1152,7 +1140,7 @@ EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
  */
 int dma_async_device_register(struct dma_device *device)
 {
-        int rc, i = 0;
+        int rc;
         struct dma_chan* chan;
 
         if (!device)
@@ -1257,9 +1245,12 @@ int dma_async_device_register(struct dma_device *device)
         if (rc != 0)
                 return rc;
 
+        mutex_init(&device->chan_mutex);
+        ida_init(&device->chan_ida);
+
         /* represent channels in sysfs. Probably want devs too */
         list_for_each_entry(chan, &device->channels, device_node) {
-                rc = __dma_async_device_channel_register(device, chan, i++);
+                rc = __dma_async_device_channel_register(device, chan);
                 if (rc < 0)
                         goto err_out;
         }
@@ -1334,6 +1325,7 @@ void dma_async_device_unregister(struct dma_device *device)
          */
         dma_cap_set(DMA_PRIVATE, device->cap_mask);
         dma_channel_rebalance();
+        ida_free(&dma_ida, device->dev_id);
         dma_device_put(device);
         mutex_unlock(&dma_list_mutex);
 }
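The dmaengine.c change above drops the hand-rolled chancnt/idr_ref bookkeeping and hands out channel indexes from a per-device IDA serialized by a mutex, so indexes freed by hot-removed channels can be reused by later hot-added ones. A minimal sketch of that allocation pattern, with made-up my_dev_* names rather than the dmaengine symbols:

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

struct my_dev {
        struct ida chan_ida;            /* channel index allocator */
        struct mutex chan_mutex;        /* serializes chan_ida users */
};

static void my_dev_init(struct my_dev *d)
{
        mutex_init(&d->chan_mutex);
        ida_init(&d->chan_ida);
}

/* Returns the lowest free channel index, or a negative errno. */
static int my_dev_alloc_chan_id(struct my_dev *d)
{
        int id;

        mutex_lock(&d->chan_mutex);
        id = ida_alloc(&d->chan_ida, GFP_KERNEL);
        mutex_unlock(&d->chan_mutex);

        return id;
}

/* Releases an index so a later hot-added channel can reuse it. */
static void my_dev_free_chan_id(struct my_dev *d, int id)
{
        mutex_lock(&d->chan_mutex);
        ida_free(&d->chan_ida, id);
        mutex_unlock(&d->chan_mutex);
}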
drivers/dma/dmatest.c
@@ -240,7 +240,7 @@ static bool is_threaded_test_run(struct dmatest_info *info)
                 struct dmatest_thread *thread;
 
                 list_for_each_entry(thread, &dtc->threads, node) {
-                        if (!thread->done)
+                        if (!thread->done && !thread->pending)
                                 return true;
                 }
         }
@@ -662,8 +662,8 @@ static int dmatest_func(void *data)
         flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
 
         ktime = ktime_get();
-        while (!kthread_should_stop()
-               && !(params->iterations && total_tests >= params->iterations)) {
+        while (!(kthread_should_stop() ||
+               (params->iterations && total_tests >= params->iterations))) {
                 struct dma_async_tx_descriptor *tx = NULL;
                 struct dmaengine_unmap_data *um;
                 dma_addr_t *dsts;
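The first dmatest hunk is what stops a read of the 'wait' parameter from hanging: a thread that was only configured ("pending") but never started must not be counted as running. A tiny illustrative model of that predicate (not dmatest code; the struct and names are invented):

#include <stdbool.h>

/*
 * A thread that was configured but never started must not be reported
 * as running, otherwise a reader waiting for the test run to finish
 * would block forever.
 */
struct test_thread {
        bool done;      /* thread has finished its work            */
        bool pending;   /* thread was configured but never started */
};

static bool thread_is_running(const struct test_thread *t)
{
        return !t->done && !t->pending;
}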
drivers/dma/mmp_tdma.c
@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
                 gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
                                 size);
         tdmac->desc_arr = NULL;
+        if (tdmac->status == DMA_ERROR)
+                tdmac->status = DMA_COMPLETE;
 
         return;
 }
@@ -443,7 +445,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
         if (!desc)
                 goto err_out;
 
-        mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
+        if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
+                goto err_out;
 
         while (buf < buf_len) {
                 desc = &tdmac->desc_arr[i];
drivers/dma/pch_dma.c
@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
         }
 
         pci_set_master(pdev);
+        pd->dma.dev = &pdev->dev;
 
         err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
         if (err) {
@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
                 goto err_free_irq;
         }
 
-        pd->dma.dev = &pdev->dev;
         INIT_LIST_HEAD(&pd->dma.channels);
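The pch_dma hunks move the pd->dma.dev assignment ahead of request_irq(): with IRQF_SHARED the handler can be invoked as soon as the IRQ is registered, so everything it reads has to be initialized first. A generic sketch of this probe-ordering rule, with hypothetical my_* names:

#include <linux/interrupt.h>
#include <linux/device.h>
#include <linux/pci.h>

struct my_chip {
        struct device *dev;
        /* ... other state the IRQ handler uses ... */
};

static irqreturn_t my_irq(int irq, void *data)
{
        struct my_chip *chip = data;

        dev_dbg(chip->dev, "irq\n");    /* handler relies on chip->dev */
        return IRQ_HANDLED;
}

static int my_probe(struct pci_dev *pdev, struct my_chip *chip)
{
        /* Publish everything the handler reads *before* the IRQ is live. */
        chip->dev = &pdev->dev;

        return request_irq(pdev->irq, my_irq, IRQF_SHARED, "my_chip", chip);
}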
drivers/dma/tegra20-apb-dma.c
@@ -816,6 +816,13 @@ static bool tegra_dma_eoc_interrupt_deasserted(struct tegra_dma_channel *tdc)
 static void tegra_dma_synchronize(struct dma_chan *dc)
 {
         struct tegra_dma_channel *tdc = to_tegra_dma_chan(dc);
+        int err;
+
+        err = pm_runtime_get_sync(tdc->tdma->dev);
+        if (err < 0) {
+                dev_err(tdc2dev(tdc), "Failed to synchronize DMA: %d\n", err);
+                return;
+        }
 
         /*
          * CPU, which handles interrupt, could be busy in
@@ -825,6 +832,8 @@ static void tegra_dma_synchronize(struct dma_chan *dc)
         wait_event(tdc->wq, tegra_dma_eoc_interrupt_deasserted(tdc));
 
         tasklet_kill(&tdc->tasklet);
+
+        pm_runtime_put(tdc->tdma->dev);
 }
 
 static unsigned int tegra_dma_sg_bytes_xferred(struct tegra_dma_channel *tdc,
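The tegra hunks keep the controller's clock on while the driver polls hardware state waiting for the EOC interrupt to deassert. The underlying runtime-PM bracket looks roughly like the sketch below (generic, not the tegra code; note the error path also drops the usage count that pm_runtime_get_sync() takes even on failure):

#include <linux/pm_runtime.h>
#include <linux/device.h>

/* Keep the device powered/clocked for the duration of register access. */
static void my_hw_do_mmio_work(struct device *dev)
{
        int err;

        err = pm_runtime_get_sync(dev); /* resume device, enable clocks */
        if (err < 0) {
                pm_runtime_put_noidle(dev);     /* balance the usage count */
                dev_err(dev, "runtime resume failed: %d\n", err);
                return;
        }

        /* ... safe to access device registers here ... */

        pm_runtime_put(dev);            /* drop the usage count again */
}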
drivers/dma/ti/k3-psil.c
@@ -27,6 +27,7 @@ struct psil_endpoint_config *psil_get_ep_config(u32 thread_id)
                         soc_ep_map = &j721e_ep_map;
                 } else {
                         pr_err("PSIL: No compatible machine found for map\n");
+                        mutex_unlock(&ep_map_mutex);
                         return ERR_PTR(-ENOTSUPP);
                 }
                 pr_debug("%s: Using map for %s\n", __func__, soc_ep_map->name);
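The k3-psil fix is the classic "unlock on every exit path" rule: the error return used to leave ep_map_mutex held, deadlocking the next caller. An illustrative sketch with stub names:

#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/errno.h>

static DEFINE_MUTEX(my_map_mutex);

static void *my_find(const char *name) /* stub lookup for illustration */
{
        return NULL;
}

static void *my_lookup(const char *name)
{
        void *entry;

        mutex_lock(&my_map_mutex);
        entry = my_find(name);
        if (!entry) {
                /* the error path must drop the lock too, or the next
                 * caller deadlocks -- the bug fixed above */
                mutex_unlock(&my_map_mutex);
                return ERR_PTR(-ENOENT);
        }
        mutex_unlock(&my_map_mutex);

        return entry;
}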
drivers/dma/xilinx/xilinx_dma.c
@@ -1230,7 +1230,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
                 return ret;
 
         spin_lock_irqsave(&chan->lock, flags);
-
+        if (!list_empty(&chan->active_list)) {
         desc = list_last_entry(&chan->active_list,
                                struct xilinx_dma_tx_descriptor, node);
         /*
@@ -1239,7 +1239,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
          */
         if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
                 residue = xilinx_dma_get_residue(chan, desc);
-
+        }
         spin_unlock_irqrestore(&chan->lock, flags);
 
         dma_set_residue(txstate, residue);
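The xilinx_dma hunks guard list_last_entry(), which is only meaningful for a non-empty list; on an empty active_list it would compute a bogus descriptor pointer from the list head itself. Sketched generically (my_desc is an invented type):

#include <linux/list.h>

struct my_desc {
        struct list_head node;
        unsigned int residue;
};

/*
 * list_last_entry() is only defined for a non-empty list; check first
 * before dereferencing what it returns.
 */
static unsigned int my_last_residue(struct list_head *active)
{
        struct my_desc *d;

        if (list_empty(active))
                return 0;

        d = list_last_entry(active, struct my_desc, node);
        return d->residue;
}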
include/linux/dmaengine.h
@@ -83,9 +83,9 @@ enum dma_transfer_direction {
 /**
  * Interleaved Transfer Request
  * ----------------------------
- * A chunk is collection of contiguous bytes to be transfered.
+ * A chunk is collection of contiguous bytes to be transferred.
  * The gap(in bytes) between two chunks is called inter-chunk-gap(ICG).
- * ICGs may or maynot change between chunks.
+ * ICGs may or may not change between chunks.
  * A FRAME is the smallest series of contiguous {chunk,icg} pairs,
  * that when repeated an integral number of times, specifies the transfer.
  * A transfer template is specification of a Frame, the number of times
@@ -341,13 +341,11 @@ struct dma_chan {
  * @chan: driver channel device
  * @device: sysfs device
  * @dev_id: parent dma_device dev_id
- * @idr_ref: reference count to gate release of dma_device dev_id
  */
 struct dma_chan_dev {
         struct dma_chan *chan;
         struct device device;
         int dev_id;
-        atomic_t *idr_ref;
 };
 
 /**
@@ -835,6 +833,8 @@ struct dma_device {
         int dev_id;
         struct device *dev;
         struct module *owner;
+        struct ida chan_ida;
+        struct mutex chan_mutex;        /* to protect chan_ida */
 
         u32 src_addr_widths;
         u32 dst_addr_widths;
@@ -1069,7 +1069,7 @@ static inline int dmaengine_terminate_all(struct dma_chan *chan)
  * dmaengine_synchronize() needs to be called before it is safe to free
  * any memory that is accessed by previously submitted descriptors or before
  * freeing any resources accessed from within the completion callback of any
- * perviously submitted descriptors.
+ * previously submitted descriptors.
  *
  * This function can be called from atomic context as well as from within a
  * complete callback of a descriptor submitted on the same channel.
@@ -1091,7 +1091,7 @@ static inline int dmaengine_terminate_async(struct dma_chan *chan)
  *
  * Synchronizes to the DMA channel termination to the current context. When this
  * function returns it is guaranteed that all transfers for previously issued
- * descriptors have stopped and and it is safe to free the memory assoicated
+ * descriptors have stopped and it is safe to free the memory associated
  * with them. Furthermore it is guaranteed that all complete callback functions
  * for a previously submitted descriptor have finished running and it is safe to
  * free resources accessed from within the complete callbacks.