Commit 35929dae authored by Linus Torvalds

Merge tag 'dmaengine-fix-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - email address update for Jie Hai

 - fix double increment of client_count in dma_chan_get()

 - idxd driver fixes: use-after-free, probe error handling, and callback
   on wq disable

 - fix for qcom gpi driver GO TRE

 - ptdma locking fix

 - tegra & imx-sdma mem leak fixes

* tag 'dmaengine-fix-6.2' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  ptdma: pt_core_execute_cmd() should use spinlock
  dmaengine: tegra: Fix memory leak in terminate_all()
  dmaengine: xilinx_dma: call of_node_put() when breaking out of for_each_child_of_node()
  dmaengine: imx-sdma: Fix a possible memory leak in sdma_transfer_init
  dmaengine: Fix double increment of client_count in dma_chan_get()
  dmaengine: tegra210-adma: fix global intr clear
  Add exception protection processing for vd in axi_chan_handle_err function
  dmaengine: lgm: Move DT parsing after initialization
  MAINTAINERS: update Jie Hai's email address
  dmaengine: ti: k3-udma: Do conditional decrement of UDMA_CHAN_RT_PEER_BCNT_REG
  dmaengine: idxd: Do not call DMX TX callbacks during workqueue disable
  dmaengine: idxd: Prevent use after free on completion memory
  dmaengine: idxd: Let probe fail when workqueue cannot be enabled
  dmaengine: qcom: gpi: Set link_rx bit on GO TRE for rx operation
parents aaaf919c 95e5fda3
@@ -9298,7 +9298,7 @@ F:	net/dsa/tag_hellcreek.c
 HISILICON DMA DRIVER
 M:	Zhou Wang <wangzhou1@hisilicon.com>
-M:	Jie Hai <haijie1@hisilicon.com>
+M:	Jie Hai <haijie1@huawei.com>
 L:	dmaengine@vger.kernel.org
 S:	Maintained
 F:	drivers/dma/hisi_dma.c
......
@@ -451,7 +451,8 @@ static int dma_chan_get(struct dma_chan *chan)
 	/* The channel is already in use, update client count */
 	if (chan->client_count) {
 		__module_get(owner);
-		goto out;
+		chan->client_count++;
+		return 0;
 	}
 
 	if (!try_module_get(owner))
@@ -470,11 +471,11 @@ static int dma_chan_get(struct dma_chan *chan)
 			goto err_out;
 	}
 
+	chan->client_count++;
+
 	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 		balance_ref_count(chan);
 
-out:
-	chan->client_count++;
-
 	return 0;
 
 err_out:
......
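The dmaengine core fix above removes the shared "out:" label so each path through dma_chan_get() bumps client_count exactly once. A minimal userspace sketch of the fixed control flow (hypothetical struct, not the kernel API):

#include <stdio.h>

struct chan {
	int client_count;
};

static int chan_get(struct chan *c)
{
	if (c->client_count) {		/* already in use: just count the client */
		c->client_count++;
		return 0;
	}

	/*
	 * First acquisition: resources would be allocated here, then the
	 * client is counted exactly once before returning.
	 */
	c->client_count++;
	return 0;
}

int main(void)
{
	struct chan c = { 0 };

	chan_get(&c);
	chan_get(&c);
	printf("client_count = %d\n", c.client_count);	/* 2, not 3 */
	return 0;
}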
@@ -1018,6 +1018,11 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
 	/* The bad descriptor currently is in the head of vc list */
 	vd = vchan_next_desc(&chan->vc);
+	if (!vd) {
+		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
+			axi_chan_name(chan));
+		goto out;
+	}
 	/* Remove the completed descriptor from issued list */
 	list_del(&vd->node);
@@ -1032,6 +1037,7 @@ static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
 
 	/* Try to restart the controller */
 	axi_chan_start_first_queued(chan);
+out:
 	spin_unlock_irqrestore(&chan->vc.lock, flags);
 }
......
@@ -1172,8 +1172,19 @@ static void idxd_flush_pending_descs(struct idxd_irq_entry *ie)
 	spin_unlock(&ie->list_lock);
 
 	list_for_each_entry_safe(desc, itr, &flist, list) {
+		struct dma_async_tx_descriptor *tx;
+
 		list_del(&desc->list);
 		ctype = desc->completion->status ? IDXD_COMPLETE_NORMAL : IDXD_COMPLETE_ABORT;
+		/*
+		 * wq is being disabled. Any remaining descriptors are
+		 * likely to be stuck and can be dropped. callback could
+		 * point to code that is no longer accessible, for example
+		 * if dmatest module has been unloaded.
+		 */
+		tx = &desc->txd;
+		tx->callback = NULL;
+		tx->callback_result = NULL;
 		idxd_dma_complete_txd(desc, ctype, true);
 	}
 }
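The idxd hunk above clears the TX callback pointers before completing flushed descriptors, so the completion path cannot jump into code that may already be gone (e.g. an unloaded dmatest module). A small illustrative sketch of the same pattern in plain C (hypothetical types, not the idxd API):

#include <stddef.h>
#include <stdio.h>

struct tx_desc {
	void (*callback)(void *param);
	void *param;
};

static void complete_desc(struct tx_desc *tx)
{
	if (tx->callback)		/* completion honors a NULL callback */
		tx->callback(tx->param);
}

static void flush_pending(struct tx_desc *tx)
{
	tx->callback = NULL;		/* owner may be gone; never call it */
	tx->param = NULL;
	complete_desc(tx);		/* still releases the descriptor */
}

int main(void)
{
	struct tx_desc tx = { NULL, NULL };

	flush_pending(&tx);
	puts("flushed without invoking a stale callback");
	return 0;
}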
@@ -1390,8 +1401,7 @@ int drv_enable_wq(struct idxd_wq *wq)
 err_irq:
 	idxd_wq_unmap_portal(wq);
 err_map_portal:
-	rc = idxd_wq_disable(wq, false);
-	if (rc < 0)
+	if (idxd_wq_disable(wq, false))
 		dev_dbg(dev, "wq %s disable failed\n", dev_name(wq_confdev(wq)));
 err:
 	return rc;
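The drv_enable_wq() hunk above stops idxd_wq_disable() from overwriting rc, so the probe fails with the error that actually caused the unwind. A sketch of that error-path rule (illustrative names and made-up error values):

#include <stdio.h>

static int do_setup(void)   { return -5; }	/* the real failure, say -EIO */
static int do_cleanup(void) { return 0; }	/* may fail independently */

static int probe(void)
{
	int rc = do_setup();

	if (rc) {
		/* log a cleanup failure, but keep the original rc */
		if (do_cleanup())
			fprintf(stderr, "cleanup failed\n");
		return rc;
	}
	return 0;
}

int main(void)
{
	printf("probe() = %d\n", probe());	/* prints -5, the setup error */
	return 0;
}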
@@ -1408,11 +1418,11 @@ void drv_disable_wq(struct idxd_wq *wq)
 		dev_warn(dev, "Clients has claim on wq %d: %d\n",
 			 wq->id, idxd_wq_refcount(wq));
 
-	idxd_wq_free_resources(wq);
 	idxd_wq_unmap_portal(wq);
 	idxd_wq_drain(wq);
 	idxd_wq_free_irq(wq);
 	idxd_wq_reset(wq);
+	idxd_wq_free_resources(wq);
 	percpu_ref_exit(&wq->wq_active);
 	wq->type = IDXD_WQT_NONE;
 	wq->client_count = 0;
......
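The drv_disable_wq() hunk above moves idxd_wq_free_resources() after drain and reset, so completion memory is freed only once no in-flight descriptor can still write to it. A toy model of that ordering (hypothetical types, no real hardware):

#include <stdio.h>
#include <stdlib.h>

struct wq {
	int *completion;	/* memory the "device" writes status into */
	int pending;
};

static void drain(struct wq *wq)
{
	if (wq->pending) {
		*wq->completion = 1;	/* device finishes its last write */
		wq->pending = 0;
	}
}

static void disable_wq(struct wq *wq)
{
	drain(wq);			/* quiesce first... */
	free(wq->completion);		/* ...then free completion memory */
	wq->completion = NULL;
}

int main(void)
{
	struct wq wq = { malloc(sizeof(int)), 1 };

	disable_wq(&wq);		/* no write-after-free possible */
	puts("disabled cleanly");
	return 0;
}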
@@ -1521,10 +1521,12 @@ static struct sdma_desc *sdma_transfer_init(struct sdma_channel *sdmac,
 	sdma_config_ownership(sdmac, false, true, false);
 
 	if (sdma_load_context(sdmac))
-		goto err_desc_out;
+		goto err_bd_out;
 
 	return desc;
 
+err_bd_out:
+	sdma_free_bd(desc);
 err_desc_out:
 	kfree(desc);
 err_out:
......
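The imx-sdma hunk above adds an err_bd_out label so a sdma_load_context() failure also frees the buffer descriptors, not just the desc itself. A standalone sketch of the layered-goto unwind style it restores, where each label frees one allocation and falls through to the labels below it (illustrative allocations):

#include <stdio.h>
#include <stdlib.h>

struct desc { int *bd; };

static struct desc *transfer_init(void)
{
	struct desc *d = malloc(sizeof(*d));

	if (!d)
		goto err_out;

	d->bd = malloc(64);
	if (!d->bd)
		goto err_desc_out;	/* only the desc exists so far */

	if (1 /* pretend load_context() failed */)
		goto err_bd_out;	/* bd exists too: free it first */

	return d;

err_bd_out:
	free(d->bd);
err_desc_out:
	free(d);
err_out:
	return NULL;
}

int main(void)
{
	if (!transfer_init())
		puts("init failed; everything allocated was freed");
	return 0;
}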
@@ -914,7 +914,7 @@ static void ldma_dev_init(struct ldma_dev *d)
 	}
 }
 
-static int ldma_cfg_init(struct ldma_dev *d)
+static int ldma_parse_dt(struct ldma_dev *d)
 {
 	struct fwnode_handle *fwnode = dev_fwnode(d->dev);
 	struct ldma_port *p;
@@ -1661,10 +1661,6 @@ static int intel_ldma_probe(struct platform_device *pdev)
 		p->ldev = d;
 	}
 
-	ret = ldma_cfg_init(d);
-	if (ret)
-		return ret;
-
 	dma_dev->dev = &pdev->dev;
 
 	ch_mask = (unsigned long)d->channels_mask;
@@ -1675,6 +1671,10 @@ static int intel_ldma_probe(struct platform_device *pdev)
 			ldma_dma_init_v3X(j, d);
 	}
 
+	ret = ldma_parse_dt(d);
+	if (ret)
+		return ret;
+
 	dma_dev->device_alloc_chan_resources = ldma_alloc_chan_resources;
 	dma_dev->device_free_chan_resources = ldma_free_chan_resources;
 	dma_dev->device_terminate_all = ldma_terminate_all;
......
@@ -71,12 +71,13 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
 	bool soc = FIELD_GET(DWORD0_SOC, desc->dw0);
 	u8 *q_desc = (u8 *)&cmd_q->qbase[cmd_q->qidx];
 	u32 tail;
+	unsigned long flags;
 
 	if (soc) {
 		desc->dw0 |= FIELD_PREP(DWORD0_IOC, desc->dw0);
 		desc->dw0 &= ~DWORD0_SOC;
 	}
-	mutex_lock(&cmd_q->q_mutex);
+
+	spin_lock_irqsave(&cmd_q->q_lock, flags);
 
 	/* Copy 32-byte command descriptor to hw queue. */
 	memcpy(q_desc, desc, 32);
@@ -91,7 +92,7 @@ static int pt_core_execute_cmd(struct ptdma_desc *desc, struct pt_cmd_queue *cmd_q)
 
 	/* Turn the queue back on using our cached control register */
 	pt_start_queue(cmd_q);
-	mutex_unlock(&cmd_q->q_mutex);
+	spin_unlock_irqrestore(&cmd_q->q_lock, flags);
 
 	return 0;
 }
@@ -199,7 +200,7 @@ int pt_core_init(struct pt_device *pt)
 	cmd_q->pt = pt;
 	cmd_q->dma_pool = dma_pool;
-	mutex_init(&cmd_q->q_mutex);
+	spin_lock_init(&cmd_q->q_lock);
 
 	/* Page alignment satisfies our needs for N <= 128 */
 	cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
......
@@ -196,7 +196,7 @@ struct pt_cmd_queue {
 	struct ptdma_desc *qbase;
 
 	/* Aligned queue start address (per requirement) */
-	struct mutex q_mutex ____cacheline_aligned;
+	spinlock_t q_lock ____cacheline_aligned;
 	unsigned int qidx;
 	unsigned int qsize;
......
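The ptdma hunks above replace the command-queue mutex with spin_lock_irqsave(), since a mutex can sleep and so cannot protect a section that may run in atomic context. A rough userspace analogue of the short insert-under-spinlock critical section, using POSIX spinlocks (there is no userspace equivalent of disabling interrupts; build with -lpthread):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define QSIZE 8

static char qbase[QSIZE][32];
static unsigned int qidx;
static pthread_spinlock_t q_lock;

static void execute_cmd(const char *desc32)
{
	pthread_spin_lock(&q_lock);
	memcpy(qbase[qidx], desc32, 32);	/* copy descriptor to queue */
	qidx = (qidx + 1) % QSIZE;		/* advance tail */
	pthread_spin_unlock(&q_lock);
}

int main(void)
{
	char desc[32] = "cmd";

	pthread_spin_init(&q_lock, PTHREAD_PROCESS_PRIVATE);
	execute_cmd(desc);
	printf("qidx = %u\n", qidx);
	pthread_spin_destroy(&q_lock);
	return 0;
}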
@@ -1756,6 +1756,7 @@ static int gpi_create_spi_tre(struct gchan *chan, struct gpi_desc *desc,
 		tre->dword[3] = u32_encode_bits(TRE_TYPE_GO, TRE_FLAGS_TYPE);
 		if (spi->cmd == SPI_RX) {
 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_IEOB);
+			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_LINK);
 		} else if (spi->cmd == SPI_TX) {
 			tre->dword[3] |= u32_encode_bits(1, TRE_FLAGS_CHAIN);
 		} else { /* SPI_DUPLEX */
......
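For reference, u32_encode_bits() (linux/bitfield.h) places a value into the field described by a contiguous bit mask, which is how the GO TRE fix above sets the extra link flag in dword[3]. A userspace sketch of that encoding (the masks below are made up, not the real TRE layout; __builtin_ctz is GCC/Clang-specific):

#include <stdint.h>
#include <stdio.h>

#define FLAG_IEOB  0x00000100u		/* hypothetical bit positions */
#define FLAG_LINK  0x00000800u

static uint32_t u32_encode(uint32_t val, uint32_t mask)
{
	return (val << __builtin_ctz(mask)) & mask;	/* place val in field */
}

int main(void)
{
	uint32_t dword3 = 0;

	dword3 |= u32_encode(1, FLAG_IEOB);	/* interrupt on end of block */
	dword3 |= u32_encode(1, FLAG_LINK);	/* the fix: also set the link bit */
	printf("dword3 = 0x%08x\n", dword3);
	return 0;
}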
@@ -711,6 +711,7 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
 			return err;
 		}
 
 		vchan_terminate_vdesc(&tdc->dma_desc->vd);
+		tegra_dma_disable(tdc);
 		tdc->dma_desc = NULL;
 	}
......
@@ -221,7 +221,7 @@ static int tegra_adma_init(struct tegra_adma *tdma)
 	int ret;
 
 	/* Clear any interrupts */
-	tdma_write(tdma, tdma->cdata->global_int_clear, 0x1);
+	tdma_write(tdma, tdma->cdata->ch_base_offset + tdma->cdata->global_int_clear, 0x1);
 
 	/* Assert soft reset */
 	tdma_write(tdma, ADMA_GLOBAL_SOFT_RESET, 0x1);
......
@@ -762,11 +762,12 @@ static void udma_decrement_byte_counters(struct udma_chan *uc, u32 val)
 	if (uc->desc->dir == DMA_DEV_TO_MEM) {
 		udma_rchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
 		udma_rchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-		udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
+		if (uc->config.ep_type != PSIL_EP_NATIVE)
+			udma_rchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
 	} else {
 		udma_tchanrt_write(uc, UDMA_CHAN_RT_BCNT_REG, val);
 		udma_tchanrt_write(uc, UDMA_CHAN_RT_SBCNT_REG, val);
-		if (!uc->bchan)
+		if (!uc->bchan && uc->config.ep_type != PSIL_EP_NATIVE)
 			udma_tchanrt_write(uc, UDMA_CHAN_RT_PEER_BCNT_REG, val);
 	}
 }
......
@@ -3143,9 +3143,11 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Initialize the channels */
 	for_each_child_of_node(node, child) {
 		err = xilinx_dma_child_probe(xdev, child);
-		if (err < 0)
+		if (err < 0) {
+			of_node_put(child);
 			goto error;
+		}
 	}
 
 	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		for (i = 0; i < xdev->dma_config->max_channels; i++)
......
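The xilinx_dma hunk above adds of_node_put() before the early goto because for_each_child_of_node() holds a reference on the current child that is normally dropped only when the iterator advances; leaving the loop early hands that reference to the caller. A toy refcount model of the pattern (hypothetical node type, not the real OF API):

#include <stdio.h>

struct node { int refcount; };

static struct node *node_get(struct node *n) { if (n) n->refcount++; return n; }
static void node_put(struct node *n)         { if (n) n->refcount--; }

/* iterator step: release the previous child, take a ref on the next */
static struct node *next_child(struct node kids[], int nkids, struct node *prev)
{
	int idx = prev ? (int)(prev - kids) + 1 : 0;

	node_put(prev);
	return idx < nkids ? node_get(&kids[idx]) : NULL;
}

int main(void)
{
	struct node children[3] = { {0}, {0}, {0} };
	struct node *child;

	for (child = next_child(children, 3, NULL); child;
	     child = next_child(children, 3, child)) {
		if (child == &children[1]) {	/* pretend probe failed here */
			node_put(child);	/* the fix: balance before leaving */
			break;
		}
	}

	printf("children[1].refcount = %d\n", children[1].refcount);	/* 0 */
	return 0;
}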