Commit 6d04b70e authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-fix-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine fixes from Vinod Koul:

 - SPI PDMA data fix for TI k3-psil drivers

 - a suspend fix, a wrong-pointer-check fix, a round-robin arbitration
   logic fix, and a channel-leak fix in the fsl-edma driver

 - a couple of fixes in the idxd driver: incorrect GRPCFG register
   descriptions and int_handle field handling

 - a single fix for the stm32 driver to avoid a bitfield overflow
   assertion

* tag 'dmaengine-fix-6.7' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine:
  dmaengine: fsl-edma: fix DMA channel leak in eDMAv4
  dmaengine: fsl-edma: fix wrong pointer check in fsl_edma3_attach_pd()
  dmaengine: idxd: Fix incorrect descriptions for GRPCFG register
  dmaengine: idxd: Protect int_handle field in hw descriptor
  dmaengine: stm32-dma: avoid bitfield overflow assertion
  dmaengine: fsl-edma: Add judgment on enabling round robin arbitration
  dmaengine: fsl-edma: Do not suspend and resume the masked dma channel when the system is sleeping
  dmaengine: ti: k3-psil-am62a: Fix SPI PDMA data
  dmaengine: ti: k3-psil-am62: Fix SPI PDMA data
parents 134fdb80 4ee632c8
...@@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) ...@@ -828,6 +828,7 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
dma_pool_destroy(fsl_chan->tcd_pool); dma_pool_destroy(fsl_chan->tcd_pool);
fsl_chan->tcd_pool = NULL; fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false; fsl_chan->is_sw = false;
fsl_chan->srcid = 0;
} }
void fsl_edma_cleanup_vchan(struct dma_device *dmadev) void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
......
...@@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng ...@@ -396,9 +396,8 @@ static int fsl_edma3_attach_pd(struct platform_device *pdev, struct fsl_edma_eng
link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS | link = device_link_add(dev, pd_chan, DL_FLAG_STATELESS |
DL_FLAG_PM_RUNTIME | DL_FLAG_PM_RUNTIME |
DL_FLAG_RPM_ACTIVE); DL_FLAG_RPM_ACTIVE);
if (IS_ERR(link)) { if (!link) {
dev_err(dev, "Failed to add device_link to %d: %ld\n", i, dev_err(dev, "Failed to add device_link to %d\n", i);
PTR_ERR(link));
return -EINVAL; return -EINVAL;
} }
...@@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev) ...@@ -631,6 +630,8 @@ static int fsl_edma_suspend_late(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) { for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i]; fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
spin_lock_irqsave(&fsl_chan->vchan.lock, flags); spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
/* Make sure chan is idle or will force disable. */ /* Make sure chan is idle or will force disable. */
if (unlikely(!fsl_chan->idle)) { if (unlikely(!fsl_chan->idle)) {
...@@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev) ...@@ -655,13 +656,16 @@ static int fsl_edma_resume_early(struct device *dev)
for (i = 0; i < fsl_edma->n_chans; i++) { for (i = 0; i < fsl_edma->n_chans; i++) {
fsl_chan = &fsl_edma->chans[i]; fsl_chan = &fsl_edma->chans[i];
if (fsl_edma->chan_masked & BIT(i))
continue;
fsl_chan->pm_state = RUNNING; fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr); edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0) if (fsl_chan->slave_id != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true);
} }
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr); if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
edma_writel(fsl_edma, EDMA_CR_ERGA | EDMA_CR_ERCA, regs->cr);
return 0; return 0;
} }
......
...@@ -440,12 +440,14 @@ union wqcfg { ...@@ -440,12 +440,14 @@ union wqcfg {
/* /*
* This macro calculates the offset into the GRPCFG register * This macro calculates the offset into the GRPCFG register
* idxd - struct idxd * * idxd - struct idxd *
* n - wq id * n - group id
* ofs - the index of the 32b dword for the config register * ofs - the index of the 64b qword for the config register
* *
* The WQCFG register block is divided into groups per each wq. The n index * The GRPCFG register block is divided into three sub-registers, which
* allows us to move to the register group that's for that particular wq. * are GRPWQCFG, GRPENGCFG and GRPFLGCFG. The n index allows us to move
* Each register is 32bits. The ofs gives us the number of register to access. * to the register block that contains the three sub-registers.
* Each register block is 64bits. And the ofs gives us the offset
* within the GRPWQCFG register to access.
*/ */
#define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\ #define GRPWQCFG_OFFSET(idxd_dev, n, ofs) ((idxd_dev)->grpcfg_offset +\
(n) * GRPCFG_SIZE + sizeof(u64) * (ofs)) (n) * GRPCFG_SIZE + sizeof(u64) * (ofs))
......
...@@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) ...@@ -182,13 +182,6 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
portal = idxd_wq_portal_addr(wq); portal = idxd_wq_portal_addr(wq);
/*
* The wmb() flushes writes to coherent DMA data before
* possibly triggering a DMA read. The wmb() is necessary
* even on UP because the recipient is a device.
*/
wmb();
/* /*
* Pending the descriptor to the lockless list for the irq_entry * Pending the descriptor to the lockless list for the irq_entry
* that we designated the descriptor to. * that we designated the descriptor to.
...@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc) ...@@ -199,6 +192,13 @@ int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
llist_add(&desc->llnode, &ie->pending_llist); llist_add(&desc->llnode, &ie->pending_llist);
} }
/*
* The wmb() flushes writes to coherent DMA data before
* possibly triggering a DMA read. The wmb() is necessary
* even on UP because the recipient is a device.
*/
wmb();
if (wq_dedicated(wq)) { if (wq_dedicated(wq)) {
iosubmit_cmds512(portal, desc->hw, 1); iosubmit_cmds512(portal, desc->hw, 1);
} else { } else {
......
...@@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( ...@@ -1246,8 +1246,8 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
enum dma_slave_buswidth max_width; enum dma_slave_buswidth max_width;
struct stm32_dma_desc *desc; struct stm32_dma_desc *desc;
size_t xfer_count, offset; size_t xfer_count, offset;
u32 num_sgs, best_burst, dma_burst, threshold; u32 num_sgs, best_burst, threshold;
int i; int dma_burst, i;
num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS); num_sgs = DIV_ROUND_UP(len, STM32_DMA_ALIGNED_MAX_DATA_ITEMS);
desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT); desc = kzalloc(struct_size(desc, sg_req, num_sgs), GFP_NOWAIT);
...@@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy( ...@@ -1266,6 +1266,10 @@ static struct dma_async_tx_descriptor *stm32_dma_prep_dma_memcpy(
best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST, best_burst = stm32_dma_get_best_burst(len, STM32_DMA_MAX_BURST,
threshold, max_width); threshold, max_width);
dma_burst = stm32_dma_get_burst(chan, best_burst); dma_burst = stm32_dma_get_burst(chan, best_burst);
if (dma_burst < 0) {
kfree(desc);
return NULL;
}
stm32_dma_clear_reg(&desc->sg_req[i].chan_reg); stm32_dma_clear_reg(&desc->sg_req[i].chan_reg);
desc->sg_req[i].chan_reg.dma_scr = desc->sg_req[i].chan_reg.dma_scr =
......
...@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = { ...@@ -74,7 +74,9 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */ /* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0x4300),
PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304), PSIL_PDMA_XY_PKT(0x4304),
...@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = { ...@@ -85,8 +87,6 @@ static struct psil_ep am62_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309), PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a), PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b), PSIL_PDMA_XY_PKT(0x430b),
PSIL_PDMA_XY_PKT(0x430c),
PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */ /* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400), PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401), PSIL_PDMA_XY_PKT(0x4401),
...@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = { ...@@ -141,7 +141,9 @@ static struct psil_ep am62_dst_ep_map[] = {
/* SAUL */ /* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-3 */ /* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0xc300),
PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302), PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303), PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304), PSIL_PDMA_XY_PKT(0xc304),
...@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = { ...@@ -152,8 +154,6 @@ static struct psil_ep am62_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309), PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a), PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b), PSIL_PDMA_XY_PKT(0xc30b),
PSIL_PDMA_XY_PKT(0xc30c),
PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */ /* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400), PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401), PSIL_PDMA_XY_PKT(0xc401),
......
...@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = { ...@@ -84,7 +84,9 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_SAUL(0x7505, 21, 35, 8, 36, 0), PSIL_SAUL(0x7505, 21, 35, 8, 36, 0),
PSIL_SAUL(0x7506, 22, 43, 8, 43, 0), PSIL_SAUL(0x7506, 22, 43, 8, 43, 0),
PSIL_SAUL(0x7507, 23, 43, 8, 44, 0), PSIL_SAUL(0x7507, 23, 43, 8, 44, 0),
/* PDMA_MAIN0 - SPI0-3 */ /* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0x4300),
PSIL_PDMA_XY_PKT(0x4301),
PSIL_PDMA_XY_PKT(0x4302), PSIL_PDMA_XY_PKT(0x4302),
PSIL_PDMA_XY_PKT(0x4303), PSIL_PDMA_XY_PKT(0x4303),
PSIL_PDMA_XY_PKT(0x4304), PSIL_PDMA_XY_PKT(0x4304),
...@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = { ...@@ -95,8 +97,6 @@ static struct psil_ep am62a_src_ep_map[] = {
PSIL_PDMA_XY_PKT(0x4309), PSIL_PDMA_XY_PKT(0x4309),
PSIL_PDMA_XY_PKT(0x430a), PSIL_PDMA_XY_PKT(0x430a),
PSIL_PDMA_XY_PKT(0x430b), PSIL_PDMA_XY_PKT(0x430b),
PSIL_PDMA_XY_PKT(0x430c),
PSIL_PDMA_XY_PKT(0x430d),
/* PDMA_MAIN1 - UART0-6 */ /* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0x4400), PSIL_PDMA_XY_PKT(0x4400),
PSIL_PDMA_XY_PKT(0x4401), PSIL_PDMA_XY_PKT(0x4401),
...@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = { ...@@ -151,7 +151,9 @@ static struct psil_ep am62a_dst_ep_map[] = {
/* SAUL */ /* SAUL */
PSIL_SAUL(0xf500, 27, 83, 8, 83, 1), PSIL_SAUL(0xf500, 27, 83, 8, 83, 1),
PSIL_SAUL(0xf501, 28, 91, 8, 91, 1), PSIL_SAUL(0xf501, 28, 91, 8, 91, 1),
/* PDMA_MAIN0 - SPI0-3 */ /* PDMA_MAIN0 - SPI0-2 */
PSIL_PDMA_XY_PKT(0xc300),
PSIL_PDMA_XY_PKT(0xc301),
PSIL_PDMA_XY_PKT(0xc302), PSIL_PDMA_XY_PKT(0xc302),
PSIL_PDMA_XY_PKT(0xc303), PSIL_PDMA_XY_PKT(0xc303),
PSIL_PDMA_XY_PKT(0xc304), PSIL_PDMA_XY_PKT(0xc304),
...@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = { ...@@ -162,8 +164,6 @@ static struct psil_ep am62a_dst_ep_map[] = {
PSIL_PDMA_XY_PKT(0xc309), PSIL_PDMA_XY_PKT(0xc309),
PSIL_PDMA_XY_PKT(0xc30a), PSIL_PDMA_XY_PKT(0xc30a),
PSIL_PDMA_XY_PKT(0xc30b), PSIL_PDMA_XY_PKT(0xc30b),
PSIL_PDMA_XY_PKT(0xc30c),
PSIL_PDMA_XY_PKT(0xc30d),
/* PDMA_MAIN1 - UART0-6 */ /* PDMA_MAIN1 - UART0-6 */
PSIL_PDMA_XY_PKT(0xc400), PSIL_PDMA_XY_PKT(0xc400),
PSIL_PDMA_XY_PKT(0xc401), PSIL_PDMA_XY_PKT(0xc401),
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment