Commit 055128ee authored by Linus Torvalds

Merge tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Updates to stm32 dma residue calculations

 - Interleaved DMA capability for axi-dmac and support for the ZynqMP arch

 - Rework of channel assignment for rcar dma

 - Debugfs for pl330 driver

 - Support for Tegra186/Tegra194, refactoring for new chips and support
   for pause/resume

 - Updates to axi-dmac, bcm2835, fsl-edma, idma64, imx-sdma, rcar-dmac,
   stm32-dma etc

 - dev_get_drvdata() updates on a few drivers

* tag 'dmaengine-5.2-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (34 commits)
  dmaengine: tegra210-adma: restore channel status
  dmaengine: tegra210-dma: free dma controller in remove()
  dmaengine: tegra210-adma: add pause/resume support
  dmaengine: tegra210-adma: add support for Tegra186/Tegra194
  Documentation: DT: Add compatibility binding for Tegra186
  dmaengine: tegra210-adma: prepare for supporting newer Tegra chips
  dmaengine: at_xdmac: remove a stray bottom half unlock
  dmaengine: fsl-edma: Adjust indentation
  dmaengine: fsl-edma: Fix typo in Vybrid name
  dmaengine: stm32-dma: fix residue calculation in stm32-dma
  dmaengine: nbpfaxi: Use dev_get_drvdata()
  dmaengine: bcm-sba-raid: Use dev_get_drvdata()
  dmaengine: stm32-dma: Fix unsigned variable compared with zero
  dmaengine: stm32-dma: use platform_get_irq()
  dmaengine: rcar-dmac: Update copyright information
  dmaengine: imx-sdma: Only check ratio on parts that support 1:1
  dmaengine: xgene-dma: fix spelling mistake "descripto" -> "descriptor"
  dmaengine: idma64: Move driver name to the header
  dmaengine: bcm2835: Drop duplicate capability setting.
  dmaengine: pl330: _stop: clear interrupt status
  ...
parents ddab5337 f33e7bb3
@@ -18,7 +18,6 @@ Required properties for adi,channels sub-node:
 
 Required channel sub-node properties:
 - reg: Which channel this node refers to.
-- adi,length-width: Width of the DMA transfer length register.
 - adi,source-bus-width,
   adi,destination-bus-width: Width of the source or destination bus in bits.
 - adi,source-bus-type,
@@ -28,7 +27,8 @@ Required channel sub-node properties:
 	1 (AXI_DMAC_TYPE_AXI_STREAM): Streaming AXI interface
 	2 (AXI_DMAC_TYPE_AXI_FIFO): FIFO interface
 
-Optional channel properties:
+Deprecated optional channel properties:
+- adi,length-width: Width of the DMA transfer length register.
 - adi,cyclic: Must be set if the channel supports hardware cyclic DMA
   transfers.
 - adi,2d: Must be set if the channel supports hardware 2D DMA transfers.
...
@@ -4,7 +4,9 @@ The Tegra Audio DMA controller that is used for transferring data
 between system memory and the Audio Processing Engine (APE).
 
 Required properties:
-- compatible: Must be "nvidia,tegra210-adma".
+- compatible: Should contain one of the following:
+  - "nvidia,tegra210-adma": for Tegra210
+  - "nvidia,tegra186-adma": for Tegra186 and Tegra194
 - reg: Should contain DMA registers location and length. This should be
   a single entry that includes all of the per-channel registers in one
   contiguous bank.
...
@@ -99,7 +99,7 @@ config AT_XDMAC
 
 config AXI_DMAC
 	tristate "Analog Devices AXI-DMAC DMA support"
-	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_SOCFPGA || COMPILE_TEST
+	depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
 	help
...
@@ -254,6 +254,7 @@ enum pl08x_dma_chan_state {
  * @slave: whether this channel is a device (slave) or for memcpy
  * @signal: the physical DMA request signal which this channel is using
  * @mux_use: count of descriptors using this DMA request signal setting
+ * @waiting_at: time in jiffies when this channel moved to waiting state
  */
 struct pl08x_dma_chan {
 	struct virt_dma_chan vc;
@@ -267,6 +268,7 @@ struct pl08x_dma_chan {
 	bool slave;
 	int signal;
 	unsigned mux_use;
+	unsigned long waiting_at;
 };
 
 /**
@@ -875,6 +877,7 @@ static void pl08x_phy_alloc_and_start(struct pl08x_dma_chan *plchan)
 	if (!ch) {
 		dev_dbg(&pl08x->adev->dev, "no physical channel available for xfer on %s\n", plchan->name);
 		plchan->state = PL08X_CHAN_WAITING;
+		plchan->waiting_at = jiffies;
 		return;
 	}
 
@@ -913,22 +916,29 @@ static void pl08x_phy_free(struct pl08x_dma_chan *plchan)
 {
 	struct pl08x_driver_data *pl08x = plchan->host;
 	struct pl08x_dma_chan *p, *next;
+	unsigned long waiting_at;
 
 retry:
 	next = NULL;
+	waiting_at = jiffies;
 
-	/* Find a waiting virtual channel for the next transfer. */
+	/*
+	 * Find a waiting virtual channel for the next transfer.
+	 * To be fair, time when each channel reached waiting state is compared
+	 * to select channel that is waiting for the longest time.
+	 */
 	list_for_each_entry(p, &pl08x->memcpy.channels, vc.chan.device_node)
-		if (p->state == PL08X_CHAN_WAITING) {
+		if (p->state == PL08X_CHAN_WAITING &&
+		    p->waiting_at <= waiting_at) {
 			next = p;
-			break;
+			waiting_at = p->waiting_at;
 		}
 
 	if (!next && pl08x->has_slave) {
 		list_for_each_entry(p, &pl08x->slave.channels, vc.chan.device_node)
-			if (p->state == PL08X_CHAN_WAITING) {
+			if (p->state == PL08X_CHAN_WAITING &&
+			    p->waiting_at <= waiting_at) {
				next = p;
-				break;
+				waiting_at = p->waiting_at;
			}
 	}
...
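The fairness policy above is compact enough to demonstrate standalone. Below is a minimal sketch (plain C; the hypothetical `struct chan` and array stand in for the driver's `pl08x_dma_chan` and channel lists) of selecting the channel whose `waiting_at` timestamp is oldest, mirroring the loop in `pl08x_phy_free()`:

```c
#include <stdio.h>

struct chan {
	const char *name;
	int waiting;              /* 1 if in the WAITING state */
	unsigned long waiting_at; /* "jiffies" when it started waiting */
};

static struct chan *pick_longest_waiter(struct chan *chans, int n,
					unsigned long now)
{
	struct chan *next = NULL;
	unsigned long oldest = now;
	int i;

	for (i = 0; i < n; i++) {
		/* <= matches the driver's compare, so a tie resolves to
		 * the later list entry rather than the first one found. */
		if (chans[i].waiting && chans[i].waiting_at <= oldest) {
			next = &chans[i];
			oldest = chans[i].waiting_at;
		}
	}
	return next;
}

int main(void)
{
	struct chan chans[] = {
		{ "memcpy0", 1, 1200 },
		{ "memcpy1", 1, 1000 }, /* waiting longest -> selected */
		{ "slave0",  0,  900 }, /* not waiting, skipped */
	};
	struct chan *next = pick_longest_waiter(chans, 3, 1500);

	printf("next: %s\n", next ? next->name : "(none)");
	return 0;
}
```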
@@ -308,6 +308,11 @@ static inline int at_xdmac_csize(u32 maxburst)
 	return csize;
 };
 
+static inline bool at_xdmac_chan_is_peripheral_xfer(u32 cfg)
+{
+	return cfg & AT_XDMAC_CC_TYPE_PER_TRAN;
+}
+
 static inline u8 at_xdmac_get_dwidth(u32 cfg)
 {
 	return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
@@ -389,7 +394,13 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
 		 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
 
 	at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
-	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
+	reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE;
+	/*
+	 * Request Overflow Error is only for peripheral synchronized transfers
+	 */
+	if (at_xdmac_chan_is_peripheral_xfer(first->lld.mbr_cfg))
+		reg |= AT_XDMAC_CIE_ROIE;
+
 	/*
 	 * There is no end of list when doing cyclic dma, we need to get
 	 * an interrupt after each periods.
@@ -1575,6 +1586,46 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
 		dmaengine_desc_get_callback_invoke(txd, NULL);
 }
 
+static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
+{
+	struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
+	struct at_xdmac_desc *bad_desc;
+
+	/*
+	 * The descriptor currently at the head of the active list is
+	 * broken. Since we don't have any way to report errors, we'll
+	 * just have to scream loudly and try to continue with other
+	 * descriptors queued (if any).
+	 */
+	if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
+		dev_err(chan2dev(&atchan->chan), "read bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
+		dev_err(chan2dev(&atchan->chan), "write bus error!!!");
+	if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
+		dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+
+	spin_lock_bh(&atchan->lock);
+
+	/* Channel must be disabled first as it's not done automatically */
+	at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
+	while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
+		cpu_relax();
+
+	bad_desc = list_first_entry(&atchan->xfers_list,
+				    struct at_xdmac_desc,
+				    xfer_node);
+
+	spin_unlock_bh(&atchan->lock);
+
+	/* Print bad descriptor's details if needed */
+	dev_dbg(chan2dev(&atchan->chan),
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
+		__func__, &bad_desc->lld.mbr_sa, &bad_desc->lld.mbr_da,
+		bad_desc->lld.mbr_ubc);
+
+	/* Then continue with usual descriptor management */
+}
+
 static void at_xdmac_tasklet(unsigned long data)
 {
 	struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
@@ -1594,19 +1645,19 @@ static void at_xdmac_tasklet(unsigned long data)
 	    || (atchan->irq_status & error_mask)) {
 		struct dma_async_tx_descriptor *txd;
 
-		if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
-			dev_err(chan2dev(&atchan->chan), "read bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
-			dev_err(chan2dev(&atchan->chan), "write bus error!!!");
-		if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
-			dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
+		if (atchan->irq_status & error_mask)
+			at_xdmac_handle_error(atchan);
 
 		spin_lock(&atchan->lock);
 		desc = list_first_entry(&atchan->xfers_list,
 					struct at_xdmac_desc,
 					xfer_node);
 		dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
-		BUG_ON(!desc->active_xfer);
+		if (!desc->active_xfer) {
+			dev_err(chan2dev(&atchan->chan), "Xfer not active: exiting");
+			spin_unlock(&atchan->lock);
+			return;
+		}
 
 		txd = &desc->tx_dma_desc;
...
@@ -1459,8 +1459,7 @@ static void sba_receive_message(struct mbox_client *cl, void *msg)
 
 static int sba_debugfs_stats_show(struct seq_file *file, void *offset)
 {
-	struct platform_device *pdev = to_platform_device(file->private);
-	struct sba_device *sba = platform_get_drvdata(pdev);
+	struct sba_device *sba = dev_get_drvdata(file->private);
 
 	/* Write stats in file */
 	sba_write_stats_in_seqfile(sba, file);
...
@@ -891,7 +891,6 @@ static int bcm2835_dma_probe(struct platform_device *pdev)
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
-	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
...
@@ -166,7 +166,7 @@ static int axi_dmac_dest_is_mem(struct axi_dmac_chan *chan)
 
 static bool axi_dmac_check_len(struct axi_dmac_chan *chan, unsigned int len)
 {
-	if (len == 0 || len > chan->max_length)
+	if (len == 0)
 		return false;
 	if ((len & chan->align_mask) != 0) /* Not aligned */
 		return false;
@@ -379,6 +379,49 @@ static struct axi_dmac_desc *axi_dmac_alloc_desc(unsigned int num_sgs)
 	return desc;
 }
 
+static struct axi_dmac_sg *axi_dmac_fill_linear_sg(struct axi_dmac_chan *chan,
+	enum dma_transfer_direction direction, dma_addr_t addr,
+	unsigned int num_periods, unsigned int period_len,
+	struct axi_dmac_sg *sg)
+{
+	unsigned int num_segments, i;
+	unsigned int segment_size;
+	unsigned int len;
+
+	/* Split into multiple equally sized segments if necessary */
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
+	segment_size = DIV_ROUND_UP(period_len, num_segments);
+	/* Take care of alignment */
+	segment_size = ((segment_size - 1) | chan->align_mask) + 1;
+
+	for (i = 0; i < num_periods; i++) {
+		len = period_len;
+
+		while (len > segment_size) {
+			if (direction == DMA_DEV_TO_MEM)
+				sg->dest_addr = addr;
+			else
+				sg->src_addr = addr;
+			sg->x_len = segment_size;
+			sg->y_len = 1;
+			sg++;
+			addr += segment_size;
+			len -= segment_size;
+		}
+
+		if (direction == DMA_DEV_TO_MEM)
+			sg->dest_addr = addr;
+		else
+			sg->src_addr = addr;
+		sg->x_len = len;
+		sg->y_len = 1;
+		sg++;
+		addr += len;
+	}
+
+	return sg;
+}
+
 static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 	struct dma_chan *c, struct scatterlist *sgl,
 	unsigned int sg_len, enum dma_transfer_direction direction,
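The segment-size arithmetic in axi_dmac_fill_linear_sg() is worth a worked example. The sketch below uses made-up values rather than driver state; `DIV_ROUND_UP` is defined to match the kernel macro:

```c
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int period_len = 10000; /* bytes in one period */
	unsigned int max_length = 4096;  /* hardware per-segment limit */
	unsigned int align_mask = 0x3;   /* 4-byte aligned bus */

	unsigned int num_segments = DIV_ROUND_UP(period_len, max_length);
	unsigned int segment_size = DIV_ROUND_UP(period_len, num_segments);

	/*
	 * ((x - 1) | mask) + 1 rounds x up to the next multiple of
	 * (mask + 1), assuming mask + 1 is a power of two: 3334 -> 3336.
	 */
	segment_size = ((segment_size - 1) | align_mask) + 1;

	printf("%u segments of up to %u bytes\n", num_segments, segment_size);
	/* -> 3 segments of up to 3336 bytes; 3336 + 3336 + 3328 = 10000 */
	return 0;
}
```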
@@ -386,16 +429,24 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
+	struct axi_dmac_sg *dsg;
 	struct scatterlist *sg;
+	unsigned int num_sgs;
 	unsigned int i;
 
 	if (direction != chan->direction)
 		return NULL;
 
-	desc = axi_dmac_alloc_desc(sg_len);
+	num_sgs = 0;
+	for_each_sg(sgl, sg, sg_len, i)
+		num_sgs += DIV_ROUND_UP(sg_dma_len(sg), chan->max_length);
+
+	desc = axi_dmac_alloc_desc(num_sgs);
 	if (!desc)
 		return NULL;
 
+	dsg = desc->sg;
+
 	for_each_sg(sgl, sg, sg_len, i) {
 		if (!axi_dmac_check_addr(chan, sg_dma_address(sg)) ||
 		    !axi_dmac_check_len(chan, sg_dma_len(sg))) {
@@ -403,12 +454,8 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_slave_sg(
 			return NULL;
 		}
 
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = sg_dma_address(sg);
-		else
-			desc->sg[i].src_addr = sg_dma_address(sg);
-		desc->sg[i].x_len = sg_dma_len(sg);
-		desc->sg[i].y_len = 1;
+		dsg = axi_dmac_fill_linear_sg(chan, direction, sg_dma_address(sg), 1,
+			sg_dma_len(sg), dsg);
 	}
 
 	desc->cyclic = false;
@@ -423,7 +470,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 {
 	struct axi_dmac_chan *chan = to_axi_dmac_chan(c);
 	struct axi_dmac_desc *desc;
-	unsigned int num_periods, i;
+	unsigned int num_periods, num_segments;
 
 	if (direction != chan->direction)
 		return NULL;
@@ -436,20 +483,14 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_dma_cyclic(
 		return NULL;
 
 	num_periods = buf_len / period_len;
+	num_segments = DIV_ROUND_UP(period_len, chan->max_length);
 
-	desc = axi_dmac_alloc_desc(num_periods);
+	desc = axi_dmac_alloc_desc(num_periods * num_segments);
 	if (!desc)
 		return NULL;
 
-	for (i = 0; i < num_periods; i++) {
-		if (direction == DMA_DEV_TO_MEM)
-			desc->sg[i].dest_addr = buf_addr;
-		else
-			desc->sg[i].src_addr = buf_addr;
-		desc->sg[i].x_len = period_len;
-		desc->sg[i].y_len = 1;
-		buf_addr += period_len;
-	}
+	axi_dmac_fill_linear_sg(chan, direction, buf_addr, num_periods,
+		period_len, desc->sg);
 
 	desc->cyclic = true;
 
@@ -485,7 +526,7 @@ static struct dma_async_tx_descriptor *axi_dmac_prep_interleaved(
 	if (chan->hw_2d) {
 		if (!axi_dmac_check_len(chan, xt->sgl[0].size) ||
-		    !axi_dmac_check_len(chan, xt->numf))
+		    xt->numf == 0)
 			return NULL;
 		if (xt->sgl[0].size + dst_icg > chan->max_length ||
 		    xt->sgl[0].size + src_icg > chan->max_length)
@@ -577,15 +618,6 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 		return ret;
 	chan->dest_width = val / 8;
 
-	ret = of_property_read_u32(of_chan, "adi,length-width", &val);
-	if (ret)
-		return ret;
-
-	if (val >= 32)
-		chan->max_length = UINT_MAX;
-	else
-		chan->max_length = (1ULL << val) - 1;
-
 	chan->align_mask = max(chan->dest_width, chan->src_width) - 1;
 
 	if (axi_dmac_dest_is_mem(chan) && axi_dmac_src_is_mem(chan))
@@ -597,12 +629,27 @@ static int axi_dmac_parse_chan_dt(struct device_node *of_chan,
 	else
 		chan->direction = DMA_DEV_TO_DEV;
 
-	chan->hw_cyclic = of_property_read_bool(of_chan, "adi,cyclic");
-	chan->hw_2d = of_property_read_bool(of_chan, "adi,2d");
-
 	return 0;
 }
 
+static void axi_dmac_detect_caps(struct axi_dmac *dmac)
+{
+	struct axi_dmac_chan *chan = &dmac->chan;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_FLAGS, AXI_DMAC_FLAG_CYCLIC);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_FLAGS) == AXI_DMAC_FLAG_CYCLIC)
+		chan->hw_cyclic = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_Y_LENGTH, 1);
+	if (axi_dmac_read(dmac, AXI_DMAC_REG_Y_LENGTH) == 1)
+		chan->hw_2d = true;
+
+	axi_dmac_write(dmac, AXI_DMAC_REG_X_LENGTH, 0xffffffff);
+	chan->max_length = axi_dmac_read(dmac, AXI_DMAC_REG_X_LENGTH);
+	if (chan->max_length != UINT_MAX)
+		chan->max_length++;
+}
+
 static int axi_dmac_probe(struct platform_device *pdev)
 {
 	struct device_node *of_channels, *of_chan;
@@ -647,11 +694,12 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	of_node_put(of_channels);
 
 	pdev->dev.dma_parms = &dmac->dma_parms;
-	dma_set_max_seg_size(&pdev->dev, dmac->chan.max_length);
+	dma_set_max_seg_size(&pdev->dev, UINT_MAX);
 
 	dma_dev = &dmac->dma_dev;
 	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
 	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);
+	dma_cap_set(DMA_INTERLEAVE, dma_dev->cap_mask);
 	dma_dev->device_free_chan_resources = axi_dmac_free_chan_resources;
 	dma_dev->device_tx_status = dma_cookie_status;
 	dma_dev->device_issue_pending = axi_dmac_issue_pending;
@@ -675,6 +723,8 @@ static int axi_dmac_probe(struct platform_device *pdev)
 	if (ret < 0)
 		return ret;
 
+	axi_dmac_detect_caps(dmac);
+
 	axi_dmac_write(dmac, AXI_DMAC_REG_IRQ_MASK, 0x00);
 
 	ret = dma_async_device_register(dma_dev);
...
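axi_dmac_detect_caps() replaces the devicetree properties removed above with write/read-back probing: an optional feature register only holds a written value if the corresponding hardware was synthesized. A standalone sketch of the idea, using a simulated register file instead of real MMIO (the masking behavior is a made-up example of a core without 2D support and a 24-bit length register):

```c
#include <stdio.h>
#include <limits.h>

enum { REG_FLAGS, REG_X_LENGTH, REG_Y_LENGTH, NUM_REGS };

static unsigned int regs[NUM_REGS];

/* Simulated MMIO: Y_LENGTH never sticks, X_LENGTH saturates at 24 bits. */
static void mmio_write(int reg, unsigned int val)
{
	if (reg == REG_Y_LENGTH)
		val = 0;
	else if (reg == REG_X_LENGTH)
		val &= 0x00ffffff;
	regs[reg] = val;
}

static unsigned int mmio_read(int reg)
{
	return regs[reg];
}

int main(void)
{
	int hw_2d;
	unsigned int max_length;

	/* Probe 2D support: does a write to Y_LENGTH stick? */
	mmio_write(REG_Y_LENGTH, 1);
	hw_2d = (mmio_read(REG_Y_LENGTH) == 1);

	/* Probe the widest transfer: the register holds (length - 1). */
	mmio_write(REG_X_LENGTH, 0xffffffff);
	max_length = mmio_read(REG_X_LENGTH);
	if (max_length != UINT_MAX)
		max_length++;

	printf("hw_2d=%d max_length=%u\n", hw_2d, max_length);
	/* -> hw_2d=0 max_length=16777216 */
	return 0;
}
```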
@@ -136,7 +136,7 @@ struct fsl_edma_desc {
 };
 
 enum edma_version {
-	v1, /* 32ch, Vybdir, mpc57x, etc */
+	v1, /* 32ch, Vybrid, mpc57x, etc */
 	v2, /* 64ch Coldfire */
 };
 
...
@@ -144,21 +144,21 @@ fsl_edma_irq_init(struct platform_device *pdev, struct fsl_edma_engine *fsl_edma
 			fsl_edma_irq_handler, 0, "eDMA", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA IRQ.\n");
-			 return ret;
+			return ret;
 		}
 	} else {
 		ret = devm_request_irq(&pdev->dev, fsl_edma->txirq,
 			fsl_edma_tx_handler, 0, "eDMA tx", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA tx IRQ.\n");
-			 return  ret;
+			return ret;
 		}
 
 		ret = devm_request_irq(&pdev->dev, fsl_edma->errirq,
 			fsl_edma_err_handler, 0, "eDMA err", fsl_edma);
 		if (ret) {
 			dev_err(&pdev->dev, "Can't register eDMA err IRQ.\n");
-			 return  ret;
+			return ret;
 		}
 	}
 
...
@@ -19,10 +19,9 @@
 #include <linux/platform_device.h>
 #include <linux/slab.h>
 
-#include "idma64.h"
+#include <linux/dma/idma64.h>
 
-/* Platform driver name */
-#define DRV_NAME		"idma64"
+#include "idma64.h"
 
 /* For now we support only two channels */
 #define IDMA64_NR_CHAN		2
@@ -592,7 +591,7 @@ static int idma64_probe(struct idma64_chip *chip)
 	idma64->dma.directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
 	idma64->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
 
-	idma64->dma.dev = chip->dev;
+	idma64->dma.dev = chip->sysdev;
 
 	dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
 
@@ -632,6 +631,7 @@ static int idma64_platform_probe(struct platform_device *pdev)
 {
 	struct idma64_chip *chip;
 	struct device *dev = &pdev->dev;
+	struct device *sysdev = dev->parent;
 	struct resource *mem;
 	int ret;
 
@@ -648,11 +648,12 @@ static int idma64_platform_probe(struct platform_device *pdev)
 	if (IS_ERR(chip->regs))
 		return PTR_ERR(chip->regs);
 
-	ret = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	ret = dma_coerce_mask_and_coherent(sysdev, DMA_BIT_MASK(64));
 	if (ret)
 		return ret;
 
 	chip->dev = dev;
+	chip->sysdev = sysdev;
 
 	ret = idma64_probe(chip);
 	if (ret)
@@ -697,7 +698,7 @@ static struct platform_driver idma64_platform_driver = {
 	.probe		= idma64_platform_probe,
 	.remove		= idma64_platform_remove,
 	.driver = {
-		.name	= DRV_NAME,
+		.name	= LPSS_IDMA64_DRIVER_NAME,
 		.pm	= &idma64_dev_pm_ops,
 	},
 };
@@ -707,4 +708,4 @@ module_platform_driver(idma64_platform_driver);
 MODULE_LICENSE("GPL v2");
 MODULE_DESCRIPTION("iDMA64 core driver");
 MODULE_AUTHOR("Andy Shevchenko <andriy.shevchenko@linux.intel.com>");
-MODULE_ALIAS("platform:" DRV_NAME);
+MODULE_ALIAS("platform:" LPSS_IDMA64_DRIVER_NAME);
@@ -216,12 +216,14 @@ static inline void idma64_writel(struct idma64 *idma64, int offset, u32 value)
 /**
  * struct idma64_chip - representation of iDMA 64-bit controller hardware
  * @dev:		struct device of the DMA controller
+ * @sysdev:		struct device of the physical device that does DMA
  * @irq:		irq line
  * @regs:		memory mapped I/O space
  * @idma64:		struct idma64 that is filed by idma64_probe()
  */
 struct idma64_chip {
 	struct device	*dev;
+	struct device	*sysdev;
 	int		irq;
 	void __iomem	*regs;
 	struct idma64	*idma64;
...
@@ -419,6 +419,7 @@ struct sdma_driver_data {
 	int chnenbl0;
 	int num_events;
 	struct sdma_script_start_addrs	*script_addrs;
+	bool check_ratio;
 };
 
 struct sdma_engine {
@@ -557,6 +558,13 @@ static struct sdma_driver_data sdma_imx7d = {
 	.script_addrs = &sdma_script_imx7d,
 };
 
+static struct sdma_driver_data sdma_imx8mq = {
+	.chnenbl0 = SDMA_CHNENBL0_IMX35,
+	.num_events = 48,
+	.script_addrs = &sdma_script_imx7d,
+	.check_ratio = 1,
+};
+
 static const struct platform_device_id sdma_devtypes[] = {
 	{
 		.name = "imx25-sdma",
@@ -579,6 +587,9 @@ static const struct platform_device_id sdma_devtypes[] = {
 	}, {
 		.name = "imx7d-sdma",
 		.driver_data = (unsigned long)&sdma_imx7d,
+	}, {
+		.name = "imx8mq-sdma",
+		.driver_data = (unsigned long)&sdma_imx8mq,
 	}, {
 		/* sentinel */
 	}
@@ -593,6 +604,7 @@ static const struct of_device_id sdma_dt_ids[] = {
 	{ .compatible = "fsl,imx31-sdma", .data = &sdma_imx31, },
 	{ .compatible = "fsl,imx25-sdma", .data = &sdma_imx25, },
 	{ .compatible = "fsl,imx7d-sdma", .data = &sdma_imx7d, },
+	{ .compatible = "fsl,imx8mq-sdma", .data = &sdma_imx8mq, },
 	{ /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, sdma_dt_ids);
@@ -1852,7 +1864,8 @@ static int sdma_init(struct sdma_engine *sdma)
 	if (ret)
 		goto disable_clk_ipg;
 
-	if (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg))
+	if (sdma->drvdata->check_ratio &&
+	    (clk_get_rate(sdma->clk_ahb) == clk_get_rate(sdma->clk_ipg)))
 		sdma->clk_ratio = 1;
 
 	/* Be sure SDMA has not started yet */
...
@@ -1491,14 +1491,14 @@ MODULE_DEVICE_TABLE(platform, nbpf_ids);
 #ifdef CONFIG_PM
 static int nbpf_runtime_suspend(struct device *dev)
 {
-	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	struct nbpf_device *nbpf = dev_get_drvdata(dev);
 	clk_disable_unprepare(nbpf->clk);
 	return 0;
 }
 
 static int nbpf_runtime_resume(struct device *dev)
 {
-	struct nbpf_device *nbpf = platform_get_drvdata(to_platform_device(dev));
+	struct nbpf_device *nbpf = dev_get_drvdata(dev);
 	return clk_prepare_enable(nbpf->clk);
 }
 #endif
...
@@ -11,6 +11,7 @@
  * (at your option) any later version.
  */
 
+#include <linux/debugfs.h>
 #include <linux/kernel.h>
 #include <linux/io.h>
 #include <linux/init.h>
@@ -966,6 +967,7 @@ static void _stop(struct pl330_thread *thrd)
 {
 	void __iomem *regs = thrd->dmac->base;
 	u8 insn[6] = {0, 0, 0, 0, 0, 0};
+	u32 inten = readl(regs + INTEN);
 
 	if (_state(thrd) == PL330_STATE_FAULT_COMPLETING)
 		UNTIL(thrd, PL330_STATE_FAULTING | PL330_STATE_KILLING);
@@ -978,10 +980,13 @@ static void _stop(struct pl330_thread *thrd)
 
 	_emit_KILL(0, insn);
 
-	/* Stop generating interrupts for SEV */
-	writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN);
-
 	_execute_DBGINSN(thrd, insn, is_manager(thrd));
+
+	/* clear the event */
+	if (inten & (1 << thrd->ev))
+		writel(1 << thrd->ev, regs + INTCLR);
+
+	/* Stop generating interrupts for SEV */
+	writel(inten & ~(1 << thrd->ev), regs + INTEN);
 }
 
 /* Start doing req 'idx' of thread 'thrd' */
@@ -2896,6 +2901,55 @@ static irqreturn_t pl330_irq_handler(int irq, void *data)
 	BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
 
+#ifdef CONFIG_DEBUG_FS
+static int pl330_debugfs_show(struct seq_file *s, void *data)
+{
+	struct pl330_dmac *pl330 = s->private;
+	int chans, pchs, ch, pr;
+
+	chans = pl330->pcfg.num_chan;
+	pchs = pl330->num_peripherals;
+
+	seq_puts(s, "PL330 physical channels:\n");
+	seq_puts(s, "THREAD:\t\tCHANNEL:\n");
+	seq_puts(s, "--------\t-----\n");
+
+	for (ch = 0; ch < chans; ch++) {
+		struct pl330_thread *thrd = &pl330->channels[ch];
+		int found = -1;
+
+		for (pr = 0; pr < pchs; pr++) {
+			struct dma_pl330_chan *pch = &pl330->peripherals[pr];
+
+			if (!pch->thread || thrd->id != pch->thread->id)
+				continue;
+
+			found = pr;
+		}
+
+		seq_printf(s, "%d\t\t", thrd->id);
+		if (found == -1)
+			seq_puts(s, "--\n");
+		else
+			seq_printf(s, "%d\n", found);
+	}
+
+	return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(pl330_debugfs);
+
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+	debugfs_create_file(dev_name(pl330->ddma.dev),
+			    S_IFREG | 0444, NULL, pl330,
+			    &pl330_debugfs_fops);
+}
+#else
+static inline void init_pl330_debugfs(struct pl330_dmac *pl330)
+{
+}
+#endif
+
 /*
  * Runtime PM callbacks are provided by amba/bus.c driver.
  *
@@ -3082,6 +3136,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
 		dev_err(&adev->dev, "unable to set the seg size\n");
 
+	init_pl330_debugfs(pl330);
 	dev_info(&adev->dev,
 		"Loaded driver for PL330 DMAC-%x\n", adev->periphid);
 	dev_info(&adev->dev,
...
 // SPDX-License-Identifier: GPL-2.0
 /*
- * Renesas R-Car Gen2 DMA Controller Driver
+ * Renesas R-Car Gen2/Gen3 DMA Controller Driver
  *
- * Copyright (C) 2014 Renesas Electronics Inc.
+ * Copyright (C) 2014-2019 Renesas Electronics Inc.
  *
  * Author: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  */
...
@@ -1042,33 +1042,97 @@ static u32 stm32_dma_get_remaining_bytes(struct stm32_dma_chan *chan)
 	return ndtr << width;
 }
 
+/**
+ * stm32_dma_is_current_sg - check that expected sg_req is currently transferred
+ * @chan: dma channel
+ *
+ * This function called when IRQ are disable, checks that the hardware has not
+ * switched on the next transfer in double buffer mode. The test is done by
+ * comparing the next_sg memory address with the hardware related register
+ * (based on CT bit value).
+ *
+ * Returns true if expected current transfer is still running or double
+ * buffer mode is not activated.
+ */
+static bool stm32_dma_is_current_sg(struct stm32_dma_chan *chan)
+{
+	struct stm32_dma_device *dmadev = stm32_dma_get_dev(chan);
+	struct stm32_dma_sg_req *sg_req;
+	u32 dma_scr, dma_smar, id;
+
+	id = chan->id;
+	dma_scr = stm32_dma_read(dmadev, STM32_DMA_SCR(id));
+
+	if (!(dma_scr & STM32_DMA_SCR_DBM))
+		return true;
+
+	sg_req = &chan->desc->sg_req[chan->next_sg];
+
+	if (dma_scr & STM32_DMA_SCR_CT) {
+		dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM0AR(id));
+		return (dma_smar == sg_req->chan_reg.dma_sm0ar);
+	}
+
+	dma_smar = stm32_dma_read(dmadev, STM32_DMA_SM1AR(id));
+
+	return (dma_smar == sg_req->chan_reg.dma_sm1ar);
+}
+
 static size_t stm32_dma_desc_residue(struct stm32_dma_chan *chan,
 				     struct stm32_dma_desc *desc,
 				     u32 next_sg)
 {
 	u32 modulo, burst_size;
-	u32 residue = 0;
+	u32 residue;
+	u32 n_sg = next_sg;
+	struct stm32_dma_sg_req *sg_req = &chan->desc->sg_req[chan->next_sg];
 	int i;
 
 	/*
-	 * In cyclic mode, for the last period, residue = remaining bytes from
-	 * NDTR
+	 * Calculate the residue means compute the descriptors
+	 * information:
+	 * - the sg_req currently transferred
+	 * - the Hardware remaining position in this sg (NDTR bits field).
+	 *
+	 * A race condition may occur if DMA is running in cyclic or double
+	 * buffer mode, since the DMA register are automatically reloaded at end
+	 * of period transfer. The hardware may have switched to the next
+	 * transfer (CT bit updated) just before the position (SxNDTR reg) is
+	 * read.
+	 * In this case the SxNDTR reg could (or not) correspond to the new
+	 * transfer position, and not the expected one.
+	 * The strategy implemented in the stm32 driver is to:
+	 *  - read the SxNDTR register
+	 *  - crosscheck that hardware is still in current transfer.
+	 * In case of switch, we can assume that the DMA is at the beginning of
+	 * the next transfer. So we approximate the residue in consequence, by
+	 * pointing on the beginning of next transfer.
+	 *
+	 * This race condition doesn't apply for none cyclic mode, as double
+	 * buffer is not used. In such situation registers are updated by the
+	 * software.
 	 */
-	if (chan->desc->cyclic && next_sg == 0) {
-		residue = stm32_dma_get_remaining_bytes(chan);
-		goto end;
+
+	residue = stm32_dma_get_remaining_bytes(chan);
+
+	if (!stm32_dma_is_current_sg(chan)) {
+		n_sg++;
+		if (n_sg == chan->desc->num_sgs)
+			n_sg = 0;
+		residue = sg_req->len;
 	}
 
 	/*
-	 * For all other periods in cyclic mode, and in sg mode,
-	 * residue = remaining bytes from NDTR + remaining periods/sg to be
-	 * transferred
+	 * In cyclic mode, for the last period, residue = remaining bytes
+	 * from NDTR,
+	 * else for all other periods in cyclic mode, and in sg mode,
+	 * residue = remaining bytes from NDTR + remaining
+	 * periods/sg to be transferred
 	 */
-	for (i = next_sg; i < desc->num_sgs; i++)
-		residue += desc->sg_req[i].len;
-	residue += stm32_dma_get_remaining_bytes(chan);
+	if (!chan->desc->cyclic || n_sg != 0)
+		for (i = n_sg; i < desc->num_sgs; i++)
+			residue += desc->sg_req[i].len;
 
-end:
 	if (!chan->mem_burst)
 		return residue;
 
@@ -1302,13 +1366,16 @@ static int stm32_dma_probe(struct platform_device *pdev)
 	for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
 		chan = &dmadev->chan[i];
-		res = platform_get_resource(pdev, IORESOURCE_IRQ, i);
-		if (!res) {
-			ret = -EINVAL;
-			dev_err(&pdev->dev, "No irq resource for chan %d\n", i);
+		chan->irq = platform_get_irq(pdev, i);
+		ret = platform_get_irq(pdev, i);
+		if (ret < 0) {
+			if (ret != -EPROBE_DEFER)
+				dev_err(&pdev->dev,
+					"No irq resource for chan %d\n", i);
 			goto err_unregister;
 		}
-		chan->irq = res->start;
+		chan->irq = ret;
+
 		ret = devm_request_irq(&pdev->dev, chan->irq,
 				       stm32_dma_chan_irq, 0,
 				       dev_name(chan2dev(chan)), chan);
...
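The residue strategy spelled out in the comment above reduces to: read the remaining-bytes counter, then crosscheck that the hardware is still on the expected buffer, and fall back to the full length of the next period if it switched in between. A simplified sketch of that pattern, with simulated state rather than the real STM32 registers:

```c
#include <stdio.h>

struct hw {
	int current_buf;   /* which double-buffer half is active (CT bit) */
	unsigned int ndtr; /* remaining transfer units */
};

static unsigned int residue_of(const struct hw *hw, int expected_buf,
			       unsigned int period_len)
{
	/* Step 1: read the counter. */
	unsigned int remaining = hw->ndtr;

	/* Step 2: crosscheck that no reload happened between the reads. */
	if (hw->current_buf != expected_buf) {
		/*
		 * The DMA moved on: the counter may belong to either
		 * period, so approximate with the start of the next one.
		 */
		return period_len;
	}
	return remaining;
}

int main(void)
{
	struct hw hw = { .current_buf = 0, .ndtr = 128 };

	/* No switch: the counter is trusted. */
	printf("residue: %u\n", residue_of(&hw, 0, 4096)); /* 128 */

	/* Hardware switched halves just before the crosscheck. */
	hw.current_buf = 1;
	printf("residue: %u\n", residue_of(&hw, 0, 4096)); /* 4096 */
	return 0;
}
```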
@@ -703,7 +703,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 
 	INIT_LIST_HEAD(&ld_completed);
 
-	spin_lock_bh(&chan->lock);
+	spin_lock(&chan->lock);
 
 	/* Clean already completed and acked descriptors */
 	xgene_dma_clean_completed_descriptor(chan);
@@ -772,7 +772,7 @@ static void xgene_dma_cleanup_descriptors(struct xgene_dma_chan *chan)
 	 */
 	xgene_chan_xfer_ld_pending(chan);
 
-	spin_unlock_bh(&chan->lock);
+	spin_unlock(&chan->lock);
 
 	/* Run the callback for each descriptor, in order */
 	list_for_each_entry_safe(desc_sw, _desc_sw, &ld_completed, node) {
@@ -797,7 +797,7 @@ static int xgene_dma_alloc_chan_resources(struct dma_chan *dchan)
 		return -ENOMEM;
 	}
 
-	chan_dbg(chan, "Allocate descripto pool\n");
+	chan_dbg(chan, "Allocate descriptor pool\n");
 
 	return 1;
 }
...
@@ -28,6 +28,8 @@
 #include <linux/seq_file.h>
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include <linux/dma/idma64.h>
+
 #include "intel-lpss.h"
 
 #define LPSS_DEV_OFFSET		0x000
@@ -96,8 +98,6 @@ static const struct resource intel_lpss_idma64_resources[] = {
 	DEFINE_RES_IRQ(0),
 };
 
-#define LPSS_IDMA64_DRIVER_NAME		"idma64"
-
 /*
  * Cells needs to be ordered so that the iDMA is created first. This is
  * because we need to be sure the DMA is available when the host controller
...
@@ -1498,12 +1498,7 @@ static int pxa2xx_spi_get_port_id(struct acpi_device *adev)
 
 static bool pxa2xx_spi_idma_filter(struct dma_chan *chan, void *param)
 {
-	struct device *dev = param;
-
-	if (dev != chan->device->dev->parent)
-		return false;
-
-	return true;
+	return param == chan->device->dev;
 }
 
 #endif /* CONFIG_PCI */
...
@@ -365,7 +365,7 @@ static bool dw8250_fallback_dma_filter(struct dma_chan *chan, void *param)
 
 static bool dw8250_idma_filter(struct dma_chan *chan, void *param)
 {
-	return param == chan->device->dev->parent;
+	return param == chan->device->dev;
 }
 
 /*
@@ -434,7 +434,7 @@ static void dw8250_quirks(struct uart_port *p, struct dw8250_data *data)
 		data->uart_16550_compatible = true;
 	}
 
-	/* Platforms with iDMA */
+	/* Platforms with iDMA 64-bit */
 	if (platform_get_resource_byname(to_platform_device(p->dev),
 					 IORESOURCE_MEM, "lpss_priv")) {
 		data->dma.rx_param = p->dev->parent;
...
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the Intel integrated DMA 64-bit
+ *
+ * Copyright (C) 2019 Intel Corporation
+ */
+
+#ifndef __LINUX_DMA_IDMA64_H__
+#define __LINUX_DMA_IDMA64_H__
+
+/* Platform driver name */
+#define LPSS_IDMA64_DRIVER_NAME		"idma64"
+
+#endif /* __LINUX_DMA_IDMA64_H__ */
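Moving LPSS_IDMA64_DRIVER_NAME into this shared header matters because platform devices bind to drivers purely by name string: the MFD cell in intel-lpss.c creates the device, and the idma64 platform driver claims it. A simplified sketch of that name-matching contract (the structs below are stand-ins for struct mfd_cell and struct platform_driver, not the real definitions):

```c
#include <stdio.h>
#include <string.h>

#define LPSS_IDMA64_DRIVER_NAME "idma64" /* from <linux/dma/idma64.h> */

struct cell   { const char *name; };     /* device-creating side (MFD) */
struct driver { const char *name; };     /* driver side */

int main(void)
{
	struct cell   idma64_cell = { .name = LPSS_IDMA64_DRIVER_NAME };
	struct driver idma64_drv  = { .name = LPSS_IDMA64_DRIVER_NAME };

	/* The platform bus matches device to driver by exact name. */
	if (strcmp(idma64_cell.name, idma64_drv.name) == 0)
		printf("bound: %s\n", idma64_drv.name);
	return 0;
}
```

With the constant in one header, the two sides cannot silently drift apart the way duplicated string literals can.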