Commit 47ebe00b authored by Linus Torvalds

Merge tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma

Pull dmaengine updates from Vinod Koul:

 - Add support in the dmaengine core to do device-node checks for DT
   devices, and update a bunch of drivers to use that instead of
   open-coding the check

 - New driver/device support for new hardware, namely:
     - MediaTek UART APDMA
     - Freescale i.mx7ulp edma2
     - Synopsys eDMA IP core version 0
     - Allwinner H6 DMA

 - Updates to axi-dma and support for interleaved cyclic transfers

 - Greg's removal of debugfs return-value checks from drivers

 - Updates to stm32-dma, hsu, dw, pl330, tegra drivers

* tag 'dmaengine-5.3-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (68 commits)
  dmaengine: Revert "dmaengine: fsl-edma: add i.mx7ulp edma2 version support"
  dmaengine: at_xdmac: check for non-empty xfers_list before invoking callback
  Documentation: dmaengine: clean up description of dmatest usage
  dmaengine: tegra210-adma: remove PM_CLK dependency
  dmaengine: fsl-edma: add i.mx7ulp edma2 version support
  dt-bindings: dma: fsl-edma: add new i.mx7ulp-edma
  dmaengine: fsl-edma-common: version check for v2 instead
  dmaengine: fsl-edma-common: move dmamux register to another single function
  dmaengine: fsl-edma: add drvdata for fsl-edma
  dmaengine: Revert "dmaengine: fsl-edma: support little endian for edma driver"
  dmaengine: rcar-dmac: Reject zero-length slave DMA requests
  dmaengine: dw: Enable iDMA 32-bit on Intel Elkhart Lake
  dmaengine: dw-edma: fix semicolon.cocci warnings
  dmaengine: sh: usb-dmac: Use [] to denote a flexible array member
  dmaengine: dmatest: timeout value of -1 should specify infinite wait
  dmaengine: dw: Distinguish ->remove() between DW and iDMA 32-bit
  dmaengine: fsl-edma: support little endian for edma driver
  dmaengine: hsu: Revert "set HSU_CH_MTSR to memory width"
  dmaengine: pl330: add code to get reset property
  dt-bindings: pl330: document the optional resets property
  ...
parents fa121bb3 5c274ca4
......@@ -16,6 +16,9 @@ Optional properties:
- dma-channels: contains the total number of DMA channels supported by the DMAC
- dma-requests: contains the total number of DMA requests supported by the DMAC
- arm,pl330-broken-no-flushp: quirk to avoid executing DMAFLUSHP
- resets: contains an entry for each entry in reset-names.
See ../reset/reset.txt for details.
- reset-names: must contain at least "dma"; "dma-ocp" is optional.
Example:
......
......@@ -9,15 +9,16 @@ group, DMAMUX0 or DMAMUX1, but not both.
Required properties:
- compatible :
- "fsl,vf610-edma" for eDMA used similar to that on Vybrid vf610 SoC
- "fsl,imx7ulp-edma" for eDMA2 used similar to that on i.mx7ulp
- reg : Specifies base physical address(s) and size of the eDMA registers.
The 1st region is eDMA control register's address and size.
The 2nd and the 3rd regions are programmable channel multiplexing
control register's address and size.
- interrupts : A list of interrupt-specifiers, one for each entry in
interrupt-names.
- interrupt-names : Should contain:
"edma-tx" - the transmission interrupt
"edma-err" - the error interrupt
 interrupt-names on vf610-like SoCs. On i.mx7ulp there is one
 transmission interrupt per channel (16 channel interrupts in total)
 plus one error interrupt as the last entry, so no interrupt-names
 list is used on i.mx7ulp, which keeps the dts clean.
- #dma-cells : Must be <2>.
The 1st cell specifies the DMAMUX(0 for DMAMUX0 and 1 for DMAMUX1).
Specific request source can only be multiplexed by specific channels
......@@ -28,6 +29,7 @@ Required properties:
- clock-names : A list of channel group clock names. Should contain:
"dmamux0" - clock name of mux0 group
"dmamux1" - clock name of mux1 group
Note: there is no dmamux0 clock on i.mx7ulp; an additional 'dma' clock is used instead.
- clocks : A list of phandle and clock-specifier pairs, one for each entry in
clock-names.
......@@ -35,6 +37,10 @@ Optional properties:
- big-endian: If present registers and hardware scatter/gather descriptors
of the eDMA are implemented in big endian mode, otherwise in little
mode.
- interrupt-names : Should contain the entries below on vf610-like SoCs;
  not used on i.mx7ulp-like SoCs:
"edma-tx" - the transmission interrupt
"edma-err" - the error interrupt
Examples:
......@@ -52,8 +58,36 @@ edma0: dma-controller@40018000 {
clock-names = "dmamux0", "dmamux1";
clocks = <&clks VF610_CLK_DMAMUX0>,
<&clks VF610_CLK_DMAMUX1>;
};
}; /* vf610 */
edma1: dma-controller@40080000 {
#dma-cells = <2>;
compatible = "fsl,imx7ulp-edma";
reg = <0x40080000 0x2000>,
<0x40210000 0x1000>;
dma-channels = <32>;
interrupts = <GIC_SPI 0 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 1 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 2 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 3 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 4 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 5 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 6 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 7 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 9 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 10 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 11 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 12 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 13 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 14 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 15 IRQ_TYPE_LEVEL_HIGH>,
/* last is eDMA2-ERR interrupt */
<GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
clock-names = "dma", "dmamux0";
clocks = <&pcc2 IMX7ULP_CLK_DMA1>,
<&pcc2 IMX7ULP_CLK_DMA_MUX1>;
}; /* i.mx7ulp */
* DMA clients
DMA client drivers that use the DMA function must use the format described
......
......@@ -8,26 +8,47 @@ Required properties:
- reg: The base address of the APDMA register bank.
- interrupts: A single interrupt specifier.
One interrupt per dma-requests, or 8 if no dma-requests property is present
- dma-requests: The number of DMA channels
- clocks : Must contain an entry for each entry in clock-names.
See ../clocks/clock-bindings.txt for details.
- clock-names: The APDMA clock for register accesses
- mediatek,dma-33bits: Present if the DMA hardware requires 33-bit addressing support
Examples:
apdma: dma-controller@11000380 {
apdma: dma-controller@11000400 {
compatible = "mediatek,mt2712-uart-dma";
reg = <0 0x11000380 0 0x400>;
interrupts = <GIC_SPI 63 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 64 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 65 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 66 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 67 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 68 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 69 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 70 IRQ_TYPE_LEVEL_LOW>;
reg = <0 0x11000400 0 0x80>,
<0 0x11000480 0 0x80>,
<0 0x11000500 0 0x80>,
<0 0x11000580 0 0x80>,
<0 0x11000600 0 0x80>,
<0 0x11000680 0 0x80>,
<0 0x11000700 0 0x80>,
<0 0x11000780 0 0x80>,
<0 0x11000800 0 0x80>,
<0 0x11000880 0 0x80>,
<0 0x11000900 0 0x80>,
<0 0x11000980 0 0x80>;
interrupts = <GIC_SPI 103 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 104 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 106 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 107 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 108 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 109 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 110 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 111 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 112 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 113 IRQ_TYPE_LEVEL_LOW>,
<GIC_SPI 114 IRQ_TYPE_LEVEL_LOW>;
dma-requests = <12>;
clocks = <&pericfg CLK_PERI_AP_DMA>;
clock-names = "apdma";
mediatek,dma-33bits;
#dma-cells = <1>;
};
......@@ -28,12 +28,17 @@ Example:
};
------------------------------------------------------------------------------
For A64 DMA controller:
For A64 and H6 DMA controller:
Required properties:
- compatible: "allwinner,sun50i-a64-dma"
- compatible: Must be one of
"allwinner,sun50i-a64-dma"
"allwinner,sun50i-h6-dma"
- dma-channels: Number of DMA channels supported by the controller.
Refer to Documentation/devicetree/bindings/dma/dma.txt
- clocks: In addition to parent AHB clock, it should also contain mbus
clock (H6 only)
- clock-names: Should contain "bus" and "mbus" (H6 only)
- all properties above, i.e. reg, interrupts, clocks, resets and #dma-cells
Optional properties:
......
......@@ -44,7 +44,8 @@ Example of usage::
dmatest.timeout=2000 dmatest.iterations=1 dmatest.channel=dma0chan0 dmatest.run=1
Example of multi-channel test usage:
Example of multi-channel test usage (new in the 5.0 kernel)::
% modprobe dmatest
% echo 2000 > /sys/module/dmatest/parameters/timeout
% echo 1 > /sys/module/dmatest/parameters/iterations
......@@ -53,15 +54,18 @@ Example of multi-channel test usage:
% echo dma0chan2 > /sys/module/dmatest/parameters/channel
% echo 1 > /sys/module/dmatest/parameters/run
Note: the channel parameter should always be the last parameter set prior to
running the test (setting run=1), this is because upon setting the channel
parameter, that specific channel is requested using the dmaengine and a thread
is created with the existing parameters. This thread is set as pending
and will be executed once run is set to 1. Any parameters set after the thread
is created are not applied.
.. note::
For all tests, starting in the 5.0 kernel, either single- or multi-channel,
the channel parameter(s) must be set after all other parameters. It is at
that time that the existing parameter values are acquired for use by the
thread(s). All other parameters are shared. Therefore, if changes are made
to any of the other parameters, and an additional channel specified, the
(shared) parameters used for all threads will use the new values.
After the channels are specified, each thread is set as pending. All threads
begin execution when the run parameter is set to 1.
.. hint::
available channel list could be extracted by running the following command::
A list of available channels can be found by running the following command::
% ls -1 /sys/class/dma/
......@@ -204,6 +208,7 @@ Releasing Channels
Channels can be freed by setting run to 0.
Example::
% echo dma0chan1 > /sys/module/dmatest/parameters/channel
dmatest: Added 1 threads using dma0chan1
% cat /sys/class/dma/dma0chan1/in_use
......
......@@ -4683,6 +4683,13 @@ L: linux-mtd@lists.infradead.org
S: Supported
F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER
M: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
L: dmaengine@vger.kernel.org
S: Maintained
F: drivers/dma/dw-edma/
F: include/linux/dma/edma.h
DESIGNWARE USB2 DRD IP DRIVER
M: Minas Harutyunyan <hminas@synopsys.com>
L: linux-usb@vger.kernel.org
......
......@@ -103,6 +103,7 @@ config AXI_DMAC
depends on MICROBLAZE || NIOS2 || ARCH_ZYNQ || ARCH_ZYNQMP || ARCH_SOCFPGA || COMPILE_TEST
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
select REGMAP_MMIO
help
Enable support for the Analog Devices AXI-DMAC peripheral. This DMA
controller is often used in Analog Device's reference designs for FPGA
......@@ -584,7 +585,7 @@ config TEGRA20_APB_DMA
config TEGRA210_ADMA
tristate "NVIDIA Tegra210 ADMA support"
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST) && PM_CLK
depends on (ARCH_TEGRA_210_SOC || COMPILE_TEST)
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
......@@ -666,6 +667,8 @@ source "drivers/dma/qcom/Kconfig"
source "drivers/dma/dw/Kconfig"
source "drivers/dma/dw-edma/Kconfig"
source "drivers/dma/hsu/Kconfig"
source "drivers/dma/sh/Kconfig"
......
......@@ -29,6 +29,7 @@ obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
obj-$(CONFIG_FSL_DMA) += fsldma.o
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o fsl-edma-common.o
......
......@@ -2508,9 +2508,8 @@ DEFINE_SHOW_ATTRIBUTE(pl08x_debugfs);
static void init_pl08x_debugfs(struct pl08x_driver_data *pl08x)
{
/* Expose a simple debugfs interface to view all clocks */
(void) debugfs_create_file(dev_name(&pl08x->adev->dev),
S_IFREG | S_IRUGO, NULL, pl08x,
&pl08x_debugfs_fops);
debugfs_create_file(dev_name(&pl08x->adev->dev), S_IFREG | S_IRUGO,
NULL, pl08x, &pl08x_debugfs_fops);
}
#else
......
......@@ -1568,11 +1568,14 @@ static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
struct at_xdmac_desc *desc;
struct dma_async_tx_descriptor *txd;
desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
if (!list_empty(&atchan->xfers_list)) {
desc = list_first_entry(&atchan->xfers_list,
struct at_xdmac_desc, xfer_node);
txd = &desc->tx_dma_desc;
if (txd->flags & DMA_PREP_INTERRUPT)
dmaengine_desc_get_callback_invoke(txd, NULL);
if (txd->flags & DMA_PREP_INTERRUPT)
dmaengine_desc_get_callback_invoke(txd, NULL);
}
}
static void at_xdmac_handle_error(struct at_xdmac_chan *atchan)
......
......@@ -164,7 +164,6 @@ struct sba_device {
struct list_head reqs_free_list;
/* DebugFS directory entries */
struct dentry *root;
struct dentry *stats;
};
/* ====== Command helper routines ===== */
......@@ -1716,17 +1715,11 @@ static int sba_probe(struct platform_device *pdev)
/* Create debugfs root entry */
sba->root = debugfs_create_dir(dev_name(sba->dev), NULL);
if (IS_ERR_OR_NULL(sba->root)) {
dev_err(sba->dev, "failed to create debugfs root entry\n");
sba->root = NULL;
goto skip_debugfs;
}
/* Create debugfs stats entry */
sba->stats = debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
sba_debugfs_stats_show);
if (IS_ERR_OR_NULL(sba->stats))
dev_err(sba->dev, "failed to create debugfs stats file\n");
debugfs_create_devm_seqfile(sba->dev, "stats", sba->root,
sba_debugfs_stats_show);
skip_debugfs:
/* Register DMA device with Linux async framework */
......
......@@ -1378,10 +1378,8 @@ static int __init init_coh901318_debugfs(void)
dma_dentry = debugfs_create_dir("dma", NULL);
(void) debugfs_create_file("status",
S_IFREG | S_IRUGO,
dma_dentry, NULL,
&coh901318_debugfs_status_operations);
debugfs_create_file("status", S_IFREG | S_IRUGO, dma_dentry, NULL,
&coh901318_debugfs_status_operations);
return 0;
}
......
......@@ -156,7 +156,6 @@ struct jz4780_dma_dev {
};
struct jz4780_dma_filter_data {
struct device_node *of_node;
uint32_t transfer_type;
int channel;
};
......@@ -772,8 +771,6 @@ static bool jz4780_dma_filter_fn(struct dma_chan *chan, void *param)
struct jz4780_dma_dev *jzdma = jz4780_dma_chan_parent(jzchan);
struct jz4780_dma_filter_data *data = param;
if (jzdma->dma_device.dev->of_node != data->of_node)
return false;
if (data->channel > -1) {
if (data->channel != jzchan->id)
......@@ -797,7 +794,6 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 2)
return NULL;
data.of_node = ofdma->of_node;
data.transfer_type = dma_spec->args[0];
data.channel = dma_spec->args[1];
......@@ -822,7 +818,8 @@ static struct dma_chan *jz4780_of_dma_xlate(struct of_phandle_args *dma_spec,
return dma_get_slave_channel(
&jzdma->chan[data.channel].vchan.chan);
} else {
return dma_request_channel(mask, jz4780_dma_filter_fn, &data);
return __dma_request_channel(&mask, jz4780_dma_filter_fn, &data,
ofdma->of_node);
}
}
......
......@@ -61,7 +61,7 @@ static long dmaengine_ref_count;
/* --- sysfs implementation --- */
/**
* dev_to_dma_chan - convert a device pointer to the its sysfs container object
* dev_to_dma_chan - convert a device pointer to its sysfs container object
* @dev - device node
*
* Must be called under dma_list_mutex
......@@ -629,11 +629,13 @@ EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
* @mask: capabilities that the channel must satisfy
* @fn: optional callback to disposition available channels
* @fn_param: opaque parameter to pass to dma_filter_fn
* @np: device node to look for DMA channels
*
* Returns pointer to appropriate DMA channel on success or NULL.
*/
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
dma_filter_fn fn, void *fn_param)
dma_filter_fn fn, void *fn_param,
struct device_node *np)
{
struct dma_device *device, *_d;
struct dma_chan *chan = NULL;
......@@ -641,6 +643,10 @@ struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
/* Find a channel */
mutex_lock(&dma_list_mutex);
list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
/* Finds a DMA controller with matching device node */
if (np && device->dev->of_node && np != device->dev->of_node)
continue;
chan = find_candidate(device, mask, fn, fn_param);
if (!IS_ERR(chan))
break;
......@@ -699,7 +705,7 @@ struct dma_chan *dma_request_chan(struct device *dev, const char *name)
chan = acpi_dma_request_slave_chan_by_name(dev, name);
if (chan) {
/* Valid channel found or requester need to be deferred */
/* Valid channel found or requester needs to be deferred */
if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
return chan;
}
......@@ -757,7 +763,7 @@ struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
if (!mask)
return ERR_PTR(-ENODEV);
chan = __dma_request_channel(mask, NULL, NULL);
chan = __dma_request_channel(mask, NULL, NULL, NULL);
if (!chan) {
mutex_lock(&dma_list_mutex);
if (list_empty(&dma_device_list))
......
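The new @np argument is what lets the core do the device-node filtering described in the pull summary; converted drivers (see the jz4780 hunk above) drop their open-coded of_node comparison and pass the node through instead. A minimal driver-side sketch of the call pattern, using hypothetical foo_* names and assuming only the four-argument signature shown above:

/* Hypothetical example, not part of this series */
static struct dma_chan *foo_request_rx_chan(struct foo_dev *fdev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* The core now skips controllers whose of_node does not match */
	return __dma_request_channel(&mask, foo_filter_fn, &fdev->filter_data,
				     fdev->dev->of_node);
}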
......@@ -62,7 +62,7 @@ MODULE_PARM_DESC(pq_sources,
static int timeout = 3000;
module_param(timeout, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(timeout, "Transfer Timeout in msec (default: 3000), "
"Pass -1 for infinite timeout");
"Pass 0xFFFFFFFF (4294967295) for maximum timeout");
static bool noverify;
module_param(noverify, bool, S_IRUGO | S_IWUSR);
......@@ -94,7 +94,7 @@ MODULE_PARM_DESC(transfer_size, "Optional custom transfer size in bytes (default
* @iterations: iterations before stopping test
* @xor_sources: number of xor source buffers
* @pq_sources: number of p+q source buffers
* @timeout: transfer timeout in msec, -1 for infinite timeout
* @timeout: transfer timeout in msec, 0 - 0xFFFFFFFF (4294967295)
*/
struct dmatest_params {
unsigned int buf_size;
......@@ -105,7 +105,7 @@ struct dmatest_params {
unsigned int iterations;
unsigned int xor_sources;
unsigned int pq_sources;
int timeout;
unsigned int timeout;
bool noverify;
bool norandom;
int alignment;
......
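The type change also squares the declaration with the module_param(timeout, uint, ...) registration a few lines above it. As a C-level illustration only (not code from the patch), the old "-1 for infinite" idiom and the newly documented maximum are the same bit pattern:

/* Illustration: -1 converted to unsigned int is UINT_MAX */
unsigned int max_timeout = (unsigned int)-1;	/* 4294967295 == 0xFFFFFFFF */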
# SPDX-License-Identifier: GPL-2.0
config DW_EDMA
tristate "Synopsys DesignWare eDMA controller driver"
depends on PCI && PCI_MSI
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support the Synopsys DesignWare eDMA controller, normally
implemented on endpoint SoCs.
config DW_EDMA_PCIE
tristate "Synopsys DesignWare eDMA PCIe driver"
depends on PCI && PCI_MSI
select DW_EDMA
help
Provides glue logic between the Synopsys DesignWare
eDMA controller and a PCIe endpoint device. This also serves
as a reference design for anyone who wants to use this IP.
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DW_EDMA) += dw-edma.o
dw-edma-$(CONFIG_DEBUG_FS) := dw-edma-v0-debugfs.o
dw-edma-objs := dw-edma-core.o \
dw-edma-v0-core.o $(dw-edma-y)
obj-$(CONFIG_DW_EDMA_PCIE) += dw-edma-pcie.o
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA core driver
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#ifndef _DW_EDMA_CORE_H
#define _DW_EDMA_CORE_H
#include <linux/msi.h>
#include <linux/dma/edma.h>
#include "../virt-dma.h"
#define EDMA_LL_SZ 24
enum dw_edma_dir {
EDMA_DIR_WRITE = 0,
EDMA_DIR_READ
};
enum dw_edma_mode {
EDMA_MODE_LEGACY = 0,
EDMA_MODE_UNROLL
};
enum dw_edma_request {
EDMA_REQ_NONE = 0,
EDMA_REQ_STOP,
EDMA_REQ_PAUSE
};
enum dw_edma_status {
EDMA_ST_IDLE = 0,
EDMA_ST_PAUSE,
EDMA_ST_BUSY
};
struct dw_edma_chan;
struct dw_edma_chunk;
struct dw_edma_burst {
struct list_head list;
u64 sar;
u64 dar;
u32 sz;
};
struct dw_edma_region {
phys_addr_t paddr;
dma_addr_t vaddr;
size_t sz;
};
struct dw_edma_chunk {
struct list_head list;
struct dw_edma_chan *chan;
struct dw_edma_burst *burst;
u32 bursts_alloc;
u8 cb;
struct dw_edma_region ll_region; /* Linked list */
};
struct dw_edma_desc {
struct virt_dma_desc vd;
struct dw_edma_chan *chan;
struct dw_edma_chunk *chunk;
u32 chunks_alloc;
u32 alloc_sz;
u32 xfer_sz;
};
struct dw_edma_chan {
struct virt_dma_chan vc;
struct dw_edma_chip *chip;
int id;
enum dw_edma_dir dir;
off_t ll_off;
u32 ll_max;
off_t dt_off;
struct msi_msg msi;
enum dw_edma_request request;
enum dw_edma_status status;
u8 configured;
struct dma_slave_config config;
};
struct dw_edma_irq {
struct msi_msg msi;
u32 wr_mask;
u32 rd_mask;
struct dw_edma *dw;
};
struct dw_edma {
char name[20];
struct dma_device wr_edma;
u16 wr_ch_cnt;
struct dma_device rd_edma;
u16 rd_ch_cnt;
struct dw_edma_region rg_region; /* Registers */
struct dw_edma_region ll_region; /* Linked list */
struct dw_edma_region dt_region; /* Data */
struct dw_edma_irq *irq;
int nr_irqs;
u32 version;
enum dw_edma_mode mode;
struct dw_edma_chan *chan;
const struct dw_edma_core_ops *ops;
raw_spinlock_t lock; /* Only for legacy */
};
struct dw_edma_sg {
struct scatterlist *sgl;
unsigned int len;
};
struct dw_edma_cyclic {
dma_addr_t paddr;
size_t len;
size_t cnt;
};
struct dw_edma_transfer {
struct dma_chan *dchan;
union dw_edma_xfer {
struct dw_edma_sg sg;
struct dw_edma_cyclic cyclic;
} xfer;
enum dma_transfer_direction direction;
unsigned long flags;
bool cyclic;
};
static inline
struct dw_edma_chan *vc2dw_edma_chan(struct virt_dma_chan *vc)
{
return container_of(vc, struct dw_edma_chan, vc);
}
static inline
struct dw_edma_chan *dchan2dw_edma_chan(struct dma_chan *dchan)
{
return vc2dw_edma_chan(to_virt_chan(dchan));
}
#endif /* _DW_EDMA_CORE_H */
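The two container_of() helpers at the end of the header convert the generic dmaengine and virt-dma handles back to the driver-private channel. A hypothetical usage sketch (foo_device_config is not from this patch; it assumes only the dw_edma_chan fields declared above):

/* Hypothetical sketch, illustration only */
static int foo_device_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct dw_edma_chan *chan = dchan2dw_edma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));
	chan->configured = true;

	return 0;
}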
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA PCIe driver
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/dma/edma.h>
#include <linux/pci-epf.h>
#include <linux/msi.h>
#include "dw-edma-core.h"
struct dw_edma_pcie_data {
/* eDMA registers location */
enum pci_barno rg_bar;
off_t rg_off;
size_t rg_sz;
/* eDMA memory linked list location */
enum pci_barno ll_bar;
off_t ll_off;
size_t ll_sz;
/* eDMA memory data location */
enum pci_barno dt_bar;
off_t dt_off;
size_t dt_sz;
/* Other */
u32 version;
enum dw_edma_mode mode;
u8 irqs;
};
static const struct dw_edma_pcie_data snps_edda_data = {
/* eDMA registers location */
.rg_bar = BAR_0,
.rg_off = 0x00001000, /* 4 Kbytes */
.rg_sz = 0x00002000, /* 8 Kbytes */
/* eDMA memory linked list location */
.ll_bar = BAR_2,
.ll_off = 0x00000000, /* 0 Kbytes */
.ll_sz = 0x00800000, /* 8 Mbytes */
/* eDMA memory data location */
.dt_bar = BAR_2,
.dt_off = 0x00800000, /* 8 Mbytes */
.dt_sz = 0x03800000, /* 56 Mbytes */
/* Other */
.version = 0,
.mode = EDMA_MODE_UNROLL,
.irqs = 1,
};
static int dw_edma_pcie_probe(struct pci_dev *pdev,
const struct pci_device_id *pid)
{
const struct dw_edma_pcie_data *pdata = (void *)pid->driver_data;
struct device *dev = &pdev->dev;
struct dw_edma_chip *chip;
int err, nr_irqs;
struct dw_edma *dw;
/* Enable PCI device */
err = pcim_enable_device(pdev);
if (err) {
pci_err(pdev, "enabling device failed\n");
return err;
}
/* Mapping PCI BAR regions */
err = pcim_iomap_regions(pdev, BIT(pdata->rg_bar) |
BIT(pdata->ll_bar) |
BIT(pdata->dt_bar),
pci_name(pdev));
if (err) {
pci_err(pdev, "eDMA BAR I/O remapping failed\n");
return err;
}
pci_set_master(pdev);
/* DMA configuration */
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
if (!err) {
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
if (err) {
pci_err(pdev, "consistent DMA mask 64 set failed\n");
return err;
}
} else {
pci_err(pdev, "DMA mask 64 set failed\n");
err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
pci_err(pdev, "DMA mask 32 set failed\n");
return err;
}
err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
if (err) {
pci_err(pdev, "consistent DMA mask 32 set failed\n");
return err;
}
}
/* Data structure allocation */
chip = devm_kzalloc(dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
dw = devm_kzalloc(dev, sizeof(*dw), GFP_KERNEL);
if (!dw)
return -ENOMEM;
/* IRQs allocation */
nr_irqs = pci_alloc_irq_vectors(pdev, 1, pdata->irqs,
PCI_IRQ_MSI | PCI_IRQ_MSIX);
if (nr_irqs < 1) {
pci_err(pdev, "fail to alloc IRQ vector (number of IRQs=%u)\n",
nr_irqs);
return -EPERM;
}
/* Data structure initialization */
chip->dw = dw;
chip->dev = dev;
chip->id = pdev->devfn;
chip->irq = pdev->irq;
dw->rg_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->rg_bar];
dw->rg_region.vaddr += pdata->rg_off;
dw->rg_region.paddr = pdev->resource[pdata->rg_bar].start;
dw->rg_region.paddr += pdata->rg_off;
dw->rg_region.sz = pdata->rg_sz;
dw->ll_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->ll_bar];
dw->ll_region.vaddr += pdata->ll_off;
dw->ll_region.paddr = pdev->resource[pdata->ll_bar].start;
dw->ll_region.paddr += pdata->ll_off;
dw->ll_region.sz = pdata->ll_sz;
dw->dt_region.vaddr = (dma_addr_t)pcim_iomap_table(pdev)[pdata->dt_bar];
dw->dt_region.vaddr += pdata->dt_off;
dw->dt_region.paddr = pdev->resource[pdata->dt_bar].start;
dw->dt_region.paddr += pdata->dt_off;
dw->dt_region.sz = pdata->dt_sz;
dw->version = pdata->version;
dw->mode = pdata->mode;
dw->nr_irqs = nr_irqs;
/* Debug info */
pci_dbg(pdev, "Version:\t%u\n", dw->version);
pci_dbg(pdev, "Mode:\t%s\n",
dw->mode == EDMA_MODE_LEGACY ? "Legacy" : "Unroll");
pci_dbg(pdev, "Registers:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
pdata->rg_bar, pdata->rg_off, pdata->rg_sz,
&dw->rg_region.vaddr, &dw->rg_region.paddr);
pci_dbg(pdev, "L. List:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
pdata->ll_bar, pdata->ll_off, pdata->ll_sz,
&dw->ll_region.vaddr, &dw->ll_region.paddr);
pci_dbg(pdev, "Data:\tBAR=%u, off=0x%.8lx, sz=0x%zx bytes, addr(v=%pa, p=%pa)\n",
pdata->dt_bar, pdata->dt_off, pdata->dt_sz,
&dw->dt_region.vaddr, &dw->dt_region.paddr);
pci_dbg(pdev, "Nr. IRQs:\t%u\n", dw->nr_irqs);
/* Validating if PCI interrupts were enabled */
if (!pci_dev_msi_enabled(pdev)) {
pci_err(pdev, "enable interrupt failed\n");
return -EPERM;
}
dw->irq = devm_kcalloc(dev, nr_irqs, sizeof(*dw->irq), GFP_KERNEL);
if (!dw->irq)
return -ENOMEM;
/* Starting eDMA driver */
err = dw_edma_probe(chip);
if (err) {
pci_err(pdev, "eDMA probe failed\n");
return err;
}
/* Saving data structure reference */
pci_set_drvdata(pdev, chip);
return 0;
}
static void dw_edma_pcie_remove(struct pci_dev *pdev)
{
struct dw_edma_chip *chip = pci_get_drvdata(pdev);
int err;
/* Stopping eDMA driver */
err = dw_edma_remove(chip);
if (err)
pci_warn(pdev, "can't remove device properly: %d\n", err);
/* Freeing IRQs */
pci_free_irq_vectors(pdev);
}
static const struct pci_device_id dw_edma_pcie_id_table[] = {
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, &snps_edda_data) },
{ }
};
MODULE_DEVICE_TABLE(pci, dw_edma_pcie_id_table);
static struct pci_driver dw_edma_pcie_driver = {
.name = "dw-edma-pcie",
.id_table = dw_edma_pcie_id_table,
.probe = dw_edma_pcie_probe,
.remove = dw_edma_pcie_remove,
};
module_pci_driver(dw_edma_pcie_driver);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare eDMA PCIe driver");
MODULE_AUTHOR("Gustavo Pimentel <gustavo.pimentel@synopsys.com>");
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA v0 core
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#include <linux/bitfield.h>
#include "dw-edma-core.h"
#include "dw-edma-v0-core.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-v0-debugfs.h"
enum dw_edma_control {
DW_EDMA_V0_CB = BIT(0),
DW_EDMA_V0_TCB = BIT(1),
DW_EDMA_V0_LLP = BIT(2),
DW_EDMA_V0_LIE = BIT(3),
DW_EDMA_V0_RIE = BIT(4),
DW_EDMA_V0_CCS = BIT(8),
DW_EDMA_V0_LLE = BIT(9),
};
static inline struct dw_edma_v0_regs __iomem *__dw_regs(struct dw_edma *dw)
{
return (struct dw_edma_v0_regs __iomem *)dw->rg_region.vaddr;
}
#define SET(dw, name, value) \
writel(value, &(__dw_regs(dw)->name))
#define GET(dw, name) \
readl(&(__dw_regs(dw)->name))
#define SET_RW(dw, dir, name, value) \
do { \
if ((dir) == EDMA_DIR_WRITE) \
SET(dw, wr_##name, value); \
else \
SET(dw, rd_##name, value); \
} while (0)
#define GET_RW(dw, dir, name) \
((dir) == EDMA_DIR_WRITE \
? GET(dw, wr_##name) \
: GET(dw, rd_##name))
#define SET_BOTH(dw, name, value) \
do { \
SET(dw, wr_##name, value); \
SET(dw, rd_##name, value); \
} while (0)
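/*
 * Illustration (not part of the original file): the _RW variants pick the
 * wr_/rd_ register at run time, so
 *
 *	SET_RW(dw, EDMA_DIR_WRITE, doorbell, 0);
 *
 * expands to SET(dw, wr_doorbell, 0), i.e. writel(0, &__dw_regs(dw)->wr_doorbell),
 * while SET_BOTH(dw, engine_en, 0) writes both wr_engine_en and rd_engine_en.
 */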
static inline struct dw_edma_v0_ch_regs __iomem *
__dw_ch_regs(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch)
{
if (dw->mode == EDMA_MODE_LEGACY)
return &(__dw_regs(dw)->type.legacy.ch);
if (dir == EDMA_DIR_WRITE)
return &__dw_regs(dw)->type.unroll.ch[ch].wr;
return &__dw_regs(dw)->type.unroll.ch[ch].rd;
}
static inline void writel_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
u32 value, void __iomem *addr)
{
if (dw->mode == EDMA_MODE_LEGACY) {
u32 viewport_sel;
unsigned long flags;
raw_spin_lock_irqsave(&dw->lock, flags);
viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
if (dir == EDMA_DIR_READ)
viewport_sel |= BIT(31);
writel(viewport_sel,
&(__dw_regs(dw)->type.legacy.viewport_sel));
writel(value, addr);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
writel(value, addr);
}
}
static inline u32 readl_ch(struct dw_edma *dw, enum dw_edma_dir dir, u16 ch,
const void __iomem *addr)
{
u32 value;
if (dw->mode == EDMA_MODE_LEGACY) {
u32 viewport_sel;
unsigned long flags;
raw_spin_lock_irqsave(&dw->lock, flags);
viewport_sel = FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
if (dir == EDMA_DIR_READ)
viewport_sel |= BIT(31);
writel(viewport_sel,
&(__dw_regs(dw)->type.legacy.viewport_sel));
value = readl(addr);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
value = readl(addr);
}
return value;
}
#define SET_CH(dw, dir, ch, name, value) \
writel_ch(dw, dir, ch, value, &(__dw_ch_regs(dw, dir, ch)->name))
#define GET_CH(dw, dir, ch, name) \
readl_ch(dw, dir, ch, &(__dw_ch_regs(dw, dir, ch)->name))
#define SET_LL(ll, value) \
writel(value, ll)
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *dw)
{
SET_BOTH(dw, int_mask, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
SET_BOTH(dw, int_clear, EDMA_V0_DONE_INT_MASK | EDMA_V0_ABORT_INT_MASK);
SET_BOTH(dw, engine_en, 0);
}
u16 dw_edma_v0_core_ch_count(struct dw_edma *dw, enum dw_edma_dir dir)
{
u32 num_ch;
if (dir == EDMA_DIR_WRITE)
num_ch = FIELD_GET(EDMA_V0_WRITE_CH_COUNT_MASK, GET(dw, ctrl));
else
num_ch = FIELD_GET(EDMA_V0_READ_CH_COUNT_MASK, GET(dw, ctrl));
if (num_ch > EDMA_V0_MAX_NR_CH)
num_ch = EDMA_V0_MAX_NR_CH;
return (u16)num_ch;
}
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
tmp = FIELD_GET(EDMA_V0_CH_STATUS_MASK,
GET_CH(dw, chan->dir, chan->id, ch_control1));
if (tmp == 1)
return DMA_IN_PROGRESS;
else if (tmp == 3)
return DMA_COMPLETE;
else
return DMA_ERROR;
}
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
SET_RW(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id)));
}
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
SET_RW(dw, chan->dir, int_clear,
FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id)));
}
u32 dw_edma_v0_core_status_done_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_DONE_INT_MASK, GET_RW(dw, dir, int_status));
}
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *dw, enum dw_edma_dir dir)
{
return FIELD_GET(EDMA_V0_ABORT_INT_MASK, GET_RW(dw, dir, int_status));
}
static void dw_edma_v0_core_write_chunk(struct dw_edma_chunk *chunk)
{
struct dw_edma_burst *child;
struct dw_edma_v0_lli *lli;
struct dw_edma_v0_llp *llp;
u32 control = 0, i = 0;
u64 sar, dar, addr;
int j;
lli = (struct dw_edma_v0_lli *)chunk->ll_region.vaddr;
if (chunk->cb)
control = DW_EDMA_V0_CB;
j = chunk->bursts_alloc;
list_for_each_entry(child, &chunk->burst->list, list) {
j--;
if (!j)
control |= (DW_EDMA_V0_LIE | DW_EDMA_V0_RIE);
/* Channel control */
SET_LL(&lli[i].control, control);
/* Transfer size */
SET_LL(&lli[i].transfer_size, child->sz);
/* SAR - low, high */
sar = cpu_to_le64(child->sar);
SET_LL(&lli[i].sar_low, lower_32_bits(sar));
SET_LL(&lli[i].sar_high, upper_32_bits(sar));
/* DAR - low, high */
dar = cpu_to_le64(child->dar);
SET_LL(&lli[i].dar_low, lower_32_bits(dar));
SET_LL(&lli[i].dar_high, upper_32_bits(dar));
i++;
}
llp = (struct dw_edma_v0_llp *)&lli[i];
control = DW_EDMA_V0_LLP | DW_EDMA_V0_TCB;
if (!chunk->cb)
control |= DW_EDMA_V0_CB;
/* Channel control */
SET_LL(&llp->control, control);
/* Linked list - low, high */
addr = cpu_to_le64(chunk->ll_region.paddr);
SET_LL(&llp->llp_low, lower_32_bits(addr));
SET_LL(&llp->llp_high, upper_32_bits(addr));
}
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first)
{
struct dw_edma_chan *chan = chunk->chan;
struct dw_edma *dw = chan->chip->dw;
u32 tmp;
u64 llp;
dw_edma_v0_core_write_chunk(chunk);
if (first) {
/* Enable engine */
SET_RW(dw, chan->dir, engine_en, BIT(0));
/* Interrupt unmask - done, abort */
tmp = GET_RW(dw, chan->dir, int_mask);
tmp &= ~FIELD_PREP(EDMA_V0_DONE_INT_MASK, BIT(chan->id));
tmp &= ~FIELD_PREP(EDMA_V0_ABORT_INT_MASK, BIT(chan->id));
SET_RW(dw, chan->dir, int_mask, tmp);
/* Linked list error */
tmp = GET_RW(dw, chan->dir, linked_list_err_en);
tmp |= FIELD_PREP(EDMA_V0_LINKED_LIST_ERR_MASK, BIT(chan->id));
SET_RW(dw, chan->dir, linked_list_err_en, tmp);
/* Channel control */
SET_CH(dw, chan->dir, chan->id, ch_control1,
(DW_EDMA_V0_CCS | DW_EDMA_V0_LLE));
/* Linked list - low, high */
llp = cpu_to_le64(chunk->ll_region.paddr);
SET_CH(dw, chan->dir, chan->id, llp_low, lower_32_bits(llp));
SET_CH(dw, chan->dir, chan->id, llp_high, upper_32_bits(llp));
}
/* Doorbell */
SET_RW(dw, chan->dir, doorbell,
FIELD_PREP(EDMA_V0_DOORBELL_CH_MASK, chan->id));
}
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan)
{
struct dw_edma *dw = chan->chip->dw;
u32 tmp = 0;
/* MSI done addr - low, high */
SET_RW(dw, chan->dir, done_imwr_low, chan->msi.address_lo);
SET_RW(dw, chan->dir, done_imwr_high, chan->msi.address_hi);
/* MSI abort addr - low, high */
SET_RW(dw, chan->dir, abort_imwr_low, chan->msi.address_lo);
SET_RW(dw, chan->dir, abort_imwr_high, chan->msi.address_hi);
/* MSI data - low, high */
switch (chan->id) {
case 0:
case 1:
tmp = GET_RW(dw, chan->dir, ch01_imwr_data);
break;
case 2:
case 3:
tmp = GET_RW(dw, chan->dir, ch23_imwr_data);
break;
case 4:
case 5:
tmp = GET_RW(dw, chan->dir, ch45_imwr_data);
break;
case 6:
case 7:
tmp = GET_RW(dw, chan->dir, ch67_imwr_data);
break;
}
if (chan->id & BIT(0)) {
/* Channel odd {1, 3, 5, 7} */
tmp &= EDMA_V0_CH_EVEN_MSI_DATA_MASK;
tmp |= FIELD_PREP(EDMA_V0_CH_ODD_MSI_DATA_MASK,
chan->msi.data);
} else {
/* Channel even {0, 2, 4, 6} */
tmp &= EDMA_V0_CH_ODD_MSI_DATA_MASK;
tmp |= FIELD_PREP(EDMA_V0_CH_EVEN_MSI_DATA_MASK,
chan->msi.data);
}
switch (chan->id) {
case 0:
case 1:
SET_RW(dw, chan->dir, ch01_imwr_data, tmp);
break;
case 2:
case 3:
SET_RW(dw, chan->dir, ch23_imwr_data, tmp);
break;
case 4:
case 5:
SET_RW(dw, chan->dir, ch45_imwr_data, tmp);
break;
case 6:
case 7:
SET_RW(dw, chan->dir, ch67_imwr_data, tmp);
break;
}
return 0;
}
/* eDMA debugfs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip)
{
dw_edma_v0_debugfs_on(chip);
}
void dw_edma_v0_core_debugfs_off(void)
{
dw_edma_v0_debugfs_off();
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA v0 core
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#ifndef _DW_EDMA_V0_CORE_H
#define _DW_EDMA_V0_CORE_H
#include <linux/dma/edma.h>
/* eDMA management callbacks */
void dw_edma_v0_core_off(struct dw_edma *chan);
u16 dw_edma_v0_core_ch_count(struct dw_edma *chan, enum dw_edma_dir dir);
enum dma_status dw_edma_v0_core_ch_status(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_done_int(struct dw_edma_chan *chan);
void dw_edma_v0_core_clear_abort_int(struct dw_edma_chan *chan);
u32 dw_edma_v0_core_status_done_int(struct dw_edma *chan, enum dw_edma_dir dir);
u32 dw_edma_v0_core_status_abort_int(struct dw_edma *chan, enum dw_edma_dir dir);
void dw_edma_v0_core_start(struct dw_edma_chunk *chunk, bool first);
int dw_edma_v0_core_device_config(struct dw_edma_chan *chan);
/* eDMA debug fs callbacks */
void dw_edma_v0_core_debugfs_on(struct dw_edma_chip *chip);
void dw_edma_v0_core_debugfs_off(void);
#endif /* _DW_EDMA_V0_CORE_H */
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA v0 core
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#include <linux/debugfs.h>
#include <linux/bitfield.h>
#include "dw-edma-v0-debugfs.h"
#include "dw-edma-v0-regs.h"
#include "dw-edma-core.h"
#define REGS_ADDR(name) \
((dma_addr_t *)&regs->name)
#define REGISTER(name) \
{ #name, REGS_ADDR(name) }
#define WR_REGISTER(name) \
{ #name, REGS_ADDR(wr_##name) }
#define RD_REGISTER(name) \
{ #name, REGS_ADDR(rd_##name) }
#define WR_REGISTER_LEGACY(name) \
{ #name, REGS_ADDR(type.legacy.wr_##name) }
#define RD_REGISTER_LEGACY(name) \
{ #name, REGS_ADDR(type.legacy.rd_##name) }
#define WR_REGISTER_UNROLL(name) \
{ #name, REGS_ADDR(type.unroll.wr_##name) }
#define RD_REGISTER_UNROLL(name) \
{ #name, REGS_ADDR(type.unroll.rd_##name) }
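/*
 * Illustration (not part of the original file): each macro builds a
 * { name, address } pair, e.g. WR_REGISTER(engine_en) expands to
 * { "engine_en", (dma_addr_t *)&regs->wr_engine_en }, which
 * dw_edma_debugfs_create_x32() below exposes as a read-only debugfs file.
 */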
#define WRITE_STR "write"
#define READ_STR "read"
#define CHANNEL_STR "channel"
#define REGISTERS_STR "registers"
static struct dentry *base_dir;
static struct dw_edma *dw;
static struct dw_edma_v0_regs *regs;
static struct {
void *start;
void *end;
} lim[2][EDMA_V0_MAX_NR_CH];
struct debugfs_entries {
char name[24];
dma_addr_t *reg;
};
static int dw_edma_debugfs_u32_get(void *data, u64 *val)
{
if (dw->mode == EDMA_MODE_LEGACY &&
data >= (void *)&regs->type.legacy.ch) {
void *ptr = (void *)&regs->type.legacy.ch;
u32 viewport_sel = 0;
unsigned long flags;
u16 ch;
for (ch = 0; ch < dw->wr_ch_cnt; ch++)
if (data >= lim[0][ch].start && data < lim[0][ch].end) {
ptr += (data - lim[0][ch].start);
goto legacy_sel_wr;
}
for (ch = 0; ch < dw->rd_ch_cnt; ch++)
if (data >= lim[1][ch].start && data < lim[1][ch].end) {
ptr += (data - lim[1][ch].start);
goto legacy_sel_rd;
}
return 0;
legacy_sel_rd:
viewport_sel = BIT(31);
legacy_sel_wr:
viewport_sel |= FIELD_PREP(EDMA_V0_VIEWPORT_MASK, ch);
raw_spin_lock_irqsave(&dw->lock, flags);
writel(viewport_sel, &regs->type.legacy.viewport_sel);
*val = readl(ptr);
raw_spin_unlock_irqrestore(&dw->lock, flags);
} else {
*val = readl(data);
}
return 0;
}
DEFINE_DEBUGFS_ATTRIBUTE(fops_x32, dw_edma_debugfs_u32_get, NULL, "0x%08llx\n");
static void dw_edma_debugfs_create_x32(const struct debugfs_entries entries[],
int nr_entries, struct dentry *dir)
{
int i;
for (i = 0; i < nr_entries; i++) {
if (!debugfs_create_file_unsafe(entries[i].name, 0444, dir,
entries[i].reg, &fops_x32))
break;
}
}
static void dw_edma_debugfs_regs_ch(struct dw_edma_v0_ch_regs *regs,
struct dentry *dir)
{
int nr_entries;
const struct debugfs_entries debugfs_regs[] = {
REGISTER(ch_control1),
REGISTER(ch_control2),
REGISTER(transfer_size),
REGISTER(sar_low),
REGISTER(sar_high),
REGISTER(dar_low),
REGISTER(dar_high),
REGISTER(llp_low),
REGISTER(llp_high),
};
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, dir);
}
static void dw_edma_debugfs_regs_wr(struct dentry *dir)
{
const struct debugfs_entries debugfs_regs[] = {
/* eDMA global registers */
WR_REGISTER(engine_en),
WR_REGISTER(doorbell),
WR_REGISTER(ch_arb_weight_low),
WR_REGISTER(ch_arb_weight_high),
/* eDMA interrupts registers */
WR_REGISTER(int_status),
WR_REGISTER(int_mask),
WR_REGISTER(int_clear),
WR_REGISTER(err_status),
WR_REGISTER(done_imwr_low),
WR_REGISTER(done_imwr_high),
WR_REGISTER(abort_imwr_low),
WR_REGISTER(abort_imwr_high),
WR_REGISTER(ch01_imwr_data),
WR_REGISTER(ch23_imwr_data),
WR_REGISTER(ch45_imwr_data),
WR_REGISTER(ch67_imwr_data),
WR_REGISTER(linked_list_err_en),
};
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
WR_REGISTER_UNROLL(engine_chgroup),
WR_REGISTER_UNROLL(engine_hshake_cnt_low),
WR_REGISTER_UNROLL(engine_hshake_cnt_high),
WR_REGISTER_UNROLL(ch0_pwr_en),
WR_REGISTER_UNROLL(ch1_pwr_en),
WR_REGISTER_UNROLL(ch2_pwr_en),
WR_REGISTER_UNROLL(ch3_pwr_en),
WR_REGISTER_UNROLL(ch4_pwr_en),
WR_REGISTER_UNROLL(ch5_pwr_en),
WR_REGISTER_UNROLL(ch6_pwr_en),
WR_REGISTER_UNROLL(ch7_pwr_en),
};
struct dentry *regs_dir, *ch_dir;
int nr_entries, i;
char name[16];
regs_dir = debugfs_create_dir(WRITE_STR, dir);
if (!regs_dir)
return;
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
if (dw->mode == EDMA_MODE_UNROLL) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
}
for (i = 0; i < dw->wr_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dir = debugfs_create_dir(name, regs_dir);
if (!ch_dir)
return;
dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].wr, ch_dir);
lim[0][i].start = &regs->type.unroll.ch[i].wr;
lim[0][i].end = &regs->type.unroll.ch[i].padding_1[0];
}
}
static void dw_edma_debugfs_regs_rd(struct dentry *dir)
{
const struct debugfs_entries debugfs_regs[] = {
/* eDMA global registers */
RD_REGISTER(engine_en),
RD_REGISTER(doorbell),
RD_REGISTER(ch_arb_weight_low),
RD_REGISTER(ch_arb_weight_high),
/* eDMA interrupts registers */
RD_REGISTER(int_status),
RD_REGISTER(int_mask),
RD_REGISTER(int_clear),
RD_REGISTER(err_status_low),
RD_REGISTER(err_status_high),
RD_REGISTER(linked_list_err_en),
RD_REGISTER(done_imwr_low),
RD_REGISTER(done_imwr_high),
RD_REGISTER(abort_imwr_low),
RD_REGISTER(abort_imwr_high),
RD_REGISTER(ch01_imwr_data),
RD_REGISTER(ch23_imwr_data),
RD_REGISTER(ch45_imwr_data),
RD_REGISTER(ch67_imwr_data),
};
const struct debugfs_entries debugfs_unroll_regs[] = {
/* eDMA channel context grouping */
RD_REGISTER_UNROLL(engine_chgroup),
RD_REGISTER_UNROLL(engine_hshake_cnt_low),
RD_REGISTER_UNROLL(engine_hshake_cnt_high),
RD_REGISTER_UNROLL(ch0_pwr_en),
RD_REGISTER_UNROLL(ch1_pwr_en),
RD_REGISTER_UNROLL(ch2_pwr_en),
RD_REGISTER_UNROLL(ch3_pwr_en),
RD_REGISTER_UNROLL(ch4_pwr_en),
RD_REGISTER_UNROLL(ch5_pwr_en),
RD_REGISTER_UNROLL(ch6_pwr_en),
RD_REGISTER_UNROLL(ch7_pwr_en),
};
struct dentry *regs_dir, *ch_dir;
int nr_entries, i;
char name[16];
regs_dir = debugfs_create_dir(READ_STR, dir);
if (!regs_dir)
return;
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
if (dw->mode == EDMA_MODE_UNROLL) {
nr_entries = ARRAY_SIZE(debugfs_unroll_regs);
dw_edma_debugfs_create_x32(debugfs_unroll_regs, nr_entries,
regs_dir);
}
for (i = 0; i < dw->rd_ch_cnt; i++) {
snprintf(name, sizeof(name), "%s:%d", CHANNEL_STR, i);
ch_dir = debugfs_create_dir(name, regs_dir);
if (!ch_dir)
return;
dw_edma_debugfs_regs_ch(&regs->type.unroll.ch[i].rd, ch_dir);
lim[1][i].start = &regs->type.unroll.ch[i].rd;
lim[1][i].end = &regs->type.unroll.ch[i].padding_2[0];
}
}
static void dw_edma_debugfs_regs(void)
{
const struct debugfs_entries debugfs_regs[] = {
REGISTER(ctrl_data_arb_prior),
REGISTER(ctrl),
};
struct dentry *regs_dir;
int nr_entries;
regs_dir = debugfs_create_dir(REGISTERS_STR, base_dir);
if (!regs_dir)
return;
nr_entries = ARRAY_SIZE(debugfs_regs);
dw_edma_debugfs_create_x32(debugfs_regs, nr_entries, regs_dir);
dw_edma_debugfs_regs_wr(regs_dir);
dw_edma_debugfs_regs_rd(regs_dir);
}
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
dw = chip->dw;
if (!dw)
return;
regs = (struct dw_edma_v0_regs *)dw->rg_region.vaddr;
if (!regs)
return;
base_dir = debugfs_create_dir(dw->name, 0);
if (!base_dir)
return;
debugfs_create_u32("version", 0444, base_dir, &dw->version);
debugfs_create_u32("mode", 0444, base_dir, &dw->mode);
debugfs_create_u16("wr_ch_cnt", 0444, base_dir, &dw->wr_ch_cnt);
debugfs_create_u16("rd_ch_cnt", 0444, base_dir, &dw->rd_ch_cnt);
dw_edma_debugfs_regs();
}
void dw_edma_v0_debugfs_off(void)
{
debugfs_remove_recursive(base_dir);
}
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA v0 core
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#ifndef _DW_EDMA_V0_DEBUG_FS_H
#define _DW_EDMA_V0_DEBUG_FS_H
#include <linux/dma/edma.h>
#ifdef CONFIG_DEBUG_FS
void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip);
void dw_edma_v0_debugfs_off(void);
#else
static inline void dw_edma_v0_debugfs_on(struct dw_edma_chip *chip)
{
}
static inline void dw_edma_v0_debugfs_off(void)
{
}
#endif /* CONFIG_DEBUG_FS */
#endif /* _DW_EDMA_V0_DEBUG_FS_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (c) 2018-2019 Synopsys, Inc. and/or its affiliates.
* Synopsys DesignWare eDMA v0 core
*
* Author: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
*/
#ifndef _DW_EDMA_V0_REGS_H
#define _DW_EDMA_V0_REGS_H
#include <linux/dmaengine.h>
#define EDMA_V0_MAX_NR_CH 8
#define EDMA_V0_VIEWPORT_MASK GENMASK(2, 0)
#define EDMA_V0_DONE_INT_MASK GENMASK(7, 0)
#define EDMA_V0_ABORT_INT_MASK GENMASK(23, 16)
#define EDMA_V0_WRITE_CH_COUNT_MASK GENMASK(3, 0)
#define EDMA_V0_READ_CH_COUNT_MASK GENMASK(19, 16)
#define EDMA_V0_CH_STATUS_MASK GENMASK(6, 5)
#define EDMA_V0_DOORBELL_CH_MASK GENMASK(2, 0)
#define EDMA_V0_LINKED_LIST_ERR_MASK GENMASK(7, 0)
#define EDMA_V0_CH_ODD_MSI_DATA_MASK GENMASK(31, 16)
#define EDMA_V0_CH_EVEN_MSI_DATA_MASK GENMASK(15, 0)
struct dw_edma_v0_ch_regs {
u32 ch_control1; /* 0x000 */
u32 ch_control2; /* 0x004 */
u32 transfer_size; /* 0x008 */
u32 sar_low; /* 0x00c */
u32 sar_high; /* 0x010 */
u32 dar_low; /* 0x014 */
u32 dar_high; /* 0x018 */
u32 llp_low; /* 0x01c */
u32 llp_high; /* 0x020 */
};
struct dw_edma_v0_ch {
struct dw_edma_v0_ch_regs wr; /* 0x200 */
u32 padding_1[55]; /* [0x224..0x2fc] */
struct dw_edma_v0_ch_regs rd; /* 0x300 */
u32 padding_2[55]; /* [0x324..0x3fc] */
};
struct dw_edma_v0_unroll {
u32 padding_1; /* 0x0f8 */
u32 wr_engine_chgroup; /* 0x100 */
u32 rd_engine_chgroup; /* 0x104 */
u32 wr_engine_hshake_cnt_low; /* 0x108 */
u32 wr_engine_hshake_cnt_high; /* 0x10c */
u32 padding_2[2]; /* [0x110..0x114] */
u32 rd_engine_hshake_cnt_low; /* 0x118 */
u32 rd_engine_hshake_cnt_high; /* 0x11c */
u32 padding_3[2]; /* [0x120..0x124] */
u32 wr_ch0_pwr_en; /* 0x128 */
u32 wr_ch1_pwr_en; /* 0x12c */
u32 wr_ch2_pwr_en; /* 0x130 */
u32 wr_ch3_pwr_en; /* 0x134 */
u32 wr_ch4_pwr_en; /* 0x138 */
u32 wr_ch5_pwr_en; /* 0x13c */
u32 wr_ch6_pwr_en; /* 0x140 */
u32 wr_ch7_pwr_en; /* 0x144 */
u32 padding_4[8]; /* [0x148..0x164] */
u32 rd_ch0_pwr_en; /* 0x168 */
u32 rd_ch1_pwr_en; /* 0x16c */
u32 rd_ch2_pwr_en; /* 0x170 */
u32 rd_ch3_pwr_en; /* 0x174 */
u32 rd_ch4_pwr_en; /* 0x178 */
u32 rd_ch5_pwr_en; /* 0x17c */
u32 rd_ch6_pwr_en; /* 0x180 */
u32 rd_ch7_pwr_en; /* 0x184 */
u32 padding_5[30]; /* [0x188..0x1fc] */
struct dw_edma_v0_ch ch[EDMA_V0_MAX_NR_CH]; /* [0x200..0x1120] */
};
struct dw_edma_v0_legacy {
u32 viewport_sel; /* 0x0f8 */
struct dw_edma_v0_ch_regs ch; /* [0x100..0x120] */
};
struct dw_edma_v0_regs {
/* eDMA global registers */
u32 ctrl_data_arb_prior; /* 0x000 */
u32 padding_1; /* 0x004 */
u32 ctrl; /* 0x008 */
u32 wr_engine_en; /* 0x00c */
u32 wr_doorbell; /* 0x010 */
u32 padding_2; /* 0x014 */
u32 wr_ch_arb_weight_low; /* 0x018 */
u32 wr_ch_arb_weight_high; /* 0x01c */
u32 padding_3[3]; /* [0x020..0x028] */
u32 rd_engine_en; /* 0x02c */
u32 rd_doorbell; /* 0x030 */
u32 padding_4; /* 0x034 */
u32 rd_ch_arb_weight_low; /* 0x038 */
u32 rd_ch_arb_weight_high; /* 0x03c */
u32 padding_5[3]; /* [0x040..0x048] */
/* eDMA interrupts registers */
u32 wr_int_status; /* 0x04c */
u32 padding_6; /* 0x050 */
u32 wr_int_mask; /* 0x054 */
u32 wr_int_clear; /* 0x058 */
u32 wr_err_status; /* 0x05c */
u32 wr_done_imwr_low; /* 0x060 */
u32 wr_done_imwr_high; /* 0x064 */
u32 wr_abort_imwr_low; /* 0x068 */
u32 wr_abort_imwr_high; /* 0x06c */
u32 wr_ch01_imwr_data; /* 0x070 */
u32 wr_ch23_imwr_data; /* 0x074 */
u32 wr_ch45_imwr_data; /* 0x078 */
u32 wr_ch67_imwr_data; /* 0x07c */
u32 padding_7[4]; /* [0x080..0x08c] */
u32 wr_linked_list_err_en; /* 0x090 */
u32 padding_8[3]; /* [0x094..0x09c] */
u32 rd_int_status; /* 0x0a0 */
u32 padding_9; /* 0x0a4 */
u32 rd_int_mask; /* 0x0a8 */
u32 rd_int_clear; /* 0x0ac */
u32 padding_10; /* 0x0b0 */
u32 rd_err_status_low; /* 0x0b4 */
u32 rd_err_status_high; /* 0x0b8 */
u32 padding_11[2]; /* [0x0bc..0x0c0] */
u32 rd_linked_list_err_en; /* 0x0c4 */
u32 padding_12; /* 0x0c8 */
u32 rd_done_imwr_low; /* 0x0cc */
u32 rd_done_imwr_high; /* 0x0d0 */
u32 rd_abort_imwr_low; /* 0x0d4 */
u32 rd_abort_imwr_high; /* 0x0d8 */
u32 rd_ch01_imwr_data; /* 0x0dc */
u32 rd_ch23_imwr_data; /* 0x0e0 */
u32 rd_ch45_imwr_data; /* 0x0e4 */
u32 rd_ch67_imwr_data; /* 0x0e8 */
u32 padding_13[4]; /* [0x0ec..0x0f8] */
/* eDMA channel context grouping */
union dw_edma_v0_type {
struct dw_edma_v0_legacy legacy; /* [0x0f8..0x120] */
struct dw_edma_v0_unroll unroll; /* [0x0f8..0x1120] */
} type;
};
struct dw_edma_v0_lli {
u32 control;
u32 transfer_size;
u32 sar_low;
u32 sar_high;
u32 dar_low;
u32 dar_high;
};
struct dw_edma_v0_llp {
u32 control;
u32 reserved;
u32 llp_low;
u32 llp_high;
};
#endif /* _DW_EDMA_V0_REGS_H */
......@@ -15,10 +15,13 @@
struct dw_dma_pci_data {
const struct dw_dma_platform_data *pdata;
int (*probe)(struct dw_dma_chip *chip);
int (*remove)(struct dw_dma_chip *chip);
struct dw_dma_chip *chip;
};
static const struct dw_dma_pci_data dw_pci_data = {
.probe = dw_dma_probe,
.remove = dw_dma_remove,
};
static const struct dw_dma_platform_data idma32_pdata = {
......@@ -34,11 +37,13 @@ static const struct dw_dma_platform_data idma32_pdata = {
static const struct dw_dma_pci_data idma32_pci_data = {
.pdata = &idma32_pdata,
.probe = idma32_dma_probe,
.remove = idma32_dma_remove,
};
static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
const struct dw_dma_pci_data *data = (void *)pid->driver_data;
const struct dw_dma_pci_data *drv_data = (void *)pid->driver_data;
struct dw_dma_pci_data *data;
struct dw_dma_chip *chip;
int ret;
......@@ -63,6 +68,10 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
if (ret)
return ret;
data = devm_kmemdup(&pdev->dev, drv_data, sizeof(*drv_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
if (!chip)
return -ENOMEM;
......@@ -73,21 +82,24 @@ static int dw_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
chip->irq = pdev->irq;
chip->pdata = data->pdata;
data->chip = chip;
ret = data->probe(chip);
if (ret)
return ret;
pci_set_drvdata(pdev, chip);
pci_set_drvdata(pdev, data);
return 0;
}
static void dw_pci_remove(struct pci_dev *pdev)
{
struct dw_dma_chip *chip = pci_get_drvdata(pdev);
struct dw_dma_pci_data *data = pci_get_drvdata(pdev);
struct dw_dma_chip *chip = data->chip;
int ret;
ret = dw_dma_remove(chip);
ret = data->remove(chip);
if (ret)
dev_warn(&pdev->dev, "can't remove device properly: %d\n", ret);
}
......@@ -96,16 +108,16 @@ static void dw_pci_remove(struct pci_dev *pdev)
static int dw_pci_suspend_late(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
struct dw_dma_pci_data *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_disable(chip);
};
static int dw_pci_resume_early(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
struct dw_dma_chip *chip = pci_get_drvdata(pci);
struct dw_dma_pci_data *data = dev_get_drvdata(dev);
struct dw_dma_chip *chip = data->chip;
return do_dw_dma_enable(chip);
};
......@@ -131,6 +143,11 @@ static const struct pci_device_id dw_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, 0x2286), (kernel_ulong_t)&dw_pci_data },
{ PCI_VDEVICE(INTEL, 0x22c0), (kernel_ulong_t)&dw_pci_data },
/* Elkhart Lake iDMA 32-bit (OSE DMA) */
{ PCI_VDEVICE(INTEL, 0x4bb4), (kernel_ulong_t)&idma32_pci_data },
{ PCI_VDEVICE(INTEL, 0x4bb5), (kernel_ulong_t)&idma32_pci_data },
{ PCI_VDEVICE(INTEL, 0x4bb6), (kernel_ulong_t)&idma32_pci_data },
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60), (kernel_ulong_t)&dw_pci_data },
......
......@@ -47,7 +47,7 @@ static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
if (fsl_chan->edma->version == v1) {
if (fsl_chan->edma->drvdata->version == v1) {
edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
edma_writeb(fsl_chan->edma, ch, regs->serq);
} else {
......@@ -64,7 +64,7 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
struct edma_regs *regs = &fsl_chan->edma->regs;
u32 ch = fsl_chan->vchan.chan.chan_id;
if (fsl_chan->edma->version == v1) {
if (fsl_chan->edma->drvdata->version == v1) {
edma_writeb(fsl_chan->edma, ch, regs->cerq);
edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
} else {
......@@ -77,22 +77,33 @@ void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
}
EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
static void mux_configure8(struct fsl_edma_chan *fsl_chan, void __iomem *addr,
u32 off, u32 slot, bool enable)
{
u8 val8;
if (enable)
val8 = EDMAMUX_CHCFG_ENBL | slot;
else
val8 = EDMAMUX_CHCFG_DIS;
iowrite8(val8, addr + off);
}
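/*
 * Note (illustration, not in the original patch): factoring the write into
 * mux_configure8() makes room for a sibling helper with a different access
 * width, e.g. a hypothetical mux_configure32() for SoCs whose mux uses
 * 32-bit channel-configuration registers, without touching
 * fsl_edma_chan_mux() itself.
 */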
void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
unsigned int slot, bool enable)
{
u32 ch = fsl_chan->vchan.chan.chan_id;
void __iomem *muxaddr;
unsigned int chans_per_mux, ch_off;
u32 dmamux_nr = fsl_chan->edma->drvdata->dmamuxs;
chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
chans_per_mux = fsl_chan->edma->n_chans / dmamux_nr;
ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
slot = EDMAMUX_CHCFG_SOURCE(slot);
if (enable)
iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
else
iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
mux_configure8(fsl_chan, muxaddr, ch_off, slot, enable);
}
EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
......@@ -647,28 +658,28 @@ void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
edma->regs.erql = edma->membase + EDMA_ERQ;
edma->regs.eeil = edma->membase + EDMA_EEI;
edma->regs.serq = edma->membase + ((edma->version == v1) ?
EDMA_SERQ : EDMA64_SERQ);
edma->regs.cerq = edma->membase + ((edma->version == v1) ?
EDMA_CERQ : EDMA64_CERQ);
edma->regs.seei = edma->membase + ((edma->version == v1) ?
EDMA_SEEI : EDMA64_SEEI);
edma->regs.ceei = edma->membase + ((edma->version == v1) ?
EDMA_CEEI : EDMA64_CEEI);
edma->regs.cint = edma->membase + ((edma->version == v1) ?
EDMA_CINT : EDMA64_CINT);
edma->regs.cerr = edma->membase + ((edma->version == v1) ?
EDMA_CERR : EDMA64_CERR);
edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
EDMA_SSRT : EDMA64_SSRT);
edma->regs.cdne = edma->membase + ((edma->version == v1) ?
EDMA_CDNE : EDMA64_CDNE);
edma->regs.intl = edma->membase + ((edma->version == v1) ?
EDMA_INTR : EDMA64_INTL);
edma->regs.errl = edma->membase + ((edma->version == v1) ?
EDMA_ERR : EDMA64_ERRL);
if (edma->version == v2) {
edma->regs.serq = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_SERQ : EDMA_SERQ);
edma->regs.cerq = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_CERQ : EDMA_CERQ);
edma->regs.seei = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_SEEI : EDMA_SEEI);
edma->regs.ceei = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_CEEI : EDMA_CEEI);
edma->regs.cint = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_CINT : EDMA_CINT);
edma->regs.cerr = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_CERR : EDMA_CERR);
edma->regs.ssrt = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_SSRT : EDMA_SSRT);
edma->regs.cdne = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_CDNE : EDMA_CDNE);
edma->regs.intl = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_INTL : EDMA_INTR);
edma->regs.errl = edma->membase + ((edma->drvdata->version == v2) ?
EDMA64_ERRL : EDMA_ERR);
if (edma->drvdata->version == v2) {
edma->regs.erqh = edma->membase + EDMA64_ERQH;
edma->regs.eeih = edma->membase + EDMA64_EEIH;
edma->regs.errh = edma->membase + EDMA64_ERRH;
......
......@@ -7,6 +7,7 @@
#define _FSL_EDMA_COMMON_H_
#include <linux/dma-direction.h>
#include <linux/platform_device.h>
#include "virt-dma.h"
#define EDMA_CR_EDBG BIT(1)
......@@ -140,17 +141,24 @@ enum edma_version {
v2, /* 64ch Coldfire */
};
struct fsl_edma_drvdata {
enum edma_version version;
u32 dmamuxs;
int (*setup_irq)(struct platform_device *pdev,
struct fsl_edma_engine *fsl_edma);
};
struct fsl_edma_engine {
struct dma_device dma_dev;
void __iomem *membase;
void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR];
struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata;
u32 n_chans;
int txirq;
int errirq;
bool big_endian;
enum edma_version version;
struct edma_regs regs;
struct fsl_edma_chan chans[];
};
......
......@@ -92,7 +92,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
struct fsl_edma_engine *fsl_edma = ofdma->of_dma_data;
struct dma_chan *chan, *_chan;
struct fsl_edma_chan *fsl_chan;
unsigned long chans_per_mux = fsl_edma->n_chans / DMAMUX_NR;
u32 dmamux_nr = fsl_edma->drvdata->dmamuxs;
unsigned long chans_per_mux = fsl_edma->n_chans / dmamux_nr;
if (dma_spec->args_count != 2)
return NULL;
......@@ -180,16 +181,38 @@ static void fsl_disable_clocks(struct fsl_edma_engine *fsl_edma, int nr_clocks)
clk_disable_unprepare(fsl_edma->muxclk[i]);
}
static struct fsl_edma_drvdata vf610_data = {
.version = v1,
.dmamuxs = DMAMUX_NR,
.setup_irq = fsl_edma_irq_init,
};
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", .data = &vf610_data},
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
static int fsl_edma_probe(struct platform_device *pdev)
{
const struct of_device_id *of_id =
of_match_device(fsl_edma_dt_ids, &pdev->dev);
struct device_node *np = pdev->dev.of_node;
struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL;
struct fsl_edma_chan *fsl_chan;
struct edma_regs *regs;
struct resource *res;
int len, chans;
int ret, i;
if (of_id)
drvdata = of_id->data;
if (!drvdata) {
dev_err(&pdev->dev, "unable to find driver data\n");
return -EINVAL;
}
ret = of_property_read_u32(np, "dma-channels", &chans);
if (ret) {
dev_err(&pdev->dev, "Can't get dma-channels.\n");
......@@ -201,7 +224,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (!fsl_edma)
return -ENOMEM;
fsl_edma->version = v1;
fsl_edma->drvdata = drvdata;
fsl_edma->n_chans = chans;
mutex_init(&fsl_edma->fsl_edma_mutex);
......@@ -213,7 +236,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_edma_setup_regs(fsl_edma);
regs = &fsl_edma->regs;
for (i = 0; i < DMAMUX_NR; i++) {
for (i = 0; i < fsl_edma->drvdata->dmamuxs; i++) {
char clkname[32];
res = platform_get_resource(pdev, IORESOURCE_MEM, 1 + i);
......@@ -259,7 +282,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
}
edma_writel(fsl_edma, ~0, regs->intl);
ret = fsl_edma_irq_init(pdev, fsl_edma);
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
if (ret)
return ret;
......@@ -291,7 +314,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
if (ret) {
dev_err(&pdev->dev,
"Can't register Freescale eDMA engine. (%d)\n", ret);
fsl_disable_clocks(fsl_edma, DMAMUX_NR);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
......@@ -300,7 +323,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
dev_err(&pdev->dev,
"Can't register Freescale eDMA of_dma. (%d)\n", ret);
dma_async_device_unregister(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, DMAMUX_NR);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return ret;
}
......@@ -319,7 +342,7 @@ static int fsl_edma_remove(struct platform_device *pdev)
fsl_edma_cleanup_vchan(&fsl_edma->dma_dev);
of_dma_controller_free(np);
dma_async_device_unregister(&fsl_edma->dma_dev);
fsl_disable_clocks(fsl_edma, DMAMUX_NR);
fsl_disable_clocks(fsl_edma, fsl_edma->drvdata->dmamuxs);
return 0;
}
......@@ -378,12 +401,6 @@ static const struct dev_pm_ops fsl_edma_pm_ops = {
.resume_early = fsl_edma_resume_early,
};
static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,vf610-edma", },
{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, fsl_edma_dt_ids);
static struct platform_driver fsl_edma_driver = {
.driver = {
.name = "fsl-edma",
......
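The probe rework above is the standard OF match-data pattern: each compatible string carries a pointer to per-SoC driver data, and probe() looks it up instead of hard-coding a version. A self-contained sketch of the pattern with illustrative names (the patch itself uses of_match_device(); of_device_get_match_data() shown here is an equivalent shortcut):

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

/* Per-compatible configuration, analogous to struct fsl_edma_drvdata. */
struct demo_drvdata {
	int version;
	unsigned int dmamuxs;
};

static const struct demo_drvdata demo_v1 = { .version = 1, .dmamuxs = 2 };

static const struct of_device_id demo_dt_ids[] = {
	{ .compatible = "vendor,demo-v1", .data = &demo_v1 },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_dt_ids);

static int demo_probe(struct platform_device *pdev)
{
	/* Returns the .data pointer of the matched of_device_id entry. */
	const struct demo_drvdata *drvdata =
		of_device_get_match_data(&pdev->dev);

	if (!drvdata) {
		dev_err(&pdev->dev, "unable to find driver data\n");
		return -EINVAL;
	}

	dev_info(&pdev->dev, "version %d, %u muxes\n",
		 drvdata->version, drvdata->dmamuxs);
	return 0;
}

static struct platform_driver demo_driver = {
	.driver = {
		.name = "demo",
		.of_match_table = demo_dt_ids,
	},
	.probe = demo_probe,
};
module_platform_driver(demo_driver);
MODULE_LICENSE("GPL");

This is also why fsl_edma_dt_ids moves above probe() in the diff: the .data pointers must be visible at the match site.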
......@@ -113,6 +113,7 @@
/* Field definition for Descriptor offset */
#define QDMA_CCDF_STATUS 20
#define QDMA_CCDF_OFFSET 20
#define QDMA_SDDF_CMD(x) (((u64)(x)) << 32)
/* Field definition for safe loop count*/
#define FSL_QDMA_HALT_COUNT 1500
......@@ -341,6 +342,7 @@ static void fsl_qdma_free_chan_resources(struct dma_chan *chan)
static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
dma_addr_t dst, dma_addr_t src, u32 len)
{
u32 cmd;
struct fsl_qdma_format *sdf, *ddf;
struct fsl_qdma_format *ccdf, *csgf_desc, *csgf_src, *csgf_dest;
......@@ -369,14 +371,14 @@ static void fsl_qdma_comp_fill_memcpy(struct fsl_qdma_comp *fsl_comp,
/* This entry is the last entry. */
qdma_csgf_set_f(csgf_dest, len);
/* Descriptor Buffer */
sdf->data =
cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
FSL_QDMA_CMD_RWTTYPE_OFFSET);
ddf->data =
cpu_to_le64(FSL_QDMA_CMD_RWTTYPE <<
FSL_QDMA_CMD_RWTTYPE_OFFSET);
ddf->data |=
cpu_to_le64(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
FSL_QDMA_CMD_RWTTYPE_OFFSET);
sdf->data = QDMA_SDDF_CMD(cmd);
cmd = cpu_to_le32(FSL_QDMA_CMD_RWTTYPE <<
FSL_QDMA_CMD_RWTTYPE_OFFSET);
cmd |= cpu_to_le32(FSL_QDMA_CMD_LWC << FSL_QDMA_CMD_LWC_OFFSET);
ddf->data = QDMA_SDDF_CMD(cmd);
}
/*
......
......@@ -61,10 +61,10 @@ static void hsu_dma_chan_start(struct hsu_dma_chan *hsuc)
if (hsuc->direction == DMA_MEM_TO_DEV) {
bsr = config->dst_maxburst;
mtsr = config->src_addr_width;
mtsr = config->dst_addr_width;
} else if (hsuc->direction == DMA_DEV_TO_MEM) {
bsr = config->src_maxburst;
mtsr = config->dst_addr_width;
mtsr = config->src_addr_width;
}
hsu_chan_disable(hsuc);
......
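The hsu revert restores the convention that the transfer size register tracks the device-side width: dst_addr_width for MEM_TO_DEV and src_addr_width for DEV_TO_MEM. Those fields originate in the client's dma_slave_config; an illustrative (hypothetical) TX setup:

#include <linux/dmaengine.h>

static int demo_config_uart_tx(struct dma_chan *chan, dma_addr_t fifo)
{
	struct dma_slave_config cfg = {
		.direction = DMA_MEM_TO_DEV,
		.dst_addr = fifo,
		/* Device-side width: what MTSR reflects again after the revert. */
		.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE,
		.dst_maxburst = 16,
	};

	return dmaengine_slave_config(chan, &cfg);
}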
......@@ -1934,16 +1934,11 @@ static int sdma_init(struct sdma_engine *sdma)
static bool sdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct sdma_channel *sdmac = to_sdma_chan(chan);
struct sdma_engine *sdma = sdmac->sdma;
struct imx_dma_data *data = fn_param;
if (!imx_dma_is_general_purpose(chan))
return false;
/* return false if it's not the right device */
if (sdma->dev->of_node != data->of_node)
return false;
sdmac->data = *data;
chan->private = &sdmac->data;
......@@ -1971,9 +1966,9 @@ static struct dma_chan *sdma_xlate(struct of_phandle_args *dma_spec,
* be set to sdmac->event_id1.
*/
data.dma_request2 = 0;
data.of_node = ofdma->of_node;
return dma_request_channel(mask, sdma_filter_fn, &data);
return __dma_request_channel(&mask, sdma_filter_fn, &data,
ofdma->of_node);
}
static int sdma_probe(struct platform_device *pdev)
......
......@@ -164,6 +164,11 @@ static void mcf_edma_irq_free(struct platform_device *pdev,
free_irq(irq, mcf_edma);
}
static struct fsl_edma_drvdata mcf_data = {
.version = v2,
.setup_irq = mcf_edma_irq_init,
};
static int mcf_edma_probe(struct platform_device *pdev)
{
struct mcf_edma_platform_data *pdata;
......@@ -187,8 +192,8 @@ static int mcf_edma_probe(struct platform_device *pdev)
mcf_edma->n_chans = chans;
/* Set up version for ColdFire edma */
mcf_edma->version = v2;
/* Set up drvdata for ColdFire edma */
mcf_edma->drvdata = &mcf_data;
mcf_edma->big_endian = 1;
if (!mcf_edma->n_chans) {
......@@ -223,7 +228,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
iowrite32(~0, regs->inth);
iowrite32(~0, regs->intl);
ret = mcf_edma_irq_init(pdev, mcf_edma);
ret = mcf_edma->drvdata->setup_irq(pdev, mcf_edma);
if (ret)
return ret;
......
......@@ -25,3 +25,14 @@ config MTK_CQDMA
This controller provides the channels which are dedicated to
memory-to-memory transfer to offload work from the CPU.
config MTK_UART_APDMA
tristate "MediaTek SoCs APDMA support for UART"
depends on OF && SERIAL_8250_MT6577
select DMA_ENGINE
select DMA_VIRTUAL_CHANNELS
help
Support for the UART DMA engine found on MediaTek SoCs.
When SERIAL_8250_MT6577 is enabled, enable this option to use
DMA for UART transfers. This DMA engine can only be used with
MediaTek SoCs.
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_MTK_UART_APDMA) += mtk-uart-apdma.o
obj-$(CONFIG_MTK_HSDMA) += mtk-hsdma.o
obj-$(CONFIG_MTK_CQDMA) += mtk-cqdma.o
......@@ -717,10 +717,8 @@ static int mic_dma_driver_probe(struct mbus_device *mbdev)
if (mic_dma_dbg) {
mic_dma_dev->dbg_dir = debugfs_create_dir(dev_name(&mbdev->dev),
mic_dma_dbg);
if (mic_dma_dev->dbg_dir)
debugfs_create_file("mic_dma_reg", 0444,
mic_dma_dev->dbg_dir, mic_dma_dev,
&mic_dma_reg_fops);
debugfs_create_file("mic_dma_reg", 0444, mic_dma_dev->dbg_dir,
mic_dma_dev, &mic_dma_reg_fops);
}
return 0;
}
......
......@@ -582,18 +582,12 @@ static int mmp_tdma_chan_init(struct mmp_tdma_device *tdev,
}
struct mmp_tdma_filter_param {
struct device_node *of_node;
unsigned int chan_id;
};
static bool mmp_tdma_filter_fn(struct dma_chan *chan, void *fn_param)
{
struct mmp_tdma_filter_param *param = fn_param;
struct mmp_tdma_chan *tdmac = to_mmp_tdma_chan(chan);
struct dma_device *pdma_device = tdmac->chan.device;
if (pdma_device->dev->of_node != param->of_node)
return false;
if (chan->chan_id != param->chan_id)
return false;
......@@ -611,13 +605,13 @@ static struct dma_chan *mmp_tdma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 1)
return NULL;
param.of_node = ofdma->of_node;
param.chan_id = dma_spec->args[0];
if (param.chan_id >= TDMA_CHANNEL_NUM)
return NULL;
return dma_request_channel(mask, mmp_tdma_filter_fn, &param);
return __dma_request_channel(&mask, mmp_tdma_filter_fn, &param,
ofdma->of_node);
}
static const struct of_device_id mmp_tdma_dt_ids[] = {
......
......@@ -719,7 +719,6 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
}
struct mxs_dma_filter_param {
struct device_node *of_node;
unsigned int chan_id;
};
......@@ -730,9 +729,6 @@ static bool mxs_dma_filter_fn(struct dma_chan *chan, void *fn_param)
struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
int chan_irq;
if (mxs_dma->dma_device.dev->of_node != param->of_node)
return false;
if (chan->chan_id != param->chan_id)
return false;
......@@ -755,13 +751,13 @@ static struct dma_chan *mxs_dma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 1)
return NULL;
param.of_node = ofdma->of_node;
param.chan_id = dma_spec->args[0];
if (param.chan_id >= mxs_dma->nr_channels)
return NULL;
return dma_request_channel(mask, mxs_dma_filter_fn, &param);
return __dma_request_channel(&mask, mxs_dma_filter_fn, &param,
ofdma->of_node);
}
static int __init mxs_dma_probe(struct platform_device *pdev)
......
......@@ -313,8 +313,8 @@ struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
if (count != 1)
return NULL;
return dma_request_channel(info->dma_cap, info->filter_fn,
&dma_spec->args[0]);
return __dma_request_channel(&info->dma_cap, info->filter_fn,
&dma_spec->args[0], dma_spec->np);
}
EXPORT_SYMBOL_GPL(of_dma_simple_xlate);
......
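Several hunks in this pull make the same substitution: dma_request_channel() becomes __dma_request_channel() with the controller's of_node as a new fourth argument, letting the dmaengine core do the device-node check so each driver's filter callback can drop its open-coded comparison. A hedged sketch of an xlate handler using the new signature:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

/* Hypothetical xlate: the core skips channels whose controller device
 * node differs from the node passed last, so no of_node test is needed
 * in a filter function (and a NULL filter accepts any free channel on
 * the matched controller). */
static struct dma_chan *demo_of_xlate(struct of_phandle_args *dma_spec,
				      struct of_dma *ofdma)
{
	dma_cap_mask_t mask;

	if (dma_spec->args_count != 1)
		return NULL;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	return __dma_request_channel(&mask, NULL, NULL, ofdma->of_node);
}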
......@@ -25,6 +25,7 @@
#include <linux/err.h>
#include <linux/pm_runtime.h>
#include <linux/bug.h>
#include <linux/reset.h>
#include "dmaengine.h"
#define PL330_MAX_CHAN 8
......@@ -496,6 +497,9 @@ struct pl330_dmac {
unsigned int num_peripherals;
struct dma_pl330_chan *peripherals; /* keep at end */
int quirks;
struct reset_control *rstc;
struct reset_control *rstc_ocp;
};
static struct pl330_of_quirks {
......@@ -3024,6 +3028,32 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
amba_set_drvdata(adev, pl330);
pl330->rstc = devm_reset_control_get_optional(&adev->dev, "dma");
if (IS_ERR(pl330->rstc)) {
if (PTR_ERR(pl330->rstc) != -EPROBE_DEFER)
dev_err(&adev->dev, "Failed to get reset!\n");
return PTR_ERR(pl330->rstc);
} else {
ret = reset_control_deassert(pl330->rstc);
if (ret) {
dev_err(&adev->dev, "Couldn't deassert the device from reset!\n");
return ret;
}
}
pl330->rstc_ocp = devm_reset_control_get_optional(&adev->dev, "dma-ocp");
if (IS_ERR(pl330->rstc_ocp)) {
if (PTR_ERR(pl330->rstc_ocp) != -EPROBE_DEFER)
dev_err(&adev->dev, "Failed to get OCP reset!\n");
return PTR_ERR(pl330->rstc_ocp);
} else {
ret = reset_control_deassert(pl330->rstc_ocp);
if (ret) {
dev_err(&adev->dev, "Couldn't deassert the device from OCP reset!\n");
return ret;
}
}
for (i = 0; i < AMBA_NR_IRQS; i++) {
irq = adev->irq[i];
if (irq) {
......@@ -3164,6 +3194,11 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
probe_err2:
pl330_del(pl330);
if (pl330->rstc_ocp)
reset_control_assert(pl330->rstc_ocp);
if (pl330->rstc)
reset_control_assert(pl330->rstc);
return ret;
}
......@@ -3202,6 +3237,11 @@ static int pl330_remove(struct amba_device *adev)
pl330_del(pl330);
if (pl330->rstc_ocp)
reset_control_assert(pl330->rstc_ocp);
if (pl330->rstc)
reset_control_assert(pl330->rstc);
return 0;
}
......
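The pl330 reset handling above relies on the optional-reset API: devm_reset_control_get_optional() returns NULL rather than an error when the device tree omits the corresponding entry, and the reset_control_*() calls treat a NULL handle as a no-op. A minimal sketch of the same idiom (the helper name is illustrative):

#include <linux/device.h>
#include <linux/reset.h>

static int demo_deassert_reset(struct device *dev, const char *id)
{
	struct reset_control *rstc;
	int ret;

	/* NULL (not an error) when no reset with this id is specified. */
	rstc = devm_reset_control_get_optional(dev, id);
	if (IS_ERR(rstc)) {
		if (PTR_ERR(rstc) != -EPROBE_DEFER)
			dev_err(dev, "failed to get %s reset\n", id);
		return PTR_ERR(rstc);
	}

	/* A no-op when rstc is NULL, so no else branch is required. */
	ret = reset_control_deassert(rstc);
	if (ret)
		dev_err(dev, "couldn't deassert %s reset\n", id);
	return ret;
}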
......@@ -129,7 +129,6 @@ struct pxad_device {
spinlock_t phy_lock; /* Phy association */
#ifdef CONFIG_DEBUG_FS
struct dentry *dbgfs_root;
struct dentry *dbgfs_state;
struct dentry **dbgfs_chan;
#endif
};
......@@ -323,31 +322,18 @@ static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
int ch, struct dentry *chandir)
{
char chan_name[11];
struct dentry *chan, *chan_state = NULL, *chan_descr = NULL;
struct dentry *chan_reqs = NULL;
struct dentry *chan;
void *dt;
scnprintf(chan_name, sizeof(chan_name), "%d", ch);
chan = debugfs_create_dir(chan_name, chandir);
dt = (void *)&pdev->phys[ch];
if (chan)
chan_state = debugfs_create_file("state", 0400, chan, dt,
&chan_state_fops);
if (chan_state)
chan_descr = debugfs_create_file("descriptors", 0400, chan, dt,
&descriptors_fops);
if (chan_descr)
chan_reqs = debugfs_create_file("requesters", 0400, chan, dt,
&requester_chan_fops);
if (!chan_reqs)
goto err_state;
debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);
return chan;
err_state:
debugfs_remove_recursive(chan);
return NULL;
}
static void pxad_init_debugfs(struct pxad_device *pdev)
......@@ -355,40 +341,20 @@ static void pxad_init_debugfs(struct pxad_device *pdev)
int i;
struct dentry *chandir;
pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root)
goto err_root;
pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root,
pdev, &state_fops);
if (!pdev->dbgfs_state)
goto err_state;
pdev->dbgfs_chan =
kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_state),
kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
GFP_KERNEL);
if (!pdev->dbgfs_chan)
goto err_alloc;
return;
pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);
debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);
chandir = debugfs_create_dir("channels", pdev->dbgfs_root);
if (!chandir)
goto err_chandir;
for (i = 0; i < pdev->nr_chans; i++) {
for (i = 0; i < pdev->nr_chans; i++)
pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
if (!pdev->dbgfs_chan[i])
goto err_chans;
}
return;
err_chans:
err_chandir:
kfree(pdev->dbgfs_chan);
err_alloc:
err_state:
debugfs_remove_recursive(pdev->dbgfs_root);
err_root:
pr_err("pxad: debugfs is not available\n");
}
static void pxad_cleanup_debugfs(struct pxad_device *pdev)
......
......@@ -93,8 +93,6 @@ struct hidma_chan {
* It is used by the DMA complete notification to
* locate the descriptor that initiated the transfer.
*/
struct dentry *debugfs;
struct dentry *stats;
struct hidma_dev *dmadev;
struct hidma_desc *running;
......@@ -126,7 +124,6 @@ struct hidma_dev {
struct dma_device ddev;
struct dentry *debugfs;
struct dentry *stats;
/* sysfs entry for the channel id */
struct device_attribute *chid_attrs;
......@@ -158,6 +155,6 @@ irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
u8 err_code);
int hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
#endif
......@@ -138,17 +138,13 @@ void hidma_debug_uninit(struct hidma_dev *dmadev)
debugfs_remove_recursive(dmadev->debugfs);
}
int hidma_debug_init(struct hidma_dev *dmadev)
void hidma_debug_init(struct hidma_dev *dmadev)
{
int rc = 0;
int chidx = 0;
struct list_head *position = NULL;
struct dentry *dir;
dmadev->debugfs = debugfs_create_dir(dev_name(dmadev->ddev.dev), NULL);
if (!dmadev->debugfs) {
rc = -ENODEV;
return rc;
}
/* walk through the virtual channel list */
list_for_each(position, &dmadev->ddev.channels) {
......@@ -157,32 +153,13 @@ int hidma_debug_init(struct hidma_dev *dmadev)
chan = list_entry(position, struct hidma_chan,
chan.device_node);
sprintf(chan->dbg_name, "chan%d", chidx);
chan->debugfs = debugfs_create_dir(chan->dbg_name,
dir = debugfs_create_dir(chan->dbg_name,
dmadev->debugfs);
if (!chan->debugfs) {
rc = -ENOMEM;
goto cleanup;
}
chan->stats = debugfs_create_file("stats", S_IRUGO,
chan->debugfs, chan,
&hidma_chan_fops);
if (!chan->stats) {
rc = -ENOMEM;
goto cleanup;
}
debugfs_create_file("stats", S_IRUGO, dir, chan,
&hidma_chan_fops);
chidx++;
}
dmadev->stats = debugfs_create_file("stats", S_IRUGO,
dmadev->debugfs, dmadev,
&hidma_dma_fops);
if (!dmadev->stats) {
rc = -ENOMEM;
goto cleanup;
}
return 0;
cleanup:
hidma_debug_uninit(dmadev);
return rc;
debugfs_create_file("stats", S_IRUGO, dmadev->debugfs, dmadev,
&hidma_dma_fops);
}
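The debugfs hunks here (mic, pxa, hidma) all apply the same rule: debugfs creation calls need no return-value checks, since the facility is best-effort and the creation helpers cope with failed parents. The error paths therefore delete cleanly, and hidma_debug_init() can return void. A compact illustrative sketch of the resulting idiom:

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/types.h>

struct demo_dev {
	struct dentry *root;
	u64 count;
};

static int demo_stats_show(struct seq_file *s, void *unused)
{
	struct demo_dev *ddev = s->private;

	seq_printf(s, "count: %llu\n", ddev->count);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(demo_stats);

/* No error handling: if the directory failed to appear, the file
 * creation quietly fails too, and the driver works regardless. */
static void demo_debug_init(struct demo_dev *ddev)
{
	ddev->root = debugfs_create_dir("demo", NULL);
	debugfs_create_file("stats", 0444, ddev->root, ddev,
			    &demo_stats_fops);
}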
......@@ -47,9 +47,3 @@ config RENESAS_USB_DMAC
help
This driver supports the USB-DMA controller found in the Renesas
SoCs.
config SUDMAC
tristate "Renesas SUDMAC support"
depends on SH_DMAE_BASE
help
Enable support for the Renesas SUDMAC controllers.
......@@ -15,4 +15,3 @@ obj-$(CONFIG_SH_DMAE) += shdma.o
obj-$(CONFIG_RCAR_DMAC) += rcar-dmac.o
obj-$(CONFIG_RENESAS_USB_DMAC) += usb-dmac.o
obj-$(CONFIG_SUDMAC) += sudmac.o
......@@ -1165,7 +1165,7 @@ rcar_dmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
struct rcar_dmac_chan *rchan = to_rcar_dmac_chan(chan);
/* Someone calling slave DMA on a generic channel? */
if (rchan->mid_rid < 0 || !sg_len) {
if (rchan->mid_rid < 0 || !sg_len || !sg_dma_len(sgl)) {
dev_warn(chan->device->dev,
"%s: bad parameter: len=%d, id=%d\n",
__func__, sg_len, rchan->mid_rid);
......@@ -1654,8 +1654,7 @@ static bool rcar_dmac_chan_filter(struct dma_chan *chan, void *arg)
* Forcing it to call dma_request_channel() and iterate through all
* channels from all controllers is just pointless.
*/
if (chan->device->device_config != rcar_dmac_device_config ||
dma_spec->np != chan->device->dev->of_node)
if (chan->device->device_config != rcar_dmac_device_config)
return false;
return !test_and_set_bit(dma_spec->args[0], dmac->modules);
......@@ -1675,7 +1674,8 @@ static struct dma_chan *rcar_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, rcar_dmac_chan_filter, dma_spec);
chan = __dma_request_channel(&mask, rcar_dmac_chan_filter, dma_spec,
ofdma->of_node);
if (!chan)
return NULL;
......
......@@ -57,7 +57,7 @@ struct usb_dmac_desc {
u32 residue;
struct list_head node;
dma_cookie_t done_cookie;
struct usb_dmac_sg sg[0];
struct usb_dmac_sg sg[];
};
#define to_usb_dmac_desc(vd) container_of(vd, struct usb_dmac_desc, vd)
......@@ -636,9 +636,6 @@ static bool usb_dmac_chan_filter(struct dma_chan *chan, void *arg)
struct usb_dmac_chan *uchan = to_usb_dmac_chan(chan);
struct of_phandle_args *dma_spec = arg;
if (dma_spec->np != chan->device->dev->of_node)
return false;
/* USB-DMAC should be used with fixed usb controller's FIFO */
if (uchan->index != dma_spec->args[0])
return false;
......@@ -659,7 +656,8 @@ static struct dma_chan *usb_dmac_of_xlate(struct of_phandle_args *dma_spec,
dma_cap_zero(mask);
dma_cap_set(DMA_SLAVE, mask);
chan = dma_request_channel(mask, usb_dmac_chan_filter, dma_spec);
chan = __dma_request_channel(&mask, usb_dmac_chan_filter, dma_spec,
ofdma->of_node);
if (!chan)
return NULL;
......
......@@ -1365,7 +1365,6 @@ static int stm32_dma_probe(struct platform_device *pdev)
for (i = 0; i < STM32_DMA_MAX_CHANNELS; i++) {
chan = &dmadev->chan[i];
chan->irq = platform_get_irq(pdev, i);
ret = platform_get_irq(pdev, i);
if (ret < 0) {
if (ret != -EPROBE_DEFER)
......
......@@ -295,8 +295,7 @@ static int stm32_dmamux_probe(struct platform_device *pdev)
#ifdef CONFIG_PM
static int stm32_dmamux_runtime_suspend(struct device *dev)
{
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
clk_disable_unprepare(stm32_dmamux->clk);
......@@ -306,8 +305,7 @@ static int stm32_dmamux_runtime_suspend(struct device *dev)
static int stm32_dmamux_runtime_resume(struct device *dev)
{
struct platform_device *pdev =
container_of(dev, struct platform_device, dev);
struct platform_device *pdev = to_platform_device(dev);
struct stm32_dmamux_data *stm32_dmamux = platform_get_drvdata(pdev);
int ret;
......
......@@ -977,8 +977,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_slave_sg(
csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
}
if (flags & DMA_PREP_INTERRUPT)
if (flags & DMA_PREP_INTERRUPT) {
csr |= TEGRA_APBDMA_CSR_IE_EOC;
} else {
WARN_ON_ONCE(1);
return NULL;
}
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
......@@ -1120,8 +1124,12 @@ static struct dma_async_tx_descriptor *tegra_dma_prep_dma_cyclic(
csr |= tdc->slave_id << TEGRA_APBDMA_CSR_REQ_SEL_SHIFT;
}
if (flags & DMA_PREP_INTERRUPT)
if (flags & DMA_PREP_INTERRUPT) {
csr |= TEGRA_APBDMA_CSR_IE_EOC;
} else {
WARN_ON_ONCE(1);
return NULL;
}
apb_seq |= TEGRA_APBDMA_APBSEQ_WRAP_WORD_1;
......
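After the two tegra hunks above, preparing a slave-sg or cyclic descriptor without DMA_PREP_INTERRUPT fails with a one-time warning, since the driver relies on the completion interrupt to finish transfers. A hypothetical client showing the flag being passed:

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

static int demo_tx_submit(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	/* Without DMA_PREP_INTERRUPT the prep call now returns NULL. */
	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}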
......@@ -98,7 +98,7 @@ static void vchan_complete(unsigned long arg)
}
spin_unlock_irq(&vc->lock);
dmaengine_desc_callback_invoke(&cb, NULL);
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
list_for_each_entry_safe(vd, _vd, &head, node) {
dmaengine_desc_get_callback(&vd->tx, &cb);
......@@ -106,7 +106,7 @@ static void vchan_complete(unsigned long arg)
list_del(&vd->node);
vchan_vdesc_fini(vd);
dmaengine_desc_callback_invoke(&cb, NULL);
dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
}
}
......
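With the virt-dma change above, both callback invocations in vchan_complete() forward the descriptor's tx_result instead of NULL, so result-aware callbacks finally see errors and residue. A hypothetical consumer (names illustrative):

#include <linux/dmaengine.h>
#include <linux/printk.h>

static void demo_dma_done(void *param, const struct dmaengine_result *result)
{
	if (result->result != DMA_TRANS_NOERROR)
		pr_warn("demo: transfer failed, residue %u\n",
			result->residue);
}

/* Registered on the descriptor before submission:
 *	desc->callback_result = demo_dma_done;
 *	desc->callback_param = ctx;
 *	dmaengine_submit(desc);
 */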
......@@ -793,7 +793,7 @@ static const struct pci_device_id pci_endpoint_test_tbl[] = {
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA74x) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_DRA72x) },
{ PCI_DEVICE(PCI_VENDOR_ID_FREESCALE, 0x81c0) },
{ PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 0xedda) },
{ PCI_DEVICE_DATA(SYNOPSYS, EDDA, NULL) },
{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_AM654),
.driver_data = (kernel_ulong_t)&am654_data
},
......