Commit d4e034b4 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine

Pull dmaengine updates from Vinod Koul:
 "New HW support:
   - Freescale i.MX8ULP edma support in edma driver
   - StarFive JH8100 DMA support in Synopsys axi-dmac driver

  Updates:
   - Tracing support for freescale edma driver, updates to dpaa2 driver
   - Remove unused QCom hidma DT support
   - Support for i2c dma in imx-sdma
   - Maintainers update for idxd and edma drivers"

* tag 'dmaengine-6.10-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/vkoul/dmaengine: (42 commits)
  MAINTAINERS: Update role for IDXD driver
  dmaengine: fsl-edma: use _Generic to handle difference type
  dmaengine: fsl-edma: add trace event support
  dmaengine: idxd: Avoid unnecessary destruction of file_ida
  dmaengine: xilinx: xdma: fix module autoloading
  dt-bindings: dma: fsl-edma: allow 'power-domains' property
  dt-bindings: dma: fsl-edma: remove 'clocks' from required
  dmaengine: fsl-dpaa2-qdma: Fix kernel-doc check warning
  dmaengine: imx-sdma: Add i2c dma support
  dmaengine: imx-sdma: utilize compiler to calculate ADDRS_ARRAY_SIZE_V<n>
  dt-bindings: fsl-imx-sdma: Add I2C peripheral types ID
  dt-bindings: fsl-dma: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
  dmaengine: fsl-edma: clean up unused "fsl,imx8qm-adma" compatible string
  dt-bindings: dma: Drop unused QCom hidma binding
  dmaengine: qcom: Drop hidma DT support
  dmaengine: pl08x: Use kcalloc() instead of kzalloc()
  dmaengine: fsl-dpaa2-qdma: Update DPDMAI interfaces to version 3
  dmaengine: fsl-edma: fix miss mutex unlock at an error return path
  dmaengine: pch_dma: remove unused function chan2parent
  dmaengine: fsl-dpaa2-qdma: Add dpdmai_cmd_open
  ...
parents 34dcc466 28059ddb
...@@ -21,8 +21,8 @@ properties: ...@@ -21,8 +21,8 @@ properties:
- enum: - enum:
- fsl,vf610-edma - fsl,vf610-edma
- fsl,imx7ulp-edma - fsl,imx7ulp-edma
- fsl,imx8qm-adma
- fsl,imx8qm-edma - fsl,imx8qm-edma
- fsl,imx8ulp-edma
- fsl,imx93-edma3 - fsl,imx93-edma3
- fsl,imx93-edma4 - fsl,imx93-edma4
- fsl,imx95-edma5 - fsl,imx95-edma5
...@@ -43,6 +43,17 @@ properties: ...@@ -43,6 +43,17 @@ properties:
maxItems: 64 maxItems: 64
"#dma-cells": "#dma-cells":
description: |
Specifies the number of cells needed to encode an DMA channel.
Encode for cells number 2:
cell 0: index of dma channel mux instance.
cell 1: peripheral dma request id.
Encode for cells number 3:
cell 0: peripheral dma request id.
cell 1: dma channel priority.
cell 2: bitmask, defined at include/dt-bindings/dma/fsl-edma.h
enum: enum:
- 2 - 2
- 3 - 3
...@@ -53,11 +64,18 @@ properties: ...@@ -53,11 +64,18 @@ properties:
clocks: clocks:
minItems: 1 minItems: 1
maxItems: 2 maxItems: 33
clock-names: clock-names:
minItems: 1 minItems: 1
maxItems: 2 maxItems: 33
power-domains:
description:
The number of power domains matches the number of channels, arranged
in ascending order according to their associated DMA channels.
minItems: 1
maxItems: 64
big-endian: big-endian:
description: | description: |
...@@ -70,7 +88,6 @@ required: ...@@ -70,7 +88,6 @@ required:
- compatible - compatible
- reg - reg
- interrupts - interrupts
- clocks
- dma-channels - dma-channels
allOf: allOf:
...@@ -80,7 +97,6 @@ allOf: ...@@ -80,7 +97,6 @@ allOf:
compatible: compatible:
contains: contains:
enum: enum:
- fsl,imx8qm-adma
- fsl,imx8qm-edma - fsl,imx8qm-edma
- fsl,imx93-edma3 - fsl,imx93-edma3
- fsl,imx93-edma4 - fsl,imx93-edma4
...@@ -108,6 +124,7 @@ allOf: ...@@ -108,6 +124,7 @@ allOf:
properties: properties:
clocks: clocks:
minItems: 2 minItems: 2
maxItems: 2
clock-names: clock-names:
items: items:
- const: dmamux0 - const: dmamux0
...@@ -136,6 +153,7 @@ allOf: ...@@ -136,6 +153,7 @@ allOf:
properties: properties:
clock: clock:
minItems: 2 minItems: 2
maxItems: 2
clock-names: clock-names:
items: items:
- const: dma - const: dma
...@@ -151,6 +169,58 @@ allOf: ...@@ -151,6 +169,58 @@ allOf:
dma-channels: dma-channels:
const: 32 const: 32
- if:
properties:
compatible:
contains:
const: fsl,imx8ulp-edma
then:
properties:
clocks:
minItems: 33
clock-names:
minItems: 33
items:
oneOf:
- const: dma
- pattern: "^ch(0[0-9]|[1-2][0-9]|3[01])$"
interrupt-names: false
interrupts:
minItems: 32
"#dma-cells":
const: 3
- if:
properties:
compatible:
contains:
enum:
- fsl,vf610-edma
- fsl,imx7ulp-edma
- fsl,imx93-edma3
- fsl,imx93-edma4
- fsl,imx95-edma5
- fsl,imx8ulp-edma
- fsl,ls1028a-edma
then:
required:
- clocks
- if:
properties:
compatible:
contains:
enum:
- fsl,imx8qm-adma
- fsl,imx8qm-edma
then:
required:
- power-domains
else:
properties:
power-domains: false
unevaluatedProperties: false unevaluatedProperties: false
examples: examples:
...@@ -206,44 +276,27 @@ examples: ...@@ -206,44 +276,27 @@ examples:
- | - |
#include <dt-bindings/interrupt-controller/arm-gic.h> #include <dt-bindings/interrupt-controller/arm-gic.h>
#include <dt-bindings/clock/imx93-clock.h> #include <dt-bindings/firmware/imx/rsrc.h>
dma-controller@44000000 { dma-controller@5a9f0000 {
compatible = "fsl,imx93-edma3"; compatible = "fsl,imx8qm-edma";
reg = <0x44000000 0x200000>; reg = <0x5a9f0000 0x90000>;
#dma-cells = <3>; #dma-cells = <3>;
dma-channels = <31>; dma-channels = <8>;
interrupts = <GIC_SPI 95 IRQ_TYPE_LEVEL_HIGH>, interrupts = <GIC_SPI 424 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 425 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 97 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 426 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 98 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 427 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 99 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 428 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 429 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 101 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 430 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, <GIC_SPI 431 IRQ_TYPE_LEVEL_HIGH>;
<GIC_SPI 103 IRQ_TYPE_LEVEL_HIGH>, power-domains = <&pd IMX_SC_R_DMA_3_CH0>,
<GIC_SPI 104 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH1>,
<GIC_SPI 105 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH2>,
<GIC_SPI 106 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH3>,
<GIC_SPI 107 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH4>,
<GIC_SPI 108 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH5>,
<GIC_SPI 109 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH6>,
<GIC_SPI 110 IRQ_TYPE_LEVEL_HIGH>, <&pd IMX_SC_R_DMA_3_CH7>;
<GIC_SPI 111 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 112 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 113 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 114 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 115 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 116 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 117 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 118 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 119 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 124 IRQ_TYPE_LEVEL_HIGH>,
<GIC_SPI 125 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clk IMX93_CLK_EDMA1_GATE>;
clock-names = "dma";
}; };
...@@ -94,6 +94,7 @@ properties: ...@@ -94,6 +94,7 @@ properties:
- SAI: 24 - SAI: 24
- Multi SAI: 25 - Multi SAI: 25
- HDMI Audio: 26 - HDMI Audio: 26
- I2C: 27
The third cell: transfer priority ID The third cell: transfer priority ID
enum: enum:
......
Qualcomm Technologies HIDMA Management interface
Qualcomm Technologies HIDMA is a high speed DMA device. It only supports
memcpy and memset capabilities. It has been designed for virtualized
environments.
Each HIDMA HW instance consists of multiple DMA channels. These channels
share the same bandwidth. The bandwidth utilization can be partitioned
among channels based on the priority and weight assignments.
There are only two priority levels and 15 weight assignments possible.
Other parameters here determine how much of the system bus this HIDMA
instance can use like maximum read/write request and number of bytes to
read/write in a single burst.
Main node required properties:
- compatible: "qcom,hidma-mgmt-1.0";
- reg: Address range for DMA device
- dma-channels: Number of channels supported by this DMA controller.
- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can
occupy the bus for in a single transaction. A memcpy request is
fragmented to multiples of this amount. This parameter is used while
writing into destination memory. Setting this value incorrectly can
starve other peripherals in the system.
- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can
occupy the bus for in a single transaction. A memcpy request is
fragmented to multiples of this amount. This parameter is used while
reading the source memory. Setting this value incorrectly can starve
other peripherals in the system.
- max-write-transactions: This value is how many times a write burst is
applied back to back while writing to the destination before yielding
the bus.
- max-read-transactions: This value is how many times a read burst is
applied back to back while reading the source before yielding the bus.
- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC.
Once a reset is applied to the HW, HW starts a timer for reset operation
to confirm. If reset is not completed within this time, HW reports reset
failure.
Sub-nodes:
HIDMA has one or more DMA channels that are used to move data from one
memory location to another.
When the OS is not in control of the management interface (i.e. it's a guest),
the channel nodes appear on their own, not under a management node.
Required properties:
- compatible: must contain "qcom,hidma-1.0" for initial HW or
"qcom,hidma-1.1"/"qcom,hidma-1.2" for MSI capable HW.
- reg: Addresses for the transfer and event channel
- interrupts: Should contain the event interrupt
- desc-count: Number of asynchronous requests this channel can handle
- iommus: an iommu node (required)
Optional properties for MSI:
- msi-parent : See the generic MSI binding described in
devicetree/bindings/interrupt-controller/msi.txt for a description of the
msi-parent property.
Example:
Hypervisor OS configuration:
hidma-mgmt@f9984000 {
compatible = "qcom,hidma-mgmt-1.0";
reg = <0xf9984000 0x15000>;
dma-channels = <6>;
max-write-burst-bytes = <1024>;
max-read-burst-bytes = <1024>;
max-write-transactions = <31>;
max-read-transactions = <31>;
channel-reset-timeout-cycles = <0x500>;
hidma_24: dma-controller@5c050000 {
compatible = "qcom,hidma-1.0";
reg = <0 0x5c050000 0x0 0x1000>,
<0 0x5c0b0000 0x0 0x1000>;
interrupts = <0 389 0>;
desc-count = <10>;
iommus = <&system_mmu>;
};
};
Guest OS configuration:
hidma_24: dma-controller@5c050000 {
compatible = "qcom,hidma-1.0";
reg = <0 0x5c050000 0x0 0x1000>,
<0 0x5c0b0000 0x0 0x1000>;
interrupts = <0 389 0>;
desc-count = <10>;
iommus = <&system_mmu>;
};
...@@ -93,7 +93,7 @@ properties: ...@@ -93,7 +93,7 @@ properties:
data-width: data-width:
$ref: /schemas/types.yaml#/definitions/uint32-array $ref: /schemas/types.yaml#/definitions/uint32-array
description: Data bus width per each DMA master in bytes. description: Data bus width per each DMA master in bytes.
items: minItems: 1
maxItems: 4 maxItems: 4
items: items:
enum: [4, 8, 16, 32] enum: [4, 8, 16, 32]
...@@ -106,7 +106,7 @@ properties: ...@@ -106,7 +106,7 @@ properties:
deprecated. It' usage is discouraged in favor of data-width one. Moreover deprecated. It' usage is discouraged in favor of data-width one. Moreover
the property incorrectly permits to define data-bus width of 8 and 16 the property incorrectly permits to define data-bus width of 8 and 16
bits, which is impossible in accordance with DW DMAC IP-core data book. bits, which is impossible in accordance with DW DMAC IP-core data book.
items: minItems: 1
maxItems: 4 maxItems: 4
items: items:
enum: enum:
...@@ -123,7 +123,7 @@ properties: ...@@ -123,7 +123,7 @@ properties:
description: | description: |
LLP-based multi-block transfer supported by hardware per LLP-based multi-block transfer supported by hardware per
each DMA channel. each DMA channel.
items: minItems: 1
maxItems: 8 maxItems: 8
items: items:
enum: [0, 1] enum: [0, 1]
...@@ -138,7 +138,7 @@ properties: ...@@ -138,7 +138,7 @@ properties:
will be from 1 to max-burst-len words. It's an array property with one will be from 1 to max-burst-len words. It's an array property with one
cell per channel in the units determined by the value set in the cell per channel in the units determined by the value set in the
CTLx.SRC_TR_WIDTH/CTLx.DST_TR_WIDTH fields (data width). CTLx.SRC_TR_WIDTH/CTLx.DST_TR_WIDTH fields (data width).
items: minItems: 1
maxItems: 8 maxItems: 8
items: items:
enum: [4, 8, 16, 32, 64, 128, 256] enum: [4, 8, 16, 32, 64, 128, 256]
......
...@@ -21,6 +21,7 @@ properties: ...@@ -21,6 +21,7 @@ properties:
- snps,axi-dma-1.01a - snps,axi-dma-1.01a
- intel,kmb-axi-dma - intel,kmb-axi-dma
- starfive,jh7110-axi-dma - starfive,jh7110-axi-dma
- starfive,jh8100-axi-dma
reg: reg:
minItems: 1 minItems: 1
......
...@@ -6181,7 +6181,6 @@ F: drivers/mtd/nand/raw/denali* ...@@ -6181,7 +6181,6 @@ F: drivers/mtd/nand/raw/denali*
DESIGNWARE EDMA CORE IP DRIVER DESIGNWARE EDMA CORE IP DRIVER
M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org> M: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
R: Gustavo Pimentel <gustavo.pimentel@synopsys.com>
R: Serge Semin <fancer.lancer@gmail.com> R: Serge Semin <fancer.lancer@gmail.com>
L: dmaengine@vger.kernel.org L: dmaengine@vger.kernel.org
S: Maintained S: Maintained
...@@ -11090,7 +11089,7 @@ F: drivers/idle/intel_idle.c ...@@ -11090,7 +11089,7 @@ F: drivers/idle/intel_idle.c
INTEL IDXD DRIVER INTEL IDXD DRIVER
M: Fenghua Yu <fenghua.yu@intel.com> M: Fenghua Yu <fenghua.yu@intel.com>
M: Dave Jiang <dave.jiang@intel.com> R: Dave Jiang <dave.jiang@intel.com>
L: dmaengine@vger.kernel.org L: dmaengine@vger.kernel.org
S: Supported S: Supported
F: drivers/dma/idxd/* F: drivers/dma/idxd/*
......
...@@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/ ...@@ -31,10 +31,12 @@ obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
obj-$(CONFIG_DW_DMAC_CORE) += dw/ obj-$(CONFIG_DW_DMAC_CORE) += dw/
obj-$(CONFIG_DW_EDMA) += dw-edma/ obj-$(CONFIG_DW_EDMA) += dw-edma/
obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
fsl-edma-trace-$(CONFIG_TRACING) := fsl-edma-trace.o
CFLAGS_fsl-edma-trace.o := -I$(src)
obj-$(CONFIG_FSL_DMA) += fsldma.o obj-$(CONFIG_FSL_DMA) += fsldma.o
fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o fsl-edma-objs := fsl-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_FSL_EDMA) += fsl-edma.o obj-$(CONFIG_FSL_EDMA) += fsl-edma.o
mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o mcf-edma-objs := mcf-edma-main.o fsl-edma-common.o ${fsl-edma-trace-y}
obj-$(CONFIG_MCF_EDMA) += mcf-edma.o obj-$(CONFIG_MCF_EDMA) += mcf-edma.o
obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o obj-$(CONFIG_FSL_QDMA) += fsl-qdma.o
obj-$(CONFIG_FSL_RAID) += fsl_raid.o obj-$(CONFIG_FSL_RAID) += fsl_raid.o
......
...@@ -2855,7 +2855,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id) ...@@ -2855,7 +2855,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
} }
/* Initialize physical channels */ /* Initialize physical channels */
pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)), pl08x->phy_chans = kcalloc(vd->channels, sizeof(*pl08x->phy_chans),
GFP_KERNEL); GFP_KERNEL);
if (!pl08x->phy_chans) { if (!pl08x->phy_chans) {
ret = -ENOMEM; ret = -ENOMEM;
......
...@@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version) ...@@ -1002,6 +1002,16 @@ static int axi_dmac_detect_caps(struct axi_dmac *dmac, unsigned int version)
return 0; return 0;
} }
static void axi_dmac_tasklet_kill(void *task)
{
tasklet_kill(task);
}
static void axi_dmac_free_dma_controller(void *of_node)
{
of_dma_controller_free(of_node);
}
static int axi_dmac_probe(struct platform_device *pdev) static int axi_dmac_probe(struct platform_device *pdev)
{ {
struct dma_device *dma_dev; struct dma_device *dma_dev;
...@@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev) ...@@ -1025,14 +1035,10 @@ static int axi_dmac_probe(struct platform_device *pdev)
if (IS_ERR(dmac->base)) if (IS_ERR(dmac->base))
return PTR_ERR(dmac->base); return PTR_ERR(dmac->base);
dmac->clk = devm_clk_get(&pdev->dev, NULL); dmac->clk = devm_clk_get_enabled(&pdev->dev, NULL);
if (IS_ERR(dmac->clk)) if (IS_ERR(dmac->clk))
return PTR_ERR(dmac->clk); return PTR_ERR(dmac->clk);
ret = clk_prepare_enable(dmac->clk);
if (ret < 0)
return ret;
version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION); version = axi_dmac_read(dmac, ADI_AXI_REG_VERSION);
if (version >= ADI_AXI_PCORE_VER(4, 3, 'a')) if (version >= ADI_AXI_PCORE_VER(4, 3, 'a'))
...@@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ...@@ -1041,7 +1047,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_parse_dt(&pdev->dev, dmac); ret = axi_dmac_parse_dt(&pdev->dev, dmac);
if (ret < 0) if (ret < 0)
goto err_clk_disable; return ret;
INIT_LIST_HEAD(&dmac->chan.active_descs); INIT_LIST_HEAD(&dmac->chan.active_descs);
...@@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev) ...@@ -1072,7 +1078,7 @@ static int axi_dmac_probe(struct platform_device *pdev)
ret = axi_dmac_detect_caps(dmac, version); ret = axi_dmac_detect_caps(dmac, version);
if (ret) if (ret)
goto err_clk_disable; return ret;
dma_dev->copy_align = (dmac->chan.address_align_mask + 1); dma_dev->copy_align = (dmac->chan.address_align_mask + 1);
...@@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev) ...@@ -1088,57 +1094,42 @@ static int axi_dmac_probe(struct platform_device *pdev)
!AXI_DMAC_DST_COHERENT_GET(ret)) { !AXI_DMAC_DST_COHERENT_GET(ret)) {
dev_err(dmac->dma_dev.dev, dev_err(dmac->dma_dev.dev,
"Coherent DMA not supported in hardware"); "Coherent DMA not supported in hardware");
ret = -EINVAL; return -EINVAL;
goto err_clk_disable;
} }
} }
ret = dma_async_device_register(dma_dev); ret = dmaenginem_async_device_register(dma_dev);
if (ret) if (ret)
goto err_clk_disable; return ret;
/*
* Put the action in here so it get's done before unregistering the DMA
* device.
*/
ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_tasklet_kill,
&dmac->chan.vchan.task);
if (ret)
return ret;
ret = of_dma_controller_register(pdev->dev.of_node, ret = of_dma_controller_register(pdev->dev.of_node,
of_dma_xlate_by_chan_id, dma_dev); of_dma_xlate_by_chan_id, dma_dev);
if (ret) if (ret)
goto err_unregister_device; return ret;
ret = request_irq(dmac->irq, axi_dmac_interrupt_handler, IRQF_SHARED, ret = devm_add_action_or_reset(&pdev->dev, axi_dmac_free_dma_controller,
dev_name(&pdev->dev), dmac); pdev->dev.of_node);
if (ret) if (ret)
goto err_unregister_of; return ret;
platform_set_drvdata(pdev, dmac); ret = devm_request_irq(&pdev->dev, dmac->irq, axi_dmac_interrupt_handler,
IRQF_SHARED, dev_name(&pdev->dev), dmac);
if (ret)
return ret;
regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base, regmap = devm_regmap_init_mmio(&pdev->dev, dmac->base,
&axi_dmac_regmap_config); &axi_dmac_regmap_config);
if (IS_ERR(regmap)) {
ret = PTR_ERR(regmap);
goto err_free_irq;
}
return 0;
err_free_irq:
free_irq(dmac->irq, dmac);
err_unregister_of:
of_dma_controller_free(pdev->dev.of_node);
err_unregister_device:
dma_async_device_unregister(&dmac->dma_dev);
err_clk_disable:
clk_disable_unprepare(dmac->clk);
return ret;
}
static void axi_dmac_remove(struct platform_device *pdev)
{
struct axi_dmac *dmac = platform_get_drvdata(pdev);
of_dma_controller_free(pdev->dev.of_node); return PTR_ERR_OR_ZERO(regmap);
free_irq(dmac->irq, dmac);
tasklet_kill(&dmac->chan.vchan.task);
dma_async_device_unregister(&dmac->dma_dev);
clk_disable_unprepare(dmac->clk);
} }
static const struct of_device_id axi_dmac_of_match_table[] = { static const struct of_device_id axi_dmac_of_match_table[] = {
...@@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = { ...@@ -1153,7 +1144,6 @@ static struct platform_driver axi_dmac_driver = {
.of_match_table = axi_dmac_of_match_table, .of_match_table = axi_dmac_of_match_table,
}, },
.probe = axi_dmac_probe, .probe = axi_dmac_probe,
.remove_new = axi_dmac_remove,
}; };
module_platform_driver(axi_dmac_driver); module_platform_driver(axi_dmac_driver);
......
...@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num) ...@@ -302,6 +302,7 @@ static struct axi_dma_desc *axi_desc_alloc(u32 num)
kfree(desc); kfree(desc);
return NULL; return NULL;
} }
desc->nr_hw_descs = num;
return desc; return desc;
} }
...@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan, ...@@ -328,7 +329,7 @@ static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
static void axi_desc_put(struct axi_dma_desc *desc) static void axi_desc_put(struct axi_dma_desc *desc)
{ {
struct axi_dma_chan *chan = desc->chan; struct axi_dma_chan *chan = desc->chan;
int count = atomic_read(&chan->descs_allocated); int count = desc->nr_hw_descs;
struct axi_dma_hw_desc *hw_desc; struct axi_dma_hw_desc *hw_desc;
int descs_put; int descs_put;
...@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan) ...@@ -1139,9 +1140,6 @@ static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
/* Remove the completed descriptor from issued list before completing */ /* Remove the completed descriptor from issued list before completing */
list_del(&vd->node); list_del(&vd->node);
vchan_cookie_complete(vd); vchan_cookie_complete(vd);
/* Submit queued descriptors after processing the completed ones */
axi_chan_start_first_queued(chan);
} }
out: out:
...@@ -1445,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip) ...@@ -1445,6 +1443,24 @@ static int parse_device_properties(struct axi_dma_chip *chip)
return 0; return 0;
} }
static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
{
int irq_count = platform_irq_count(pdev);
int ret;
for (int i = 0; i < irq_count; i++) {
chip->irq[i] = platform_get_irq(pdev, i);
if (chip->irq[i] < 0)
return chip->irq[i];
ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret < 0)
return ret;
}
return 0;
}
static int dw_probe(struct platform_device *pdev) static int dw_probe(struct platform_device *pdev)
{ {
struct axi_dma_chip *chip; struct axi_dma_chip *chip;
...@@ -1471,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev) ...@@ -1471,10 +1487,6 @@ static int dw_probe(struct platform_device *pdev)
chip->dev = &pdev->dev; chip->dev = &pdev->dev;
chip->dw->hdata = hdata; chip->dw->hdata = hdata;
chip->irq = platform_get_irq(pdev, 0);
if (chip->irq < 0)
return chip->irq;
chip->regs = devm_platform_ioremap_resource(pdev, 0); chip->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(chip->regs)) if (IS_ERR(chip->regs))
return PTR_ERR(chip->regs); return PTR_ERR(chip->regs);
...@@ -1515,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev) ...@@ -1515,8 +1527,7 @@ static int dw_probe(struct platform_device *pdev)
if (!dw->chan) if (!dw->chan)
return -ENOMEM; return -ENOMEM;
ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt, ret = axi_req_irqs(pdev, chip);
IRQF_SHARED, KBUILD_MODNAME, chip);
if (ret) if (ret)
return ret; return ret;
...@@ -1629,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev) ...@@ -1629,7 +1640,9 @@ static void dw_remove(struct platform_device *pdev)
pm_runtime_disable(chip->dev); pm_runtime_disable(chip->dev);
axi_dma_suspend(chip); axi_dma_suspend(chip);
devm_free_irq(chip->dev, chip->irq, chip); for (i = 0; i < DMAC_MAX_CHANNELS; i++)
if (chip->irq[i] > 0)
devm_free_irq(chip->dev, chip->irq[i], chip);
of_dma_controller_free(chip->dev->of_node); of_dma_controller_free(chip->dev->of_node);
...@@ -1653,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = { ...@@ -1653,6 +1666,9 @@ static const struct of_device_id dw_dma_of_id_table[] = {
}, { }, {
.compatible = "starfive,jh7110-axi-dma", .compatible = "starfive,jh7110-axi-dma",
.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2), .data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
}, {
.compatible = "starfive,jh8100-axi-dma",
.data = (void *)AXI_DMA_FLAG_HAS_RESETS,
}, },
{} {}
}; };
......
...@@ -65,7 +65,7 @@ struct dw_axi_dma { ...@@ -65,7 +65,7 @@ struct dw_axi_dma {
struct axi_dma_chip { struct axi_dma_chip {
struct device *dev; struct device *dev;
int irq; int irq[DMAC_MAX_CHANNELS];
void __iomem *regs; void __iomem *regs;
void __iomem *apb_regs; void __iomem *apb_regs;
struct clk *core_clk; struct clk *core_clk;
...@@ -104,6 +104,7 @@ struct axi_dma_desc { ...@@ -104,6 +104,7 @@ struct axi_dma_desc {
u32 completed_blocks; u32 completed_blocks;
u32 length; u32 length;
u32 period_len; u32 period_len;
u32 nr_hw_descs;
}; };
struct axi_dma_chan_config { struct axi_dma_chan_config {
......
...@@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) ...@@ -362,7 +362,7 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
for (i = 0; i < priv->num_pairs; i++) { for (i = 0; i < priv->num_pairs; i++) {
err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, err = dpdmai_get_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
i, &priv->rx_queue_attr[i]); i, 0, &priv->rx_queue_attr[i]);
if (err) { if (err) {
dev_err(dev, "dpdmai_get_rx_queue() failed\n"); dev_err(dev, "dpdmai_get_rx_queue() failed\n");
goto exit; goto exit;
...@@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev) ...@@ -370,13 +370,13 @@ static int __cold dpaa2_qdma_setup(struct fsl_mc_device *ls_dev)
ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid; ppriv->rsp_fqid = priv->rx_queue_attr[i].fqid;
err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle, err = dpdmai_get_tx_queue(priv->mc_io, 0, ls_dev->mc_handle,
i, &priv->tx_fqid[i]); i, 0, &priv->tx_queue_attr[i]);
if (err) { if (err) {
dev_err(dev, "dpdmai_get_tx_queue() failed\n"); dev_err(dev, "dpdmai_get_tx_queue() failed\n");
goto exit; goto exit;
} }
ppriv->req_fqid = priv->tx_fqid[i]; ppriv->req_fqid = priv->tx_queue_attr[i].fqid;
ppriv->prio = i; ppriv->prio = DPAA2_QDMA_DEFAULT_PRIORITY;
ppriv->priv = priv; ppriv->priv = priv;
ppriv++; ppriv++;
} }
...@@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv) ...@@ -542,7 +542,7 @@ static int __cold dpaa2_dpdmai_bind(struct dpaa2_qdma_priv *priv)
rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id; rx_queue_cfg.dest_cfg.dest_id = ppriv->nctx.dpio_id;
rx_queue_cfg.dest_cfg.priority = ppriv->prio; rx_queue_cfg.dest_cfg.priority = ppriv->prio;
err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle, err = dpdmai_set_rx_queue(priv->mc_io, 0, ls_dev->mc_handle,
rx_queue_cfg.dest_cfg.priority, rx_queue_cfg.dest_cfg.priority, 0,
&rx_queue_cfg); &rx_queue_cfg);
if (err) { if (err) {
dev_err(dev, "dpdmai_set_rx_queue() failed\n"); dev_err(dev, "dpdmai_set_rx_queue() failed\n");
...@@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma) ...@@ -642,7 +642,7 @@ static int dpaa2_dpdmai_init_channels(struct dpaa2_qdma_engine *dpaa2_qdma)
for (i = 0; i < dpaa2_qdma->n_chans; i++) { for (i = 0; i < dpaa2_qdma->n_chans; i++) {
dpaa2_chan = &dpaa2_qdma->chans[i]; dpaa2_chan = &dpaa2_qdma->chans[i];
dpaa2_chan->qdma = dpaa2_qdma; dpaa2_chan->qdma = dpaa2_qdma;
dpaa2_chan->fqid = priv->tx_fqid[i % num]; dpaa2_chan->fqid = priv->tx_queue_attr[i % num].fqid;
dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc; dpaa2_chan->vchan.desc_free = dpaa2_qdma_free_desc;
vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev); vchan_init(&dpaa2_chan->vchan, &dpaa2_qdma->dma_dev);
spin_lock_init(&dpaa2_chan->queue_lock); spin_lock_init(&dpaa2_chan->queue_lock);
...@@ -802,7 +802,7 @@ static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev) ...@@ -802,7 +802,7 @@ static void dpaa2_qdma_shutdown(struct fsl_mc_device *ls_dev)
dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle); dpdmai_disable(priv->mc_io, 0, ls_dev->mc_handle);
dpaa2_dpdmai_dpio_unbind(priv); dpaa2_dpdmai_dpio_unbind(priv);
dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle); dpdmai_close(priv->mc_io, 0, ls_dev->mc_handle);
dpdmai_destroy(priv->mc_io, 0, ls_dev->mc_handle); dpdmai_destroy(priv->mc_io, 0, priv->dpqdma_id, ls_dev->mc_handle);
} }
static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = { static const struct fsl_mc_device_id dpaa2_qdma_id_table[] = {
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define DPAA2_QDMA_STORE_SIZE 16 #define DPAA2_QDMA_STORE_SIZE 16
#define NUM_CH 8 #define NUM_CH 8
#define DPAA2_QDMA_DEFAULT_PRIORITY 0
struct dpaa2_qdma_sd_d { struct dpaa2_qdma_sd_d {
u32 rsv:32; u32 rsv:32;
...@@ -122,8 +123,8 @@ struct dpaa2_qdma_priv { ...@@ -122,8 +123,8 @@ struct dpaa2_qdma_priv {
struct dpaa2_qdma_engine *dpaa2_qdma; struct dpaa2_qdma_engine *dpaa2_qdma;
struct dpaa2_qdma_priv_per_prio *ppriv; struct dpaa2_qdma_priv_per_prio *ppriv;
struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_PRIO_NUM]; struct dpdmai_rx_queue_attr rx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
u32 tx_fqid[DPDMAI_PRIO_NUM]; struct dpdmai_tx_queue_attr tx_queue_attr[DPDMAI_MAX_QUEUE_NUM];
}; };
struct dpaa2_qdma_priv_per_prio { struct dpaa2_qdma_priv_per_prio {
......
// SPDX-License-Identifier: GPL-2.0 // SPDX-License-Identifier: GPL-2.0
// Copyright 2019 NXP // Copyright 2019 NXP
#include <linux/bitfield.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/fsl/mc.h> #include <linux/fsl/mc.h>
#include "dpdmai.h" #include "dpdmai.h"
#define DEST_TYPE_MASK 0xF
struct dpdmai_rsp_get_attributes { struct dpdmai_rsp_get_attributes {
__le32 id; __le32 id;
u8 num_of_priorities; u8 num_of_priorities;
u8 pad0[3]; u8 num_of_queues;
u8 pad0[2];
__le16 major; __le16 major;
__le16 minor; __le16 minor;
}; };
struct dpdmai_cmd_queue { struct dpdmai_cmd_queue {
__le32 dest_id; __le32 dest_id;
u8 priority; u8 dest_priority;
union {
u8 queue; u8 queue;
u8 pri;
};
u8 dest_type; u8 dest_type;
u8 pad; u8 queue_idx;
__le64 user_ctx; __le64 user_ctx;
union { union {
__le32 options; __le32 options;
__le32 fqid; __le32 fqid;
}; };
}; } __packed;
struct dpdmai_rsp_get_tx_queue { struct dpdmai_rsp_get_tx_queue {
__le64 pad; __le64 pad;
__le32 fqid; __le32 fqid;
}; };
#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ struct dpdmai_cmd_open {
((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) __le32 dpdmai_id;
} __packed;
/* cmd, param, offset, width, type, arg_name */ struct dpdmai_cmd_destroy {
#define DPDMAI_CMD_CREATE(cmd, cfg) \ __le32 dpdmai_id;
do { \ } __packed;
MC_CMD_OP(cmd, 0, 8, 8, u8, (cfg)->priorities[0]);\
MC_CMD_OP(cmd, 0, 16, 8, u8, (cfg)->priorities[1]);\
} while (0)
static inline u64 mc_enc(int lsoffset, int width, u64 val) static inline u64 mc_enc(int lsoffset, int width, u64 val)
{ {
...@@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val) ...@@ -68,16 +73,16 @@ static inline u64 mc_enc(int lsoffset, int width, u64 val)
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token) int dpdmai_id, u16 *token)
{ {
struct dpdmai_cmd_open *cmd_params;
struct fsl_mc_command cmd = { 0 }; struct fsl_mc_command cmd = { 0 };
__le64 *cmd_dpdmai_id;
int err; int err;
/* prepare command */ /* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN, cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_OPEN,
cmd_flags, 0); cmd_flags, 0);
cmd_dpdmai_id = cmd.params; cmd_params = (struct dpdmai_cmd_open *)&cmd.params;
*cmd_dpdmai_id = cpu_to_le32(dpdmai_id); cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
/* send command to mc*/ /* send command to mc*/
err = mc_send_command(mc_io, &cmd); err = mc_send_command(mc_io, &cmd);
...@@ -115,66 +120,27 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) ...@@ -115,66 +120,27 @@ int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token)
} }
EXPORT_SYMBOL_GPL(dpdmai_close); EXPORT_SYMBOL_GPL(dpdmai_close);
/**
* dpdmai_create() - Create the DPDMAI object
* @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @cfg: Configuration structure
* @token: Returned token; use in subsequent API calls
*
* Create the DPDMAI object, allocate required resources and
* perform required initialization.
*
* The object can be created either by declaring it in the
* DPL file, or by calling this function.
*
* This function returns a unique authentication token,
* associated with the specific object ID and the specific MC
* portal; this token must be used in all subsequent calls to
* this specific object. For objects that are created using the
* DPL file, call dpdmai_open() function to get an authentication
* token first.
*
* Return: '0' on Success; Error code otherwise.
*/
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token)
{
struct fsl_mc_command cmd = { 0 };
int err;
/* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_CREATE,
cmd_flags, 0);
DPDMAI_CMD_CREATE(cmd, cfg);
/* send command to mc*/
err = mc_send_command(mc_io, &cmd);
if (err)
return err;
/* retrieve response parameters */
*token = mc_cmd_hdr_read_token(&cmd);
return 0;
}
/** /**
* dpdmai_destroy() - Destroy the DPDMAI object and release all its resources. * dpdmai_destroy() - Destroy the DPDMAI object and release all its resources.
* @mc_io: Pointer to MC portal's I/O object * @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @dpdmai_id: The object id; it must be a valid id within the container that created this object;
* @token: Token of DPDMAI object * @token: Token of DPDMAI object
* *
* Return: '0' on Success; error code otherwise. * Return: '0' on Success; error code otherwise.
*/ */
int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token) int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token)
{ {
struct dpdmai_cmd_destroy *cmd_params;
struct fsl_mc_command cmd = { 0 }; struct fsl_mc_command cmd = { 0 };
/* prepare command */ /* prepare command */
cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY, cmd.header = mc_encode_cmd_header(DPDMAI_CMDID_DESTROY,
cmd_flags, token); cmd_flags, token);
cmd_params = (struct dpdmai_cmd_destroy *)&cmd.params;
cmd_params->dpdmai_id = cpu_to_le32(dpdmai_id);
/* send command to mc*/ /* send command to mc*/
return mc_send_command(mc_io, &cmd); return mc_send_command(mc_io, &cmd);
} }
...@@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, ...@@ -274,6 +240,7 @@ int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
attr->version.major = le16_to_cpu(rsp_params->major); attr->version.major = le16_to_cpu(rsp_params->major);
attr->version.minor = le16_to_cpu(rsp_params->minor); attr->version.minor = le16_to_cpu(rsp_params->minor);
attr->num_of_priorities = rsp_params->num_of_priorities; attr->num_of_priorities = rsp_params->num_of_priorities;
attr->num_of_queues = rsp_params->num_of_queues;
return 0; return 0;
} }
...@@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes); ...@@ -284,13 +251,14 @@ EXPORT_SYMBOL_GPL(dpdmai_get_attributes);
* @mc_io: Pointer to MC portal's I/O object * @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object * @token: Token of DPDMAI object
* @queue_idx: DMA queue index
* @priority: Select the queue relative to number of * @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation * priorities configured at DPDMAI creation
* @cfg: Rx queue configuration * @cfg: Rx queue configuration
* *
* Return: '0' on Success; Error code otherwise. * Return: '0' on Success; Error code otherwise.
*/ */
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg) u8 priority, const struct dpdmai_rx_queue_cfg *cfg)
{ {
struct dpdmai_cmd_queue *cmd_params; struct dpdmai_cmd_queue *cmd_params;
...@@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ...@@ -302,11 +270,12 @@ int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id); cmd_params->dest_id = cpu_to_le32(cfg->dest_cfg.dest_id);
cmd_params->priority = cfg->dest_cfg.priority; cmd_params->dest_priority = cfg->dest_cfg.priority;
cmd_params->queue = priority; cmd_params->pri = priority;
cmd_params->dest_type = cfg->dest_cfg.dest_type; cmd_params->dest_type = cfg->dest_cfg.dest_type;
cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx); cmd_params->user_ctx = cpu_to_le64(cfg->user_ctx);
cmd_params->options = cpu_to_le32(cfg->options); cmd_params->options = cpu_to_le32(cfg->options);
cmd_params->queue_idx = queue_idx;
/* send command to mc*/ /* send command to mc*/
return mc_send_command(mc_io, &cmd); return mc_send_command(mc_io, &cmd);
...@@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue); ...@@ -318,13 +287,14 @@ EXPORT_SYMBOL_GPL(dpdmai_set_rx_queue);
* @mc_io: Pointer to MC portal's I/O object * @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object * @token: Token of DPDMAI object
* @queue_idx: DMA Queue index
* @priority: Select the queue relative to number of * @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation * priorities configured at DPDMAI creation
* @attr: Returned Rx queue attributes * @attr: Returned Rx queue attributes
* *
* Return: '0' on Success; Error code otherwise. * Return: '0' on Success; Error code otherwise.
*/ */
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, u8 queue_idx,
u8 priority, struct dpdmai_rx_queue_attr *attr) u8 priority, struct dpdmai_rx_queue_attr *attr)
{ {
struct dpdmai_cmd_queue *cmd_params; struct dpdmai_cmd_queue *cmd_params;
...@@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ...@@ -337,6 +307,7 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority; cmd_params->queue = priority;
cmd_params->queue_idx = queue_idx;
/* send command to mc*/ /* send command to mc*/
err = mc_send_command(mc_io, &cmd); err = mc_send_command(mc_io, &cmd);
...@@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, ...@@ -345,8 +316,8 @@ int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
/* retrieve response parameters */ /* retrieve response parameters */
attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id); attr->dest_cfg.dest_id = le32_to_cpu(cmd_params->dest_id);
attr->dest_cfg.priority = cmd_params->priority; attr->dest_cfg.priority = cmd_params->dest_priority;
attr->dest_cfg.dest_type = cmd_params->dest_type; attr->dest_cfg.dest_type = FIELD_GET(DEST_TYPE_MASK, cmd_params->dest_type);
attr->user_ctx = le64_to_cpu(cmd_params->user_ctx); attr->user_ctx = le64_to_cpu(cmd_params->user_ctx);
attr->fqid = le32_to_cpu(cmd_params->fqid); attr->fqid = le32_to_cpu(cmd_params->fqid);
...@@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue); ...@@ -359,14 +330,15 @@ EXPORT_SYMBOL_GPL(dpdmai_get_rx_queue);
* @mc_io: Pointer to MC portal's I/O object * @mc_io: Pointer to MC portal's I/O object
* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
* @token: Token of DPDMAI object * @token: Token of DPDMAI object
* @queue_idx: DMA queue index
* @priority: Select the queue relative to number of * @priority: Select the queue relative to number of
* priorities configured at DPDMAI creation * priorities configured at DPDMAI creation
* @fqid: Returned Tx queue * @attr: Returned DMA Tx queue attributes
* *
* Return: '0' on Success; Error code otherwise. * Return: '0' on Success; Error code otherwise.
*/ */
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, u8 priority, u32 *fqid) u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr)
{ {
struct dpdmai_rsp_get_tx_queue *rsp_params; struct dpdmai_rsp_get_tx_queue *rsp_params;
struct dpdmai_cmd_queue *cmd_params; struct dpdmai_cmd_queue *cmd_params;
...@@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, ...@@ -379,6 +351,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
cmd_params = (struct dpdmai_cmd_queue *)cmd.params; cmd_params = (struct dpdmai_cmd_queue *)cmd.params;
cmd_params->queue = priority; cmd_params->queue = priority;
cmd_params->queue_idx = queue_idx;
/* send command to mc*/ /* send command to mc*/
err = mc_send_command(mc_io, &cmd); err = mc_send_command(mc_io, &cmd);
...@@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, ...@@ -388,7 +361,7 @@ int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
/* retrieve response parameters */ /* retrieve response parameters */
rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params; rsp_params = (struct dpdmai_rsp_get_tx_queue *)cmd.params;
*fqid = le32_to_cpu(rsp_params->fqid); attr->fqid = le32_to_cpu(rsp_params->fqid);
return 0; return 0;
} }
......
...@@ -5,14 +5,19 @@ ...@@ -5,14 +5,19 @@
#define __FSL_DPDMAI_H #define __FSL_DPDMAI_H
/* DPDMAI Version */ /* DPDMAI Version */
#define DPDMAI_VER_MAJOR 2 #define DPDMAI_VER_MAJOR 3
#define DPDMAI_VER_MINOR 2 #define DPDMAI_VER_MINOR 3
#define DPDMAI_CMD_BASE_VERSION 0 #define DPDMAI_CMD_BASE_VERSION 1
#define DPDMAI_CMD_ID_OFFSET 4 #define DPDMAI_CMD_ID_OFFSET 4
#define DPDMAI_CMDID_FORMAT(x) (((x) << DPDMAI_CMD_ID_OFFSET) | \ /*
DPDMAI_CMD_BASE_VERSION) * Maximum number of Tx/Rx queues per DPDMAI object
*/
#define DPDMAI_MAX_QUEUE_NUM 8
#define DPDMAI_CMDID_FORMAT_V(x, v) (((x) << DPDMAI_CMD_ID_OFFSET) | (v))
#define DPDMAI_CMDID_FORMAT(x) DPDMAI_CMDID_FORMAT_V(x, DPDMAI_CMD_BASE_VERSION)
/* Command IDs */ /* Command IDs */
#define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800) #define DPDMAI_CMDID_CLOSE DPDMAI_CMDID_FORMAT(0x800)
...@@ -26,18 +31,9 @@ ...@@ -26,18 +31,9 @@
#define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005) #define DPDMAI_CMDID_RESET DPDMAI_CMDID_FORMAT(0x005)
#define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006) #define DPDMAI_CMDID_IS_ENABLED DPDMAI_CMDID_FORMAT(0x006)
#define DPDMAI_CMDID_SET_IRQ DPDMAI_CMDID_FORMAT(0x010) #define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A0, 2)
#define DPDMAI_CMDID_GET_IRQ DPDMAI_CMDID_FORMAT(0x011) #define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A1, 2)
#define DPDMAI_CMDID_SET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x012) #define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT_V(0x1A2, 2)
#define DPDMAI_CMDID_GET_IRQ_ENABLE DPDMAI_CMDID_FORMAT(0x013)
#define DPDMAI_CMDID_SET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x014)
#define DPDMAI_CMDID_GET_IRQ_MASK DPDMAI_CMDID_FORMAT(0x015)
#define DPDMAI_CMDID_GET_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x016)
#define DPDMAI_CMDID_CLEAR_IRQ_STATUS DPDMAI_CMDID_FORMAT(0x017)
#define DPDMAI_CMDID_SET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A0)
#define DPDMAI_CMDID_GET_RX_QUEUE DPDMAI_CMDID_FORMAT(0x1A1)
#define DPDMAI_CMDID_GET_TX_QUEUE DPDMAI_CMDID_FORMAT(0x1A2)
#define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */ #define MC_CMD_HDR_TOKEN_O 32 /* Token field offset */
#define MC_CMD_HDR_TOKEN_S 16 /* Token field size */ #define MC_CMD_HDR_TOKEN_S 16 /* Token field size */
...@@ -49,30 +45,32 @@ ...@@ -49,30 +45,32 @@
* Contains initialization APIs and runtime control APIs for DPDMAI * Contains initialization APIs and runtime control APIs for DPDMAI
*/ */
/** /*
* Maximum number of Tx/Rx priorities per DPDMAI object * Maximum number of Tx/Rx priorities per DPDMAI object
*/ */
#define DPDMAI_PRIO_NUM 2 #define DPDMAI_PRIO_NUM 2
/* DPDMAI queue modification options */ /* DPDMAI queue modification options */
/** /*
* Select to modify the user's context associated with the queue * Select to modify the user's context associated with the queue
*/ */
#define DPDMAI_QUEUE_OPT_USER_CTX 0x1 #define DPDMAI_QUEUE_OPT_USER_CTX 0x1
/** /*
* Select to modify the queue's destination * Select to modify the queue's destination
*/ */
#define DPDMAI_QUEUE_OPT_DEST 0x2 #define DPDMAI_QUEUE_OPT_DEST 0x2
/** /**
* struct dpdmai_cfg - Structure representing DPDMAI configuration * struct dpdmai_cfg - Structure representing DPDMAI configuration
* @num_queues: Number of the DMA queues
* @priorities: Priorities for the DMA hardware processing; valid priorities are * @priorities: Priorities for the DMA hardware processing; valid priorities are
* configured with values 1-8; the entry following last valid entry * configured with values 1-8; the entry following last valid entry
* should be configured with 0 * should be configured with 0
*/ */
struct dpdmai_cfg { struct dpdmai_cfg {
u8 num_queues;
u8 priorities[DPDMAI_PRIO_NUM]; u8 priorities[DPDMAI_PRIO_NUM];
}; };
...@@ -80,20 +78,19 @@ struct dpdmai_cfg { ...@@ -80,20 +78,19 @@ struct dpdmai_cfg {
* struct dpdmai_attr - Structure representing DPDMAI attributes * struct dpdmai_attr - Structure representing DPDMAI attributes
* @id: DPDMAI object ID * @id: DPDMAI object ID
* @version: DPDMAI version * @version: DPDMAI version
* @version.major: DPDMAI major version
* @version.minor: DPDMAI minor version
* @num_of_priorities: number of priorities * @num_of_priorities: number of priorities
* @num_of_queues: number of the DMA queues
*/ */
struct dpdmai_attr { struct dpdmai_attr {
int id; int id;
/**
* struct version - DPDMAI version
* @major: DPDMAI major version
* @minor: DPDMAI minor version
*/
struct { struct {
u16 major; u16 major;
u16 minor; u16 minor;
} version; } version;
u8 num_of_priorities; u8 num_of_priorities;
u8 num_of_queues;
}; };
/** /**
...@@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr { ...@@ -158,22 +155,24 @@ struct dpdmai_rx_queue_attr {
u32 fqid; u32 fqid;
}; };
struct dpdmai_tx_queue_attr {
u32 fqid;
};
int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_open(struct fsl_mc_io *mc_io, u32 cmd_flags,
int dpdmai_id, u16 *token); int dpdmai_id, u16 *token);
int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_close(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_destroy(struct fsl_mc_io *mc_io, u32 cmd_flags, u32 dpdmai_id, u16 token);
int dpdmai_create(struct fsl_mc_io *mc_io, u32 cmd_flags,
const struct dpdmai_cfg *cfg, u16 *token);
int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_enable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_disable(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token); int dpdmai_reset(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token);
int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_get_attributes(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, struct dpdmai_attr *attr); u16 token, struct dpdmai_attr *attr);
int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpdmai_set_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, const struct dpdmai_rx_queue_cfg *cfg); u8 queue_idx, u8 priority, const struct dpdmai_rx_queue_cfg *cfg);
int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token, int dpdmai_get_rx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, u16 token,
u8 priority, struct dpdmai_rx_queue_attr *attr); u8 queue_idx, u8 priority, struct dpdmai_rx_queue_attr *attr);
int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags, int dpdmai_get_tx_queue(struct fsl_mc_io *mc_io, u32 cmd_flags,
u16 token, u8 priority, u32 *fqid); u16 token, u8 queue_idx, u8 priority, struct dpdmai_tx_queue_attr *attr);
#endif /* __FSL_DPDMAI_H */ #endif /* __FSL_DPDMAI_H */
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
// Copyright (c) 2013-2014 Freescale Semiconductor, Inc // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
// Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it> // Copyright (c) 2017 Sysam, Angelo Dureghello <angelo@sysam.it>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/dmapool.h> #include <linux/dmapool.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan) ...@@ -74,18 +76,10 @@ static void fsl_edma3_enable_request(struct fsl_edma_chan *fsl_chan)
flags = fsl_edma_drvflags(fsl_chan); flags = fsl_edma_drvflags(fsl_chan);
val = edma_readl_chreg(fsl_chan, ch_sbr); val = edma_readl_chreg(fsl_chan, ch_sbr);
/* Remote/local swapped wrongly on iMX8 QM Audio edma */
if (flags & FSL_EDMA_DRV_QUIRK_SWAPPED) {
if (!fsl_chan->is_rxchan)
val |= EDMA_V3_CH_SBR_RD;
else
val |= EDMA_V3_CH_SBR_WR;
} else {
if (fsl_chan->is_rxchan) if (fsl_chan->is_rxchan)
val |= EDMA_V3_CH_SBR_RD; val |= EDMA_V3_CH_SBR_RD;
else else
val |= EDMA_V3_CH_SBR_WR; val |= EDMA_V3_CH_SBR_WR;
}
if (fsl_chan->is_remote) if (fsl_chan->is_remote)
val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR); val &= ~(EDMA_V3_CH_SBR_RD | EDMA_V3_CH_SBR_WR);
...@@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan, ...@@ -546,6 +540,8 @@ void fsl_edma_fill_tcd(struct fsl_edma_chan *fsl_chan,
csr |= EDMA_TCD_CSR_START; csr |= EDMA_TCD_CSR_START;
fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr); fsl_edma_set_tcd_to_le(fsl_chan, tcd, csr, csr);
trace_edma_fill_tcd(fsl_chan, tcd);
} }
static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan, static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
...@@ -810,6 +806,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan) ...@@ -810,6 +806,9 @@ int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
{ {
struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan); struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_prepare_enable(fsl_chan->clk);
fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev, fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ? fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_TCD64 ?
sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd), sizeof(struct fsl_edma_hw_tcd64) : sizeof(struct fsl_edma_hw_tcd),
...@@ -838,6 +837,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan) ...@@ -838,6 +837,8 @@ void fsl_edma_free_chan_resources(struct dma_chan *chan)
fsl_chan->tcd_pool = NULL; fsl_chan->tcd_pool = NULL;
fsl_chan->is_sw = false; fsl_chan->is_sw = false;
fsl_chan->srcid = 0; fsl_chan->srcid = 0;
if (fsl_edma_drvflags(fsl_chan) & FSL_EDMA_DRV_HAS_CHCLK)
clk_disable_unprepare(fsl_chan->clk);
} }
void fsl_edma_cleanup_vchan(struct dma_device *dmadev) void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
......
...@@ -151,7 +151,6 @@ struct fsl_edma_chan { ...@@ -151,7 +151,6 @@ struct fsl_edma_chan {
enum dma_status status; enum dma_status status;
enum fsl_edma_pm_state pm_state; enum fsl_edma_pm_state pm_state;
bool idle; bool idle;
u32 slave_id;
struct fsl_edma_engine *edma; struct fsl_edma_engine *edma;
struct fsl_edma_desc *edesc; struct fsl_edma_desc *edesc;
struct dma_slave_config cfg; struct dma_slave_config cfg;
...@@ -195,8 +194,6 @@ struct fsl_edma_desc { ...@@ -195,8 +194,6 @@ struct fsl_edma_desc {
#define FSL_EDMA_DRV_HAS_PD BIT(5) #define FSL_EDMA_DRV_HAS_PD BIT(5)
#define FSL_EDMA_DRV_HAS_CHCLK BIT(6) #define FSL_EDMA_DRV_HAS_CHCLK BIT(6)
#define FSL_EDMA_DRV_HAS_CHMUX BIT(7) #define FSL_EDMA_DRV_HAS_CHMUX BIT(7)
/* imx8 QM audio edma remote local swapped */
#define FSL_EDMA_DRV_QUIRK_SWAPPED BIT(8)
/* control and status register is in tcd address space, edma3 reg layout */ /* control and status register is in tcd address space, edma3 reg layout */
#define FSL_EDMA_DRV_SPLIT_REG BIT(9) #define FSL_EDMA_DRV_SPLIT_REG BIT(9)
#define FSL_EDMA_DRV_BUS_8BYTE BIT(10) #define FSL_EDMA_DRV_BUS_8BYTE BIT(10)
...@@ -238,7 +235,6 @@ struct fsl_edma_engine { ...@@ -238,7 +235,6 @@ struct fsl_edma_engine {
void __iomem *muxbase[DMAMUX_NR]; void __iomem *muxbase[DMAMUX_NR];
struct clk *muxclk[DMAMUX_NR]; struct clk *muxclk[DMAMUX_NR];
struct clk *dmaclk; struct clk *dmaclk;
struct clk *chclk;
struct mutex fsl_edma_mutex; struct mutex fsl_edma_mutex;
const struct fsl_edma_drvdata *drvdata; const struct fsl_edma_drvdata *drvdata;
u32 n_chans; u32 n_chans;
...@@ -250,13 +246,17 @@ struct fsl_edma_engine { ...@@ -250,13 +246,17 @@ struct fsl_edma_engine {
struct fsl_edma_chan chans[] __counted_by(n_chans); struct fsl_edma_chan chans[] __counted_by(n_chans);
}; };
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
return fsl_chan->edma->drvdata->flags;
}
#define edma_read_tcdreg_c(chan, _tcd, __name) \ #define edma_read_tcdreg_c(chan, _tcd, __name) \
(sizeof((_tcd)->__name) == sizeof(u64) ? \ _Generic(((_tcd)->__name), \
edma_readq(chan->edma, &(_tcd)->__name) : \ __iomem __le64 : edma_readq(chan->edma, &(_tcd)->__name), \
((sizeof((_tcd)->__name) == sizeof(u32)) ? \ __iomem __le32 : edma_readl(chan->edma, &(_tcd)->__name), \
edma_readl(chan->edma, &(_tcd)->__name) : \ __iomem __le16 : edma_readw(chan->edma, &(_tcd)->__name) \
edma_readw(chan->edma, &(_tcd)->__name) \ )
))
#define edma_read_tcdreg(chan, __name) \ #define edma_read_tcdreg(chan, __name) \
((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \ ((fsl_edma_drvflags(chan) & FSL_EDMA_DRV_TCD64) ? \
...@@ -265,22 +265,12 @@ struct fsl_edma_engine { ...@@ -265,22 +265,12 @@ struct fsl_edma_engine {
) )
#define edma_write_tcdreg_c(chan, _tcd, _val, __name) \ #define edma_write_tcdreg_c(chan, _tcd, _val, __name) \
do { \ _Generic((_tcd->__name), \
switch (sizeof(_tcd->__name)) { \ __iomem __le64 : edma_writeq(chan->edma, (u64 __force)(_val), &_tcd->__name), \
case sizeof(u64): \ __iomem __le32 : edma_writel(chan->edma, (u32 __force)(_val), &_tcd->__name), \
edma_writeq(chan->edma, (u64 __force)_val, &_tcd->__name); \ __iomem __le16 : edma_writew(chan->edma, (u16 __force)(_val), &_tcd->__name), \
break; \ __iomem u8 : edma_writeb(chan->edma, _val, &_tcd->__name) \
case sizeof(u32): \ )
edma_writel(chan->edma, (u32 __force)_val, &_tcd->__name); \
break; \
case sizeof(u16): \
edma_writew(chan->edma, (u16 __force)_val, &_tcd->__name); \
break; \
case sizeof(u8): \
edma_writeb(chan->edma, (u8 __force)_val, &_tcd->__name); \
break; \
} \
} while (0)
#define edma_write_tcdreg(chan, val, __name) \ #define edma_write_tcdreg(chan, val, __name) \
do { \ do { \
...@@ -321,9 +311,11 @@ do { \ ...@@ -321,9 +311,11 @@ do { \
(((struct fsl_edma_hw_tcd *)_tcd)->_field)) (((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_le_to_cpu(x) \ #define fsl_edma_le_to_cpu(x) \
(sizeof(x) == sizeof(u64) ? le64_to_cpu((__force __le64)(x)) : \ _Generic((x), \
(sizeof(x) == sizeof(u32) ? le32_to_cpu((__force __le32)(x)) : \ __le64 : le64_to_cpu((x)), \
le16_to_cpu((__force __le16)(x)))) __le32 : le32_to_cpu((x)), \
__le16 : le16_to_cpu((x)) \
)
#define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \ #define fsl_edma_get_tcd_to_cpu(_chan, _tcd, _field) \
(fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \ (fsl_edma_drvflags(_chan) & FSL_EDMA_DRV_TCD64 ? \
...@@ -331,19 +323,11 @@ do { \ ...@@ -331,19 +323,11 @@ do { \
fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field)) fsl_edma_le_to_cpu(((struct fsl_edma_hw_tcd *)_tcd)->_field))
#define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \ #define fsl_edma_set_tcd_to_le_c(_tcd, _val, _field) \
do { \ _Generic(((_tcd)->_field), \
switch (sizeof((_tcd)->_field)) { \ __le64 : (_tcd)->_field = cpu_to_le64(_val), \
case sizeof(u64): \ __le32 : (_tcd)->_field = cpu_to_le32(_val), \
*(__force __le64 *)(&((_tcd)->_field)) = cpu_to_le64(_val); \ __le16 : (_tcd)->_field = cpu_to_le16(_val) \
break; \ )
case sizeof(u32): \
*(__force __le32 *)(&((_tcd)->_field)) = cpu_to_le32(_val); \
break; \
case sizeof(u16): \
*(__force __le16 *)(&((_tcd)->_field)) = cpu_to_le16(_val); \
break; \
} \
} while (0)
#define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \ #define fsl_edma_set_tcd_to_le(_chan, _tcd, _val, _field) \
do { \ do { \
...@@ -353,6 +337,9 @@ do { \ ...@@ -353,6 +337,9 @@ do { \
fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \ fsl_edma_set_tcd_to_le_c((struct fsl_edma_hw_tcd *)_tcd, _val, _field); \
} while (0) } while (0)
/* Need after struct defination */
#include "fsl-edma-trace.h"
/* /*
* R/W functions for big- or little-endian registers: * R/W functions for big- or little-endian registers:
* The eDMA controller's endian is independent of the CPU core's endian. * The eDMA controller's endian is independent of the CPU core's endian.
...@@ -371,23 +358,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr) ...@@ -371,23 +358,38 @@ static inline u64 edma_readq(struct fsl_edma_engine *edma, void __iomem *addr)
h = ioread32(addr + 4); h = ioread32(addr + 4);
} }
trace_edma_readl(edma, addr, l);
trace_edma_readl(edma, addr + 4, h);
return (h << 32) | l; return (h << 32) | l;
} }
static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr) static inline u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
{ {
u32 val;
if (edma->big_endian) if (edma->big_endian)
return ioread32be(addr); val = ioread32be(addr);
else else
return ioread32(addr); val = ioread32(addr);
trace_edma_readl(edma, addr, val);
return val;
} }
static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr) static inline u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
{ {
u16 val;
if (edma->big_endian) if (edma->big_endian)
return ioread16be(addr); val = ioread16be(addr);
else else
return ioread16(addr); val = ioread16(addr);
trace_edma_readw(edma, addr, val);
return val;
} }
static inline void edma_writeb(struct fsl_edma_engine *edma, static inline void edma_writeb(struct fsl_edma_engine *edma,
...@@ -398,6 +400,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma, ...@@ -398,6 +400,8 @@ static inline void edma_writeb(struct fsl_edma_engine *edma,
iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3)); iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
else else
iowrite8(val, addr); iowrite8(val, addr);
trace_edma_writeb(edma, addr, val);
} }
static inline void edma_writew(struct fsl_edma_engine *edma, static inline void edma_writew(struct fsl_edma_engine *edma,
...@@ -408,6 +412,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma, ...@@ -408,6 +412,8 @@ static inline void edma_writew(struct fsl_edma_engine *edma,
iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2)); iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
else else
iowrite16(val, addr); iowrite16(val, addr);
trace_edma_writew(edma, addr, val);
} }
static inline void edma_writel(struct fsl_edma_engine *edma, static inline void edma_writel(struct fsl_edma_engine *edma,
...@@ -417,6 +423,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma, ...@@ -417,6 +423,8 @@ static inline void edma_writel(struct fsl_edma_engine *edma,
iowrite32be(val, addr); iowrite32be(val, addr);
else else
iowrite32(val, addr); iowrite32(val, addr);
trace_edma_writel(edma, addr, val);
} }
static inline void edma_writeq(struct fsl_edma_engine *edma, static inline void edma_writeq(struct fsl_edma_engine *edma,
...@@ -429,6 +437,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma, ...@@ -429,6 +437,9 @@ static inline void edma_writeq(struct fsl_edma_engine *edma,
iowrite32(val & 0xFFFFFFFF, addr); iowrite32(val & 0xFFFFFFFF, addr);
iowrite32(val >> 32, addr + 4); iowrite32(val >> 32, addr + 4);
} }
trace_edma_writel(edma, addr, val & 0xFFFFFFFF);
trace_edma_writel(edma, addr + 4, val >> 32);
} }
static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
...@@ -436,11 +447,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan) ...@@ -436,11 +447,6 @@ static inline struct fsl_edma_chan *to_fsl_edma_chan(struct dma_chan *chan)
return container_of(chan, struct fsl_edma_chan, vchan.chan); return container_of(chan, struct fsl_edma_chan, vchan.chan);
} }
static inline u32 fsl_edma_drvflags(struct fsl_edma_chan *fsl_chan)
{
return fsl_chan->edma->drvdata->flags;
}
static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd) static inline struct fsl_edma_desc *to_fsl_edma_desc(struct virt_dma_desc *vd)
{ {
return container_of(vd, struct fsl_edma_desc, vdesc); return container_of(vd, struct fsl_edma_desc, vdesc);
......
...@@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, ...@@ -105,7 +105,8 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (dma_spec->args_count != 2) if (dma_spec->args_count != 2)
return NULL; return NULL;
mutex_lock(&fsl_edma->fsl_edma_mutex); guard(mutex)(&fsl_edma->fsl_edma_mutex);
list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) { list_for_each_entry_safe(chan, _chan, &fsl_edma->dma_dev.channels, device_node) {
if (chan->client_count) if (chan->client_count)
continue; continue;
...@@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec, ...@@ -114,15 +115,20 @@ static struct dma_chan *fsl_edma_xlate(struct of_phandle_args *dma_spec,
if (chan) { if (chan) {
chan->device->privatecnt++; chan->device->privatecnt++;
fsl_chan = to_fsl_edma_chan(chan); fsl_chan = to_fsl_edma_chan(chan);
fsl_chan->slave_id = dma_spec->args[1]; fsl_chan->srcid = dma_spec->args[1];
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id,
if (!fsl_chan->srcid) {
dev_err(&fsl_chan->pdev->dev, "Invalidate srcid %d\n",
fsl_chan->srcid);
return NULL;
}
fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid,
true); true);
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return chan; return chan;
} }
} }
} }
mutex_unlock(&fsl_edma->fsl_edma_mutex);
return NULL; return NULL;
} }
...@@ -342,10 +348,13 @@ static struct fsl_edma_drvdata imx8qm_data = { ...@@ -342,10 +348,13 @@ static struct fsl_edma_drvdata imx8qm_data = {
.setup_irq = fsl_edma3_irq_init, .setup_irq = fsl_edma3_irq_init,
}; };
static struct fsl_edma_drvdata imx8qm_audio_data = { static struct fsl_edma_drvdata imx8ulp_data = {
.flags = FSL_EDMA_DRV_QUIRK_SWAPPED | FSL_EDMA_DRV_HAS_PD | FSL_EDMA_DRV_EDMA3, .flags = FSL_EDMA_DRV_HAS_CHMUX | FSL_EDMA_DRV_HAS_CHCLK | FSL_EDMA_DRV_HAS_DMACLK |
FSL_EDMA_DRV_EDMA3,
.chreg_space_sz = 0x10000, .chreg_space_sz = 0x10000,
.chreg_off = 0x10000, .chreg_off = 0x10000,
.mux_off = 0x10000 + offsetof(struct fsl_edma3_ch_reg, ch_mux),
.mux_skip = 0x10000,
.setup_irq = fsl_edma3_irq_init, .setup_irq = fsl_edma3_irq_init,
}; };
...@@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = { ...@@ -380,7 +389,7 @@ static const struct of_device_id fsl_edma_dt_ids[] = {
{ .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data}, { .compatible = "fsl,ls1028a-edma", .data = &ls1028a_data},
{ .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data}, { .compatible = "fsl,imx7ulp-edma", .data = &imx7ulp_data},
{ .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data}, { .compatible = "fsl,imx8qm-edma", .data = &imx8qm_data},
{ .compatible = "fsl,imx8qm-adma", .data = &imx8qm_audio_data}, { .compatible = "fsl,imx8ulp-edma", .data = &imx8ulp_data},
{ .compatible = "fsl,imx93-edma3", .data = &imx93_data3}, { .compatible = "fsl,imx93-edma3", .data = &imx93_data3},
{ .compatible = "fsl,imx93-edma4", .data = &imx93_data4}, { .compatible = "fsl,imx93-edma4", .data = &imx93_data4},
{ .compatible = "fsl,imx95-edma5", .data = &imx95_data5}, { .compatible = "fsl,imx95-edma5", .data = &imx95_data5},
...@@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev) ...@@ -434,6 +443,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
struct fsl_edma_engine *fsl_edma; struct fsl_edma_engine *fsl_edma;
const struct fsl_edma_drvdata *drvdata = NULL; const struct fsl_edma_drvdata *drvdata = NULL;
u32 chan_mask[2] = {0, 0}; u32 chan_mask[2] = {0, 0};
char clk_name[36];
struct edma_regs *regs; struct edma_regs *regs;
int chans; int chans;
int ret, i; int ret, i;
...@@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev) ...@@ -476,14 +486,6 @@ static int fsl_edma_probe(struct platform_device *pdev)
} }
} }
if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
fsl_edma->chclk = devm_clk_get_enabled(&pdev->dev, "mp");
if (IS_ERR(fsl_edma->chclk)) {
dev_err(&pdev->dev, "Missing MP block clock.\n");
return PTR_ERR(fsl_edma->chclk);
}
}
ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2); ret = of_property_read_variable_u32_array(np, "dma-channel-mask", chan_mask, 1, 2);
if (ret > 0) { if (ret > 0) {
...@@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev) ...@@ -540,7 +542,7 @@ static int fsl_edma_probe(struct platform_device *pdev)
fsl_chan->edma = fsl_edma; fsl_chan->edma = fsl_edma;
fsl_chan->pm_state = RUNNING; fsl_chan->pm_state = RUNNING;
fsl_chan->slave_id = 0; fsl_chan->srcid = 0;
fsl_chan->idle = true; fsl_chan->idle = true;
fsl_chan->dma_dir = DMA_NONE; fsl_chan->dma_dir = DMA_NONE;
fsl_chan->vchan.desc_free = fsl_edma_free_desc; fsl_chan->vchan.desc_free = fsl_edma_free_desc;
...@@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev) ...@@ -551,11 +553,21 @@ static int fsl_edma_probe(struct platform_device *pdev)
+ i * drvdata->chreg_space_sz + drvdata->chreg_off + len; + i * drvdata->chreg_space_sz + drvdata->chreg_off + len;
fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip; fsl_chan->mux_addr = fsl_edma->membase + drvdata->mux_off + i * drvdata->mux_skip;
if (drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK) {
snprintf(clk_name, sizeof(clk_name), "ch%02d", i);
fsl_chan->clk = devm_clk_get_enabled(&pdev->dev,
(const char *)clk_name);
if (IS_ERR(fsl_chan->clk))
return PTR_ERR(fsl_chan->clk);
}
fsl_chan->pdev = pdev; fsl_chan->pdev = pdev;
vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev); vchan_init(&fsl_chan->vchan, &fsl_edma->dma_dev);
edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr); edma_write_tcdreg(fsl_chan, cpu_to_le32(0), csr);
fsl_edma_chan_mux(fsl_chan, 0, false); fsl_edma_chan_mux(fsl_chan, 0, false);
if (fsl_chan->edma->drvdata->flags & FSL_EDMA_DRV_HAS_CHCLK)
clk_disable_unprepare(fsl_chan->clk);
} }
ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma); ret = fsl_edma->drvdata->setup_irq(pdev, fsl_edma);
...@@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev) ...@@ -682,8 +694,8 @@ static int fsl_edma_resume_early(struct device *dev)
continue; continue;
fsl_chan->pm_state = RUNNING; fsl_chan->pm_state = RUNNING;
edma_write_tcdreg(fsl_chan, 0, csr); edma_write_tcdreg(fsl_chan, 0, csr);
if (fsl_chan->slave_id != 0) if (fsl_chan->srcid != 0)
fsl_edma_chan_mux(fsl_chan, fsl_chan->slave_id, true); fsl_edma_chan_mux(fsl_chan, fsl_chan->srcid, true);
} }
if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG)) if (!(fsl_edma->drvdata->flags & FSL_EDMA_DRV_SPLIT_REG))
......
// SPDX-License-Identifier: GPL-2.0
#define CREATE_TRACE_POINTS
#include "fsl-edma-common.h"
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright 2023 NXP.
*/
#undef TRACE_SYSTEM
#define TRACE_SYSTEM fsl_edma
#if !defined(__LINUX_FSL_EDMA_TRACE) || defined(TRACE_HEADER_MULTI_READ)
#define __LINUX_FSL_EDMA_TRACE
#include <linux/types.h>
#include <linux/tracepoint.h>
DECLARE_EVENT_CLASS(edma_log_io,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value),
TP_STRUCT__entry(
__field(struct fsl_edma_engine *, edma)
__field(void __iomem *, addr)
__field(u32, value)
),
TP_fast_assign(
__entry->edma = edma;
__entry->addr = addr;
__entry->value = value;
),
TP_printk("offset %08x: value %08x",
(u32)(__entry->addr - __entry->edma->membase), __entry->value)
);
DEFINE_EVENT(edma_log_io, edma_readl,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DEFINE_EVENT(edma_log_io, edma_writel,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DEFINE_EVENT(edma_log_io, edma_readw,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DEFINE_EVENT(edma_log_io, edma_writew,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DEFINE_EVENT(edma_log_io, edma_readb,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DEFINE_EVENT(edma_log_io, edma_writeb,
TP_PROTO(struct fsl_edma_engine *edma, void __iomem *addr, u32 value),
TP_ARGS(edma, addr, value)
);
DECLARE_EVENT_CLASS(edma_log_tcd,
TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
TP_ARGS(chan, tcd),
TP_STRUCT__entry(
__field(u64, saddr)
__field(u16, soff)
__field(u16, attr)
__field(u32, nbytes)
__field(u64, slast)
__field(u64, daddr)
__field(u16, doff)
__field(u16, citer)
__field(u64, dlast_sga)
__field(u16, csr)
__field(u16, biter)
),
TP_fast_assign(
__entry->saddr = fsl_edma_get_tcd_to_cpu(chan, tcd, saddr),
__entry->soff = fsl_edma_get_tcd_to_cpu(chan, tcd, soff),
__entry->attr = fsl_edma_get_tcd_to_cpu(chan, tcd, attr),
__entry->nbytes = fsl_edma_get_tcd_to_cpu(chan, tcd, nbytes),
__entry->slast = fsl_edma_get_tcd_to_cpu(chan, tcd, slast),
__entry->daddr = fsl_edma_get_tcd_to_cpu(chan, tcd, daddr),
__entry->doff = fsl_edma_get_tcd_to_cpu(chan, tcd, doff),
__entry->citer = fsl_edma_get_tcd_to_cpu(chan, tcd, citer),
__entry->dlast_sga = fsl_edma_get_tcd_to_cpu(chan, tcd, dlast_sga),
__entry->csr = fsl_edma_get_tcd_to_cpu(chan, tcd, csr),
__entry->biter = fsl_edma_get_tcd_to_cpu(chan, tcd, biter);
),
TP_printk("\n==== TCD =====\n"
" saddr: 0x%016llx\n"
" soff: 0x%04x\n"
" attr: 0x%04x\n"
" nbytes: 0x%08x\n"
" slast: 0x%016llx\n"
" daddr: 0x%016llx\n"
" doff: 0x%04x\n"
" citer: 0x%04x\n"
" dlast: 0x%016llx\n"
" csr: 0x%04x\n"
" biter: 0x%04x\n",
__entry->saddr,
__entry->soff,
__entry->attr,
__entry->nbytes,
__entry->slast,
__entry->daddr,
__entry->doff,
__entry->citer,
__entry->dlast_sga,
__entry->csr,
__entry->biter)
);
DEFINE_EVENT(edma_log_tcd, edma_fill_tcd,
TP_PROTO(struct fsl_edma_chan *chan, void *tcd),
TP_ARGS(chan, tcd)
);
#endif
/* this part must be outside header guard */
#undef TRACE_INCLUDE_PATH
#define TRACE_INCLUDE_PATH .
#undef TRACE_INCLUDE_FILE
#define TRACE_INCLUDE_FILE fsl-edma-trace
#include <trace/define_trace.h>
...@@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip) ...@@ -598,7 +598,9 @@ static int idma64_probe(struct idma64_chip *chip)
idma64->dma.dev = chip->sysdev; idma64->dma.dev = chip->sysdev;
dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK); ret = dma_set_max_seg_size(idma64->dma.dev, IDMA64C_CTLH_BLOCK_TS_MASK);
if (ret)
return ret;
ret = dma_async_device_register(&idma64->dma); ret = dma_async_device_register(&idma64->dma);
if (ret) if (ret)
......
...@@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq) ...@@ -577,7 +577,6 @@ void idxd_wq_del_cdev(struct idxd_wq *wq)
struct idxd_cdev *idxd_cdev; struct idxd_cdev *idxd_cdev;
idxd_cdev = wq->idxd_cdev; idxd_cdev = wq->idxd_cdev;
ida_destroy(&file_ida);
wq->idxd_cdev = NULL; wq->idxd_cdev = NULL;
cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev)); cdev_device_del(&idxd_cdev->cdev, cdev_dev(idxd_cdev));
put_device(cdev_dev(idxd_cdev)); put_device(cdev_dev(idxd_cdev));
...@@ -593,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) ...@@ -593,6 +592,14 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
if (idxd->state != IDXD_DEV_ENABLED) if (idxd->state != IDXD_DEV_ENABLED)
return -ENXIO; return -ENXIO;
mutex_lock(&wq->wq_lock);
if (!idxd_wq_driver_name_match(wq, dev)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
rc = -ENODEV;
goto wq_err;
}
/* /*
* User type WQ is enabled only when SVA is enabled for two reasons: * User type WQ is enabled only when SVA is enabled for two reasons:
* - If no IOMMU or IOMMU Passthrough without SVA, userspace * - If no IOMMU or IOMMU Passthrough without SVA, userspace
...@@ -608,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev) ...@@ -608,14 +615,7 @@ static int idxd_user_drv_probe(struct idxd_dev *idxd_dev)
dev_dbg(&idxd->pdev->dev, dev_dbg(&idxd->pdev->dev,
"User type WQ cannot be enabled without SVA.\n"); "User type WQ cannot be enabled without SVA.\n");
return -EOPNOTSUPP; rc = -EOPNOTSUPP;
}
mutex_lock(&wq->wq_lock);
if (!idxd_wq_driver_name_match(wq, dev)) {
idxd->cmd_status = IDXD_SCMD_WQ_NO_DRV_NAME;
rc = -ENODEV;
goto wq_err; goto wq_err;
} }
......
...@@ -24,6 +24,7 @@ ...@@ -24,6 +24,7 @@
#include <linux/semaphore.h> #include <linux/semaphore.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/device.h> #include <linux/device.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h> #include <linux/dma-mapping.h>
#include <linux/firmware.h> #include <linux/firmware.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -137,7 +138,11 @@ ...@@ -137,7 +138,11 @@
* 0: Source on AIPS * 0: Source on AIPS
* 12 Destination Bit(DP) 1: Destination on SPBA * 12 Destination Bit(DP) 1: Destination on SPBA
* 0: Destination on AIPS * 0: Destination on AIPS
* 13-15 --------- MUST BE 0 * 13 Source FIFO 1: Source is dual FIFO
* 0: Source is single FIFO
* 14 Destination FIFO 1: Destination is dual FIFO
* 0: Destination is single FIFO
* 15 --------- MUST BE 0
* 16-23 Higher WML HWML * 16-23 Higher WML HWML
* 24-27 N Total number of samples after * 24-27 N Total number of samples after
* which Pad adding/Swallowing * which Pad adding/Swallowing
...@@ -168,6 +173,8 @@ ...@@ -168,6 +173,8 @@
#define SDMA_WATERMARK_LEVEL_SPDIF BIT(10) #define SDMA_WATERMARK_LEVEL_SPDIF BIT(10)
#define SDMA_WATERMARK_LEVEL_SP BIT(11) #define SDMA_WATERMARK_LEVEL_SP BIT(11)
#define SDMA_WATERMARK_LEVEL_DP BIT(12) #define SDMA_WATERMARK_LEVEL_DP BIT(12)
#define SDMA_WATERMARK_LEVEL_SD BIT(13)
#define SDMA_WATERMARK_LEVEL_DD BIT(14)
#define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16) #define SDMA_WATERMARK_LEVEL_HWML (0xFF << 16)
#define SDMA_WATERMARK_LEVEL_LWE BIT(28) #define SDMA_WATERMARK_LEVEL_LWE BIT(28)
#define SDMA_WATERMARK_LEVEL_HWE BIT(29) #define SDMA_WATERMARK_LEVEL_HWE BIT(29)
...@@ -175,6 +182,7 @@ ...@@ -175,6 +182,7 @@
#define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ #define SDMA_DMA_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
#define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \ #define SDMA_DMA_DIRECTIONS (BIT(DMA_DEV_TO_MEM) | \
...@@ -232,20 +240,23 @@ struct sdma_script_start_addrs { ...@@ -232,20 +240,23 @@ struct sdma_script_start_addrs {
s32 utra_addr; s32 utra_addr;
s32 ram_code_start_addr; s32 ram_code_start_addr;
/* End of v1 array */ /* End of v1 array */
s32 mcu_2_ssish_addr; union { s32 v1_end; s32 mcu_2_ssish_addr; };
s32 ssish_2_mcu_addr; s32 ssish_2_mcu_addr;
s32 hdmi_dma_addr; s32 hdmi_dma_addr;
/* End of v2 array */ /* End of v2 array */
s32 zcanfd_2_mcu_addr; union { s32 v2_end; s32 zcanfd_2_mcu_addr; };
s32 zqspi_2_mcu_addr; s32 zqspi_2_mcu_addr;
s32 mcu_2_ecspi_addr; s32 mcu_2_ecspi_addr;
s32 mcu_2_sai_addr; s32 mcu_2_sai_addr;
s32 sai_2_mcu_addr; s32 sai_2_mcu_addr;
s32 uart_2_mcu_rom_addr; s32 uart_2_mcu_rom_addr;
s32 uartsh_2_mcu_rom_addr; s32 uartsh_2_mcu_rom_addr;
s32 i2c_2_mcu_addr;
s32 mcu_2_i2c_addr;
/* End of v3 array */ /* End of v3 array */
s32 mcu_2_zqspi_addr; union { s32 v3_end; s32 mcu_2_zqspi_addr; };
/* End of v4 array */ /* End of v4 array */
s32 v4_end[0];
}; };
/* /*
...@@ -531,6 +542,7 @@ struct sdma_engine { ...@@ -531,6 +542,7 @@ struct sdma_engine {
/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/ /* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
bool clk_ratio; bool clk_ratio;
bool fw_loaded; bool fw_loaded;
struct gen_pool *iram_pool;
}; };
static int sdma_config_write(struct dma_chan *chan, static int sdma_config_write(struct dma_chan *chan,
...@@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac, ...@@ -1072,6 +1084,11 @@ static int sdma_get_pc(struct sdma_channel *sdmac,
per_2_emi = sdma->script_addrs->sai_2_mcu_addr; per_2_emi = sdma->script_addrs->sai_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_sai_addr; emi_2_per = sdma->script_addrs->mcu_2_sai_addr;
break; break;
case IMX_DMATYPE_I2C:
per_2_emi = sdma->script_addrs->i2c_2_mcu_addr;
emi_2_per = sdma->script_addrs->mcu_2_i2c_addr;
sdmac->is_ram_script = true;
break;
case IMX_DMATYPE_HDMI: case IMX_DMATYPE_HDMI:
emi_2_per = sdma->script_addrs->hdmi_dma_addr; emi_2_per = sdma->script_addrs->hdmi_dma_addr;
sdmac->is_ram_script = true; sdmac->is_ram_script = true;
...@@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac) ...@@ -1255,6 +1272,16 @@ static void sdma_set_watermarklevel_for_p2p(struct sdma_channel *sdmac)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP; sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DP;
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT; sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_CONT;
/*
* Limitation: The p2p script support dual fifos in maximum,
* So when fifo number is larger than 1, force enable dual
* fifos.
*/
if (sdmac->n_fifos_src > 1)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_SD;
if (sdmac->n_fifos_dst > 1)
sdmac->watermark_level |= SDMA_WATERMARK_LEVEL_DD;
} }
static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac) static void sdma_set_watermarklevel_for_sais(struct sdma_channel *sdmac)
...@@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma) ...@@ -1358,8 +1385,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
{ {
int ret = -EBUSY; int ret = -EBUSY;
sdma->bd0 = dma_alloc_coherent(sdma->dev, PAGE_SIZE, &sdma->bd0_phys, if (sdma->iram_pool)
GFP_NOWAIT); sdma->bd0 = gen_pool_dma_alloc(sdma->iram_pool,
sizeof(struct sdma_buffer_descriptor),
&sdma->bd0_phys);
else
sdma->bd0 = dma_alloc_coherent(sdma->dev,
sizeof(struct sdma_buffer_descriptor),
&sdma->bd0_phys, GFP_NOWAIT);
if (!sdma->bd0) { if (!sdma->bd0) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
...@@ -1379,10 +1412,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma) ...@@ -1379,10 +1412,14 @@ static int sdma_request_channel0(struct sdma_engine *sdma)
static int sdma_alloc_bd(struct sdma_desc *desc) static int sdma_alloc_bd(struct sdma_desc *desc)
{ {
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
struct sdma_engine *sdma = desc->sdmac->sdma;
int ret = 0; int ret = 0;
desc->bd = dma_alloc_coherent(desc->sdmac->sdma->dev, bd_size, if (sdma->iram_pool)
&desc->bd_phys, GFP_NOWAIT); desc->bd = gen_pool_dma_alloc(sdma->iram_pool, bd_size, &desc->bd_phys);
else
desc->bd = dma_alloc_coherent(sdma->dev, bd_size, &desc->bd_phys, GFP_NOWAIT);
if (!desc->bd) { if (!desc->bd) {
ret = -ENOMEM; ret = -ENOMEM;
goto out; goto out;
...@@ -1394,9 +1431,12 @@ static int sdma_alloc_bd(struct sdma_desc *desc) ...@@ -1394,9 +1431,12 @@ static int sdma_alloc_bd(struct sdma_desc *desc)
static void sdma_free_bd(struct sdma_desc *desc) static void sdma_free_bd(struct sdma_desc *desc)
{ {
u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor); u32 bd_size = desc->num_bd * sizeof(struct sdma_buffer_descriptor);
struct sdma_engine *sdma = desc->sdmac->sdma;
dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, if (sdma->iram_pool)
desc->bd_phys); gen_pool_free(sdma->iram_pool, (unsigned long)desc->bd, bd_size);
else
dma_free_coherent(desc->sdmac->sdma->dev, bd_size, desc->bd, desc->bd_phys);
} }
static void sdma_desc_free(struct virt_dma_desc *vd) static void sdma_desc_free(struct virt_dma_desc *vd)
...@@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg( ...@@ -1643,6 +1683,9 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
if (count & 3 || sg->dma_address & 3) if (count & 3 || sg->dma_address & 3)
goto err_bd_out; goto err_bd_out;
break; break;
case DMA_SLAVE_BUSWIDTH_3_BYTES:
bd->mode.command = 3;
break;
case DMA_SLAVE_BUSWIDTH_2_BYTES: case DMA_SLAVE_BUSWIDTH_2_BYTES:
bd->mode.command = 2; bd->mode.command = 2;
if (count & 1 || sg->dma_address & 1) if (count & 1 || sg->dma_address & 1)
...@@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan) ...@@ -1880,10 +1923,17 @@ static void sdma_issue_pending(struct dma_chan *chan)
spin_unlock_irqrestore(&sdmac->vc.lock, flags); spin_unlock_irqrestore(&sdmac->vc.lock, flags);
} }
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 34 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1 \
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 38 (offsetof(struct sdma_script_start_addrs, v1_end) / sizeof(s32))
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 45
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 46 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2 \
(offsetof(struct sdma_script_start_addrs, v2_end) / sizeof(s32))
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V3 \
(offsetof(struct sdma_script_start_addrs, v3_end) / sizeof(s32))
#define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V4 \
(offsetof(struct sdma_script_start_addrs, v4_end) / sizeof(s32))
static void sdma_add_scripts(struct sdma_engine *sdma, static void sdma_add_scripts(struct sdma_engine *sdma,
const struct sdma_script_start_addrs *addr) const struct sdma_script_start_addrs *addr)
...@@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma) ...@@ -2068,6 +2118,7 @@ static int sdma_init(struct sdma_engine *sdma)
{ {
int i, ret; int i, ret;
dma_addr_t ccb_phys; dma_addr_t ccb_phys;
int ccbsize;
ret = clk_enable(sdma->clk_ipg); ret = clk_enable(sdma->clk_ipg);
if (ret) if (ret)
...@@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma) ...@@ -2083,10 +2134,14 @@ static int sdma_init(struct sdma_engine *sdma)
/* Be sure SDMA has not started yet */ /* Be sure SDMA has not started yet */
writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize = MAX_DMA_CHANNELS * (sizeof(struct sdma_channel_control)
MAX_DMA_CHANNELS * sizeof(struct sdma_channel_control) + + sizeof(struct sdma_context_data));
sizeof(struct sdma_context_data),
&ccb_phys, GFP_KERNEL); if (sdma->iram_pool)
sdma->channel_control = gen_pool_dma_alloc(sdma->iram_pool, ccbsize, &ccb_phys);
else
sdma->channel_control = dma_alloc_coherent(sdma->dev, ccbsize, &ccb_phys,
GFP_KERNEL);
if (!sdma->channel_control) { if (!sdma->channel_control) {
ret = -ENOMEM; ret = -ENOMEM;
...@@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev) ...@@ -2272,6 +2327,12 @@ static int sdma_probe(struct platform_device *pdev)
vchan_init(&sdmac->vc, &sdma->dma_device); vchan_init(&sdmac->vc, &sdma->dma_device);
} }
if (np) {
sdma->iram_pool = of_gen_pool_get(np, "iram", 0);
if (sdma->iram_pool)
dev_info(&pdev->dev, "alloc bd from iram.\n");
}
ret = sdma_init(sdma); ret = sdma_init(sdma);
if (ret) if (ret)
goto err_init; goto err_init;
......
...@@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev) ...@@ -195,7 +195,7 @@ static int mcf_edma_probe(struct platform_device *pdev)
struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i]; struct fsl_edma_chan *mcf_chan = &mcf_edma->chans[i];
mcf_chan->edma = mcf_edma; mcf_chan->edma = mcf_edma;
mcf_chan->slave_id = i; mcf_chan->srcid = i;
mcf_chan->idle = true; mcf_chan->idle = true;
mcf_chan->dma_dir = DMA_NONE; mcf_chan->dma_dir = DMA_NONE;
mcf_chan->vchan.desc_free = fsl_edma_free_desc; mcf_chan->vchan.desc_free = fsl_edma_free_desc;
...@@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param) ...@@ -277,7 +277,7 @@ bool mcf_edma_filter_fn(struct dma_chan *chan, void *param)
if (chan->device->dev->driver == &mcf_edma_driver.driver) { if (chan->device->dev->driver == &mcf_edma_driver.driver) {
struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan); struct fsl_edma_chan *mcf_chan = to_fsl_edma_chan(chan);
return (mcf_chan->slave_id == (uintptr_t)param); return (mcf_chan->srcid == (uintptr_t)param);
} }
return false; return false;
......
...@@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan) ...@@ -155,11 +155,6 @@ static inline struct device *chan2dev(struct dma_chan *chan)
return &chan->dev->device; return &chan->dev->device;
} }
static inline struct device *chan2parent(struct dma_chan *chan)
{
return chan->dev->device.parent;
}
static inline static inline
struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan) struct pch_dma_desc *pdc_first_active(struct pch_dma_chan *pd_chan)
{ {
......
...@@ -50,7 +50,6 @@ ...@@ -50,7 +50,6 @@
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/spinlock.h> #include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h> #include <linux/property.h>
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/acpi.h> #include <linux/acpi.h>
...@@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = { ...@@ -947,22 +946,12 @@ static const struct acpi_device_id hidma_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids); MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif #endif
static const struct of_device_id hidma_match[] = {
{.compatible = "qcom,hidma-1.0",},
{.compatible = "qcom,hidma-1.1", .data = (void *)(HIDMA_MSI_CAP),},
{.compatible = "qcom,hidma-1.2",
.data = (void *)(HIDMA_MSI_CAP | HIDMA_IDENTITY_CAP),},
{},
};
MODULE_DEVICE_TABLE(of, hidma_match);
static struct platform_driver hidma_driver = { static struct platform_driver hidma_driver = {
.probe = hidma_probe, .probe = hidma_probe,
.remove_new = hidma_remove, .remove_new = hidma_remove,
.shutdown = hidma_shutdown, .shutdown = hidma_shutdown,
.driver = { .driver = {
.name = "hidma", .name = "hidma",
.of_match_table = hidma_match,
.acpi_match_table = ACPI_PTR(hidma_acpi_ids), .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
}, },
}; };
......
...@@ -7,12 +7,7 @@ ...@@ -7,12 +7,7 @@
#include <linux/dmaengine.h> #include <linux/dmaengine.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/of.h>
#include <linux/property.h> #include <linux/property.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/of_device.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
...@@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = { ...@@ -327,115 +322,13 @@ static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids); MODULE_DEVICE_TABLE(acpi, hidma_mgmt_acpi_ids);
#endif #endif
static const struct of_device_id hidma_mgmt_match[] = {
{.compatible = "qcom,hidma-mgmt-1.0",},
{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
static struct platform_driver hidma_mgmt_driver = { static struct platform_driver hidma_mgmt_driver = {
.probe = hidma_mgmt_probe, .probe = hidma_mgmt_probe,
.driver = { .driver = {
.name = "hidma-mgmt", .name = "hidma-mgmt",
.of_match_table = hidma_mgmt_match,
.acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids), .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
}, },
}; };
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ) module_platform_driver(hidma_mgmt_driver);
static int object_counter;
static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
{
struct platform_device *pdev_parent = of_find_device_by_node(np);
struct platform_device_info pdevinfo;
struct device_node *child;
struct resource *res;
int ret = 0;
/* allocate a resource array */
res = kcalloc(3, sizeof(*res), GFP_KERNEL);
if (!res)
return -ENOMEM;
for_each_available_child_of_node(np, child) {
struct platform_device *new_pdev;
ret = of_address_to_resource(child, 0, &res[0]);
if (!ret)
goto out;
ret = of_address_to_resource(child, 1, &res[1]);
if (!ret)
goto out;
ret = of_irq_to_resource(child, 0, &res[2]);
if (ret <= 0)
goto out;
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.fwnode = &child->fwnode;
pdevinfo.parent = pdev_parent ? &pdev_parent->dev : NULL;
pdevinfo.name = child->name;
pdevinfo.id = object_counter++;
pdevinfo.res = res;
pdevinfo.num_res = 3;
pdevinfo.data = NULL;
pdevinfo.size_data = 0;
pdevinfo.dma_mask = DMA_BIT_MASK(64);
new_pdev = platform_device_register_full(&pdevinfo);
if (IS_ERR(new_pdev)) {
ret = PTR_ERR(new_pdev);
goto out;
}
new_pdev->dev.of_node = child;
of_dma_configure(&new_pdev->dev, child, true);
/*
* It is assumed that calling of_msi_configure is safe on
* platforms with or without MSI support.
*/
of_msi_configure(&new_pdev->dev, child);
}
kfree(res);
return ret;
out:
of_node_put(child);
kfree(res);
return ret;
}
#endif
static int __init hidma_mgmt_init(void)
{
#if defined(CONFIG_OF) && defined(CONFIG_OF_IRQ)
struct device_node *child;
for_each_matching_node(child, hidma_mgmt_match) {
/* device tree based firmware here */
hidma_mgmt_of_populate_channels(child);
}
#endif
/*
* We do not check for return value here, as it is assumed that
* platform_driver_register must not fail. The reason for this is that
* the (potential) hidma_mgmt_of_populate_channels calls above are not
* cleaned up if it does fail, and to do this work is quite
* complicated. In particular, various calls of of_address_to_resource,
* of_irq_to_resource, platform_device_register_full, of_dma_configure,
* and of_msi_configure which then call other functions and so on, must
* be cleaned up - this is not a trivial exercise.
*
* Currently, this module is not intended to be unloaded, and there is
* no module_exit function defined which does the needed cleanup. For
* this reason, we have to assume success here.
*/
platform_driver_register(&hidma_mgmt_driver);
return 0;
}
module_init(hidma_mgmt_init);
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
...@@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan ...@@ -81,6 +81,8 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
*/ */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc) static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{ {
lockdep_assert_held(&vc->lock);
list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued); list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
return !list_empty(&vc->desc_issued); return !list_empty(&vc->desc_issued);
} }
...@@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd) ...@@ -96,6 +98,8 @@ static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
dma_cookie_t cookie; dma_cookie_t cookie;
lockdep_assert_held(&vc->lock);
cookie = vd->tx.cookie; cookie = vd->tx.cookie;
dma_cookie_complete(&vd->tx); dma_cookie_complete(&vd->tx);
dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n", dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
...@@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) ...@@ -146,6 +150,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{ {
struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan); struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
lockdep_assert_held(&vc->lock);
list_add_tail(&vd->node, &vc->desc_terminated); list_add_tail(&vd->node, &vc->desc_terminated);
if (vc->cyclic == vd) if (vc->cyclic == vd)
...@@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd) ...@@ -160,6 +166,8 @@ static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
*/ */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{ {
lockdep_assert_held(&vc->lock);
return list_first_entry_or_null(&vc->desc_issued, return list_first_entry_or_null(&vc->desc_issued,
struct virt_dma_desc, node); struct virt_dma_desc, node);
} }
...@@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc) ...@@ -177,6 +185,8 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
struct list_head *head) struct list_head *head)
{ {
lockdep_assert_held(&vc->lock);
list_splice_tail_init(&vc->desc_allocated, head); list_splice_tail_init(&vc->desc_allocated, head);
list_splice_tail_init(&vc->desc_submitted, head); list_splice_tail_init(&vc->desc_submitted, head);
list_splice_tail_init(&vc->desc_issued, head); list_splice_tail_init(&vc->desc_issued, head);
......
...@@ -1307,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = { ...@@ -1307,6 +1307,7 @@ static const struct platform_device_id xdma_id_table[] = {
{ "xdma", 0}, { "xdma", 0},
{ }, { },
}; };
MODULE_DEVICE_TABLE(platform, xdma_id_table);
static struct platform_driver xdma_driver = { static struct platform_driver xdma_driver = {
.driver = { .driver = {
......
...@@ -1043,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan) ...@@ -1043,9 +1043,8 @@ static int xilinx_dpdma_chan_stop(struct xilinx_dpdma_chan *chan)
static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
{ {
struct xilinx_dpdma_tx_desc *active; struct xilinx_dpdma_tx_desc *active;
unsigned long flags;
spin_lock_irqsave(&chan->lock, flags); spin_lock(&chan->lock);
xilinx_dpdma_debugfs_desc_done_irq(chan); xilinx_dpdma_debugfs_desc_done_irq(chan);
...@@ -1057,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan) ...@@ -1057,7 +1056,7 @@ static void xilinx_dpdma_chan_done_irq(struct xilinx_dpdma_chan *chan)
"chan%u: DONE IRQ with no active descriptor!\n", "chan%u: DONE IRQ with no active descriptor!\n",
chan->id); chan->id);
spin_unlock_irqrestore(&chan->lock, flags); spin_unlock(&chan->lock);
} }
/** /**
...@@ -1072,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) ...@@ -1072,10 +1071,9 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
{ {
struct xilinx_dpdma_tx_desc *pending; struct xilinx_dpdma_tx_desc *pending;
struct xilinx_dpdma_sw_desc *sw_desc; struct xilinx_dpdma_sw_desc *sw_desc;
unsigned long flags;
u32 desc_id; u32 desc_id;
spin_lock_irqsave(&chan->lock, flags); spin_lock(&chan->lock);
pending = chan->desc.pending; pending = chan->desc.pending;
if (!chan->running || !pending) if (!chan->running || !pending)
...@@ -1108,7 +1106,7 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan) ...@@ -1108,7 +1106,7 @@ static void xilinx_dpdma_chan_vsync_irq(struct xilinx_dpdma_chan *chan)
spin_unlock(&chan->vchan.lock); spin_unlock(&chan->vchan.lock);
out: out:
spin_unlock_irqrestore(&chan->lock, flags); spin_unlock(&chan->lock);
} }
/** /**
......
...@@ -41,6 +41,7 @@ enum sdma_peripheral_type { ...@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
IMX_DMATYPE_SAI, /* SAI */ IMX_DMATYPE_SAI, /* SAI */
IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */ IMX_DMATYPE_MULTI_SAI, /* MULTI FIFOs For Audio */
IMX_DMATYPE_HDMI, /* HDMI Audio */ IMX_DMATYPE_HDMI, /* HDMI Audio */
IMX_DMATYPE_I2C, /* I2C */
}; };
enum imx_dma_prio { enum imx_dma_prio {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment