Commit 14ccf0aa authored by Radhey Shyam Pandey, committed by Vinod Koul

dmaengine: xilinx_dma: In dma channel probe fix node order dependency

In an overlay application we noticed that the DMA channel node probe order is
inverted, i.e. the s2mm channel is probed first, followed by the mm2s channel.
The reason for this inversion is the fdtoverlay utility, which uses a function
called fdt_add_subnode(). It stores subnodes after the properties; this has the
effect of inserting each new subnode before any existing ones, and the end
result is a reversal of the channel node order.

Because of this inverted channel probe order, the node probed first is assigned
index '0', whereas the channel ID should be '0' for tx and '1' for rx. A
dmatest client that follows this DT convention then fails in the DMA transfer
because the channels are swapped, as the sketch below illustrates.
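
The swap can be seen with a trivial sketch of the channel-array lookup (plain
userspace C; the array is a hypothetical stand-in for xdev->chan[], not code
from this patch):

  #include <stdio.h>

  /* Hypothetical stand-in for xdev->chan[]: slots are filled in probe order. */
  static const char *chan_by_index[2];

  int main(void)
  {
          /*
           * Overlay case described above: fdt_add_subnode() reverses the
           * subnode order, so the s2mm node is probed first and lands in
           * slot 0 while the mm2s node lands in slot 1.
           */
          chan_by_index[0] = "s2mm (rx)";
          chan_by_index[1] = "mm2s (tx)";

          /* A client following the DT convention asks for cell 0 = tx, cell 1 = rx. */
          printf("requested tx (cell 0) -> got %s\n", chan_by_index[0]);
          printf("requested rx (cell 1) -> got %s\n", chan_by_index[1]);
          return 0;
  }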

To fix this behavior and make the channel index assignment independent of
probe order, always assign the mm2s channels starting at index '0' and the
s2mm channels at an IP-specific fixed offset derived from the max_channels
count, as sketched below.
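
The resulting rule is simple arithmetic on the configuration's max_channels. A
minimal standalone sketch (plain userspace C; MAX_CHANNELS = 2 is an assumed
value mirroring the single-pair AXI DMA case, not code from this patch):

  #include <stdio.h>

  /*
   * Sketch of the fixed index assignment introduced by this patch: mm2s
   * channels are handed out from index 0 upward and s2mm channels from
   * max_channels / 2 upward, regardless of which DT node is probed first.
   */
  #define MAX_CHANNELS 2

  int main(void)
  {
          unsigned int mm2s_chan_id = 0;                /* next mm2s index */
          unsigned int s2mm_chan_id = MAX_CHANNELS / 2; /* next s2mm index */

          /* Overlay case from above: the s2mm node is still probed first... */
          unsigned int rx_id = s2mm_chan_id++;  /* ...but now gets index 1 */
          unsigned int tx_id = mm2s_chan_id++;  /* ...and mm2s gets index 0 */

          printf("mm2s (tx) -> chan[%u], s2mm (rx) -> chan[%u]\n", tx_id, rx_id);
          return 0;
  }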
Signed-off-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
Link: https://lore.kernel.org/r/1580388865-9960-3-git-send-email-radhey.shyam.pandey@xilinx.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent 04c2bc2b
@@ -488,16 +488,15 @@ struct xilinx_dma_config {
  * @txs_clk: DMA mm2s stream clock
  * @rx_clk: DMA s2mm clock
  * @rxs_clk: DMA s2mm stream clock
- * @nr_channels: Number of channels DMA device supports
- * @chan_id: DMA channel identifier
+ * @s2mm_chan_id: DMA s2mm channel identifier
+ * @mm2s_chan_id: DMA mm2s channel identifier
  * @max_buffer_len: Max buffer length
- * @s2mm_index: S2MM channel index
  */
 struct xilinx_dma_device {
         void __iomem *regs;
         struct device *dev;
         struct dma_device common;
-        struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
+        struct xilinx_dma_chan *chan[XILINX_MCDMA_MAX_CHANS_PER_DEVICE];
         u32 flush_on_fsync;
         bool ext_addr;
         struct platform_device *pdev;
@@ -507,10 +506,9 @@ struct xilinx_dma_device {
         struct clk *txs_clk;
         struct clk *rx_clk;
         struct clk *rxs_clk;
-        u32 nr_channels;
-        u32 chan_id;
+        u32 s2mm_chan_id;
+        u32 mm2s_chan_id;
         u32 max_buffer_len;
-        u32 s2mm_index;
 };

 /* Macros */
@@ -1748,7 +1746,7 @@ static irqreturn_t xilinx_mcdma_irq_handler(int irq, void *data)
                 return IRQ_NONE;
         if (chan->direction == DMA_DEV_TO_MEM)
-                chan_offset = chan->xdev->s2mm_index;
+                chan_offset = chan->xdev->dma_config->max_channels / 2;
         chan_offset = chan_offset + (chan_id - 1);
         chan = chan->xdev->chan[chan_offset];
@@ -2734,12 +2732,11 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
  *
  * @xdev: Driver specific device structure
  * @node: Device node
- * @chan_id: DMA Channel id
  *
  * Return: '0' on success and failure value on error
  */
 static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
-                                 struct device_node *node, int chan_id)
+                                 struct device_node *node)
 {
         struct xilinx_dma_chan *chan;
         bool has_dre = false;
@@ -2791,8 +2788,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
             of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
             of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
                 chan->direction = DMA_MEM_TO_DEV;
-                chan->id = chan_id;
-                chan->tdest = chan_id;
+                chan->id = xdev->mm2s_chan_id++;
+                chan->tdest = chan->id;
                 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
                 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2808,9 +2805,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
                    of_device_is_compatible(node,
                                            "xlnx,axi-dma-s2mm-channel")) {
                 chan->direction = DMA_DEV_TO_MEM;
-                chan->id = chan_id;
-                xdev->s2mm_index = xdev->nr_channels;
-                chan->tdest = chan_id - xdev->nr_channels;
+                chan->id = xdev->s2mm_chan_id++;
+                chan->tdest = chan->id - xdev->dma_config->max_channels / 2;
                 chan->has_vflip = of_property_read_bool(node,
                                         "xlnx,enable-vert-flip");
                 if (chan->has_vflip) {
@@ -2912,9 +2908,7 @@ static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
                 dev_warn(xdev->dev, "missing dma-channels property\n");
         for (i = 0; i < nr_channels; i++)
-                xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
-        xdev->nr_channels += nr_channels;
+                xilinx_dma_chan_probe(xdev, node);
         return 0;
 }
@@ -2932,7 +2926,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
         struct xilinx_dma_device *xdev = ofdma->of_dma_data;
         int chan_id = dma_spec->args[0];
-        if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
+        if (chan_id >= xdev->dma_config->max_channels || !xdev->chan[chan_id])
                 return NULL;
         return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -3019,6 +3013,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
         /* Retrieve the DMA engine properties from the device tree */
         xdev->max_buffer_len = GENMASK(XILINX_DMA_MAX_TRANS_LEN_MAX - 1, 0);
+        xdev->s2mm_chan_id = xdev->dma_config->max_channels / 2;
         if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA ||
             xdev->dma_config->dmatype == XDMA_TYPE_AXIMCDMA) {
@@ -3112,7 +3107,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
         }
         if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
-                for (i = 0; i < xdev->nr_channels; i++)
+                for (i = 0; i < xdev->dma_config->max_channels; i++)
                         if (xdev->chan[i])
                                 xdev->chan[i]->num_frms = num_frames;
         }
@@ -3142,7 +3137,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 disable_clks:
         xdma_disable_allclks(xdev);
 error:
-        for (i = 0; i < xdev->nr_channels; i++)
+        for (i = 0; i < xdev->dma_config->max_channels; i++)
                 if (xdev->chan[i])
                         xilinx_dma_chan_remove(xdev->chan[i]);
@@ -3164,7 +3159,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
         dma_async_device_unregister(&xdev->common);
-        for (i = 0; i < xdev->nr_channels; i++)
+        for (i = 0; i < xdev->dma_config->max_channels; i++)
                 if (xdev->chan[i])
                         xilinx_dma_chan_remove(xdev->chan[i]);