Commit 2166d960 authored by Peter Ujfalusi, committed by Vinod Koul

dmaengine: ti: k3-udma: Remove dma_sync_single calls for descriptors

The descriptors are allocated via either dma_pool or dma_alloc_coherent.

There is no need for the dma_sync_single_* calls.
Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Reviewed-by: Grygorii Strashko <grygorii.strashko@ti.com>
Link: https://lore.kernel.org/r/20200707102352.28773-2-peter.ujfalusi@ti.com
Signed-off-by: Vinod Koul <vkoul@kernel.org>
parent d6a7bb86
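As background for the rationale above, a minimal sketch of the distinction the commit message relies on (illustrative only; the pool name, sizes and the helper below are hypothetical, not taken from the driver): memory obtained from a dma_pool or dma_alloc_coherent() is coherent and needs no dma_sync_single_*() maintenance; those calls are only meaningful for streaming mappings created with dma_map_single().

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>

/* Hypothetical helper, for illustration only. */
static int coherent_desc_example(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t paddr;
	void *desc;

	/*
	 * Coherent allocation: the DMA API keeps the CPU and device views
	 * of this memory consistent, so dma_sync_single_for_device() /
	 * dma_sync_single_for_cpu() are not needed for it.
	 */
	pool = dma_pool_create("desc-pool", dev, 128, 64, 0);
	if (!pool)
		return -ENOMEM;

	desc = dma_pool_zalloc(pool, GFP_KERNEL, &paddr);
	if (!desc) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/*
	 * Only a streaming mapping (kmalloc'ed buffer + dma_map_single())
	 * would require the dma_sync_single_*() calls this patch removes.
	 */

	dma_pool_free(pool, desc, paddr);
	dma_pool_destroy(pool);
	return 0;
}

The udma descriptors come from exactly these two coherent sources, which is why the sync calls removed in the diff below are unnecessary.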
...@@ -539,30 +539,6 @@ static bool udma_is_chan_paused(struct udma_chan *uc) ...@@ -539,30 +539,6 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
return false; return false;
} }
-static void udma_sync_for_device(struct udma_chan *uc, int idx)
-{
-	struct udma_desc *d = uc->desc;
-
-	if (uc->cyclic && uc->config.pkt_mode) {
-		dma_sync_single_for_device(uc->ud->dev,
-					   d->hwdesc[idx].cppi5_desc_paddr,
-					   d->hwdesc[idx].cppi5_desc_size,
-					   DMA_TO_DEVICE);
-	} else {
-		int i;
-
-		for (i = 0; i < d->hwdesc_count; i++) {
-			if (!d->hwdesc[i].cppi5_desc_vaddr)
-				continue;
-
-			dma_sync_single_for_device(uc->ud->dev,
-						   d->hwdesc[i].cppi5_desc_paddr,
-						   d->hwdesc[i].cppi5_desc_size,
-						   DMA_TO_DEVICE);
-		}
-	}
-}
-
 static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
 {
 	return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
...@@ -593,7 +569,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx) ...@@ -593,7 +569,6 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
paddr = udma_curr_cppi5_desc_paddr(d, idx); paddr = udma_curr_cppi5_desc_paddr(d, idx);
wmb(); /* Ensure that writes are not moved over this point */ wmb(); /* Ensure that writes are not moved over this point */
-		udma_sync_for_device(uc, idx);
 	}
 
 	return k3_ringacc_ring_push(ring, &paddr);
...@@ -628,12 +603,12 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) ...@@ -628,12 +603,12 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
} }
if (ring && k3_ringacc_ring_get_occ(ring)) { if (ring && k3_ringacc_ring_get_occ(ring)) {
-		struct udma_desc *d = NULL;
-
 		ret = k3_ringacc_ring_pop(ring, addr);
 		if (ret)
 			return ret;
 
+		rmb(); /* Ensure that reads are not moved before this point */
+
 		/* Teardown completion */
 		if (cppi5_desc_is_tdcm(*addr))
 			return ret;
...@@ -641,14 +616,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr) ...@@ -641,14 +616,6 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
 		/* Check for flush descriptor */
 		if (udma_desc_is_rx_flush(uc, *addr))
 			return -ENOENT;
-
-		d = udma_udma_desc_from_paddr(uc, *addr);
-
-		if (d)
-			dma_sync_single_for_cpu(uc->ud->dev, *addr,
-						d->hwdesc[0].cppi5_desc_size,
-						DMA_FROM_DEVICE);
-		rmb(); /* Ensure that reads are not moved before this point */
 	}
 
 	return ret;
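The wmb()/rmb() comments retained above describe the ordering the ring-based hand-off still depends on after the sync calls are gone. A rough sketch of that pattern, assuming a generic descriptor ring (the demo_* names below are placeholders, not the k3_ringacc API):

#include <linux/types.h>
#include <asm/barrier.h>

/* Placeholder ring and descriptor types, for illustration only. */
struct demo_ring;
struct demo_desc {
	u32 flags;
};

int demo_ring_push(struct demo_ring *ring, dma_addr_t *paddr);
int demo_ring_pop(struct demo_ring *ring, dma_addr_t *paddr);

/* Producer: make the descriptor contents visible before publishing it. */
static int demo_submit(struct demo_ring *ring, struct demo_desc *d,
		       dma_addr_t paddr)
{
	d->flags = 1;	/* CPU fills the coherent descriptor */
	wmb();		/* descriptor writes complete before the push */
	return demo_ring_push(ring, &paddr);
}

/* Consumer: do not read descriptor contents before the pop is observed. */
static int demo_complete(struct demo_ring *ring, dma_addr_t *paddr)
{
	int ret = demo_ring_pop(ring, paddr);

	if (ret)
		return ret;
	rmb();		/* descriptor reads are not reordered before this */
	return 0;
}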