Commit a8eb92d0 authored by Christoph Hellwig

arc: fix arc_dma_{map,unmap}_page

These functions should perform the same cache synchronization as calling
arc_dma_sync_single_for_{cpu,device}, in addition to doing any required
address translation or mapping [1].  Ensure they actually do that by calling
arc_dma_sync_single_for_{cpu,device} instead of passing the dir argument
along to _dma_cache_sync.
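
For reference, a minimal sketch of the two sync helpers the map/unmap paths
now reuse. The signatures match the context lines in the diff below; the
one-line bodies are paraphrased from arch/arc/mm/dma.c around this commit and
are an assumption, not part of this diff:

static void arc_dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	/* write dirty lines back before the device reads the buffer */
	dma_cache_wback(dma_handle, size);
}

static void arc_dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
{
	/* drop stale lines before the CPU reads what the device wrote */
	dma_cache_inv(dma_handle, size);
}

With the patch, the map path reuses the for_device semantics and the unmap
path the for_cpu semantics, rather than switching on dir as the removed
_dma_cache_sync did.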

The now unused _dma_cache_sync function is removed as well.

[1] In fact, various drivers rely on this by passing DMA_ATTR_SKIP_CPU_SYNC
to the map/unmap routines and doing the cache synchronization manually; a
sketch of that pattern follows.
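
A hypothetical example of the driver pattern described in [1], using only
generic DMA API calls (dma_map_page_attrs, dma_mapping_error,
dma_sync_single_for_device); the function and variable names are illustrative
and not taken from any driver touched by this commit:

#include <linux/dma-mapping.h>

static int example_map_for_tx(struct device *dev, struct page *page,
			      size_t len, dma_addr_t *out)
{
	dma_addr_t addr;

	/* Mapping alone does no CPU cache maintenance because of the attr. */
	addr = dma_map_page_attrs(dev, page, 0, len, DMA_TO_DEVICE,
				  DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* The driver syncs explicitly, e.g. only the bytes it touched. */
	dma_sync_single_for_device(dev, addr, len, DMA_TO_DEVICE);

	*out = addr;
	return 0;
}

Such drivers only get correct behaviour if a plain map/unmap is equivalent to
a skip-sync map/unmap plus the explicit sync calls, which is exactly the
equivalence this patch restores on ARC.
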
Signed-off-by: Christoph Hellwig <hch@lst.de>
Tested-by: Alexey Brodkin <abrodkin@synopsys.com>
Acked-by: Vineet Gupta <vgupta@synopsys.com>
parent b5917410
@@ -130,29 +130,6 @@ static int arc_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 	return ret;
 }
 
-/*
- * streaming DMA Mapping API...
- * CPU accesses page via normal paddr, thus needs to explicitly made
- * consistent before each use
- */
-static void _dma_cache_sync(phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
-{
-	switch (dir) {
-	case DMA_FROM_DEVICE:
-		dma_cache_inv(paddr, size);
-		break;
-	case DMA_TO_DEVICE:
-		dma_cache_wback(paddr, size);
-		break;
-	case DMA_BIDIRECTIONAL:
-		dma_cache_wback_inv(paddr, size);
-		break;
-	default:
-		pr_err("Invalid DMA dir [%d] for OP @ %pa[p]\n", dir, &paddr);
-	}
-}
-
 static void arc_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t dma_handle, size_t size, enum dma_data_direction dir)
 {
@@ -185,7 +162,7 @@ static dma_addr_t arc_dma_map_page(struct device *dev, struct page *page,
 	phys_addr_t paddr = page_to_phys(page) + offset;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		_dma_cache_sync(paddr, size, dir);
+		arc_dma_sync_single_for_device(dev, paddr, size, dir);
 
 	return paddr;
 }
@@ -205,7 +182,7 @@ static void arc_dma_unmap_page(struct device *dev, dma_addr_t handle,
 	phys_addr_t paddr = handle;
 
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		_dma_cache_sync(paddr, size, dir);
+		arc_dma_sync_single_for_cpu(dev, paddr, size, dir);
 }
 
 static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,