Commit 80e61fcd authored by Christoph Hellwig

arc: remove the partial DMA_ATTR_NON_CONSISTENT support

The arc DMA code supports DMA_ATTR_NON_CONSISTENT allocations, but does
not provide a cache_sync operation.  This means any user of it will
never be able to actually transfer cache ownership, and will thus run
into coherency bugs.
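
For context, here is a minimal sketch (not part of this commit; the
function name, 'dev' and 'size' are hypothetical stand-ins) of the usage
pattern DMA_ATTR_NON_CONSISTENT is meant to enable in kernels of this
era: the allocation stays cacheable and the driver is expected to pass
ownership explicitly via dma_cache_sync().  Because arc never wired up
the backing cache_sync operation, the sync step below did nothing there:

#include <linux/dma-mapping.h>

/* Hypothetical driver fragment, for illustration only. */
static int example_tx(struct device *dev, size_t size)
{
	dma_addr_t handle;
	void *buf;

	/* Cacheable allocation; the caller promises to sync explicitly. */
	buf = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL,
			      DMA_ATTR_NON_CONSISTENT);
	if (!buf)
		return -ENOMEM;

	memset(buf, 0xff, size);	/* CPU writes land in the cache */

	/*
	 * Hand ownership to the device.  Without a backing cache_sync
	 * operation on arc, dirty lines stay in the CPU cache and can
	 * be written back over the device's data later on.
	 */
	dma_cache_sync(dev, buf, size, DMA_TO_DEVICE);

	/* ... kick off device DMA from 'handle' here ... */

	dma_free_attrs(dev, size, buf, handle, DMA_ATTR_NON_CONSISTENT);
	return 0;
}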
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Evgeniy Paltsev <paltsev@synopsys.com>
Tested-by: Evgeniy Paltsev <paltsev@synopsys.com>
parent 34ab0316
arch/arc/mm/dma.c
@@ -24,7 +24,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	struct page *page;
 	phys_addr_t paddr;
 	void *kvaddr;
-	bool need_coh = !(attrs & DMA_ATTR_NON_CONSISTENT);
 
 	/*
 	 * __GFP_HIGHMEM flag is cleared by upper layer functions
@@ -46,14 +45,10 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * A coherent buffer needs MMU mapping to enforce non-cachability.
 	 * kvaddr is kernel Virtual address (0x7000_0000 based).
 	 */
-	if (need_coh) {
-		kvaddr = ioremap_nocache(paddr, size);
-		if (kvaddr == NULL) {
-			__free_pages(page, order);
-			return NULL;
-		}
-	} else {
-		kvaddr = (void *)(u32)paddr;
+	kvaddr = ioremap_nocache(paddr, size);
+	if (kvaddr == NULL) {
+		__free_pages(page, order);
+		return NULL;
 	}
 
 	/*
@@ -66,9 +61,7 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	 * Currently flush_cache_vmap nukes the L1 cache completely which
 	 * will be optimized as a separate commit
	 */
-	if (need_coh)
-		dma_cache_wback_inv(paddr, size);
-
+	dma_cache_wback_inv(paddr, size);
 	return kvaddr;
 }
@@ -78,9 +71,7 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 	phys_addr_t paddr = dma_handle;
 	struct page *page = virt_to_page(paddr);
 
-	if (!(attrs & DMA_ATTR_NON_CONSISTENT))
-		iounmap((void __force __iomem *)vaddr);
-
+	iounmap((void __force __iomem *)vaddr);
 	__free_pages(page, get_order(size));
 }