Commit fa7e2247 authored by Christoph Hellwig

dma-direct: make uncached_kernel_address more general

Rename the symbol to arch_dma_set_uncached, pass a size to it, and allow
an error return.  That will allow reusing this hook for in-place page
table remapping.

As the in-place remap doesn't always require an explicit cache flush,
also disentangle ARCH_HAS_DMA_PREP_COHERENT from ARCH_HAS_DMA_SET_UNCACHED:
architectures that still need the flush select ARCH_HAS_DMA_PREP_COHERENT
themselves.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent 3d0fc341
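
To make the new contract concrete, a minimal sketch of an architecture-side
implementation follows. It is not taken from any file in this commit:
some_arch_can_uncache() and ARCH_UNCACHED_OFFSET are made-up placeholders.
The sketch only illustrates that the hook now receives the allocation size
and may fail by returning an ERR_PTR() value, which the dma_direct_alloc_pages()
hunk below checks with IS_ERR() before freeing the pages again.

#include <linux/err.h>
#include <linux/dma-noncoherent.h>

/* Hypothetical implementation of the renamed hook (sketch only). */
void *arch_dma_set_uncached(void *addr, size_t size)
{
        /*
         * some_arch_can_uncache() stands in for whatever check an
         * architecture needs; real users either return an alias in an
         * uncached segment or remap the page tables for
         * [addr, addr + size) in place.
         */
        if (!some_arch_can_uncache(addr, size))
                return ERR_PTR(-EINVAL);        /* caller takes its error path */

        /*
         * ARCH_UNCACHED_OFFSET is an invented alias offset, similar in
         * spirit to UNCAC_BASE on MIPS or the KSEG bypass offset on xtensa.
         */
        return addr + ARCH_UNCACHED_OFFSET;
}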
@@ -248,11 +248,11 @@ config ARCH_HAS_SET_DIRECT_MAP
         bool

 #
-# Select if arch has an uncached kernel segment and provides the
-# uncached_kernel_address symbol to use it
+# Select if the architecture provides the arch_dma_set_uncached symbol to
+# either provide an uncached segment alias for a DMA allocation, or
+# to remap the page tables in place.
 #
-config ARCH_HAS_UNCACHED_SEGMENT
-        select ARCH_HAS_DMA_PREP_COHERENT
+config ARCH_HAS_DMA_SET_UNCACHED
         bool

 # Select if arch init_task must go in the __init_task_data section
@@ -8,7 +8,7 @@ config MICROBLAZE
         select ARCH_HAS_GCOV_PROFILE_ALL
         select ARCH_HAS_SYNC_DMA_FOR_CPU
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-        select ARCH_HAS_UNCACHED_SEGMENT if !MMU
+        select ARCH_HAS_DMA_SET_UNCACHED if !MMU
         select ARCH_MIGHT_HAVE_PC_PARPORT
         select ARCH_WANT_IPC_PARSE_VERSION
         select BUILDTIME_TABLE_SORT
@@ -40,7 +40,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 #define UNCACHED_SHADOW_MASK 0
 #endif /* CONFIG_XILINX_UNCACHED_SHADOW */

-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
         unsigned long addr = (unsigned long)ptr;
@@ -1187,8 +1187,9 @@ config DMA_NONCOHERENT
         # significant advantages.
         #
         select ARCH_HAS_DMA_WRITE_COMBINE
+        select ARCH_HAS_DMA_PREP_COHERENT
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-        select ARCH_HAS_UNCACHED_SEGMENT
+        select ARCH_HAS_DMA_SET_UNCACHED
         select DMA_NONCOHERENT_MMAP
         select DMA_NONCOHERENT_CACHE_SYNC
         select NEED_DMA_MAP_STATE
@@ -49,7 +49,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
         dma_cache_wback_inv((unsigned long)page_address(page), size);
 }

-void *uncached_kernel_address(void *addr)
+void *arch_dma_set_uncached(void *addr, size_t size)
 {
         return (void *)(__pa(addr) + UNCAC_BASE);
 }
@@ -2,9 +2,10 @@
 config NIOS2
         def_bool y
         select ARCH_32BIT_OFF_T
+        select ARCH_HAS_DMA_PREP_COHERENT
         select ARCH_HAS_SYNC_DMA_FOR_CPU
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE
-        select ARCH_HAS_UNCACHED_SEGMENT
+        select ARCH_HAS_DMA_SET_UNCACHED
         select ARCH_NO_SWAP
         select TIMER_OF
         select GENERIC_ATOMIC64
@@ -67,7 +67,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
         flush_dcache_range(start, start + size);
 }

-void *uncached_kernel_address(void *ptr)
+void *arch_dma_set_uncached(void *ptr, size_t size)
 {
         unsigned long addr = (unsigned long)ptr;
@@ -6,7 +6,7 @@ config XTENSA
         select ARCH_HAS_DMA_PREP_COHERENT if MMU
         select ARCH_HAS_SYNC_DMA_FOR_CPU if MMU
         select ARCH_HAS_SYNC_DMA_FOR_DEVICE if MMU
-        select ARCH_HAS_UNCACHED_SEGMENT if MMU
+        select ARCH_HAS_DMA_SET_UNCACHED if MMU
         select ARCH_USE_QUEUED_RWLOCKS
         select ARCH_USE_QUEUED_SPINLOCKS
         select ARCH_WANT_FRAME_POINTERS
@@ -92,7 +92,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
  * coherent DMA memory operations when CONFIG_MMU is not enabled.
  */
 #ifdef CONFIG_MMU
-void *uncached_kernel_address(void *p)
+void *arch_dma_set_uncached(void *p, size_t size)
 {
         return p + XCHAL_KSEG_BYPASS_VADDR - XCHAL_KSEG_CACHED_VADDR;
 }
@@ -108,6 +108,6 @@ static inline void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 #endif /* CONFIG_ARCH_HAS_DMA_PREP_COHERENT */

-void *uncached_kernel_address(void *addr);
+void *arch_dma_set_uncached(void *addr, size_t size);

 #endif /* _LINUX_DMA_NONCOHERENT_H */
@@ -192,10 +192,12 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
         memset(ret, 0, size);

-        if (IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+        if (IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
             dma_alloc_need_uncached(dev, attrs)) {
                 arch_dma_prep_coherent(page, size);
-                ret = uncached_kernel_address(ret);
+                ret = arch_dma_set_uncached(ret, size);
+                if (IS_ERR(ret))
+                        goto out_free_pages;
         }
 done:
         if (force_dma_unencrypted(dev))
@@ -236,7 +238,7 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 void *dma_direct_alloc(struct device *dev, size_t size,
                 dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
 {
-        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
             !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             dma_alloc_need_uncached(dev, attrs))
                 return arch_dma_alloc(dev, size, dma_handle, gfp, attrs);
@@ -246,7 +248,7 @@ void *dma_direct_alloc(struct device *dev, size_t size,
 void dma_direct_free(struct device *dev, size_t size,
                 void *cpu_addr, dma_addr_t dma_addr, unsigned long attrs)
 {
-        if (!IS_ENABLED(CONFIG_ARCH_HAS_UNCACHED_SEGMENT) &&
+        if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_SET_UNCACHED) &&
             !IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
             dma_alloc_need_uncached(dev, attrs))
                 arch_dma_free(dev, size, cpu_addr, dma_addr, attrs);