Commit 93573016 authored by Lad Prabhakar, committed by Palmer Dabbelt

riscv: dma-mapping: switch over to generic implementation

Add helper functions for cache wback/inval/clean and use them in the
arch_sync_dma_for_device()/arch_sync_dma_for_cpu() functions. The proposed
changes are in preparation for switching over to the generic implementation.
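
As an aside (not part of this patch), here is a minimal, hypothetical
driver-side sketch of the streaming-DMA path that ends up in these hooks
on a non-coherent device; the device programming and completion handling
are elided:

	#include <linux/dma-mapping.h>

	/* dma_map_single(..., DMA_FROM_DEVICE) has the generic code call
	 * arch_sync_dma_for_device(), and dma_unmap_single() has it call
	 * arch_sync_dma_for_cpu(), whenever the device is not coherent. */
	static int example_rx(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t handle;

		handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, handle))
			return -ENOMEM;

		/* ... hand 'handle' to the device, wait for the DMA ... */

		/* Invalidate stale cachelines before the CPU reads 'buf'. */
		dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
		return 0;
	}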

The reorganization of the code is based on the patch (Link[0]) from Arnd.
For now I have dropped the CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU check, as this
option is enabled by default once RISCV_DMA_NONCOHERENT is selected, and I
have also dropped arch_dma_mark_dcache_clean().

Link[0]: https://lore.kernel.org/all/20230327121317.4081816-22-arnd@kernel.org/

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Link: https://lore.kernel.org/r/20230816232336.164413-4-prabhakar.mahadev-lad.rj@bp.renesas.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent 482069eb
@@ -12,21 +12,61 @@
 static bool noncoherent_supported __ro_after_init;
 
-void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
-			      enum dma_data_direction dir)
+static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
 {
 	void *vaddr = phys_to_virt(paddr);
 
+	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
+{
+	void *vaddr = phys_to_virt(paddr);
+
+	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+}
+
+static inline bool arch_sync_dma_clean_before_fromdevice(void)
+{
+	return true;
+}
+
+static inline bool arch_sync_dma_cpu_needs_post_dma_flush(void)
+{
+	return true;
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+			      enum dma_data_direction dir)
+{
 	switch (dir) {
 	case DMA_TO_DEVICE:
-		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+		arch_dma_cache_wback(paddr, size);
 		break;
+
 	case DMA_FROM_DEVICE:
-		ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
-		break;
+		if (!arch_sync_dma_clean_before_fromdevice()) {
+			arch_dma_cache_inv(paddr, size);
+			break;
+		}
+		fallthrough;
+
 	case DMA_BIDIRECTIONAL:
-		ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+		/* Skip the invalidate here if it's done later */
+		if (IS_ENABLED(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) &&
+		    arch_sync_dma_cpu_needs_post_dma_flush())
+			arch_dma_cache_wback(paddr, size);
+		else
+			arch_dma_cache_wback_inv(paddr, size);
 		break;
+
 	default:
 		break;
 	}
 }
@@ -35,15 +75,17 @@ void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
 void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
 			   enum dma_data_direction dir)
 {
-	void *vaddr = phys_to_virt(paddr);
-
 	switch (dir) {
 	case DMA_TO_DEVICE:
 		break;
+
 	case DMA_FROM_DEVICE:
 	case DMA_BIDIRECTIONAL:
-		ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+		/* FROM_DEVICE invalidate needed if speculative CPU prefetch only */
+		if (arch_sync_dma_cpu_needs_post_dma_flush())
+			arch_dma_cache_inv(paddr, size);
 		break;
+
 	default:
 		break;
 	}
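
As a reading aid (not part of the commit), here is a simplified sketch of
how the generic dma-direct layer invokes the two arch hooks; the upstream
code in kernel/dma/direct.c additionally handles swiotlb bouncing and
coherent allocations, and the helper names here are made up:

	#include <linux/dma-direct.h>
	#include <linux/dma-map-ops.h>

	/* Before the device touches memory: write back (and possibly
	 * invalidate) so the device sees up-to-date data. */
	static dma_addr_t sketch_map(struct device *dev, phys_addr_t paddr,
				     size_t size, enum dma_data_direction dir)
	{
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_device(paddr, size, dir);
		return phys_to_dma(dev, paddr);
	}

	/* After the device is done: invalidate so the CPU cannot read
	 * cachelines that went stale while the DMA ran. */
	static void sketch_unmap(struct device *dev, phys_addr_t paddr,
				 size_t size, enum dma_data_direction dir)
	{
		if (!dev_is_dma_coherent(dev))
			arch_sync_dma_for_cpu(paddr, size, dir);
	}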