Commit b79f300c authored by Lad Prabhakar, committed by Palmer Dabbelt

riscv: mm: dma-noncoherent: nonstandard cache operations support

Introduce support for nonstandard noncoherent systems in the RISC-V
architecture. This adds function pointer support so that cache management
on such systems can be handled by platform-provided routines.

This patch adds a new configuration option, "RISCV_NONSTANDARD_CACHE_OPS":
a boolean that depends on "RISCV_DMA_NONCOHERENT" and enables the function
pointer support for cache management on nonstandard noncoherent systems.

Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
Tested-by: Conor Dooley <conor.dooley@microchip.com> # tyre-kicking on a d1
Reviewed-by: Emil Renner Berthing <emil.renner.berthing@canonical.com>
Tested-by: Emil Renner Berthing <emil.renner.berthing@canonical.com> #
Link: https://lore.kernel.org/r/20230818135723.80612-4-prabhakar.mahadev-lad.rj@bp.renesas.com
Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
parent e021ae7f
@@ -269,6 +269,13 @@ config RISCV_DMA_NONCOHERENT
	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
	select DMA_DIRECT_REMAP

config RISCV_NONSTANDARD_CACHE_OPS
	bool
	depends on RISCV_DMA_NONCOHERENT
	help
	  This enables function pointer support for non-standard noncoherent
	  systems to handle cache management.

config AS_HAS_INSN
	def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2023 Renesas Electronics Corp.
 */

#ifndef __ASM_DMA_NONCOHERENT_H
#define __ASM_DMA_NONCOHERENT_H

#include <linux/dma-direct.h>

/*
 * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
 *
 * @wback: Function pointer for cache writeback
 * @inv: Function pointer for invalidating cache
 * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
 */
struct riscv_nonstd_cache_ops {
	void (*wback)(phys_addr_t paddr, size_t size);
	void (*inv)(phys_addr_t paddr, size_t size);
	void (*wback_inv)(phys_addr_t paddr, size_t size);
};

extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;

void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);

#endif	/* __ASM_DMA_NONCOHERENT_H */
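
To make the intended use of this header concrete, here is a minimal, hedged sketch of how a platform with a nonstandard cache controller could fill in the structure. It is not part of the patch: the soc_cache_*() helpers, their names, and their behaviour are hypothetical placeholders for whatever MMIO cache-maintenance sequences the hardware actually requires.

/*
 * Illustrative sketch only (not part of this commit): a hypothetical platform
 * cache driver providing the three callbacks declared above. The soc_cache_*()
 * bodies stand in for the platform's real cache-maintenance sequences.
 */
#include <linux/types.h>
#include <asm/dma-noncoherent.h>

static void soc_cache_clean_range(phys_addr_t paddr, size_t size)
{
	/* hypothetical: write back dirty lines covering [paddr, paddr + size) */
}

static void soc_cache_inval_range(phys_addr_t paddr, size_t size)
{
	/* hypothetical: invalidate lines covering [paddr, paddr + size) */
}

static void soc_cache_flush_range(phys_addr_t paddr, size_t size)
{
	/* hypothetical: write back and then invalidate the range */
}

static const struct riscv_nonstd_cache_ops soc_cache_ops = {
	.wback		= soc_cache_clean_range,
	.inv		= soc_cache_inval_range,
	.wback_inv	= soc_cache_flush_range,
};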
@@ -9,13 +9,26 @@
#include <linux/dma-map-ops.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

static bool noncoherent_supported __ro_after_init;

struct riscv_nonstd_cache_ops noncoherent_cache_ops __ro_after_init = {
	.wback = NULL,
	.inv = NULL,
	.wback_inv = NULL,
};

static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
}
@@ -23,6 +36,13 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
}
@@ -30,6 +50,13 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
{
	void *vaddr = phys_to_virt(paddr);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(paddr, size);
		return;
	}
#endif
	ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
}
@@ -95,6 +122,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
{
	void *flush_addr = page_address(page);

#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback_inv)) {
		noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
		return;
	}
#endif
	ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
}
@@ -120,3 +154,12 @@ void riscv_noncoherent_supported(void)
	     "Non-coherent DMA support enabled without a block size\n");
	noncoherent_supported = true;
}

void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops)
{
	if (!ops)
		return;

	noncoherent_cache_ops = *ops;
}
EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
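
Continuing the sketch from above, registration of those hypothetical ops would need to happen early in boot; the early_initcall below and the soc_cache_ops_init() name are assumptions for illustration, since the real registration point is platform specific. Because the arch_dma_cache_*() helpers only consult the registered pointers and otherwise fall through to the Zicbom ALT_CMO_OP path, registration must precede the first noncoherent DMA or pmem maintenance call that should use the nonstandard path.

/*
 * Illustrative sketch only: hand the hypothetical soc_cache_ops from the
 * previous example to the core code. Registration merely copies the
 * structure into noncoherent_cache_ops.
 */
#include <linux/init.h>

static int __init soc_cache_ops_init(void)
{
	/* e.g. ioremap() the cache controller registers first (omitted) */
	riscv_noncoherent_register_cache_ops(&soc_cache_ops);
	return 0;
}
early_initcall(soc_cache_ops_init);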
@@ -7,15 +7,28 @@
#include <linux/libnvdimm.h>

#include <asm/cacheflush.h>
#include <asm/dma-noncoherent.h>

void arch_wb_cache_pmem(void *addr, size_t size)
{
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.wback)) {
		noncoherent_cache_ops.wback(virt_to_phys(addr), size);
		return;
	}
#endif
	ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);

void arch_invalidate_pmem(void *addr, size_t size)
{
#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
	if (unlikely(noncoherent_cache_ops.inv)) {
		noncoherent_cache_ops.inv(virt_to_phys(addr), size);
		return;
	}
#endif
	ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);