Commit a20bb058 authored by Christoph Hellwig

dma-direct: add an explicit dma_direct_get_required_mask

This is somewhat modelled after the powerpc version, and differs from
the legacy fallback in using fls64 instead of pointlessly splitting up
the address into low and high dwords, and in that it takes (__)phys_to_dma
into account.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent c6d43812
...@@ -55,6 +55,7 @@ static inline void dma_mark_clean(void *addr, size_t size) ...@@ -55,6 +55,7 @@ static inline void dma_mark_clean(void *addr, size_t size)
} }
#endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */ #endif /* CONFIG_ARCH_HAS_DMA_MARK_CLEAN */
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle, void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs); gfp_t gfp, unsigned long attrs);
void dma_direct_free(struct device *dev, size_t size, void *cpu_addr, void dma_direct_free(struct device *dev, size_t size, void *cpu_addr,
......
...@@ -4,6 +4,7 @@ ...@@ -4,6 +4,7 @@
* *
* DMA operations that map physical memory directly without using an IOMMU. * DMA operations that map physical memory directly without using an IOMMU.
*/ */
#include <linux/bootmem.h> /* for max_pfn */
#include <linux/export.h> #include <linux/export.h>
#include <linux/mm.h> #include <linux/mm.h>
#include <linux/dma-direct.h> #include <linux/dma-direct.h>
...@@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size, ...@@ -53,11 +54,25 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
return true; return true;
} }
/*
 * Translate a CPU physical address into a device-visible DMA address,
 * picking the unencrypted translation when memory encryption forces
 * DMA to go through unencrypted mappings.
 */
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
		phys_addr_t phys)
{
	return force_dma_unencrypted() ? __phys_to_dma(dev, phys) :
					 phys_to_dma(dev, phys);
}
u64 dma_direct_get_required_mask(struct device *dev)
{
u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
}
/*
 * Check whether a buffer of @size bytes starting at physical address
 * @phys is entirely addressable within the device's coherent DMA mask.
 * (The scraped diff interleaved the old and new bodies of this function;
 * this is the new-side implementation, which shares the encrypted-memory
 * handling via phys_to_dma_direct.)
 */
static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
{
	return phys_to_dma_direct(dev, phys) + size - 1 <=
			dev->coherent_dma_mask;
}
void *dma_direct_alloc_pages(struct device *dev, size_t size, void *dma_direct_alloc_pages(struct device *dev, size_t size,
...@@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = { ...@@ -296,6 +311,7 @@ const struct dma_map_ops dma_direct_ops = {
.unmap_page = dma_direct_unmap_page, .unmap_page = dma_direct_unmap_page,
.unmap_sg = dma_direct_unmap_sg, .unmap_sg = dma_direct_unmap_sg,
#endif #endif
.get_required_mask = dma_direct_get_required_mask,
.dma_supported = dma_direct_supported, .dma_supported = dma_direct_supported,
.mapping_error = dma_direct_mapping_error, .mapping_error = dma_direct_mapping_error,
.cache_sync = arch_dma_cache_sync, .cache_sync = arch_dma_cache_sync,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment