Commit 878ec367 authored by Vladimir Murzin's avatar Vladimir Murzin Committed by Christoph Hellwig

ARM: NOMMU: Wire-up default DMA interface

The way the default DMA pool is exposed has changed, and we now need to
use a dedicated interface to work with it. This patch makes the
alloc/release operations use that interface. Since the default DMA pool
is no longer handled by generic code, we have to implement our own mmap
operation.
Tested-by: Andras Szemzo <sza@esh.hu>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Vladimir Murzin <vladimir.murzin@arm.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 43fc509c
...@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, ...@@ -40,9 +40,21 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
{ {
const struct dma_map_ops *ops = &dma_noop_ops; const struct dma_map_ops *ops = &dma_noop_ops;
void *ret;
/* /*
* We are here because: * Try generic allocator first if we are advertised that
* consistency is not required.
*/
if (attrs & DMA_ATTR_NON_CONSISTENT)
return ops->alloc(dev, size, dma_handle, gfp, attrs);
ret = dma_alloc_from_global_coherent(size, dma_handle);
/*
* dma_alloc_from_global_coherent() may fail because:
*
* - no consistent DMA region has been defined, so we can't * - no consistent DMA region has been defined, so we can't
* continue. * continue.
* - there is no space left in consistent DMA region, so we * - there is no space left in consistent DMA region, so we
...@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size, ...@@ -50,11 +62,8 @@ static void *arm_nommu_dma_alloc(struct device *dev, size_t size,
* advertised that consistency is not required. * advertised that consistency is not required.
*/ */
if (attrs & DMA_ATTR_NON_CONSISTENT) WARN_ON_ONCE(ret == NULL);
return ops->alloc(dev, size, dma_handle, gfp, attrs); return ret;
WARN_ON_ONCE(1);
return NULL;
} }
static void arm_nommu_dma_free(struct device *dev, size_t size, static void arm_nommu_dma_free(struct device *dev, size_t size,
...@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size, ...@@ -63,14 +72,31 @@ static void arm_nommu_dma_free(struct device *dev, size_t size,
{ {
const struct dma_map_ops *ops = &dma_noop_ops; const struct dma_map_ops *ops = &dma_noop_ops;
if (attrs & DMA_ATTR_NON_CONSISTENT) if (attrs & DMA_ATTR_NON_CONSISTENT) {
ops->free(dev, size, cpu_addr, dma_addr, attrs); ops->free(dev, size, cpu_addr, dma_addr, attrs);
else } else {
WARN_ON_ONCE(1); int ret = dma_release_from_global_coherent(get_order(size),
cpu_addr);
WARN_ON_ONCE(ret == 0);
}
return; return;
} }
/*
 * mmap callback for the NOMMU DMA ops: map a coherent allocation into
 * user space.  Allocations served from the global coherent pool must be
 * mapped by that pool's own helper; everything else falls through to the
 * generic implementation.
 */
static int arm_nommu_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	int status;

	/*
	 * If the buffer came from the global coherent pool, the helper
	 * performs the mapping itself and reports the result via status.
	 */
	if (dma_mmap_from_global_coherent(vma, cpu_addr, size, &status))
		return status;

	/* Not a global-pool buffer: use the common mmap path. */
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size, static void __dma_page_cpu_to_dev(phys_addr_t paddr, size_t size,
enum dma_data_direction dir) enum dma_data_direction dir)
{ {
...@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist ...@@ -173,6 +199,7 @@ static void arm_nommu_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist
const struct dma_map_ops arm_nommu_dma_ops = { const struct dma_map_ops arm_nommu_dma_ops = {
.alloc = arm_nommu_dma_alloc, .alloc = arm_nommu_dma_alloc,
.free = arm_nommu_dma_free, .free = arm_nommu_dma_free,
.mmap = arm_nommu_dma_mmap,
.map_page = arm_nommu_dma_map_page, .map_page = arm_nommu_dma_map_page,
.unmap_page = arm_nommu_dma_unmap_page, .unmap_page = arm_nommu_dma_unmap_page,
.map_sg = arm_nommu_dma_map_sg, .map_sg = arm_nommu_dma_map_sg,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment