Commit 51fde349 authored by Marek Szyprowski

ARM: dma-mapping: remove redundant code and do the cleanup

This patch just performs a global cleanup of the DMA mapping implementation
for the ARM architecture. Some of the tiny helper functions have been inlined
into their callers, and some have been merged together; a simplified sketch of
this pattern follows the commit metadata below.
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Acked-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Tested-By: Subash Patel <subash.ramaswamy@linaro.org>
parent 15237e1f
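
For readers skimming the diff, here is a minimal, self-contained sketch of the
pattern this cleanup applies. Every name in it (is_coherent(),
cache_clean_range(), old_map_page(), new_map_page()) is a placeholder for
illustration only, not one of the kernel's actual symbols: a tiny forwarding
wrapper that merely hides a coherency check is removed, and the check is
performed directly in the caller, so the remaining helper can become a plain
static function.

#include <stdio.h>
#include <stddef.h>

static int is_coherent(void)            /* stand-in for arch_is_coherent() */
{
	return 0;
}

static void cache_clean_range(void *addr, size_t size)  /* stand-in for the cache op */
{
	printf("cleaning %zu bytes at %p\n", size, addr);
}

/* Before the cleanup: the caller goes through a forwarding wrapper. */
static inline void old_wrapper_cpu_to_dev(void *addr, size_t size)
{
	if (!is_coherent())
		cache_clean_range(addr, size);
}

static void old_map_page(void *addr, size_t size)
{
	old_wrapper_cpu_to_dev(addr, size);  /* extra indirection, no added value */
}

/* After the cleanup: the coherency check lives in the caller itself. */
static void new_map_page(void *addr, size_t size)
{
	if (!is_coherent())
		cache_clean_range(addr, size);
}

int main(void)
{
	char buf[64];

	old_map_page(buf, sizeof(buf));
	new_map_page(buf, sizeof(buf));
	return 0;
}
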
@@ -40,64 +40,12 @@
  * the CPU does do speculative prefetches, which means we clean caches
  * before transfers and delay cache invalidation until transfer completion.
  *
- * Private support functions: these are not part of the API and are
- * liable to change.  Drivers must not use these.
  */
-static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_cpu_to_dev(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_cpu_to_dev(kaddr, size, dir);
-}
-
-static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
-	enum dma_data_direction dir)
-{
-	extern void ___dma_single_dev_to_cpu(const void *, size_t,
-		enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_single_dev_to_cpu(kaddr, size, dir);
-}
-
-static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_cpu_to_dev(page, off, size, dir);
-}
-
-static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
-	size_t size, enum dma_data_direction dir)
-{
-	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
-		size_t, enum dma_data_direction);
-
-	if (!arch_is_coherent())
-		___dma_page_dev_to_cpu(page, off, size, dir);
-}
-
-static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
-	     unsigned long offset, size_t size, enum dma_data_direction dir)
-{
-	__dma_page_cpu_to_dev(page, offset, size, dir);
-	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
-}
-
-static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
-	size_t size, enum dma_data_direction dir)
-{
-	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
-		handle & ~PAGE_MASK, size, dir);
-}
+static void __dma_page_cpu_to_dev(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
+static void __dma_page_dev_to_cpu(struct page *, unsigned long,
+		size_t, enum dma_data_direction);
 
 /**
  * arm_dma_map_page - map a portion of a page for streaming DMA
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
@@ -112,11 +60,13 @@ static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
  * The device owns this memory once this call has completed.  The CPU
  * can regain ownership by calling dma_unmap_page().
  */
-static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
+static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
 	     unsigned long offset, size_t size, enum dma_data_direction dir,
 	     struct dma_attrs *attrs)
 {
-	return __dma_map_page(dev, page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
+	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
 }
 
 /**
@@ -133,26 +83,30 @@ static inline dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
  * After this call, reads by the CPU to the buffer are guaranteed to see
  * whatever the device wrote there.
  */
-static inline void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
+static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir,
 		struct dma_attrs *attrs)
 {
-	__dma_unmap_page(dev, handle, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
+				      handle & ~PAGE_MASK, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_cpu(struct device *dev,
+static void arm_dma_sync_single_for_cpu(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_dev_to_cpu(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_dev_to_cpu(page, offset, size, dir);
 }
 
-static inline void arm_dma_sync_single_for_device(struct device *dev,
+static void arm_dma_sync_single_for_device(struct device *dev,
 		dma_addr_t handle, size_t size, enum dma_data_direction dir)
 {
 	unsigned int offset = handle & (PAGE_SIZE - 1);
 	struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
-	__dma_page_cpu_to_dev(page, offset, size, dir);
+	if (!arch_is_coherent())
+		__dma_page_cpu_to_dev(page, offset, size, dir);
 }
@@ -647,7 +601,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
 	} while (left);
 }
 
-void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
+/*
+ * Make an area consistent for devices.
+ * Note: Drivers should NOT use this function directly, as it will break
+ * platforms with CONFIG_DMABOUNCE.
+ * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
+ */
+static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr;
@@ -663,7 +623,7 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
 	/* FIXME: non-speculating: flush on bidirectional mappings? */
 }
 
-void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
+static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
 	size_t size, enum dma_data_direction dir)
 {
 	unsigned long paddr = page_to_phys(page) + off;
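
For context, the sketch below shows how a driver typically exercises these ops
through the generic streaming DMA API (dma_map_page() is routed to
arm_dma_map_page() via the device's dma_map_ops, and likewise for the sync and
unmap calls). The dev and page arguments are assumed to be provided by the
surrounding driver, and example_dma_rx() is a made-up name; this illustrates
the ownership rules documented in the hunks above and is not part of this
commit.

#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_dma_rx(struct device *dev, struct page *page, size_t len)
{
	dma_addr_t handle;

	/* Hand the buffer to the device; caches are maintained here. */
	handle = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;

	/* ... start the transfer and wait for it to complete ... */

	/* Let the CPU look at the data while keeping the mapping alive. */
	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	/* ... inspect the received data ... */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);

	/* CPU regains ownership for good; the mapping is torn down. */
	dma_unmap_page(dev, handle, len, DMA_FROM_DEVICE);
	return 0;
}
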