Commit 843aef49 authored by David Daney, committed by Ralf Baechle

MIPS: Adjust the dma-common.c platform hooks.

We add a dev parameter to plat_unmap_dma_mem(), and add hooks for
plat_dma_supported(), plat_extra_sync_for_device() and
plat_dma_mapping_error(); these should be no-op changes for all
existing targets.
Signed-off-by: David Daney <ddaney@caviumnetworks.com>
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent ec454d8c
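
The new hooks are intended to be overridden per platform in that platform's
mach-*/dma-coherence.h; the hunks below only add the generic defaults. As a
rough illustration of the intent, here is a sketch of a non-default override
for a hypothetical platform (the 32-bit mask requirement and the wmb()
barrier are illustrative assumptions, not code from this commit):

static inline int plat_dma_supported(struct device *dev, u64 mask)
{
	/* Hypothetical: this platform's DMA engine needs at least a
	 * 32-bit mask, so narrower masks are rejected. */
	if (mask < DMA_BIT_MASK(32))
		return 0;

	return 1;
}

static inline void plat_extra_sync_for_device(struct device *dev)
{
	/* Hypothetical: drain a write buffer in front of the DMA
	 * engine before the device looks at memory; wmb() is a
	 * stand-in for whatever the hardware actually requires. */
	wmb();
}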
@@ -28,10 +28,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 #ifdef CONFIG_DMA_COHERENT
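
The DMA_BIT_MASK(24) check above preserves the previous behaviour of
dma_supported(): allocations fall back to the 16 MB GFP_DMA zone when the
mask is not all 1s, so a mask narrower than 24 bits cannot be honoured. For
reference, DMA_BIT_MASK() is the stock kernel macro (quoted from
<linux/dma-mapping.h> of this era; e.g. DMA_BIT_MASK(24) == 0xffffff):

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))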
@@ -38,10 +38,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & ~(0xffUL << 56);
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 1;		/* IP27 non-cohernet mode is unsupported */
@@ -60,10 +60,34 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return addr;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;		/* IP32 is non-cohernet */
@@ -27,11 +27,35 @@ static unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return vdma_log2phys(dma_addr);
 }
 
-static void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 	vdma_free(dma_addr);
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
@@ -30,10 +30,34 @@ static inline unsigned long plat_dma_addr_to_phys(dma_addr_t dma_addr)
 	return dma_addr & 0x7fffffff;
 }
 
-static inline void plat_unmap_dma_mem(dma_addr_t dma_addr)
+static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr)
 {
 }
 
+static inline int plat_dma_supported(struct device *dev, u64 mask)
+{
+	/*
+	 * we fall back to GFP_DMA when the mask isn't all 1s,
+	 * so we can't guarantee allocations that must be
+	 * within a tighter range than GFP_DMA..
+	 */
+	if (mask < DMA_BIT_MASK(24))
+		return 0;
+
+	return 1;
+}
+
+static inline void plat_extra_sync_for_device(struct device *dev)
+{
+	return;
+}
+
+static inline int plat_dma_mapping_error(struct device *dev,
+	dma_addr_t dma_addr)
+{
+	return 0;
+}
+
 static inline int plat_device_is_coherent(struct device *dev)
 {
 	return 0;
@@ -111,7 +111,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -122,7 +122,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	plat_unmap_dma_mem(dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle);
 
 	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
@@ -173,7 +173,7 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 		__dma_sync(dma_addr_to_virt(dma_addr), size,
 		           direction);
 
-	plat_unmap_dma_mem(dma_addr);
+	plat_unmap_dma_mem(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -229,7 +229,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 		dma_cache_wback_inv(addr, size);
 	}
 
-	plat_unmap_dma_mem(dma_address);
+	plat_unmap_dma_mem(dev, dma_address);
 }
 
 EXPORT_SYMBOL(dma_unmap_page);
@@ -249,7 +249,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 			if (addr)
 				__dma_sync(addr, sg->length, direction);
 		}
-		plat_unmap_dma_mem(sg->dma_address);
+		plat_unmap_dma_mem(dev, sg->dma_address);
 	}
 }
@@ -275,6 +275,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
@@ -305,6 +306,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
@@ -351,22 +353,14 @@ EXPORT_SYMBOL(dma_sync_sg_for_device);
 
 int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-	return 0;
+	return plat_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(dma_mapping_error);
 
 int dma_supported(struct device *dev, u64 mask)
 {
-	/*
-	 * we fall back to GFP_DMA when the mask isn't all 1s,
-	 * so we can't guarantee allocations that must be
-	 * within a tighter range than GFP_DMA..
-	 */
-	if (mask < DMA_BIT_MASK(24))
-		return 0;
-
-	return 1;
+	return plat_dma_supported(dev, mask);
 }
 
 EXPORT_SYMBOL(dma_supported);
@@ -383,6 +377,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 {
 	BUG_ON(direction == DMA_NONE);
 
+	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
 		__dma_sync((unsigned long)vaddr, size, direction);
 }