Commit 2638b4db authored by Russell King, committed by Russell King

[ARM] dma: Reduce to one dma_sync_sg_* implementation

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent 01135d92
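
As a reading aid for the diff below, here is a stand-alone sketch (plain user-space C, not kernel source) of the dispatch pattern this patch introduces: dmabounce now exposes a pair of private helpers, dmabounce_sync_for_cpu()/dmabounce_sync_for_device(), and the single generic dma_sync_sg_* implementation asks them whether a segment was already handled by a bounce buffer before falling back to cache maintenance. When CONFIG_DMABOUNCE is disabled, the helpers collapse to macros that return 1 ("not handled"), so the generic path is unchanged. The types and stub bodies below are illustrative stand-ins, not the kernel's definitions.

/*
 * Sketch of the dispatch pattern from this commit, as plain user-space C.
 * Kernel types (struct device, scatterlist, dma_addr_t) are replaced by
 * minimal stand-ins; only the control flow mirrors the patch.
 */
#include <stdio.h>
#include <stddef.h>

typedef unsigned long dma_addr_t;
enum dma_data_direction { DMA_BIDIRECTIONAL, DMA_TO_DEVICE, DMA_FROM_DEVICE, DMA_NONE };

struct scatterlist {
	dma_addr_t dma_address;
	unsigned int length;
};

#ifdef CONFIG_DMABOUNCE
/*
 * With CONFIG_DMABOUNCE, the real helper returns 0 when the address maps to
 * a bounce buffer it has already synced, non-zero otherwise.  This stub
 * pretends every segment is bounced, purely for illustration.
 */
static int dmabounce_sync_for_cpu(void *dev, dma_addr_t dma, unsigned long off,
				  size_t sz, enum dma_data_direction dir)
{
	printf("dmabounce handled %#lx (+%lu, %zu bytes)\n", dma, off, sz);
	return 0;
}
#else
/* Without CONFIG_DMABOUNCE the stub always reports "not handled" (1). */
#define dmabounce_sync_for_cpu(dev, dma, off, sz, dir)	(1)
#endif

/* Stand-in for the ARM cache maintenance done on non-bounced buffers. */
static void dma_cache_maint(dma_addr_t dma, size_t len, enum dma_data_direction dir)
{
	printf("cache maintenance on %#lx, %zu bytes\n", dma, len);
}

/* The single dma_sync_sg_for_cpu body shared by both configurations. */
static void dma_sync_sg_for_cpu(void *dev, struct scatterlist *sg, int nents,
				enum dma_data_direction dir)
{
	for (int i = 0; i < nents; i++) {
		struct scatterlist *s = &sg[i];

		if (!dmabounce_sync_for_cpu(dev, s->dma_address, 0,
					    s->length, dir))
			continue;	/* bounce buffer already synced */

		dma_cache_maint(s->dma_address, s->length, dir);
	}
}

int main(void)
{
	struct scatterlist sg[2] = { { 0x1000, 64 }, { 0x2000, 128 } };

	dma_sync_sg_for_cpu(NULL, sg, 2, DMA_FROM_DEVICE);
	return 0;
}

Built with -DCONFIG_DMABOUNCE the sketch reports every segment as handled by the bounce layer and skips cache maintenance; built without it, the stub macro returns 1 and every segment goes through dma_cache_maint(). Those are the two cases the unified dma_sync_sg_* body now covers.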
...
@@ -468,45 +468,23 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_addr,
 }
 EXPORT_SYMBOL(dma_sync_single_range_for_device);
 
-void
-dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir)
+int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	struct scatterlist *s;
-	int i;
-
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
-
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sg, s, nents, i) {
-		dma_addr_t dma_addr = s->dma_address;
-		unsigned int length = s->length;
-
-		sync_single(dev, dma_addr, length, dir);
-	}
+	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
+	return sync_single(dev, addr, off + sz, dir);
 }
+EXPORT_SYMBOL(dmabounce_sync_for_cpu);
 
-void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
-			enum dma_data_direction dir)
+int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
+		unsigned long off, size_t sz, enum dma_data_direction dir)
 {
-	struct scatterlist *s;
-	int i;
-
-	dev_dbg(dev, "%s(sg=%p,nents=%d,dir=%x)\n",
-		__func__, sg, nents, dir);
-
-	BUG_ON(dir == DMA_NONE);
-
-	for_each_sg(sg, s, nents, i) {
-		dma_addr_t dma_addr = s->dma_address;
-		unsigned int length = s->length;
-
-		sync_single(dev, dma_addr, length, dir);
-	}
+	dev_dbg(dev, "%s(dma=%#lx,off=%#lx,sz=%zx,dir=%x)\n",
+		__func__, addr, off, sz, dir);
+	return sync_single(dev, addr, off + sz, dir);
 }
+EXPORT_SYMBOL(dmabounce_sync_for_device);
 
 static int
 dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, const char *name,
...
@@ -618,8 +596,6 @@ dmabounce_unregister_dev(struct device *dev)
 
 EXPORT_SYMBOL(dma_map_single);
 EXPORT_SYMBOL(dma_unmap_single);
-EXPORT_SYMBOL(dma_sync_sg_for_cpu);
-EXPORT_SYMBOL(dma_sync_sg_for_device);
 EXPORT_SYMBOL(dmabounce_register_dev);
 EXPORT_SYMBOL(dmabounce_unregister_dev);
 
...
@@ -410,6 +410,17 @@ extern void dmabounce_unregister_dev(struct device *);
  *
  */
 extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
+
+/*
+ * Private functions
+ */
+int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
+		size_t, enum dma_data_direction);
+#else
+#define dmabounce_sync_for_cpu(dev,dma,off,sz,dir)	(1)
+#define dmabounce_sync_for_device(dev,dma,off,sz,dir)	(1)
 #endif /* CONFIG_DMABOUNCE */
 
 #endif /* __KERNEL__ */
...
@@ -571,7 +571,6 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
 }
 EXPORT_SYMBOL(dma_unmap_sg);
 
-#ifndef CONFIG_DMABOUNCE
 /**
  * dma_sync_sg_for_cpu
  * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
...
@@ -586,6 +585,10 @@ void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
+		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
+					    sg_dma_len(s), dir))
+			continue;
+
 		if (!arch_is_coherent())
 			dma_cache_maint(sg_virt(s), s->length, dir);
 	}
...
@@ -606,9 +609,12 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for_each_sg(sg, s, nents, i) {
+		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
+					       sg_dma_len(s), dir))
+			continue;
+
 		if (!arch_is_coherent())
 			dma_cache_maint(sg_virt(s), s->length, dir);
 	}
 }
 EXPORT_SYMBOL(dma_sync_sg_for_device);
-#endif