Commit e7f3a913 authored by Glauber Costa, committed by Ingo Molnar

x86: move dma_sync_sg_for_device to common header

i386 gets an empty function.
Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent ed435dee
@@ -40,6 +40,7 @@ static const struct dma_mapping_ops pci32_dma_ops = {
 	.sync_single_range_for_cpu = NULL,
 	.sync_single_range_for_device = NULL,
 	.sync_sg_for_cpu = NULL,
+	.sync_sg_for_device = NULL,
 };
 
 const struct dma_mapping_ops *dma_ops = &pci32_dma_ops;

@@ -148,4 +148,15 @@ dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
 		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
 	flush_write_buffers();
 }
+
+static inline void
+dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
+		       int nelems, int direction)
+{
+	BUG_ON(!valid_dma_direction(direction));
+	if (dma_ops->sync_sg_for_device)
+		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+
+	flush_write_buffers();
+}
 #endif

@@ -32,13 +32,6 @@ dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
 	BUG_ON(!valid_dma_direction(direction));
 }
 
-static inline void
-dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
-		       enum dma_data_direction direction)
-{
-	flush_write_buffers();
-}
-
 static inline int
 dma_mapping_error(dma_addr_t dma_addr)
 {

@@ -27,18 +27,6 @@ extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	dma_map_single((dev), page_address(page)+(offset), (size), (dir))
 #define dma_unmap_page dma_unmap_single
 
-static inline void
-dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
-		       int nelems, int direction)
-{
-	BUG_ON(!valid_dma_direction(direction));
-	if (dma_ops->sync_sg_for_device) {
-		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
-	}
-	flush_write_buffers();
-}
-
 extern int dma_supported(struct device *hwdev, u64 mask);
 
 /* same for gart, swiotlb, and nommu */
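
To illustrate the "i386 gets an empty function" remark, here is a small, self-contained user-space sketch of the dispatch pattern the new common inline uses. It is not code from this patch: struct device, struct scatterlist, flush_write_buffers() and the hook body are simplified stand-ins invented for the example, and the BUG_ON/valid_dma_direction check is omitted. It only models the control flow: with the i386 ops table leaving .sync_sg_for_device NULL, the wrapper reduces to the write-buffer flush, while a table that installs a hook gets it called before the flush.

/*
 * Stand-alone illustration (mock types, user-space) of the ops-table
 * dispatch moved into the common header above.
 */
#include <stdio.h>

struct device { const char *name; };              /* stand-in for struct device */
struct scatterlist { void *addr; unsigned len; }; /* stand-in for struct scatterlist */

struct dma_mapping_ops {                          /* trimmed to the hook of interest */
	void (*sync_sg_for_device)(struct device *hwdev, struct scatterlist *sg,
				   int nelems, int direction);
};

static void flush_write_buffers(void)             /* mock of the arch write-buffer flush */
{
	printf("  flush_write_buffers()\n");
}

/* i386 case: hook left NULL, as in the pci32_dma_ops hunk above */
static const struct dma_mapping_ops pci32_dma_ops = {
	.sync_sg_for_device = NULL,
};

/* case with a real hook installed; the hook body here is a mock */
static void mock_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
				    int nelems, int direction)
{
	printf("  arch hook syncs %d sg entries for %s\n", nelems, hwdev->name);
}

static const struct dma_mapping_ops hooked_dma_ops = {
	.sync_sg_for_device = mock_sync_sg_for_device,
};

static const struct dma_mapping_ops *dma_ops = &pci32_dma_ops;

/* Mirrors the new common inline: call the hook only if the ops table
 * provides one, then flush write buffers.
 * (BUG_ON(!valid_dma_direction(direction)) omitted in this mock.) */
static void dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
				   int nelems, int direction)
{
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

int main(void)
{
	struct device dev = { "mock-dev" };
	struct scatterlist sg[2] = { { 0, 0 }, { 0, 0 } };

	/* NULL hook: the wrapper behaves like the old empty i386
	 * function, nothing but the flush. */
	printf("pci32_dma_ops (hook NULL):\n");
	dma_sync_sg_for_device(&dev, sg, 2, 1 /* DMA_TO_DEVICE */);

	/* Non-NULL hook: the same wrapper dispatches into it, which is
	 * what the removed per-arch copy used to do in place. */
	printf("hooked_dma_ops:\n");
	dma_ops = &hooked_dma_ops;
	dma_sync_sg_for_device(&dev, sg, 2, 1 /* DMA_TO_DEVICE */);
	return 0;
}

Both behaviours match what the per-architecture copies removed above provided, which is why they can be dropped once the wrapper lives in the shared header.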