Commit 6c505ce3 authored by Joerg Roedel, committed by Ingo Molnar

x86: move dma_*_coherent functions to include file

All the x86 DMA-API functions are defined in asm/dma-mapping.h. This patch
moves the dma_*_coherent functions into this header file as well, since they
are now small enough to do so.
This is done as a separate patch because it also includes some renaming and
restructuring of the dma-mapping.h file.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent c647c3bb
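
For context, a minimal sketch of a typical caller of the API being moved is
shown below. This is illustrative only and not part of the patch; the
example_setup_ring() function, the pci_dev argument, and the PAGE_SIZE buffer
size are assumptions:

/* Hypothetical driver code calling the dma_*_coherent API that this
 * patch turns into static inlines. */
#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int example_setup_ring(struct pci_dev *pdev)
{
	dma_addr_t ring_dma;
	void *ring;

	/* After this patch, the call expands inline from asm/dma-mapping.h
	 * instead of calling into the out-of-line copy in pci-dma.c. */
	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
				  GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... hand ring_dma to the device, use the ring ... */

	dma_free_coherent(&pdev->dev, PAGE_SIZE, ring, ring_dma);
	return 0;
}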
@@ -41,11 +41,12 @@ EXPORT_SYMBOL(bad_dma_address);
 /* Dummy device used for NULL arguments (normally ISA). Better would
    be probably a smaller DMA mask, but this is bug-to-bug compatible
    to older i386. */
-struct device fallback_dev = {
+struct device x86_dma_fallback_dev = {
 	.bus_id = "fallback device",
 	.coherent_dma_mask = DMA_32BIT_MASK,
-	.dma_mask = &fallback_dev.coherent_dma_mask,
+	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
 };
+EXPORT_SYMBOL(x86_dma_fallback_dev);
 
 int dma_set_mask(struct device *dev, u64 mask)
 {
@@ -241,50 +242,6 @@ int dma_supported(struct device *dev, u64 mask)
 }
 EXPORT_SYMBOL(dma_supported);
 
-/*
- * Allocate memory for a coherent mapping.
- */
-void *
-dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
-		   gfp_t gfp)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-	void *memory;
-
-	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
-		return memory;
-
-	if (!dev) {
-		dev = &fallback_dev;
-		gfp |= GFP_DMA;
-	}
-
-	if (ops->alloc_coherent)
-		return ops->alloc_coherent(dev, size,
-				dma_handle, gfp);
-	return NULL;
-}
-EXPORT_SYMBOL(dma_alloc_coherent);
-
-/*
- * Unmap coherent memory.
- * The caller must ensure that the device has finished accessing the mapping.
- */
-void dma_free_coherent(struct device *dev, size_t size,
-			 void *vaddr, dma_addr_t bus)
-{
-	struct dma_mapping_ops *ops = get_dma_ops(dev);
-
-	WARN_ON(irqs_disabled());	/* for portability */
-
-	if (dma_release_from_coherent(dev, get_order(size), vaddr))
-		return;
-
-	if (ops->free_coherent)
-		ops->free_coherent(dev, size, vaddr, bus);
-}
-EXPORT_SYMBOL(dma_free_coherent);
-
 static int __init pci_iommu_init(void)
 {
 	calgary_iommu_init();
...
@@ -276,7 +276,7 @@ gart_map_single(struct device *dev, phys_addr_t paddr, size_t size, int dir)
 	unsigned long bus;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	if (!need_iommu(dev, paddr, size))
 		return paddr;
@@ -427,7 +427,7 @@ gart_map_sg(struct device *dev, struct scatterlist *sg, int nents, int dir)
 		return 0;
 
 	if (!dev)
-		dev = &fallback_dev;
+		dev = &x86_dma_fallback_dev;
 
 	out = 0;
 	start = 0;
...
@@ -9,10 +9,11 @@
 #include <linux/scatterlist.h>
 #include <asm/io.h>
 #include <asm/swiotlb.h>
+#include <asm-generic/dma-coherent.h>
 
 extern dma_addr_t bad_dma_address;
 extern int iommu_merge;
-extern struct device fallback_dev;
+extern struct device x86_dma_fallback_dev;
 extern int panic_on_overflow;
 extern int force_iommu;
@@ -87,13 +88,7 @@ static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
-
-void *dma_alloc_coherent(struct device *dev, size_t size,
-			 dma_addr_t *dma_handle, gfp_t flag);
-
-void dma_free_coherent(struct device *dev, size_t size,
-		       void *vaddr, dma_addr_t dma_handle);
-
+#define dma_is_consistent(d, h)	(1)
 
 extern int dma_supported(struct device *hwdev, u64 mask);
 extern int dma_set_mask(struct device *dev, u64 mask);
@@ -247,7 +242,39 @@ static inline int dma_get_cache_alignment(void)
 	return boot_cpu_data.x86_clflush_size;
 }
 
-#define dma_is_consistent(d, h)	(1)
+static inline void *
+dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		   gfp_t gfp)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+	void *memory;
+
+	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+		return memory;
+
+	if (!dev) {
+		dev = &x86_dma_fallback_dev;
+		gfp |= GFP_DMA;
+	}
+
+	if (ops->alloc_coherent)
+		return ops->alloc_coherent(dev, size,
+				dma_handle, gfp);
+	return NULL;
+}
+
+static inline void dma_free_coherent(struct device *dev, size_t size,
+				     void *vaddr, dma_addr_t bus)
+{
+	struct dma_mapping_ops *ops = get_dma_ops(dev);
+
+	WARN_ON(irqs_disabled());	/* for portability */
+
+	if (dma_release_from_coherent(dev, get_order(size), vaddr))
+		return;
+
+	if (ops->free_coherent)
+		ops->free_coherent(dev, size, vaddr, bus);
+}
 
-#include <asm-generic/dma-coherent.h>
 #endif
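
One behavioural detail worth noting from the inlined dma_alloc_coherent():
a NULL device still falls back to the renamed x86_dma_fallback_dev, which is
presumably why the patch adds EXPORT_SYMBOL for it in pci-dma.c -- the inline
now expands inside modules, so the symbol must be visible to them. A hedged
sketch of that path (the 512-byte size is arbitrary):

/* Illustrative only: the NULL-device fallback path of the new inline.
 * With dev == NULL, dma_alloc_coherent() substitutes x86_dma_fallback_dev
 * (32-bit coherent mask) and forces GFP_DMA, bug-to-bug compatible with
 * older i386 behaviour. */
dma_addr_t handle;
void *buf = dma_alloc_coherent(NULL, 512, &handle, GFP_KERNEL);
/* behaves like an allocation for &x86_dma_fallback_dev with
 * GFP_KERNEL | GFP_DMA */
if (buf)
	dma_free_coherent(NULL, 512, buf, handle);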