Commit 356da6d0 authored by Christoph Hellwig

dma-mapping: bypass indirect calls for dma-direct

Avoid expensive indirect calls in the fast-path DMA mapping
operations by calling the dma_direct_* routines directly when a device
uses the directly mapped DMA operations, now signalled by a NULL
dma_map_ops pointer.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Jesper Dangaard Brouer <brouer@redhat.com>
Tested-by: Tony Luck <tony.luck@intel.com>
parent 190d4e59
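The core idea, shown as a standalone sketch (simplified, hypothetical names, not the kernel code itself): treat a NULL ops pointer as "direct mapping" and turn the fast path into a predictable branch plus a direct call, instead of an indirect call through the ops table, which is especially costly on kernels built with retpolines.

#include <stddef.h>

/* Hypothetical, minimal stand-in for struct dma_map_ops. */
struct ops {
	unsigned long (*map)(void *buf);
};

/* Stand-in for dma_direct_map_page(): the common, directly mapped case. */
static unsigned long direct_map(void *buf)
{
	return (unsigned long)buf;	/* identity mapping, for illustration */
}

/* Mirrors dma_is_direct(): NULL ops now means "use the direct mapping". */
static inline int is_direct(const struct ops *ops)
{
	return ops == NULL;
}

/* Shape of the reworked wrappers below: direct call on the fast path. */
static inline unsigned long do_map(const struct ops *ops, void *buf)
{
	if (is_direct(ops))
		return direct_map(buf);	/* direct, inlinable call */
	return ops->map(buf);		/* slow path: indirect call */
}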
@@ -7,7 +7,7 @@ extern const struct dma_map_ops alpha_pci_ops;
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_ALPHA_JENSEN
-	return &dma_direct_ops;
+	return NULL;
 #else
 	return &alpha_pci_ops;
 #endif
...
@@ -1280,7 +1280,7 @@ void __init arc_cache_init_master(void)
 	/*
 	 * In case of IOC (say IOC+SLC case), pointers above could still be set
 	 * but end up not being relevant as the first function in chain is not
-	 * called at all for @dma_direct_ops
+	 * called at all for devices using coherent DMA.
 	 * arch_sync_dma_for_cpu() -> dma_cache_*() -> __dma_cache_*()
 	 */
 }
...
@@ -18,7 +18,7 @@ extern const struct dma_map_ops arm_coherent_dma_ops;
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : &dma_direct_ops;
+	return IS_ENABLED(CONFIG_MMU) ? &arm_dma_ops : NULL;
 }
 #ifdef __arch_page_to_dma
...
@@ -22,7 +22,7 @@
 #include "dma.h"
 /*
- * dma_direct_ops is used if
+ * The generic direct mapping code is used if
  *  - MMU/MPU is off
  *  - cpu is v7m w/o cache support
  *  - device is coherent
@@ -209,16 +209,9 @@ const struct dma_map_ops arm_nommu_dma_ops = {
 };
 EXPORT_SYMBOL(arm_nommu_dma_ops);
-static const struct dma_map_ops *arm_nommu_get_dma_map_ops(bool coherent)
-{
-	return coherent ? &dma_direct_ops : &arm_nommu_dma_ops;
-}
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	const struct dma_map_ops *dma_ops;
 	if (IS_ENABLED(CONFIG_CPU_V7M)) {
 		/*
 		 * Cache support for v7m is optional, so can be treated as
@@ -234,7 +227,6 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 		dev->archdata.dma_coherent = (get_cr() & CR_M) ? coherent : true;
 	}
-	dma_ops = arm_nommu_get_dma_map_ops(dev->archdata.dma_coherent);
-	set_dma_ops(dev, dma_ops);
+	if (!dev->archdata.dma_coherent)
+		set_dma_ops(dev, &arm_nommu_dma_ops);
 }
...
@@ -462,9 +462,6 @@ static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			const struct iommu_ops *iommu, bool coherent)
 {
-	if (!dev->dma_ops)
-		dev->dma_ops = &dma_direct_ops;
 	dev->dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
...
@@ -38,7 +38,7 @@ static inline int use_swiotlb(struct device *dev)
 const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
 {
 	if (use_swiotlb(dev))
-		return &dma_direct_ops;
+		return NULL;
 	return &sba_dma_ops;
 }
 EXPORT_SYMBOL(hwsw_dma_get_ops);
...
@@ -2078,7 +2078,7 @@ sba_init(void)
 	 * a successful kdump kernel boot is to use the swiotlb.
 	 */
 	if (is_kdump_kernel()) {
-		dma_ops = &dma_direct_ops;
+		dma_ops = NULL;
 		if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 			panic("Unable to initialize software I/O TLB:"
 			      " Try machvec=dig boot option");
@@ -2100,7 +2100,7 @@ sba_init(void)
 	 * If we didn't find something sba_iommu can claim, we
 	 * need to setup the swiotlb and switch to the dig machvec.
 	 */
-	dma_ops = &dma_direct_ops;
+	dma_ops = NULL;
 	if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
 		panic("Unable to find SBA IOMMU or initialize "
 		      "software I/O TLB: Try machvec=dig boot option");
...
@@ -36,7 +36,6 @@ long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
 void __init swiotlb_dma_init(void)
 {
-	dma_ops = &dma_direct_ops;
 	swiotlb_init(1);
 }
 #endif
...
@@ -11,7 +11,7 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 #if defined(CONFIG_MACH_JAZZ)
 	return &jazz_dma_ops;
 #else
-	return &dma_direct_ops;
+	return NULL;
 #endif
 }
...
@@ -99,10 +99,6 @@ void __init dma_ops_init(void)
 	case pcxl2:
 		pa7300lc_init();
-	case pcxl: /* falls through */
-	case pcxs:
-	case pcxt:
-		hppa_dma_ops = &dma_direct_ops;
 		break;
 	default:
 		break;
...
@@ -12,11 +12,11 @@ static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
 #ifdef CONFIG_SPARC_LEON
 	if (sparc_cpu_model == sparc_leon)
-		return &dma_direct_ops;
+		return NULL;
 #endif
 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
 	if (bus == &pci_bus_type)
-		return &dma_direct_ops;
+		return NULL;
 #endif
 	return dma_ops;
 }
...
@@ -17,7 +17,7 @@
 static bool disable_dac_quirk __read_mostly;
-const struct dma_map_ops *dma_ops = &dma_direct_ops;
+const struct dma_map_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 #ifdef CONFIG_IOMMU_DEBUG
...
@@ -581,7 +581,7 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
 	dev_priv->map_mode = vmw_dma_map_populate;
-	if (dma_ops->sync_single_for_cpu)
+	if (dma_ops && dma_ops->sync_single_for_cpu)
 		dev_priv->map_mode = vmw_dma_alloc_coherent;
 #ifdef CONFIG_SWIOTLB
 	if (swiotlb_nr_tbl() == 0)
...
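One driver-visible consequence, sketched below with a cut-down type (the helper is illustrative, not kernel API): code that inspects a dma_map_ops pointer, as vmw_dma_select_mode() does above, must now treat NULL as the dma-direct case instead of dereferencing it unconditionally.

#include <stdbool.h>
#include <stddef.h>

/* Cut-down stand-in for the relevant member of struct dma_map_ops. */
struct dma_ops_stub {
	void (*sync_single_for_cpu)(void);
};

/* NULL ops == dma-direct, which provides no sync_single_for_cpu hook. */
static bool needs_coherent_alloc(const struct dma_ops_stub *ops)
{
	return ops != NULL && ops->sync_single_for_cpu != NULL;
}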
@@ -2184,7 +2184,7 @@ static int amd_iommu_add_device(struct device *dev)
 			dev_name(dev));
 		iommu_ignore_device(dev);
-		dev->dma_ops = &dma_direct_ops;
+		dev->dma_ops = NULL;
 		goto out;
 	}
 	init_iommu_group(dev);
@@ -2770,17 +2770,6 @@ int __init amd_iommu_init_dma_ops(void)
 	swiotlb        = (iommu_pass_through || sme_me_mask) ? 1 : 0;
 	iommu_detected = 1;
-	/*
-	 * In case we don't initialize SWIOTLB (actually the common case
-	 * when AMD IOMMU is enabled and SME is not active), make sure there
-	 * are global dma_ops set as a fall-back for devices not handled by
-	 * this driver (for example non-PCI devices). When SME is active,
-	 * make sure that swiotlb variable remains set so the global dma_ops
-	 * continue to be SWIOTLB.
-	 */
-	if (!swiotlb)
-		dma_ops = &dma_direct_ops;
 	if (amd_iommu_unmap_flush)
 		pr_info("AMD-Vi: IO/TLB flush on unmap enabled\n");
 	else
...
@@ -4,7 +4,7 @@
 static inline const struct dma_map_ops *get_arch_dma_ops(struct bus_type *bus)
 {
-	return &dma_direct_ops;
+	return NULL;
 }
 #endif /* _ASM_GENERIC_DMA_MAPPING_H */
...
@@ -60,22 +60,5 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
 void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
-dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
-		unsigned long offset, size_t size, enum dma_data_direction dir,
-		unsigned long attrs);
-void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
-		size_t size, enum dma_data_direction dir, unsigned long attrs);
-int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
-		enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs);
-void dma_direct_sync_single_for_device(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_device(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
-void dma_direct_sync_single_for_cpu(struct device *dev,
-		dma_addr_t addr, size_t size, enum dma_data_direction dir);
-void dma_direct_sync_sg_for_cpu(struct device *dev,
-		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
...
@@ -134,7 +134,6 @@ struct dma_map_ops {
 #define DMA_MAPPING_ERROR		(~(dma_addr_t)0)
-extern const struct dma_map_ops dma_direct_ops;
 extern const struct dma_map_ops dma_virt_ops;
 extern const struct dma_map_ops dma_dummy_ops;
@@ -222,6 +221,69 @@ static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
 }
 #endif
+
+static inline bool dma_is_direct(const struct dma_map_ops *ops)
+{
+	return likely(!ops);
+}
+
+/*
+ * All the dma_direct_* declarations are here just for the indirect call bypass,
+ * and must not be used directly by drivers!
+ */
+dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
+		unsigned long offset, size_t size, enum dma_data_direction dir,
+		unsigned long attrs);
+int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
+		enum dma_data_direction dir, unsigned long attrs);
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_sync_single_for_device(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_device(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
+#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
+    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
+    defined(CONFIG_SWIOTLB)
+void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
+		int nents, enum dma_data_direction dir, unsigned long attrs);
+void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir);
+void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
+#else
+static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
+		size_t size, enum dma_data_direction dir, unsigned long attrs)
+{
+}
+static inline void dma_direct_unmap_sg(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
+		unsigned long attrs)
+{
+}
+static inline void dma_direct_sync_single_for_cpu(struct device *dev,
+		dma_addr_t addr, size_t size, enum dma_data_direction dir)
+{
+}
+static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
+		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
+{
+}
+#endif
+
 static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 					      size_t size,
 					      enum dma_data_direction dir,
@@ -232,9 +294,12 @@ static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_map_single(dev, ptr, size);
-	addr = ops->map_page(dev, virt_to_page(ptr),
-			     offset_in_page(ptr), size,
-			     dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
+	else
+		addr = ops->map_page(dev, virt_to_page(ptr),
+				offset_in_page(ptr), size, dir, attrs);
 	debug_dma_map_page(dev, virt_to_page(ptr),
 			   offset_in_page(ptr), size,
 			   dir, addr, true);
@@ -249,7 +314,9 @@ static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_page)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_page(dev, addr, size, dir, attrs);
+	else if (ops->unmap_page)
 		ops->unmap_page(dev, addr, size, dir, attrs);
 	debug_dma_unmap_page(dev, addr, size, dir, true);
 }
@@ -272,7 +339,10 @@ static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
 	int ents;
 	BUG_ON(!valid_dma_direction(dir));
-	ents = ops->map_sg(dev, sg, nents, dir, attrs);
+	if (dma_is_direct(ops))
+		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
+	else
+		ents = ops->map_sg(dev, sg, nents, dir, attrs);
 	BUG_ON(ents < 0);
 	debug_dma_map_sg(dev, sg, nents, ents, dir);
@@ -287,7 +357,9 @@ static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg
 	BUG_ON(!valid_dma_direction(dir));
 	debug_dma_unmap_sg(dev, sg, nents, dir);
-	if (ops->unmap_sg)
+	if (dma_is_direct(ops))
+		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
+	else if (ops->unmap_sg)
 		ops->unmap_sg(dev, sg, nents, dir, attrs);
 }
@@ -301,7 +373,10 @@ static inline dma_addr_t dma_map_page_attrs(struct device *dev,
 	dma_addr_t addr;
 	BUG_ON(!valid_dma_direction(dir));
-	addr = ops->map_page(dev, page, offset, size, dir, attrs);
+	if (dma_is_direct(ops))
+		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
+	else
+		addr = ops->map_page(dev, page, offset, size, dir, attrs);
 	debug_dma_map_page(dev, page, offset, size, dir, addr, false);
 	return addr;
@@ -322,7 +397,7 @@ static inline dma_addr_t dma_map_resource(struct device *dev,
 	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));
 	addr = phys_addr;
-	if (ops->map_resource)
+	if (ops && ops->map_resource)
 		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);
 	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
@@ -337,7 +412,7 @@ static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->unmap_resource)
+	if (ops && ops->unmap_resource)
 		ops->unmap_resource(dev, addr, size, dir, attrs);
 	debug_dma_unmap_resource(dev, addr, size, dir);
 }
@@ -349,7 +424,9 @@ static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
+	else if (ops->sync_single_for_cpu)
 		ops->sync_single_for_cpu(dev, addr, size, dir);
 	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
 }
@@ -368,7 +445,9 @@ static inline void dma_sync_single_for_device(struct device *dev,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_single_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_single_for_device(dev, addr, size, dir);
+	else if (ops->sync_single_for_device)
 		ops->sync_single_for_device(dev, addr, size, dir);
 	debug_dma_sync_single_for_device(dev, addr, size, dir);
 }
@@ -387,7 +466,9 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_cpu)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_cpu)
 		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
 }
@@ -399,7 +480,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->sync_sg_for_device)
+	if (dma_is_direct(ops))
+		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
+	else if (ops->sync_sg_for_device)
 		ops->sync_sg_for_device(dev, sg, nelems, dir);
 	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
...
@@ -38,7 +38,10 @@ pgprot_t arch_dma_mmap_pgprot(struct device *dev, pgprot_t prot,
 void arch_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 		enum dma_data_direction direction);
 #else
-#define arch_dma_cache_sync NULL
+static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
+		size_t size, enum dma_data_direction direction)
+{
+}
 #endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
 #ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
...
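The arch_dma_cache_sync() change above follows a general pattern, sketched here with hypothetical names: once a hook is called directly rather than through a possibly-NULL function pointer, the "not implemented" case must become an empty static inline, which the compiler removes entirely at the call site.

#ifdef HAVE_CACHE_SYNC	/* hypothetical config symbol */
void cache_sync(void *vaddr, unsigned long size);	/* real implementation */
#else
static inline void cache_sync(void *vaddr, unsigned long size)
{
	/* no-op: nothing to maintain; compiles away at every call site */
}
#endif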
@@ -223,6 +223,7 @@ void dma_direct_sync_single_for_device(struct device *dev,
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_device(dev, paddr, size, dir);
 }
+EXPORT_SYMBOL(dma_direct_sync_single_for_device);
 void dma_direct_sync_sg_for_device(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
@@ -240,6 +241,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
 				dir);
 	}
 }
+EXPORT_SYMBOL(dma_direct_sync_sg_for_device);
 #endif
 #if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
@@ -258,6 +260,7 @@ void dma_direct_sync_single_for_cpu(struct device *dev,
 	if (unlikely(is_swiotlb_buffer(paddr)))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
 }
+EXPORT_SYMBOL(dma_direct_sync_single_for_cpu);
 void dma_direct_sync_sg_for_cpu(struct device *dev,
 		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
@@ -277,6 +280,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
 	if (!dev_is_dma_coherent(dev))
 		arch_sync_dma_for_cpu_all(dev);
 }
+EXPORT_SYMBOL(dma_direct_sync_sg_for_cpu);
 void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
@@ -289,6 +293,7 @@ void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
 	if (unlikely(is_swiotlb_buffer(phys)))
 		swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
 }
+EXPORT_SYMBOL(dma_direct_unmap_page);
 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		int nents, enum dma_data_direction dir, unsigned long attrs)
@@ -300,11 +305,7 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
 		dma_direct_unmap_page(dev, sg->dma_address, sg_dma_len(sg), dir,
 				      attrs);
 }
-#else
-void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
-		int nents, enum dma_data_direction dir, unsigned long attrs)
-{
-}
+EXPORT_SYMBOL(dma_direct_unmap_sg);
 #endif
 static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
@@ -331,6 +332,7 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
 		arch_sync_dma_for_device(dev, phys, size, dir);
 	return dma_addr;
 }
+EXPORT_SYMBOL(dma_direct_map_page);
 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 		enum dma_data_direction dir, unsigned long attrs)
@@ -352,6 +354,7 @@ int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
 	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
 	return 0;
 }
+EXPORT_SYMBOL(dma_direct_map_sg);
 /*
  * Because 32-bit DMA masks are so common we expect every architecture to be
@@ -372,27 +375,3 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	return mask >= phys_to_dma(dev, min_mask);
 }
-
-const struct dma_map_ops dma_direct_ops = {
-	.alloc			= dma_direct_alloc,
-	.free			= dma_direct_free,
-	.map_page		= dma_direct_map_page,
-	.map_sg			= dma_direct_map_sg,
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
-    defined(CONFIG_SWIOTLB)
-	.sync_single_for_device	= dma_direct_sync_single_for_device,
-	.sync_sg_for_device	= dma_direct_sync_sg_for_device,
-#endif
-#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
-    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
-    defined(CONFIG_SWIOTLB)
-	.sync_single_for_cpu	= dma_direct_sync_single_for_cpu,
-	.sync_sg_for_cpu	= dma_direct_sync_sg_for_cpu,
-	.unmap_page		= dma_direct_unmap_page,
-	.unmap_sg		= dma_direct_unmap_sg,
-#endif
-	.get_required_mask	= dma_direct_get_required_mask,
-	.dma_supported		= dma_direct_supported,
-	.cache_sync		= arch_dma_cache_sync,
-};
-EXPORT_SYMBOL(dma_direct_ops);
...
@@ -7,6 +7,7 @@
  */
 #include <linux/memblock.h> /* for max_pfn */
 #include <linux/acpi.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/export.h>
 #include <linux/gfp.h>
@@ -229,8 +230,8 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 		unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->get_sgtable)
+
+	if (!dma_is_direct(ops) && ops->get_sgtable)
 		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
 					attrs);
 	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
@@ -293,8 +294,8 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 		unsigned long attrs)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
-	if (ops->mmap)
+
+	if (!dma_is_direct(ops) && ops->mmap)
 		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
 }
@@ -324,6 +325,8 @@ u64 dma_get_required_mask(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
+	if (dma_is_direct(ops))
+		return dma_direct_get_required_mask(dev);
 	if (ops->get_required_mask)
 		return ops->get_required_mask(dev);
 	return dma_default_get_required_mask(dev);
@@ -341,7 +344,6 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	void *cpu_addr;
-	BUG_ON(!ops);
 	WARN_ON_ONCE(dev && !dev->coherent_dma_mask);
 	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
@@ -352,10 +354,14 @@ void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
 	if (!arch_dma_alloc_attrs(&dev))
 		return NULL;
-	if (!ops->alloc)
+
+	if (dma_is_direct(ops))
+		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
+	else if (ops->alloc)
+		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
+	else
 		return NULL;
-	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
 	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
 	return cpu_addr;
 }
@@ -366,8 +372,6 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	BUG_ON(!ops);
 	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
 		return;
 	/*
@@ -379,11 +383,14 @@ void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 	 */
 	WARN_ON(irqs_disabled());
-	if (!ops->free || !cpu_addr)
+	if (!cpu_addr)
 		return;
 	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
-	ops->free(dev, size, cpu_addr, dma_handle, attrs);
+	if (dma_is_direct(ops))
+		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
+	else if (ops->free)
+		ops->free(dev, size, cpu_addr, dma_handle, attrs);
 }
 EXPORT_SYMBOL(dma_free_attrs);
@@ -397,9 +404,9 @@ int dma_supported(struct device *dev, u64 mask)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
-	if (!ops)
-		return 0;
-	if (!ops->dma_supported)
+	if (dma_is_direct(ops))
+		return dma_direct_supported(dev, mask);
+	if (!ops->dma_supported)
 		return 1;
 	return ops->dma_supported(dev, mask);
 }
@@ -437,7 +444,10 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 	BUG_ON(!valid_dma_direction(dir));
-	if (ops->cache_sync)
+
+	if (dma_is_direct(ops))
+		arch_dma_cache_sync(dev, vaddr, size, dir);
+	else if (ops->cache_sync)
 		ops->cache_sync(dev, vaddr, size, dir);
 }
 EXPORT_SYMBOL(dma_cache_sync);
...
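Taken together, the effect on a driver is invisible at the source level. A hypothetical snippet using only the standard DMA API: on a device whose dev->dma_ops is NULL after this commit, every call below takes the dma_is_direct() fast path and reaches the dma_direct_* helpers through plain direct calls.

#include <linux/dma-mapping.h>

static int example_dma_roundtrip(struct device *dev, void *buf, size_t len)
{
	dma_addr_t addr;

	/* buf is assumed to come from kmalloc(), as streaming DMA requires. */
	/* NULL ops -> dma_is_direct() -> direct call to dma_direct_map_page() */
	addr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* ... device writes into the buffer via DMA ... */

	/* direct call to dma_direct_sync_single_for_cpu() on this path */
	dma_sync_single_for_cpu(dev, addr, len, DMA_FROM_DEVICE);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
	return 0;
}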