Commit fd27a526 authored by Christoph Hellwig

ARM/dma-mapping: merge __dma_supported into arm_dma_supported

Merge __dma_supported into its only caller, and move the resulting
function so that it doesn't need a forward declaration.  Also mark
it static as there are no callers outside of dma-mapping.c.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 7607cb73
...@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev, ...@@ -33,7 +33,5 @@ int arm_iommu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping); struct dma_iommu_mapping *mapping);
void arm_iommu_detach_device(struct device *dev); void arm_iommu_detach_device(struct device *dev);
int arm_dma_supported(struct device *dev, u64 mask);
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif #endif
...@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev, ...@@ -179,6 +179,23 @@ static void arm_dma_sync_single_for_device(struct device *dev,
__dma_page_cpu_to_dev(page, offset, size, dir); __dma_page_cpu_to_dev(page, offset, size, dir);
} }
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
static int arm_dma_supported(struct device *dev, u64 mask)
{
unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This
* PFN number includes the page which we can DMA to.
*/
return dma_to_pfn(dev, mask) >= max_dma_pfn;
}
const struct dma_map_ops arm_dma_ops = { const struct dma_map_ops arm_dma_ops = {
.alloc = arm_dma_alloc, .alloc = arm_dma_alloc,
.free = arm_dma_free, .free = arm_dma_free,
...@@ -219,19 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = { ...@@ -219,19 +236,6 @@ const struct dma_map_ops arm_coherent_dma_ops = {
}; };
EXPORT_SYMBOL(arm_coherent_dma_ops); EXPORT_SYMBOL(arm_coherent_dma_ops);
static int __dma_supported(struct device *dev, u64 mask)
{
unsigned long max_dma_pfn = min(max_pfn - 1, arm_dma_pfn_limit);
/*
* Translate the device's DMA mask to a PFN limit. This
* PFN number includes the page which we can DMA to.
*/
if (dma_to_pfn(dev, mask) < max_dma_pfn)
return 0;
return 1;
}
static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag) static void __dma_clear_buffer(struct page *page, size_t size, int coherent_flag)
{ {
/* /*
...@@ -1054,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, ...@@ -1054,17 +1058,6 @@ void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
dir); dir);
} }
/*
* Return whether the given device DMA address mask can be supported
* properly. For example, if your device can only drive the low 24-bits
* during bus mastering, then you would pass 0x00ffffff as the mask
* to this function.
*/
int arm_dma_supported(struct device *dev, u64 mask)
{
return __dma_supported(dev, mask);
}
static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent) static const struct dma_map_ops *arm_get_dma_map_ops(bool coherent)
{ {
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment