Commit 34dc0ea6 authored by Christoph Hellwig

dma-direct: provide mmap and get_sgtable method overrides

For dma-direct we know that the DMA address is an encoding of the
physical address that we can trivially decode.  Use that fact to
provide implementations that do not need the arch_dma_coherent_to_pfn
architecture hook.  Note that we can still only support mmap of
non-coherent memory if the architecture provides a way to set an
uncached bit in the page tables.  This must be true for architectures
that use the generic remap helpers, but other architectures can also
select it manually.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Max Filippov <jcmvbkbc@gmail.com>
parent 4e1003aa
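
For context, the "trivial decoding" the message refers to is the dma-direct assumption that a dma_addr_t converts back to a physical address via dma_to_phys(); the patch below wraps exactly this in a dma_direct_to_page() helper. A minimal sketch of the idea (not part of the patch; the function name is illustrative only):

	#include <linux/dma-direct.h>
	#include <linux/mm.h>
	#include <linux/pfn.h>

	/*
	 * Sketch only: under dma-direct the page backing a DMA address can be
	 * recovered by decoding the address back to a physical address, so no
	 * arch_dma_coherent_to_pfn() hook is needed.
	 */
	static struct page *example_dma_addr_to_page(struct device *dev,
			dma_addr_t dma_addr)
	{
		return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
	}
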
@@ -6,7 +6,6 @@
 config ARC
 	def_bool y
 	select ARC_TIMERS
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_PTE_SPECIAL
 	select ARCH_HAS_SETUP_DMA_OPS
...
@@ -7,7 +7,6 @@ config ARM
 	select ARCH_HAS_BINFMT_FLAT
 	select ARCH_HAS_DEBUG_VIRTUAL if MMU
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_COHERENT_TO_PFN if SWIOTLB
 	select ARCH_HAS_DMA_WRITE_COMBINE if !ARM_DMA_MEM_BUFFERABLE
 	select ARCH_HAS_ELF_RANDOMIZE
 	select ARCH_HAS_FORTIFY_SOURCE
...
@@ -2346,12 +2346,6 @@ void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
 			size, dir);
 }
 
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return dma_to_pfn(dev, dma_addr);
-}
-
 void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs)
 {
...
@@ -12,7 +12,6 @@ config ARM64
 	select ARCH_CLOCKSOURCE_DATA
 	select ARCH_HAS_DEBUG_VIRTUAL
 	select ARCH_HAS_DEVMEM_IS_ALLOWED
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
 	select ARCH_HAS_FAST_MULTIPLIER
...
@@ -33,7 +33,7 @@ config IA64
 	select HAVE_ARCH_TRACEHOOK
 	select HAVE_MEMBLOCK_NODE_MAP
 	select HAVE_VIRT_CPU_ACCOUNTING
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
+	select DMA_NONCOHERENT_MMAP
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
 	select VIRT_TO_BUS
 	select GENERIC_IRQ_PROBE
...
@@ -19,9 +19,3 @@ void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 {
 	dma_direct_free_pages(dev, size, cpu_addr, dma_addr, attrs);
 }
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return page_to_pfn(virt_to_page(cpu_addr));
-}
...
@@ -4,7 +4,6 @@ config MICROBLAZE
 	select ARCH_32BIT_OFF_T
 	select ARCH_NO_SWAP
 	select ARCH_HAS_BINFMT_FLAT if !MMU
-	select ARCH_HAS_DMA_COHERENT_TO_PFN if MMU
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_GCOV_PROFILE_ALL
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
...
@@ -1134,9 +1134,9 @@ config DMA_NONCOHERENT
 	select ARCH_HAS_DMA_WRITE_COMBINE
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_UNCACHED_SEGMENT
-	select NEED_DMA_MAP_STATE
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
+	select DMA_NONCOHERENT_MMAP
 	select DMA_NONCOHERENT_CACHE_SYNC
+	select NEED_DMA_MAP_STATE
 
 config SYS_HAS_EARLY_PRINTK
 	bool
...
@@ -59,12 +59,6 @@ void *cached_kernel_address(void *addr)
 	return __va(addr) - UNCAC_BASE;
 }
 
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return page_to_pfn(virt_to_page(cached_kernel_address(cpu_addr)));
-}
-
 static inline void dma_sync_virt(void *addr, size_t size,
 		enum dma_data_direction dir)
 {
...
@@ -459,7 +459,6 @@ config NOT_COHERENT_CACHE
 	bool
 	depends on 4xx || PPC_8xx || E200 || PPC_MPC512x || \
 		GAMECUBE_COMMON || AMIGAONE
-	select ARCH_HAS_DMA_COHERENT_TO_PFN
 	select ARCH_HAS_DMA_PREP_COHERENT
 	select ARCH_HAS_SYNC_DMA_FOR_DEVICE
 	select ARCH_HAS_SYNC_DMA_FOR_CPU
...
@@ -68,5 +68,12 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs);
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
+bool dma_direct_can_mmap(struct device *dev);
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs);
 int dma_direct_supported(struct device *dev, u64 mask);
 #endif /* _LINUX_DMA_DIRECT_H */
@@ -41,8 +41,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
 		gfp_t gfp, unsigned long attrs);
 void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
 		dma_addr_t dma_addr, unsigned long attrs);
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr);
 
 #ifdef CONFIG_MMU
 /*
...
@@ -51,9 +51,6 @@ config ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
 config ARCH_HAS_DMA_PREP_COHERENT
 	bool
 
-config ARCH_HAS_DMA_COHERENT_TO_PFN
-	bool
-
 config ARCH_HAS_FORCE_DMA_UNENCRYPTED
 	bool
...
@@ -68,9 +65,18 @@ config SWIOTLB
 	bool
 	select NEED_DMA_MAP_STATE
 
+#
+# Should be selected if we can mmap non-coherent mappings to userspace.
+# The only thing that is really required is a way to set an uncached bit
+# in the pagetables
+#
+config DMA_NONCOHERENT_MMAP
+	bool
+
 config DMA_REMAP
 	depends on MMU
 	select GENERIC_ALLOCATOR
+	select DMA_NONCOHERENT_MMAP
 	bool
 
 config DMA_DIRECT_REMAP
...
@@ -43,6 +43,12 @@ static inline dma_addr_t phys_to_dma_direct(struct device *dev,
 	return phys_to_dma(dev, phys);
 }
 
+static inline struct page *dma_direct_to_page(struct device *dev,
+		dma_addr_t dma_addr)
+{
+	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
+}
+
 u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
...
@@ -379,6 +385,59 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
 }
 EXPORT_SYMBOL(dma_direct_map_resource);
 
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	struct page *page = dma_direct_to_page(dev, dma_addr);
+	int ret;
+
+	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
+	if (!ret)
+		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
+	return ret;
+}
+
+#ifdef CONFIG_MMU
+bool dma_direct_can_mmap(struct device *dev)
+{
+	return dev_is_dma_coherent(dev) ||
+		IS_ENABLED(CONFIG_DMA_NONCOHERENT_MMAP);
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	unsigned long user_count = vma_pages(vma);
+	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	unsigned long pfn = PHYS_PFN(dma_to_phys(dev, dma_addr));
+	int ret = -ENXIO;
+
+	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
+
+	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
+		return ret;
+
+	if (vma->vm_pgoff >= count || user_count > count - vma->vm_pgoff)
+		return -ENXIO;
+	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+			user_count << PAGE_SHIFT, vma->vm_page_prot);
+}
+#else /* CONFIG_MMU */
+bool dma_direct_can_mmap(struct device *dev)
+{
+	return false;
+}
+
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+		void *cpu_addr, dma_addr_t dma_addr, size_t size,
+		unsigned long attrs)
+{
+	return -ENXIO;
+}
+#endif /* CONFIG_MMU */
+
 /*
  * Because 32-bit DMA masks are so common we expect every architecture to be
  * able to satisfy them - either by not supporting more physical memory, or by
...
@@ -112,24 +112,9 @@ int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
 		void *cpu_addr, dma_addr_t dma_addr, size_t size,
 		unsigned long attrs)
 {
-	struct page *page;
+	struct page *page = virt_to_page(cpu_addr);
 	int ret;
 
-	if (!dev_is_dma_coherent(dev)) {
-		unsigned long pfn;
-
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-		page = pfn_to_page(pfn);
-	} else {
-		page = virt_to_page(cpu_addr);
-	}
-
 	ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
 	if (!ret)
 		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
...
@@ -154,7 +139,7 @@ int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr,
+		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
 				size, attrs);
 	if (!ops->get_sgtable)
 		return -ENXIO;
...
@@ -192,7 +177,6 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	unsigned long user_count = vma_pages(vma);
 	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long off = vma->vm_pgoff;
-	unsigned long pfn;
 	int ret = -ENXIO;
 
 	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
...
@@ -203,19 +187,8 @@ int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
 	if (off >= count || user_count > count - off)
 		return -ENXIO;
 
-	if (!dev_is_dma_coherent(dev)) {
-		if (!IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN))
-			return -ENXIO;
-
-		/* If the PFN is not valid, we do not have a struct page */
-		pfn = arch_dma_coherent_to_pfn(dev, cpu_addr, dma_addr);
-		if (!pfn_valid(pfn))
-			return -ENXIO;
-	} else {
-		pfn = page_to_pfn(virt_to_page(cpu_addr));
-	}
-
-	return remap_pfn_range(vma, vma->vm_start, pfn + vma->vm_pgoff,
+	return remap_pfn_range(vma, vma->vm_start,
+			page_to_pfn(virt_to_page(cpu_addr)) + vma->vm_pgoff,
 			user_count << PAGE_SHIFT, vma->vm_page_prot);
 #else
 	return -ENXIO;
...
@@ -233,12 +206,8 @@ bool dma_can_mmap(struct device *dev)
 {
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
-	if (dma_is_direct(ops)) {
-		return IS_ENABLED(CONFIG_MMU) &&
-			(dev_is_dma_coherent(dev) ||
-			 IS_ENABLED(CONFIG_ARCH_HAS_DMA_COHERENT_TO_PFN));
-	}
-
+	if (dma_is_direct(ops))
+		return dma_direct_can_mmap(dev);
 	return ops->mmap != NULL;
 }
 EXPORT_SYMBOL_GPL(dma_can_mmap);
...
@@ -263,7 +232,7 @@ int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
 	const struct dma_map_ops *ops = get_dma_ops(dev);
 
 	if (dma_is_direct(ops))
-		return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size,
+		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
 				attrs);
 	if (!ops->mmap)
 		return -ENXIO;
...
@@ -259,10 +259,4 @@ void arch_dma_free(struct device *dev, size_t size, void *vaddr,
 		dma_free_contiguous(dev, page, size);
 	}
 }
-
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
-		dma_addr_t dma_addr)
-{
-	return __phys_to_pfn(dma_to_phys(dev, dma_addr));
-}
 #endif /* CONFIG_DMA_DIRECT_REMAP */
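
As a usage note (a hedged sketch, not part of this commit): with these overrides in place, a driver that exposes a coherent buffer to userspace through dma_mmap_coherent() reaches dma_mmap_attrs(), which for dma-direct devices now calls the dma_direct_mmap() added above. The driver structure and function names below are illustrative only.

	#include <linux/dma-mapping.h>
	#include <linux/fs.h>
	#include <linux/mm.h>

	/* Hypothetical driver state; names are illustrative, not from the patch. */
	struct mydrv_buf {
		struct device *dev;
		void *cpu_addr;
		dma_addr_t dma_handle;
		size_t size;
	};

	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
	{
		struct mydrv_buf *buf = file->private_data;

		/*
		 * dma_mmap_coherent() wraps dma_mmap_attrs(); on dma-direct
		 * devices this now ends up in dma_direct_mmap().
		 */
		return dma_mmap_coherent(buf->dev, vma, buf->cpu_addr,
					 buf->dma_handle, buf->size);
	}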