Commit 10b22b53 authored by Linus Torvalds

Merge tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping updates from Christoph Hellwig:

 - fix a regression in the ARM dma-direct conversion (Christoph Hellwig)

 - use memcpy_{from,to}_page (Fabio M. De Francesco)

 - cleanup the swiotlb MAINTAINERS entry (Lukas Bulwahn)

 - make SG table pool allocation less fragile (Masahiro Yamada)

 - don't panic on swiotlb initialization failure (Robin Murphy)

* tag 'dma-mapping-6.1-2022-10-10' of git://git.infradead.org/users/hch/dma-mapping:
  ARM/dma-mapping: remove the dma_coherent member of struct dev_archdata
  ARM/dma-mapping: don't override ->dma_coherent when set from a bus notifier
  lib/sg_pool: change module_init(sg_pool_init) to subsys_initcall
  MAINTAINERS: merge SWIOTLB SUBSYSTEM into DMA MAPPING HELPERS
  swiotlb: don't panic!
  swiotlb: replace kmap_atomic() with memcpy_{from,to}_page()
parents f23cdfcd c9cb0136
...@@ -6171,6 +6171,7 @@ F: include/asm-generic/dma-mapping.h ...@@ -6171,6 +6171,7 @@ F: include/asm-generic/dma-mapping.h
F: include/linux/dma-direct.h F: include/linux/dma-direct.h
F: include/linux/dma-mapping.h F: include/linux/dma-mapping.h
F: include/linux/dma-map-ops.h F: include/linux/dma-map-ops.h
F: include/linux/swiotlb.h
F: kernel/dma/ F: kernel/dma/
DMA MAPPING BENCHMARK DMA MAPPING BENCHMARK
...@@ -19749,16 +19750,6 @@ S: Maintained ...@@ -19749,16 +19750,6 @@ S: Maintained
F: Documentation/admin-guide/svga.rst F: Documentation/admin-guide/svga.rst
F: arch/x86/boot/video* F: arch/x86/boot/video*
SWIOTLB SUBSYSTEM
M: Christoph Hellwig <hch@infradead.org>
L: iommu@lists.linux.dev
S: Supported
W: http://git.infradead.org/users/hch/dma-mapping.git
T: git git://git.infradead.org/users/hch/dma-mapping.git
F: arch/*/kernel/pci-swiotlb.c
F: include/linux/swiotlb.h
F: kernel/dma/swiotlb.c
SWITCHDEV SWITCHDEV
M: Jiri Pirko <jiri@resnulli.us> M: Jiri Pirko <jiri@resnulli.us>
M: Ivan Vecera <ivecera@redhat.com> M: Ivan Vecera <ivecera@redhat.com>
...@@ -22475,8 +22466,10 @@ M: Stefano Stabellini <sstabellini@kernel.org> ...@@ -22475,8 +22466,10 @@ M: Stefano Stabellini <sstabellini@kernel.org>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers) L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: iommu@lists.linux.dev L: iommu@lists.linux.dev
S: Supported S: Supported
F: arch/x86/xen/*swiotlb* F: arch/*/include/asm/xen/swiotlb-xen.h
F: drivers/xen/*swiotlb* F: drivers/xen/swiotlb-xen.c
F: include/xen/arm/swiotlb-xen.h
F: include/xen/swiotlb-xen.h
XFS FILESYSTEM XFS FILESYSTEM
C: irc://irc.oftc.net/xfs C: irc://irc.oftc.net/xfs
......
...@@ -9,7 +9,6 @@ struct dev_archdata { ...@@ -9,7 +9,6 @@ struct dev_archdata {
#ifdef CONFIG_ARM_DMA_USE_IOMMU #ifdef CONFIG_ARM_DMA_USE_IOMMU
struct dma_iommu_mapping *mapping; struct dma_iommu_mapping *mapping;
#endif #endif
unsigned int dma_coherent:1;
unsigned int dma_ops_setup:1; unsigned int dma_ops_setup:1;
}; };
......
...@@ -1769,8 +1769,14 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { } ...@@ -1769,8 +1769,14 @@ static void arm_teardown_iommu_dma_ops(struct device *dev) { }
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
const struct iommu_ops *iommu, bool coherent) const struct iommu_ops *iommu, bool coherent)
{ {
dev->archdata.dma_coherent = coherent; /*
dev->dma_coherent = coherent; * Due to legacy code that sets the ->dma_coherent flag from a bus
* notifier we can't just assign coherent to the ->dma_coherent flag
* here, but instead have to make sure we only set but never clear it
* for now.
*/
if (coherent)
dev->dma_coherent = true;
/* /*
* Don't override the dma_ops if they have already been set. Ideally * Don't override the dma_ops if they have already been set. Ideally
......
...@@ -346,22 +346,27 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags, ...@@ -346,22 +346,27 @@ void __init swiotlb_init_remap(bool addressing_limit, unsigned int flags,
memblock_free(tlb, PAGE_ALIGN(bytes)); memblock_free(tlb, PAGE_ALIGN(bytes));
nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE); nslabs = ALIGN(nslabs >> 1, IO_TLB_SEGSIZE);
if (nslabs < IO_TLB_MIN_SLABS) if (nslabs >= IO_TLB_MIN_SLABS)
panic("%s: Failed to remap %zu bytes\n", goto retry;
__func__, bytes);
goto retry; pr_warn("%s: Failed to remap %zu bytes\n", __func__, bytes);
return;
} }
alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs)); alloc_size = PAGE_ALIGN(array_size(sizeof(*mem->slots), nslabs));
mem->slots = memblock_alloc(alloc_size, PAGE_SIZE); mem->slots = memblock_alloc(alloc_size, PAGE_SIZE);
if (!mem->slots) if (!mem->slots) {
panic("%s: Failed to allocate %zu bytes align=0x%lx\n", pr_warn("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE); __func__, alloc_size, PAGE_SIZE);
return;
}
mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area), mem->areas = memblock_alloc(array_size(sizeof(struct io_tlb_area),
default_nareas), SMP_CACHE_BYTES); default_nareas), SMP_CACHE_BYTES);
if (!mem->areas) if (!mem->areas) {
panic("%s: Failed to allocate mem->areas.\n", __func__); pr_warn("%s: Failed to allocate mem->areas.\n", __func__);
return;
}
swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false, swiotlb_init_io_tlb_mem(mem, __pa(tlb), nslabs, flags, false,
default_nareas); default_nareas);
...@@ -545,9 +550,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size ...@@ -545,9 +550,8 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
} }
if (PageHighMem(pfn_to_page(pfn))) { if (PageHighMem(pfn_to_page(pfn))) {
/* The buffer does not have a mapping. Map it in and copy */
unsigned int offset = orig_addr & ~PAGE_MASK; unsigned int offset = orig_addr & ~PAGE_MASK;
char *buffer; struct page *page;
unsigned int sz = 0; unsigned int sz = 0;
unsigned long flags; unsigned long flags;
...@@ -555,12 +559,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size ...@@ -555,12 +559,11 @@ static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size
sz = min_t(size_t, PAGE_SIZE - offset, size); sz = min_t(size_t, PAGE_SIZE - offset, size);
local_irq_save(flags); local_irq_save(flags);
buffer = kmap_atomic(pfn_to_page(pfn)); page = pfn_to_page(pfn);
if (dir == DMA_TO_DEVICE) if (dir == DMA_TO_DEVICE)
memcpy(vaddr, buffer + offset, sz); memcpy_from_page(vaddr, page, offset, sz);
else else
memcpy(buffer + offset, vaddr, sz); memcpy_to_page(page, offset, vaddr, sz);
kunmap_atomic(buffer);
local_irq_restore(flags); local_irq_restore(flags);
size -= sz; size -= sz;
...@@ -731,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr, ...@@ -731,8 +734,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *dev, phys_addr_t orig_addr,
int index; int index;
phys_addr_t tlb_addr; phys_addr_t tlb_addr;
if (!mem || !mem->nslabs) if (!mem || !mem->nslabs) {
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer"); dev_warn_ratelimited(dev,
"Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
return (phys_addr_t)DMA_MAPPING_ERROR;
}
if (cc_platform_has(CC_ATTR_MEM_ENCRYPT)) if (cc_platform_has(CC_ATTR_MEM_ENCRYPT))
pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n"); pr_warn_once("Memory encryption is active and system is using DMA bounce buffers\n");
......
// SPDX-License-Identifier: GPL-2.0-only // SPDX-License-Identifier: GPL-2.0-only
#include <linux/module.h> #include <linux/init.h>
#include <linux/scatterlist.h> #include <linux/scatterlist.h>
#include <linux/mempool.h> #include <linux/mempool.h>
#include <linux/slab.h> #include <linux/slab.h>
...@@ -177,16 +177,4 @@ static __init int sg_pool_init(void) ...@@ -177,16 +177,4 @@ static __init int sg_pool_init(void)
return -ENOMEM; return -ENOMEM;
} }
static __exit void sg_pool_exit(void) subsys_initcall(sg_pool_init);
{
int i;
for (i = 0; i < SG_MEMPOOL_NR; i++) {
struct sg_pool *sgp = sg_pools + i;
mempool_destroy(sgp->pool);
kmem_cache_destroy(sgp->slab);
}
}
module_init(sg_pool_init);
module_exit(sg_pool_exit);
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment