Commit dff8d6c1 authored by Christoph Hellwig

swiotlb: remove the overflow buffer

Like all other dma mapping drivers, just return an error code instead
of an actual memory buffer.  The reason for the overflow buffer was
that at the time swiotlb was invented there was no way to check for
dma mapping errors, but this has long been fixed.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 80885468
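
With the overflow buffer gone, a failed mapping is reported through the
normal error-code path rather than by handing back a magic bounce address.
A minimal driver-side sketch of the convention this commit relies on
(dev, page, and size are hypothetical placeholders, not names from the
diff below):

    /* Map a page for DMA and check the result; dev/page/size are
     * placeholders for whatever the driver has at hand. */
    dma_addr_t addr = dma_map_page(dev, page, 0, size, DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr))
            return -ENOMEM;    /* mapping failed; nothing to unmap */

The .mapping_error callbacks changed below are what back that
dma_mapping_error() check.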
arch/arm64/mm/dma-mapping.c

@@ -324,7 +324,7 @@ static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
 static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
 {
 	if (swiotlb)
-		return swiotlb_dma_mapping_error(hwdev, addr);
+		return dma_direct_mapping_error(hwdev, addr);
 	return 0;
 }
arch/powerpc/kernel/dma-swiotlb.c

@@ -11,7 +11,7 @@
  *
  */

-#include <linux/dma-mapping.h>
+#include <linux/dma-direct.h>
 #include <linux/memblock.h>
 #include <linux/pfn.h>
 #include <linux/of_platform.h>
@@ -59,7 +59,7 @@ const struct dma_map_ops powerpc_swiotlb_dma_ops = {
 	.sync_single_for_device = swiotlb_sync_single_for_device,
 	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
 	.sync_sg_for_device = swiotlb_sync_sg_for_device,
-	.mapping_error = swiotlb_dma_mapping_error,
+	.mapping_error = dma_direct_mapping_error,
 	.get_required_mask = swiotlb_powerpc_get_required,
 };
include/linux/dma-direct.h

@@ -5,6 +5,8 @@
 #include <linux/dma-mapping.h>
 #include <linux/mem_encrypt.h>

+#define DIRECT_MAPPING_ERROR		0
+
 #ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
 #include <asm/dma-direct.h>
 #else
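
Moving DIRECT_MAPPING_ERROR into this shared header lets swiotlb return
the same sentinel the dma-direct code already uses. For reference, the
pre-existing helper that the swiotlb ops now point at is essentially a
comparison against that constant; in the kernel of this era it read
roughly as follows (shown for context, not part of this diff):

    /* kernel/dma/direct.c, pre-existing helper */
    int dma_direct_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            return dma_addr == DIRECT_MAPPING_ERROR;
    }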
include/linux/swiotlb.h

@@ -106,9 +106,6 @@ extern void
 swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 			   int nelems, enum dma_data_direction dir);

-extern int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
-
 extern int
 swiotlb_dma_supported(struct device *hwdev, u64 mask);
kernel/dma/direct.c

@@ -14,8 +14,6 @@
 #include <linux/pfn.h>
 #include <linux/set_memory.h>

-#define DIRECT_MAPPING_ERROR		0
-
 /*
  * Most architectures use ZONE_DMA for the first 16 Megabytes, but
  * some use it for entirely different regions:
kernel/dma/swiotlb.c

@@ -72,13 +72,6 @@ static phys_addr_t io_tlb_start, io_tlb_end;
  */
 static unsigned long io_tlb_nslabs;

-/*
- * When the IOMMU overflows we return a fallback buffer. This sets the size.
- */
-static unsigned long io_tlb_overflow = 32*1024;
-
-static phys_addr_t io_tlb_overflow_buffer;
-
 /*
  * This is a free list describing the number of free entries available from
  * each index
@@ -126,7 +119,6 @@ setup_io_tlb_npages(char *str)
 	return 0;
 }
 early_param("swiotlb", setup_io_tlb_npages);
-/* make io_tlb_overflow tunable too? */

 unsigned long swiotlb_nr_tbl(void)
 {
@@ -194,16 +186,10 @@ void __init swiotlb_update_mem_attributes(void)
 	bytes = PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT);
 	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
 	memset(vaddr, 0, bytes);
-
-	vaddr = phys_to_virt(io_tlb_overflow_buffer);
-	bytes = PAGE_ALIGN(io_tlb_overflow);
-	set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
-	memset(vaddr, 0, bytes);
 }

 int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 {
-	void *v_overflow_buffer;
 	unsigned long i, bytes;

 	bytes = nslabs << IO_TLB_SHIFT;
@@ -212,17 +198,6 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 	io_tlb_start = __pa(tlb);
 	io_tlb_end = io_tlb_start + bytes;

-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = memblock_virt_alloc_low_nopanic(
-						PAGE_ALIGN(io_tlb_overflow),
-						PAGE_SIZE);
-	if (!v_overflow_buffer)
-		return -ENOMEM;
-
-	io_tlb_overflow_buffer = __pa(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -330,7 +305,6 @@ int
 swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 {
 	unsigned long i, bytes;
-	unsigned char *v_overflow_buffer;

 	bytes = nslabs << IO_TLB_SHIFT;
@@ -341,19 +315,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 	set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
 	memset(tlb, 0, bytes);

-	/*
-	 * Get the overflow emergency buffer
-	 */
-	v_overflow_buffer = (void *)__get_free_pages(GFP_DMA,
-						     get_order(io_tlb_overflow));
-	if (!v_overflow_buffer)
-		goto cleanup2;
-
-	set_memory_decrypted((unsigned long)v_overflow_buffer,
-			     io_tlb_overflow >> PAGE_SHIFT);
-	memset(v_overflow_buffer, 0, io_tlb_overflow);
-	io_tlb_overflow_buffer = virt_to_phys(v_overflow_buffer);
-
 	/*
 	 * Allocate and initialize the free list array.  This array is used
 	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
@@ -390,10 +351,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 						 sizeof(int)));
 	io_tlb_list = NULL;
 cleanup3:
-	free_pages((unsigned long)v_overflow_buffer,
-		   get_order(io_tlb_overflow));
-	io_tlb_overflow_buffer = 0;
-cleanup2:
 	io_tlb_end = 0;
 	io_tlb_start = 0;
 	io_tlb_nslabs = 0;
@@ -407,8 +364,6 @@ void __init swiotlb_exit(void)
 		return;

 	if (late_alloc) {
-		free_pages((unsigned long)phys_to_virt(io_tlb_overflow_buffer),
-			   get_order(io_tlb_overflow));
 		free_pages((unsigned long)io_tlb_orig_addr,
 			   get_order(io_tlb_nslabs * sizeof(phys_addr_t)));
 		free_pages((unsigned long)io_tlb_list, get_order(io_tlb_nslabs *
@@ -416,8 +371,6 @@ void __init swiotlb_exit(void)
 		free_pages((unsigned long)phys_to_virt(io_tlb_start),
 			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));
 	} else {
-		memblock_free_late(io_tlb_overflow_buffer,
-				   PAGE_ALIGN(io_tlb_overflow));
 		memblock_free_late(__pa(io_tlb_orig_addr),
 				   PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));
 		memblock_free_late(__pa(io_tlb_list),
@@ -790,7 +743,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	/* Oh well, have to allocate and map a bounce buffer. */
 	map = map_single(dev, phys, size, dir, attrs);
 	if (map == SWIOTLB_MAP_ERROR)
-		return __phys_to_dma(dev, io_tlb_overflow_buffer);
+		return DIRECT_MAPPING_ERROR;

 	dev_addr = __phys_to_dma(dev, map);
@@ -801,7 +754,7 @@ dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
 	attrs |= DMA_ATTR_SKIP_CPU_SYNC;
 	swiotlb_tbl_unmap_single(dev, map, size, dir, attrs);

-	return __phys_to_dma(dev, io_tlb_overflow_buffer);
+	return DIRECT_MAPPING_ERROR;
 }

 /*
@@ -985,12 +938,6 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 	swiotlb_sync_sg(hwdev, sg, nelems, dir, SYNC_FOR_DEVICE);
 }

-int
-swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
-{
-	return (dma_addr == __phys_to_dma(hwdev, io_tlb_overflow_buffer));
-}
-
 /*
  * Return whether the given device DMA address mask can be supported
  * properly.  For example, if your device can only drive the low 24-bits
@@ -1033,7 +980,7 @@ void swiotlb_free(struct device *dev, size_t size, void *vaddr,
 }

 const struct dma_map_ops swiotlb_dma_ops = {
-	.mapping_error		= swiotlb_dma_mapping_error,
+	.mapping_error		= dma_direct_mapping_error,
 	.alloc			= swiotlb_alloc,
 	.free			= swiotlb_free,
 	.sync_single_for_cpu	= swiotlb_sync_single_for_cpu,
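
With every swiotlb user funnelled through dma_direct_mapping_error(), the
generic dma_mapping_error() entry point needs no changes. For context, the
wrapper in include/linux/dma-mapping.h of this era dispatched through the
dma_map_ops table roughly like this (recalled and lightly abridged, not
part of this diff):

    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            const struct dma_map_ops *ops = get_dma_ops(dev);

            debug_dma_mapping_error(dev, dma_addr);
            if (ops->mapping_error)
                    return ops->mapping_error(dev, dma_addr);
            return 0;
    }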