Commit ac2cbab2 authored by Yinghai Lu, committed by H. Peter Anvin

x86: Don't panic if can not alloc buffer for swiotlb

Normal boot path on a system with IOMMU support:
the swiotlb buffer is allocated early, and then IOMMU initialization is
attempted; if the Intel or AMD IOMMU can be set up properly, the swiotlb
buffer is freed.

The early allocation is done with bootmem, and can panic when we try to use
kdump with the buffer above 4G only, or with memmap to limit memory under 4G.
For example: memmap=4095M$1M to remove memory under 4G.

According to Eric, add a _nopanic version and no_iotlb_memory to fail
map_single later if swiotlb is still needed.

-v2: don't pass nopanic, and use -ENOMEM return value according to Eric.
     panic early instead of using swiotlb_full to panic...according to Eric/Konrad.
-v3: make swiotlb_init not panic, which will affect:
     arm64, ia64, powerpc, tile, unicore32, x86.
-v4: cleanup swiotlb_init by removing swiotlb_init_with_default_size.
Suggested-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Link: http://lkml.kernel.org/r/1359058816-7615-36-git-send-email-yinghai@kernel.org
Reviewed-and-tested-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Joerg Roedel <joro@8bytes.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
Cc: linux-mips@linux-mips.org
Cc: xen-devel@lists.xensource.com
Cc: virtualization@lists.linux-foundation.org
Cc: Shuah Khan <shuahkhan@gmail.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
parent 38fa4175
...@@ -317,7 +317,8 @@ void __init plat_swiotlb_setup(void) ...@@ -317,7 +317,8 @@ void __init plat_swiotlb_setup(void)
octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize); octeon_swiotlb = alloc_bootmem_low_pages(swiotlbsize);
swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1); if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops; mips_dma_map_ops = &octeon_linear_dma_map_ops.dma_map_ops;
} }
......
...@@ -231,7 +231,9 @@ int __ref xen_swiotlb_init(int verbose, bool early) ...@@ -231,7 +231,9 @@ int __ref xen_swiotlb_init(int verbose, bool early)
} }
start_dma_addr = xen_virt_to_bus(xen_io_tlb_start); start_dma_addr = xen_virt_to_bus(xen_io_tlb_start);
if (early) { if (early) {
swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs, verbose); if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
verbose))
panic("Cannot allocate SWIOTLB buffer");
rc = 0; rc = 0;
} else } else
rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs); rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
......
...@@ -23,7 +23,7 @@ extern int swiotlb_force; ...@@ -23,7 +23,7 @@ extern int swiotlb_force;
#define IO_TLB_SHIFT 11 #define IO_TLB_SHIFT 11
extern void swiotlb_init(int verbose); extern void swiotlb_init(int verbose);
extern void swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose); int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
extern unsigned long swiotlb_nr_tbl(void); extern unsigned long swiotlb_nr_tbl(void);
extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs); extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
......
...@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev, ...@@ -122,11 +122,18 @@ static dma_addr_t swiotlb_virt_to_bus(struct device *hwdev,
return phys_to_dma(hwdev, virt_to_phys(address)); return phys_to_dma(hwdev, virt_to_phys(address));
} }
static bool no_iotlb_memory;
void swiotlb_print_info(void) void swiotlb_print_info(void)
{ {
unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT; unsigned long bytes = io_tlb_nslabs << IO_TLB_SHIFT;
unsigned char *vstart, *vend; unsigned char *vstart, *vend;
if (no_iotlb_memory) {
pr_warn("software IO TLB: No low mem\n");
return;
}
vstart = phys_to_virt(io_tlb_start); vstart = phys_to_virt(io_tlb_start);
vend = phys_to_virt(io_tlb_end); vend = phys_to_virt(io_tlb_end);
...@@ -136,7 +143,7 @@ void swiotlb_print_info(void) ...@@ -136,7 +143,7 @@ void swiotlb_print_info(void)
bytes >> 20, vstart, vend - 1); bytes >> 20, vstart, vend - 1);
} }
void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
{ {
void *v_overflow_buffer; void *v_overflow_buffer;
unsigned long i, bytes; unsigned long i, bytes;
...@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) ...@@ -150,9 +157,10 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
/* /*
* Get the overflow emergency buffer * Get the overflow emergency buffer
*/ */
v_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow)); v_overflow_buffer = alloc_bootmem_low_pages_nopanic(
PAGE_ALIGN(io_tlb_overflow));
if (!v_overflow_buffer) if (!v_overflow_buffer)
panic("Cannot allocate SWIOTLB overflow buffer!\n"); return -ENOMEM;
io_tlb_overflow_buffer = __pa(v_overflow_buffer); io_tlb_overflow_buffer = __pa(v_overflow_buffer);
...@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) ...@@ -169,15 +177,19 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
if (verbose) if (verbose)
swiotlb_print_info(); swiotlb_print_info();
return 0;
} }
/* /*
* Statically reserve bounce buffer space and initialize bounce buffer data * Statically reserve bounce buffer space and initialize bounce buffer data
* structures for the software IO TLB used to implement the DMA API. * structures for the software IO TLB used to implement the DMA API.
*/ */
static void __init void __init
swiotlb_init_with_default_size(size_t default_size, int verbose) swiotlb_init(int verbose)
{ {
/* default to 64MB */
size_t default_size = 64UL<<20;
unsigned char *vstart; unsigned char *vstart;
unsigned long bytes; unsigned long bytes;
...@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose) ...@@ -188,20 +200,16 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)
bytes = io_tlb_nslabs << IO_TLB_SHIFT; bytes = io_tlb_nslabs << IO_TLB_SHIFT;
/* /* Get IO TLB memory from the low pages */
* Get IO TLB memory from the low pages vstart = alloc_bootmem_low_pages_nopanic(PAGE_ALIGN(bytes));
*/ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
vstart = alloc_bootmem_low_pages(PAGE_ALIGN(bytes)); return;
if (!vstart)
panic("Cannot allocate SWIOTLB buffer");
swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose);
}
void __init if (io_tlb_start)
swiotlb_init(int verbose) free_bootmem(io_tlb_start,
{ PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
swiotlb_init_with_default_size(64 * (1<<20), verbose); /* default to 64MB */ pr_warn("Cannot allocate SWIOTLB buffer");
no_iotlb_memory = true;
} }
/* /*
...@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, ...@@ -405,6 +413,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
unsigned long offset_slots; unsigned long offset_slots;
unsigned long max_slots; unsigned long max_slots;
if (no_iotlb_memory)
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
mask = dma_get_seg_boundary(hwdev); mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask; tbl_dma_addr &= mask;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment.