Commit 3d5e28bf authored by Linus Torvalds

Merge branch 'stable/for-linus-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb

Pull swiotlb fixes from Konrad Rzeszutek Wilk:
 "Two tiny fixes for issues that make drivers under Xen unhappy under
  certain conditions"

* 'stable/for-linus-5.10-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/konrad/swiotlb:
  swiotlb: remove the tbl_dma_addr argument to swiotlb_tbl_map_single
  swiotlb: fix "x86: Don't panic if can not alloc buffer for swiotlb"
parents eccc8767 fc0021aa
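
The first patch (fc0021aa) drops the tbl_dma_addr argument: swiotlb_tbl_map_single() now derives the bounce-pool address from io_tlb_start itself instead of making every caller pass it in. A minimal before/after sketch of a call site (hypothetical example_map() helper, distilled from the hunks below; not code from this merge):

/* Hypothetical caller, for illustration only. */
static dma_addr_t example_map(struct device *dev, phys_addr_t paddr,
			      size_t size, enum dma_data_direction dir,
			      unsigned long attrs)
{
	phys_addr_t tlb_addr;

	/*
	 * Old form, now removed: every call site had to compute the
	 * DMA address of the bounce-buffer pool itself:
	 *
	 *   tlb_addr = swiotlb_tbl_map_single(dev,
	 *		phys_to_dma_unencrypted(dev, io_tlb_start),
	 *		paddr, size, size, dir, attrs);
	 */

	/* New form: the helper derives it from io_tlb_start internally. */
	tlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir, attrs);
	if (tlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
		return DMA_MAPPING_ERROR;

	return phys_to_dma(dev, tlb_addr);
}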
--- a/drivers/iommu/intel/iommu.c
+++ b/drivers/iommu/intel/iommu.c
@@ -3818,9 +3818,8 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
 	 * page aligned, we don't need to use a bounce page.
 	 */
 	if (!IS_ALIGNED(paddr | size, VTD_PAGE_SIZE)) {
-		tlb_addr = swiotlb_tbl_map_single(dev,
-				phys_to_dma_unencrypted(dev, io_tlb_start),
-				paddr, size, aligned_size, dir, attrs);
+		tlb_addr = swiotlb_tbl_map_single(dev, paddr, size,
+				aligned_size, dir, attrs);
 		if (tlb_addr == DMA_MAPPING_ERROR) {
 			goto swiotlb_error;
 		} else {
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -395,8 +395,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	 */
 	trace_swiotlb_bounced(dev, dev_addr, size, swiotlb_force);
 
-	map = swiotlb_tbl_map_single(dev, virt_to_phys(xen_io_tlb_start),
-				     phys, size, size, dir, attrs);
+	map = swiotlb_tbl_map_single(dev, phys, size, size, dir, attrs);
 	if (map == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -45,13 +45,9 @@ enum dma_sync_target {
 	SYNC_FOR_DEVICE = 1,
 };
 
-extern phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-					  dma_addr_t tbl_dma_addr,
-					  phys_addr_t phys,
-					  size_t mapping_size,
-					  size_t alloc_size,
-					  enum dma_data_direction dir,
-					  unsigned long attrs);
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t phys,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs);
 
 extern void swiotlb_tbl_unmap_single(struct device *hwdev,
 				     phys_addr_t tlb_addr,
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -229,6 +229,7 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	if (verbose)
 		swiotlb_print_info();
@@ -260,9 +261,11 @@ swiotlb_init(int verbose)
 	if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, verbose))
 		return;
 
-	if (io_tlb_start)
+	if (io_tlb_start) {
 		memblock_free_early(io_tlb_start,
 				    PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));
+		io_tlb_start = 0;
+	}
 	pr_warn("Cannot allocate buffer");
 	no_iotlb_memory = true;
 }
@@ -360,6 +363,7 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
 		io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
 	}
 	io_tlb_index = 0;
+	no_iotlb_memory = false;
 
 	swiotlb_print_info();
 
@@ -441,14 +445,11 @@ static void swiotlb_bounce(phys_addr_t orig_addr, phys_addr_t tlb_addr,
 	}
 }
 
-phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
-				   dma_addr_t tbl_dma_addr,
-				   phys_addr_t orig_addr,
-				   size_t mapping_size,
-				   size_t alloc_size,
-				   enum dma_data_direction dir,
-				   unsigned long attrs)
+phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
+		size_t mapping_size, size_t alloc_size,
+		enum dma_data_direction dir, unsigned long attrs)
 {
+	dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start);
 	unsigned long flags;
 	phys_addr_t tlb_addr;
 	unsigned int nslots, stride, index, wrap;
@@ -667,9 +668,8 @@ dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
 	trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
 			      swiotlb_force);
 
-	swiotlb_addr = swiotlb_tbl_map_single(dev,
-			phys_to_dma_unencrypted(dev, io_tlb_start),
-			paddr, size, size, dir, attrs);
+	swiotlb_addr = swiotlb_tbl_map_single(dev, paddr, size, size, dir,
+			attrs);
 	if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
 		return DMA_MAPPING_ERROR;
 
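
The second patch matters because swiotlb_tbl_map_single() panics outright when no_iotlb_memory is still set. Sketch of the guard involved (paraphrased from kernel/dma/swiotlb.c of this era; the lines below are surrounding context, not part of this diff):

	/* Early in swiotlb_tbl_map_single(): refuse to map without a pool. */
	if (no_iotlb_memory)
		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");

If the early memblock allocation failed, the flag stayed true forever, so a later, successful swiotlb_late_init_with_tbl() still tripped this panic. Both init paths therefore clear the flag on success, and swiotlb_init() now zeroes io_tlb_start after freeing the half-initialized buffer so stale state cannot leak into the retry.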