Commit 6810df88 authored by Konrad Rzeszutek Wilk

xen-swiotlb: When doing coherent alloc/dealloc check before swizzling the MFNs.

Swizzling the Machine Frame Numbers (MFNs) behind an allocation is
not always necessary. In this patch we check the bus address of the
allocation against the device's coherent DMA mask and verify that the
requested page(s) are contiguous. If both checks pass, we return the
bus address as-is without doing the memory swizzle. The same test,
negated, is used on the dealloc path to decide whether the contiguous
region has to be torn down before the pages are freed.
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 12e13ac8
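
Before the diff, a minimal standalone sketch of the decision being added.
This is illustrative only: the two helper bodies below are simplified
stand-ins (xen_phys_to_bus() treated as an identity mapping,
range_straddles_page_boundary() reduced to a single-4KiB-frame check),
not the real Xen implementations.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
typedef uint64_t dma_addr_t;

#define XEN_PAGE_SIZE 4096ULL

/* Stand-in: the real helper translates a PFN to an MFN; identity here. */
static dma_addr_t xen_phys_to_bus(phys_addr_t paddr)
{
	return (dma_addr_t)paddr;
}

/* Stand-in: the real helper checks that the MFNs backing the range are
 * contiguous; here we only ask whether the range crosses one frame. */
static bool range_straddles_page_boundary(phys_addr_t paddr, size_t size)
{
	return (paddr % XEN_PAGE_SIZE) + size > XEN_PAGE_SIZE;
}

/* The patch's check: the allocation can be used as-is when its bus
 * address fits under the device's coherent DMA mask and the backing
 * frames are contiguous; only otherwise do we swizzle the MFNs. */
static bool can_skip_swizzle(phys_addr_t phys, size_t size, uint64_t dma_mask)
{
	dma_addr_t dev_addr = xen_phys_to_bus(phys);

	return dev_addr + size - 1 <= dma_mask &&
	       !range_straddles_page_boundary(phys, size);
}

int main(void)
{
	/* 2 KiB inside one frame, 32-bit mask: prints 1 (no swizzle). */
	printf("%d\n", can_skip_swizzle(0x1000, 2048, 0xffffffffULL));
	/* Same size straddling a frame boundary: prints 0 (swizzle). */
	printf("%d\n", can_skip_swizzle(0x1c00, 2048, 0xffffffffULL));
	return 0;
}

In the patch itself this predicate is open-coded in
xen_swiotlb_alloc_coherent(), and the memset() of the buffer is hoisted
out of the slow path so both outcomes return zeroed memory.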
@@ -209,6 +209,8 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	int order = get_order(size);
 	u64 dma_mask = DMA_BIT_MASK(32);
 	unsigned long vstart;
+	phys_addr_t phys;
+	dma_addr_t dev_addr;
 
 	/*
 	 * Ignore region specifiers - the kernel's ideas of
@@ -224,18 +226,26 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	vstart = __get_free_pages(flags, order);
 	ret = (void *)vstart;
 
+	if (!ret)
+		return ret;
+
 	if (hwdev && hwdev->coherent_dma_mask)
-		dma_mask = dma_alloc_coherent_mask(hwdev, flags);
+		dma_mask = hwdev->coherent_dma_mask;
 
-	if (ret) {
+	phys = virt_to_phys(ret);
+	dev_addr = xen_phys_to_bus(phys);
+	if (((dev_addr + size - 1 <= dma_mask)) &&
+	    !range_straddles_page_boundary(phys, size))
+		*dma_handle = dev_addr;
+	else {
 		if (xen_create_contiguous_region(vstart, order,
 						 fls64(dma_mask)) != 0) {
 			free_pages(vstart, order);
 			return NULL;
 		}
-		memset(ret, 0, size);
 		*dma_handle = virt_to_machine(ret).maddr;
 	}
+	memset(ret, 0, size);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_alloc_coherent);
@@ -245,11 +255,21 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 			  dma_addr_t dev_addr)
 {
 	int order = get_order(size);
+	phys_addr_t phys;
+	u64 dma_mask = DMA_BIT_MASK(32);
 
 	if (dma_release_from_coherent(hwdev, order, vaddr))
 		return;
 
-	xen_destroy_contiguous_region((unsigned long)vaddr, order);
+	if (hwdev && hwdev->coherent_dma_mask)
+		dma_mask = hwdev->coherent_dma_mask;
+
+	phys = virt_to_phys(vaddr);
+
+	if (((dev_addr + size - 1 > dma_mask)) ||
+	    range_straddles_page_boundary(phys, size))
+		xen_destroy_contiguous_region((unsigned long)vaddr, order);
+
 	free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL_GPL(xen_swiotlb_free_coherent);
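
One design note on the free side: xen_swiotlb_free_coherent() keeps no
record of whether the allocation was swizzled, so it re-derives the
answer from the dev_addr the caller hands back, using the negation of
the allocation-time predicate. A sketch of that mirror, under the same
assumed helpers as in the sketch above:

/* True when the region was made contiguous at alloc time and must be
 * handed back to Xen before the underlying pages are freed. */
static bool needs_destroy_contiguous(dma_addr_t dev_addr, phys_addr_t phys,
				     size_t size, uint64_t dma_mask)
{
	return dev_addr + size - 1 > dma_mask ||
	       range_straddles_page_boundary(phys, size);
}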