Commit 91ffe4ad authored by Stefano Stabellini, committed by Juergen Gross

swiotlb-xen: introduce phys_to_dma/dma_to_phys translations

With some devices, physical addresses are different from dma addresses.
To be able to deal with these cases, we need to call phys_to_dma on
physical addresses (including machine addresses in Xen terminology)
before returning them from xen_swiotlb_alloc_coherent and
xen_swiotlb_map_page.

We also need to convert dma addresses back to physical addresses using
dma_to_phys in xen_swiotlb_free_coherent and xen_swiotlb_unmap_page if
we want to do any operations on them.

Call dma_to_phys in is_xen_swiotlb_buffer.
Introduce xen_phys_to_dma and call phys_to_dma in its implementation.
Introduce xen_dma_to_phys and call dma_to_phys in its implementation.
Call xen_phys_to_dma/xen_dma_to_phys instead of
xen_phys_to_bus/xen_bus_to_phys throughout swiotlb-xen.c.
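
For reference, the two new helpers simply layer the generic phys_to_dma/dma_to_phys
conversion on top of the existing Xen phys/bus (machine) translation; condensed from
the first hunk of the diff below:

  /*
   * xen_phys_to_bus()/xen_bus_to_phys() keep handling only the Xen
   * machine-address translation; the generic DMA offset is applied on top.
   */
  static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
  {
          return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
  }

  static inline phys_addr_t xen_dma_to_phys(struct device *dev, dma_addr_t dma_addr)
  {
          return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
  }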

Everything is taken care of by these changes except for
xen_swiotlb_alloc_coherent and xen_swiotlb_free_coherent, which need a
few explicit phys_to_dma/dma_to_phys calls.
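
Condensed from the xen_swiotlb_alloc_coherent/xen_swiotlb_free_coherent hunks in the
diff below, showing where those explicit calls land (unrelated lines elided as /* ... */):

  /*
   * xen_swiotlb_alloc_coherent(): *dma_handle arrives as a dma address, so
   * recover the CPU physical address first, then translate it for the device;
   * the handle returned to the caller is again a dma address.
   */
  phys = dma_to_phys(hwdev, *dma_handle);
  dev_addr = xen_phys_to_dma(hwdev, phys);
  /* ... */
  *dma_handle = phys_to_dma(hwdev, *dma_handle);

  /*
   * xen_swiotlb_free_coherent(): convert the incoming handle back to a
   * physical address before using it, and pass a dma address down to
   * xen_free_coherent_pages().
   */
  phys = xen_dma_to_phys(hwdev, dev_addr);
  /* ... */
  xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys), attrs);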
Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
Tested-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Roman Shaposhnik <roman@zededa.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20200710223427.6897-9-sstabellini@kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
parent e9aab7e4
@@ -52,30 +52,39 @@ static unsigned long xen_io_tlb_nslabs;
  * Quick lookup value of the bus address of the IOTLB.
  */
 
-static inline dma_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
+static inline phys_addr_t xen_phys_to_bus(struct device *dev, phys_addr_t paddr)
 {
 	unsigned long bfn = pfn_to_bfn(XEN_PFN_DOWN(paddr));
-	dma_addr_t dma = (dma_addr_t)bfn << XEN_PAGE_SHIFT;
+	phys_addr_t baddr = (phys_addr_t)bfn << XEN_PAGE_SHIFT;
 
-	dma |= paddr & ~XEN_PAGE_MASK;
+	baddr |= paddr & ~XEN_PAGE_MASK;
+	return baddr;
+}
 
-	return dma;
+static inline dma_addr_t xen_phys_to_dma(struct device *dev, phys_addr_t paddr)
+{
+	return phys_to_dma(dev, xen_phys_to_bus(dev, paddr));
 }
 
-static inline phys_addr_t xen_bus_to_phys(struct device *dev, dma_addr_t baddr)
+static inline phys_addr_t xen_bus_to_phys(struct device *dev,
+					  phys_addr_t baddr)
 {
 	unsigned long xen_pfn = bfn_to_pfn(XEN_PFN_DOWN(baddr));
-	dma_addr_t dma = (dma_addr_t)xen_pfn << XEN_PAGE_SHIFT;
-	phys_addr_t paddr = dma;
-
-	paddr |= baddr & ~XEN_PAGE_MASK;
+	phys_addr_t paddr = (xen_pfn << XEN_PAGE_SHIFT) |
+			    (baddr & ~XEN_PAGE_MASK);
 
 	return paddr;
 }
 
+static inline phys_addr_t xen_dma_to_phys(struct device *dev,
+					  dma_addr_t dma_addr)
+{
+	return xen_bus_to_phys(dev, dma_to_phys(dev, dma_addr));
+}
+
 static inline dma_addr_t xen_virt_to_bus(struct device *dev, void *address)
 {
-	return xen_phys_to_bus(dev, virt_to_phys(address));
+	return xen_phys_to_dma(dev, virt_to_phys(address));
 }
 
 static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
@@ -94,7 +103,7 @@ static inline int range_straddles_page_boundary(phys_addr_t p, size_t size)
 
 static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
 {
-	unsigned long bfn = XEN_PFN_DOWN(dma_addr);
+	unsigned long bfn = XEN_PFN_DOWN(dma_to_phys(dev, dma_addr));
 	unsigned long xen_pfn = bfn_to_local_pfn(bfn);
 	phys_addr_t paddr = (phys_addr_t)xen_pfn << XEN_PAGE_SHIFT;
 
@@ -299,12 +308,12 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 	if (hwdev && hwdev->coherent_dma_mask)
 		dma_mask = hwdev->coherent_dma_mask;
 
-	/* At this point dma_handle is the physical address, next we are
+	/* At this point dma_handle is the dma address, next we are
 	 * going to set it to the machine address.
 	 * Do not use virt_to_phys(ret) because on ARM it doesn't correspond
 	 * to *dma_handle. */
-	phys = *dma_handle;
-	dev_addr = xen_phys_to_bus(hwdev, phys);
+	phys = dma_to_phys(hwdev, *dma_handle);
+	dev_addr = xen_phys_to_dma(hwdev, phys);
 	if (((dev_addr + size - 1 <= dma_mask)) &&
 	    !range_straddles_page_boundary(phys, size))
 		*dma_handle = dev_addr;
@@ -314,6 +323,7 @@ xen_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 			xen_free_coherent_pages(hwdev, size, ret, (dma_addr_t)phys, attrs);
 			return NULL;
 		}
+		*dma_handle = phys_to_dma(hwdev, *dma_handle);
 		SetPageXenRemapped(virt_to_page(ret));
 	}
 	memset(ret, 0, size);
@@ -334,7 +344,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 
 	/* do not use virt_to_phys because on ARM it doesn't return you the
 	 * physical address */
-	phys = xen_bus_to_phys(hwdev, dev_addr);
+	phys = xen_dma_to_phys(hwdev, dev_addr);
 
 	/* Convert the size to actually allocated. */
 	size = 1UL << (order + XEN_PAGE_SHIFT);
@@ -349,7 +359,8 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	    TestClearPageXenRemapped(page))
 		xen_destroy_contiguous_region(phys, order);
 
-	xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+	xen_free_coherent_pages(hwdev, size, vaddr, phys_to_dma(hwdev, phys),
+				attrs);
 }
 
 /*
@@ -365,7 +376,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 				unsigned long attrs)
 {
 	phys_addr_t map, phys = page_to_phys(page) + offset;
-	dma_addr_t dev_addr = xen_phys_to_bus(dev, phys);
+	dma_addr_t dev_addr = xen_phys_to_dma(dev, phys);
 
 	BUG_ON(dir == DMA_NONE);
 	/*
@@ -390,7 +401,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 		return DMA_MAPPING_ERROR;
 
 	phys = map;
-	dev_addr = xen_phys_to_bus(dev, map);
+	dev_addr = xen_phys_to_dma(dev, map);
 
 	/*
 	 * Ensure that the address returned is DMA'ble
@@ -418,7 +429,7 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	phys_addr_t paddr = xen_bus_to_phys(hwdev, dev_addr);
+	phys_addr_t paddr = xen_dma_to_phys(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
@@ -434,7 +445,7 @@ static void
 xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
-	phys_addr_t paddr = xen_bus_to_phys(dev, dma_addr);
+	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
 	if (!dev_is_dma_coherent(dev))
 		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
@@ -447,7 +458,7 @@ static void
 xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 		size_t size, enum dma_data_direction dir)
 {
-	phys_addr_t paddr = xen_bus_to_phys(dev, dma_addr);
+	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);