Commit 63f0620c authored by Stefano Stabellini, committed by Juergen Gross

xen/arm: introduce phys/dma translations in xen_dma_sync_for_*

xen_dma_sync_for_cpu, xen_dma_sync_for_device, and xen_arch_need_swiotlb are
called with DMA addresses. On some platforms DMA addresses differ from
physical addresses, so before doing any operations on these addresses we
need to convert them back to physical addresses using dma_to_phys.
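
For example, in xen_arch_need_swiotlb the frame-number comparison must be
made on a physical address, not a DMA address. A minimal sketch of the
translation step (the actual change appears in the diff below):

	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));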

Move the arch_sync_dma_for_cpu and arch_sync_dma_for_device calls from
xen_dma_sync_for_cpu/device into swiotlb-xen.c, and add a call to
dma_to_phys to do the address translation there.
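
As a minimal sketch of the resulting dispatch at each swiotlb-xen sync site
(the wrapper name sync_for_cpu_example is hypothetical; the real call sites
appear in the diff below):

	/*
	 * Illustrative only, not part of the patch. If the DMA address
	 * translates to a local page (pfn_valid), use the generic
	 * arch_sync_dma_* path; otherwise fall back to the Xen-specific
	 * cache maintenance, which no longer takes a paddr argument.
	 */
	static void sync_for_cpu_example(struct device *dev, dma_addr_t dma_addr,
					 phys_addr_t paddr, size_t size,
					 enum dma_data_direction dir)
	{
		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
			arch_sync_dma_for_cpu(paddr, size, dir);
		else
			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
	}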

dma_cache_maint is fixed by the next patch.
Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
Tested-by: Corey Minyard <cminyard@mvista.com>
Tested-by: Roman Shaposhnik <roman@zededa.com>
Acked-by: Juergen Gross <jgross@suse.com>
Link: https://lore.kernel.org/r/20200710223427.6897-10-sstabellini@kernel.org
Signed-off-by: Juergen Gross <jgross@suse.com>
parent 91ffe4ad
arch/arm/xen/mm.c

@@ -1,5 +1,6 @@
 // SPDX-License-Identifier: GPL-2.0-only
 #include <linux/cpu.h>
+#include <linux/dma-direct.h>
 #include <linux/dma-noncoherent.h>
 #include <linux/gfp.h>
 #include <linux/highmem.h>
@@ -72,22 +73,16 @@ static void dma_cache_maint(dma_addr_t handle, size_t size, u32 op)
  * dma-direct functions, otherwise we call the Xen specific version.
  */
 void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+		size_t size, enum dma_data_direction dir)
 {
-	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_cpu(paddr, size, dir);
-	else if (dir != DMA_TO_DEVICE)
+	if (dir != DMA_TO_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 }
 
 void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir)
+		size_t size, enum dma_data_direction dir)
 {
-	if (pfn_valid(PFN_DOWN(handle)))
-		arch_sync_dma_for_device(paddr, size, dir);
-	else if (dir == DMA_FROM_DEVICE)
+	if (dir == DMA_FROM_DEVICE)
 		dma_cache_maint(handle, size, GNTTAB_CACHE_INVAL);
 	else
 		dma_cache_maint(handle, size, GNTTAB_CACHE_CLEAN);
@@ -98,7 +93,7 @@ bool xen_arch_need_swiotlb(struct device *dev,
 			   dma_addr_t dev_addr)
 {
 	unsigned int xen_pfn = XEN_PFN_DOWN(phys);
-	unsigned int bfn = XEN_PFN_DOWN(dev_addr);
+	unsigned int bfn = XEN_PFN_DOWN(dma_to_phys(dev, dev_addr));
 
 	/*
 	 * The swiotlb buffer should be used if
...
drivers/xen/swiotlb-xen.c

@@ -413,8 +413,12 @@ static dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
 	}
 
 done:
-	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_device(dev, dev_addr, phys, size, dir);
+	if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dev_addr))))
+			arch_sync_dma_for_device(phys, size, dir);
+		else
+			xen_dma_sync_for_device(dev, dev_addr, size, dir);
+	}
 	return dev_addr;
 }
@@ -433,8 +437,12 @@ static void xen_swiotlb_unmap_page(struct device *hwdev, dma_addr_t dev_addr,
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
-		xen_dma_sync_for_cpu(hwdev, dev_addr, paddr, size, dir);
+	if (!dev_is_dma_coherent(hwdev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
+		if (pfn_valid(PFN_DOWN(dma_to_phys(hwdev, dev_addr))))
+			arch_sync_dma_for_cpu(paddr, size, dir);
+		else
+			xen_dma_sync_for_cpu(hwdev, dev_addr, size, dir);
+	}
 
 	/* NOTE: We use dev_addr here, not paddr! */
 	if (is_xen_swiotlb_buffer(hwdev, dev_addr))
@@ -447,8 +455,12 @@ xen_swiotlb_sync_single_for_cpu(struct device *dev, dma_addr_t dma_addr,
 {
 	phys_addr_t paddr = xen_dma_to_phys(dev, dma_addr);
 
-	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_cpu(dev, dma_addr, paddr, size, dir);
+	if (!dev_is_dma_coherent(dev)) {
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+			arch_sync_dma_for_cpu(paddr, size, dir);
+		else
+			xen_dma_sync_for_cpu(dev, dma_addr, size, dir);
+	}
 
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_CPU);
@@ -463,8 +475,12 @@ xen_swiotlb_sync_single_for_device(struct device *dev, dma_addr_t dma_addr,
 	if (is_xen_swiotlb_buffer(dev, dma_addr))
 		swiotlb_tbl_sync_single(dev, paddr, size, dir, SYNC_FOR_DEVICE);
 
-	if (!dev_is_dma_coherent(dev))
-		xen_dma_sync_for_device(dev, dma_addr, paddr, size, dir);
+	if (!dev_is_dma_coherent(dev)) {
+		if (pfn_valid(PFN_DOWN(dma_to_phys(dev, dma_addr))))
+			arch_sync_dma_for_device(paddr, size, dir);
+		else
+			xen_dma_sync_for_device(dev, dma_addr, size, dir);
+	}
 }
 
 /*
...
include/xen/swiotlb-xen.h

@@ -5,11 +5,9 @@
 #include <linux/swiotlb.h>
 
 void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir);
+		size_t size, enum dma_data_direction dir);
 void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
-		phys_addr_t paddr, size_t size,
-		enum dma_data_direction dir);
+		size_t size, enum dma_data_direction dir);
 
 extern int xen_swiotlb_init(int verbose, bool early);
 extern const struct dma_map_ops xen_swiotlb_dma_ops;
...