Commit 7fd856aa authored by Claire Chang, committed by Konrad Rzeszutek Wilk

swiotlb: Update is_swiotlb_buffer to add a struct device argument

Update is_swiotlb_buffer to add a struct device argument. This will be
useful later to allow for different pools.
Signed-off-by: Claire Chang <tientzu@chromium.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Tested-by: Stefano Stabellini <sstabellini@kernel.org>
Tested-by: Will Deacon <will@kernel.org>
Acked-by: Stefano Stabellini <sstabellini@kernel.org>
Signed-off-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
parent 69031f50
...@@ -506,7 +506,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr, ...@@ -506,7 +506,7 @@ static void __iommu_dma_unmap_swiotlb(struct device *dev, dma_addr_t dma_addr,
__iommu_dma_unmap(dev, dma_addr, size); __iommu_dma_unmap(dev, dma_addr, size);
if (unlikely(is_swiotlb_buffer(phys))) if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
} }
...@@ -577,7 +577,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys, ...@@ -577,7 +577,7 @@ static dma_addr_t __iommu_dma_map_swiotlb(struct device *dev, phys_addr_t phys,
} }
iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask); iova = __iommu_dma_map(dev, phys, aligned_size, prot, dma_mask);
if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(phys)) if (iova == DMA_MAPPING_ERROR && is_swiotlb_buffer(dev, phys))
swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs); swiotlb_tbl_unmap_single(dev, phys, org_size, dir, attrs);
return iova; return iova;
} }
...@@ -783,7 +783,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev, ...@@ -783,7 +783,7 @@ static void iommu_dma_sync_single_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(phys, size, dir); arch_sync_dma_for_cpu(phys, size, dir);
if (is_swiotlb_buffer(phys)) if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_cpu(dev, phys, size, dir); swiotlb_sync_single_for_cpu(dev, phys, size, dir);
} }
...@@ -796,7 +796,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev, ...@@ -796,7 +796,7 @@ static void iommu_dma_sync_single_for_device(struct device *dev,
return; return;
phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle); phys = iommu_iova_to_phys(iommu_get_dma_domain(dev), dma_handle);
if (is_swiotlb_buffer(phys)) if (is_swiotlb_buffer(dev, phys))
swiotlb_sync_single_for_device(dev, phys, size, dir); swiotlb_sync_single_for_device(dev, phys, size, dir);
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev))
...@@ -817,7 +817,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev, ...@@ -817,7 +817,7 @@ static void iommu_dma_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir); arch_sync_dma_for_cpu(sg_phys(sg), sg->length, dir);
if (is_swiotlb_buffer(sg_phys(sg))) if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_cpu(dev, sg_phys(sg), swiotlb_sync_single_for_cpu(dev, sg_phys(sg),
sg->length, dir); sg->length, dir);
} }
...@@ -834,7 +834,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev, ...@@ -834,7 +834,7 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
return; return;
for_each_sg(sgl, sg, nelems, i) { for_each_sg(sgl, sg, nelems, i) {
if (is_swiotlb_buffer(sg_phys(sg))) if (is_swiotlb_buffer(dev, sg_phys(sg)))
swiotlb_sync_single_for_device(dev, sg_phys(sg), swiotlb_sync_single_for_device(dev, sg_phys(sg),
sg->length, dir); sg->length, dir);
......
...@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr) ...@@ -100,7 +100,7 @@ static int is_xen_swiotlb_buffer(struct device *dev, dma_addr_t dma_addr)
* in our domain. Therefore _only_ check address within our domain. * in our domain. Therefore _only_ check address within our domain.
*/ */
if (pfn_valid(PFN_DOWN(paddr))) if (pfn_valid(PFN_DOWN(paddr)))
return is_swiotlb_buffer(paddr); return is_swiotlb_buffer(dev, paddr);
return 0; return 0;
} }
......
...@@ -2,6 +2,7 @@ ...@@ -2,6 +2,7 @@
#ifndef __LINUX_SWIOTLB_H #ifndef __LINUX_SWIOTLB_H
#define __LINUX_SWIOTLB_H #define __LINUX_SWIOTLB_H
#include <linux/device.h>
#include <linux/dma-direction.h> #include <linux/dma-direction.h>
#include <linux/init.h> #include <linux/init.h>
#include <linux/types.h> #include <linux/types.h>
...@@ -101,9 +102,9 @@ struct io_tlb_mem { ...@@ -101,9 +102,9 @@ struct io_tlb_mem {
}; };
extern struct io_tlb_mem *io_tlb_default_mem; extern struct io_tlb_mem *io_tlb_default_mem;
/*
 * is_swiotlb_buffer - test whether @paddr lies inside @dev's swiotlb pool.
 *
 * Checks @paddr against the io_tlb_mem attached to the device
 * (dev->dma_io_tlb_mem) rather than the global default pool, so that
 * different devices can use different bounce-buffer pools.  Returns false
 * when the device has no pool attached.
 */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;

	return mem && paddr >= mem->start && paddr < mem->end;
}
...@@ -115,7 +116,7 @@ bool is_swiotlb_active(void); ...@@ -115,7 +116,7 @@ bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long size); void __init swiotlb_adjust_size(unsigned long size);
#else #else
#define swiotlb_force SWIOTLB_NO_FORCE #define swiotlb_force SWIOTLB_NO_FORCE
/*
 * !CONFIG_SWIOTLB stub: no bounce-buffer pool exists, so no physical
 * address can ever be a swiotlb buffer.  @dev and @paddr are ignored.
 */
static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
{
	return false;
}
......
...@@ -343,7 +343,7 @@ void dma_direct_sync_sg_for_device(struct device *dev, ...@@ -343,7 +343,7 @@ void dma_direct_sync_sg_for_device(struct device *dev,
for_each_sg(sgl, sg, nents, i) { for_each_sg(sgl, sg, nents, i) {
phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg)); phys_addr_t paddr = dma_to_phys(dev, sg_dma_address(sg));
if (unlikely(is_swiotlb_buffer(paddr))) if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_device(dev, paddr, sg->length, swiotlb_sync_single_for_device(dev, paddr, sg->length,
dir); dir);
...@@ -369,7 +369,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev, ...@@ -369,7 +369,7 @@ void dma_direct_sync_sg_for_cpu(struct device *dev,
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev))
arch_sync_dma_for_cpu(paddr, sg->length, dir); arch_sync_dma_for_cpu(paddr, sg->length, dir);
if (unlikely(is_swiotlb_buffer(paddr))) if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_cpu(dev, paddr, sg->length, swiotlb_sync_single_for_cpu(dev, paddr, sg->length,
dir); dir);
...@@ -504,7 +504,7 @@ size_t dma_direct_max_mapping_size(struct device *dev) ...@@ -504,7 +504,7 @@ size_t dma_direct_max_mapping_size(struct device *dev)
/*
 * dma_direct_need_sync - report whether dma_sync_* operations are needed
 * for @dma_addr on @dev.
 *
 * A sync is required when the device is not DMA-coherent, or when the
 * address translates back to a physical address inside the device's
 * swiotlb bounce-buffer pool (hence the struct device argument to
 * is_swiotlb_buffer(), which selects the per-device pool).
 */
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	return !dev_is_dma_coherent(dev) ||
		is_swiotlb_buffer(dev, dma_to_phys(dev, dma_addr));
}
/** /**
......
...@@ -56,7 +56,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev, ...@@ -56,7 +56,7 @@ static inline void dma_direct_sync_single_for_device(struct device *dev,
{ {
phys_addr_t paddr = dma_to_phys(dev, addr); phys_addr_t paddr = dma_to_phys(dev, addr);
if (unlikely(is_swiotlb_buffer(paddr))) if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_device(dev, paddr, size, dir); swiotlb_sync_single_for_device(dev, paddr, size, dir);
if (!dev_is_dma_coherent(dev)) if (!dev_is_dma_coherent(dev))
...@@ -73,7 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev, ...@@ -73,7 +73,7 @@ static inline void dma_direct_sync_single_for_cpu(struct device *dev,
arch_sync_dma_for_cpu_all(); arch_sync_dma_for_cpu_all();
} }
if (unlikely(is_swiotlb_buffer(paddr))) if (unlikely(is_swiotlb_buffer(dev, paddr)))
swiotlb_sync_single_for_cpu(dev, paddr, size, dir); swiotlb_sync_single_for_cpu(dev, paddr, size, dir);
if (dir == DMA_FROM_DEVICE) if (dir == DMA_FROM_DEVICE)
...@@ -113,7 +113,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr, ...@@ -113,7 +113,7 @@ static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC)) if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
dma_direct_sync_single_for_cpu(dev, addr, size, dir); dma_direct_sync_single_for_cpu(dev, addr, size, dir);
if (unlikely(is_swiotlb_buffer(phys))) if (unlikely(is_swiotlb_buffer(dev, phys)))
swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs); swiotlb_tbl_unmap_single(dev, phys, size, dir, attrs);
} }
#endif /* _KERNEL_DMA_DIRECT_H */ #endif /* _KERNEL_DMA_DIRECT_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment