Commit b61d271e authored by Robin Murphy, committed by Joerg Roedel

iommu/dma: Move domain lookup into __iommu_dma_{map,unmap}

Most of the callers don't care, and the couple that do already have
the domain to hand for other reasons are in slow paths, where the
(trivial) overhead of a repeated lookup is utterly immaterial.

Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[hch: dropped the hunk touching iommu_dma_get_msi_page to avoid a
 conflict with another series]
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 92aec09c
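
The shape of the change, reduced to a minimal standalone C sketch: the domain lookup moves out of every call site and into the helper itself. The struct device, struct iommu_domain and iommu_get_dma_domain() below are simplified stand-ins for the kernel definitions, used for illustration only; just the function names follow the patch.

#include <stdio.h>

/* Simplified stand-ins for the kernel's types (illustration only). */
struct iommu_domain { int id; };
struct device { struct iommu_domain *dma_domain; };

/* Stand-in for the kernel's iommu_get_dma_domain(): a trivial lookup. */
static struct iommu_domain *iommu_get_dma_domain(struct device *dev)
{
	return dev->dma_domain;
}

/*
 * After the patch: the helper takes the device and looks the domain up
 * itself, instead of every caller passing it in as an extra argument.
 */
static void __iommu_dma_unmap(struct device *dev, unsigned long dma_addr,
		size_t size)
{
	struct iommu_domain *domain = iommu_get_dma_domain(dev);

	printf("unmap %#lx+%zu in domain %d\n", dma_addr, size, domain->id);
}

int main(void)
{
	struct iommu_domain dom = { .id = 42 };
	struct device dev = { .dma_domain = &dom };

	/* Call sites no longer repeat iommu_get_dma_domain(dev). */
	__iommu_dma_unmap(&dev, 0x1000, 0x2000);
	return 0;
}

Per the commit message, the repeated lookup inside the helper is cheap, so shrinking every caller's argument list costs nothing measurable even on fast paths.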
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -448,9 +448,10 @@ static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
 			size >> iova_shift(iovad));
 }
 
-static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
+static void __iommu_dma_unmap(struct device *dev, dma_addr_t dma_addr,
 		size_t size)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	struct iova_domain *iovad = &cookie->iovad;
 	size_t iova_off = iova_offset(iovad, dma_addr);
@@ -465,8 +466,9 @@ static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
 }
 
 static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
-		size_t size, int prot, struct iommu_domain *domain)
+		size_t size, int prot)
 {
+	struct iommu_domain *domain = iommu_get_dma_domain(dev);
 	struct iommu_dma_cookie *cookie = domain->iova_cookie;
 	size_t iova_off = 0;
 	dma_addr_t iova;
@@ -565,7 +567,7 @@ static struct page **__iommu_dma_alloc_pages(struct device *dev,
 static void __iommu_dma_free(struct device *dev, struct page **pages,
 		size_t size, dma_addr_t *handle)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), *handle, size);
+	__iommu_dma_unmap(dev, *handle, size);
 	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
 	*handle = DMA_MAPPING_ERROR;
 }
@@ -718,14 +720,13 @@ static void iommu_dma_sync_sg_for_device(struct device *dev,
 static dma_addr_t __iommu_dma_map_page(struct device *dev, struct page *page,
 		unsigned long offset, size_t size, int prot)
 {
-	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot,
-			iommu_get_dma_domain(dev));
+	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
 }
 
 static void __iommu_dma_unmap_page(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+	__iommu_dma_unmap(dev, handle, size);
 }
 
 static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
@@ -734,11 +735,10 @@ static dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
 {
 	phys_addr_t phys = page_to_phys(page) + offset;
 	bool coherent = dev_is_dma_coherent(dev);
+	int prot = dma_info_to_prot(dir, coherent, attrs);
 	dma_addr_t dma_handle;
 
-	dma_handle =__iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, coherent, attrs),
-			iommu_get_dma_domain(dev));
+	dma_handle =__iommu_dma_map(dev, phys, size, prot);
 	if (!coherent && !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
 	    dma_handle != DMA_MAPPING_ERROR)
 		arch_sync_dma_for_device(dev, phys, size, dir);
@@ -750,7 +750,7 @@ static void iommu_dma_unmap_page(struct device *dev, dma_addr_t dma_handle,
 {
 	if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC))
 		iommu_dma_sync_single_for_cpu(dev, dma_handle, size, dir);
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), dma_handle, size);
+	__iommu_dma_unmap(dev, dma_handle, size);
 }
 
 /*
@@ -931,21 +931,20 @@ static void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 		sg = tmp;
 	}
 	end = sg_dma_address(sg) + sg_dma_len(sg);
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), start, end - start);
+	__iommu_dma_unmap(dev, start, end - start);
 }
 
 static dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
 	return __iommu_dma_map(dev, phys, size,
-			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO,
-			iommu_get_dma_domain(dev));
+			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
 }
 
 static void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
 		size_t size, enum dma_data_direction dir, unsigned long attrs)
 {
-	__iommu_dma_unmap(iommu_get_dma_domain(dev), handle, size);
+	__iommu_dma_unmap(dev, handle, size);
 }
 
 static void *iommu_dma_alloc(struct device *dev, size_t size,
@@ -1222,7 +1221,7 @@ static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
 	if (!msi_page)
 		return NULL;
 
-	iova = __iommu_dma_map(dev, msi_addr, size, prot, domain);
+	iova = __iommu_dma_map(dev, msi_addr, size, prot);
 	if (iova == DMA_MAPPING_ERROR)
 		goto out_free_page;