Commit 0518a3a4 authored by Joerg Roedel's avatar Joerg Roedel

x86/amd-iommu: Add function to complete a tlb flush

This patch adds a function to the AMD IOMMU driver which
completes all queued commands on all IOMMUs a specific
domain has devices attached on. This is required in a later
patch when per-domain flushing is implemented.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent c4596114
...@@ -376,6 +376,22 @@ static int iommu_completion_wait(struct amd_iommu *iommu) ...@@ -376,6 +376,22 @@ static int iommu_completion_wait(struct amd_iommu *iommu)
return 0; return 0;
} }
/*
 * Wait until every IOMMU that has devices of this domain attached
 * has finished processing all queued commands.
 */
static void iommu_flush_complete(struct protection_domain *domain)
{
	int idx;

	for (idx = 0; idx < amd_iommus_present; ++idx) {
		/* Only IOMMUs with a non-zero device count serve this domain */
		if (!domain->dev_iommu[idx])
			continue;
		iommu_completion_wait(amd_iommus[idx]);
	}
}
/* /*
* Command send function for invalidating a device table entry * Command send function for invalidating a device table entry
*/ */
...@@ -1758,7 +1774,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page, ...@@ -1758,7 +1774,7 @@ static dma_addr_t map_page(struct device *dev, struct page *page,
if (addr == DMA_ERROR_CODE) if (addr == DMA_ERROR_CODE)
goto out; goto out;
iommu_completion_wait(iommu); iommu_flush_complete(domain);
out: out:
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
...@@ -1791,7 +1807,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, ...@@ -1791,7 +1807,7 @@ static void unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
__unmap_single(iommu, domain->priv, dma_addr, size, dir); __unmap_single(iommu, domain->priv, dma_addr, size, dir);
iommu_completion_wait(iommu); iommu_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
...@@ -1863,7 +1879,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -1863,7 +1879,7 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
goto unmap; goto unmap;
} }
iommu_completion_wait(iommu); iommu_flush_complete(domain);
out: out:
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
...@@ -1914,7 +1930,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist, ...@@ -1914,7 +1930,7 @@ static void unmap_sg(struct device *dev, struct scatterlist *sglist,
s->dma_address = s->dma_length = 0; s->dma_address = s->dma_length = 0;
} }
iommu_completion_wait(iommu); iommu_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
} }
...@@ -1969,7 +1985,7 @@ static void *alloc_coherent(struct device *dev, size_t size, ...@@ -1969,7 +1985,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
goto out_free; goto out_free;
} }
iommu_completion_wait(iommu); iommu_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
...@@ -2010,7 +2026,7 @@ static void free_coherent(struct device *dev, size_t size, ...@@ -2010,7 +2026,7 @@ static void free_coherent(struct device *dev, size_t size,
__unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL); __unmap_single(iommu, domain->priv, dma_addr, size, DMA_BIDIRECTIONAL);
iommu_completion_wait(iommu); iommu_flush_complete(domain);
spin_unlock_irqrestore(&domain->lock, flags); spin_unlock_irqrestore(&domain->lock, flags);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment