Commit dcd1e92e authored by Joerg Roedel

x86/amd-iommu: Use __iommu_flush_pages for tlb flushes

This patch re-implements the iommu_flush_tlb functions to use
the __iommu_flush_pages logic.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
parent 6de8ad9b
@@ -451,7 +451,7 @@ static void __iommu_flush_pages(struct protection_domain *domain,
 				u64 address, size_t size, int pde)
 {
 	int s = 0, i;
-	unsigned pages = iommu_num_pages(address, size, PAGE_SIZE);
+	unsigned long pages = iommu_num_pages(address, size, PAGE_SIZE);
 
 	address &= PAGE_MASK;
 
@@ -487,23 +487,15 @@ static void iommu_flush_pages(struct protection_domain *domain,
 }
 
 /* Flush the whole IO/TLB for a given protection domain */
-static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
 }
 
 /* Flush the whole IO/TLB for a given protection domain - including PDE */
-static void iommu_flush_tlb_pde(struct amd_iommu *iommu, u16 domid)
+static void iommu_flush_tlb_pde(struct protection_domain *domain)
 {
-	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
-
-	INC_STATS_COUNTER(domain_flush_single);
-
-	iommu_queue_inv_iommu_pages(iommu, address, domid, 1, 1);
+	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
 }
 
 /*
@@ -1236,7 +1228,7 @@ static void attach_device(struct amd_iommu *iommu,
 	 * here to evict all dirty stuff.
 	 */
 	iommu_queue_inv_dev_entry(iommu, devid);
-	iommu_flush_tlb_pde(iommu, domain->id);
+	iommu_flush_tlb_pde(domain);
 }
 
 /*
@@ -1697,7 +1689,7 @@ static dma_addr_t __map_single(struct device *dev,
 	ADD_STATS_COUNTER(alloced_io_mem, size);
 
 	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		iommu_flush_tlb(&dma_dom->domain);
 		dma_dom->need_flush = false;
 	} else if (unlikely(iommu_has_npcache(iommu)))
 		iommu_flush_pages(&dma_dom->domain, address, size);
...
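For reference, here is the post-patch shape of the two helpers, consolidated from the new side of the diff above. struct protection_domain, CMD_INV_IOMMU_ALL_PAGES_ADDRESS and __iommu_flush_pages() are defined elsewhere in the same driver source; the comments on the pde argument are explanatory additions, not part of the commit:

/* Flush the whole IO/TLB for a given protection domain */
static void iommu_flush_tlb(struct protection_domain *domain)
{
	/* pde = 0: cached page-directory entries stay valid */
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 0);
}

/* Flush the whole IO/TLB for a given protection domain - including PDE */
static void iommu_flush_tlb_pde(struct protection_domain *domain)
{
	/* pde = 1: also invalidate cached page-directory entries */
	__iommu_flush_pages(domain, 0, CMD_INV_IOMMU_ALL_PAGES_ADDRESS, 1);
}

Passing CMD_INV_IOMMU_ALL_PAGES_ADDRESS as the size makes __iommu_flush_pages() request an invalidation covering the domain's whole address space, so both helpers reduce to a single call that differs only in the pde flag.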