Commit ab7032bb authored by Joerg Roedel

iommu/amd: Remove need_flush from struct dma_ops_domain

The flushing of IOMMU TLBs is now done on a per-range basis, so
there is no longer any need for domain-wide flush tracking.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 2a87442c
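To illustrate the logic the patch relies on, here is a minimal stand-alone sketch of the per-range flush decision that replaces the need_flush flag. The struct, the flush helpers and the free_addresses() wrapper are simplified stand-ins for illustration, not the driver's real definitions; only the condition mirrors the dma_ops_free_addresses() hunk in the diff below.

/*
 * Simplified model of the per-range flush decision this patch moves into
 * dma_ops_free_addresses().  Everything here is a stand-in for
 * illustration; the real code is in the AMD IOMMU driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct aperture_range {
	unsigned long next_bit;		/* where the next allocation starts */
};

/* stand-in for the amd_iommu_unmap_flush kernel parameter */
static bool amd_iommu_unmap_flush = false;

static void domain_flush_tlb(void)      { puts("flush domain TLB"); }
static void domain_flush_complete(void) { puts("wait for flush to complete"); }

/*
 * Mirrors the new check in dma_ops_free_addresses(): flush when unmap
 * flushing is requested globally, or when the freed region reaches
 * addresses at or beyond next_bit, which the allocator could hand out
 * again before its next wrap-around flush.  No domain-wide need_flush
 * flag is consulted anymore.
 */
static void free_addresses(struct aperture_range *range,
			   unsigned long address, unsigned long pages)
{
	if (amd_iommu_unmap_flush || (address + pages > range->next_bit)) {
		domain_flush_tlb();
		domain_flush_complete();
	}
	/* ...the real code clears the range's bitmap bits here... */
}

int main(void)
{
	struct aperture_range range = { .next_bit = 128 };

	free_addresses(&range, 64, 16);		/* 80 <= 128: no flush needed */
	free_addresses(&range, 120, 16);	/* 136 > 128: flush the TLB   */
	return 0;
}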
@@ -151,9 +151,6 @@ struct dma_ops_domain {

 	/* address space relevant data */
 	struct aperture_range *aperture[APERTURE_MAX_RANGES];
-
-	/* This will be set to true when TLB needs to be flushed */
-	bool need_flush;
 };

 /****************************************************************************
@@ -1563,7 +1560,7 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 					unsigned long align_mask,
 					u64 dma_mask)
 {
-	unsigned long next_bit, boundary_size, mask;
+	unsigned long boundary_size, mask;
 	unsigned long address = -1;
 	int start = dom->next_index;
 	int i;
@@ -1581,8 +1578,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 		if (!range || range->offset >= dma_mask)
			continue;

-		next_bit = range->next_bit;
-
 		address = dma_ops_aperture_alloc(dom, range, pages,
 						 dma_mask, boundary_size,
 						 align_mask);
@@ -1591,9 +1586,6 @@ static unsigned long dma_ops_area_alloc(struct device *dev,
 			dom->next_index = i;
 			break;
 		}
-
-		if (next_bit > range->next_bit)
-			dom->need_flush = true;
 	}

 	return address;
@@ -1609,7 +1601,6 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,

 #ifdef CONFIG_IOMMU_STRESS
 	dom->next_index = 0;
-	dom->need_flush = true;
 #endif

 	address = dma_ops_area_alloc(dev, dom, pages, align_mask, dma_mask);
@@ -1642,7 +1633,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 		return;
 #endif

-	if (address + pages > range->next_bit) {
+	if (amd_iommu_unmap_flush ||
+	    (address + pages > range->next_bit)) {
 		domain_flush_tlb(&dom->domain);
 		domain_flush_complete(&dom->domain);
 	}
@@ -1868,8 +1860,6 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;

-	dma_dom->need_flush = false;
-
 	add_domain_to_list(&dma_dom->domain);

 	if (alloc_new_range(dma_dom, true, GFP_KERNEL))
@@ -2503,11 +2493,10 @@ static dma_addr_t __map_single(struct device *dev,

 	ADD_STATS_COUNTER(alloced_io_mem, size);

-	if (unlikely(dma_dom->need_flush && !amd_iommu_unmap_flush)) {
-		domain_flush_tlb(&dma_dom->domain);
-		dma_dom->need_flush = false;
-	} else if (unlikely(amd_iommu_np_cache))
+	if (unlikely(amd_iommu_np_cache)) {
 		domain_flush_pages(&dma_dom->domain, address, size);
+		domain_flush_complete(&dma_dom->domain);
+	}

 out:
 	return address;
@@ -2519,8 +2508,6 @@ static dma_addr_t __map_single(struct device *dev,
 		dma_ops_domain_unmap(dma_dom, start);
 	}

-	domain_flush_pages(&dma_dom->domain, address, size);
-
 	dma_ops_free_addresses(dma_dom, address, pages);

 	return DMA_ERROR_CODE;
@@ -2553,11 +2540,6 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 		start += PAGE_SIZE;
 	}

-	if (amd_iommu_unmap_flush || dma_dom->need_flush) {
-		domain_flush_pages(&dma_dom->domain, flush_addr, size);
-		dma_dom->need_flush = false;
-	}
-
 	SUB_STATS_COUNTER(alloced_io_mem, size);

 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
...