Commit 4eeca8c5 authored by Joerg Roedel

iommu/amd: Optimize dma_ops_free_addresses

Don't flush the iommu tlb when we free something behind the
current next_bit pointer. Update the next_bit pointer
instead and let the flush happen on the next wraparound in
the allocation path.
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent ab7032bb
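As a minimal userspace sketch of the deferred-flush idea described in the commit message (this is not the AMD IOMMU driver code; the names range_alloc, range_free, tlb_flush and the RANGE_PAGES constant are made up for illustration): the free path only raises next_bit past the freed pages, and the allocation path flushes once whenever it wraps back to the start of the range, so freed pages are never handed out again before a flush.

/*
 * Hypothetical, simplified model of the deferred flush; not kernel code.
 * The free path never flushes.  It only makes sure next_bit lies past the
 * freed pages, so the allocator cannot reuse them before its next
 * wraparound, which is where the single flush happens.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define RANGE_PAGES 64

struct range {
	bool         used[RANGE_PAGES]; /* one flag per page, standing in for the bitmap */
	unsigned int next_bit;          /* allocation scan starts here */
};

static void tlb_flush(void)
{
	puts("flush");
}

/* Free path: record the high-water mark, clear the pages, never flush. */
static void range_free(struct range *r, unsigned int start, unsigned int pages)
{
	if (start + pages > r->next_bit)
		r->next_bit = start + pages;
	memset(&r->used[start], 0, pages * sizeof(r->used[0]));
}

/* Allocation path: scan from next_bit; flush once when wrapping around. */
static int range_alloc(struct range *r, unsigned int pages)
{
	for (int pass = 0; pass < 2; pass++) {
		for (unsigned int i = r->next_bit; i + pages <= RANGE_PAGES; i++) {
			bool avail = true;

			for (unsigned int j = 0; j < pages; j++)
				if (r->used[i + j])
					avail = false;
			if (!avail)
				continue;

			for (unsigned int j = 0; j < pages; j++)
				r->used[i + j] = true;
			r->next_bit = i + pages;
			return (int)i;
		}
		/* Wraparound: flush stale TLB entries before reusing freed pages. */
		tlb_flush();
		r->next_bit = 0;
	}
	return -1;
}

int main(void)
{
	struct range r = { .next_bit = 0 };
	int a = range_alloc(&r, 4);
	int b = range_alloc(&r, 4);

	range_free(&r, (unsigned int)a, 4); /* behind next_bit: no flush needed */
	printf("a=%d b=%d next_bit=%u\n", a, b, r.next_bit);
	return 0;
}

In this sketch, freed pages are not reused until after the next wraparound flush, which is what makes skipping the per-free flush safe.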
@@ -1633,8 +1633,7 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 		return;
 #endif
-	if (amd_iommu_unmap_flush ||
-	    (address + pages > range->next_bit)) {
+	if (amd_iommu_unmap_flush) {
 		domain_flush_tlb(&dom->domain);
 		domain_flush_complete(&dom->domain);
 	}
@@ -1642,6 +1641,8 @@ static void dma_ops_free_addresses(struct dma_ops_domain *dom,
 	address = (address % APERTURE_RANGE_SIZE) >> PAGE_SHIFT;
 	spin_lock_irqsave(&range->bitmap_lock, flags);
+	if (address + pages > range->next_bit)
+		range->next_bit = address + pages;
 	bitmap_clear(range->bitmap, address, pages);
 	spin_unlock_irqrestore(&range->bitmap_lock, flags);