Commit 1c655773 authored by Joerg Roedel, committed by Ingo Molnar

AMD IOMMU: implement lazy IO/TLB flushing

The IO/TLB flush on every unmapping operation is the most expensive
part of the AMD IOMMU code and is not strictly necessary. It is
sufficient to flush before any entries are reused. This patch
implements lazy IO/TLB flushing, which does exactly that.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 2842e5bf
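The core idea: device addresses come from a ring allocator over the
aperture, so a freed address is not handed out again until the
allocator wraps around. Instead of flushing the IO/TLB on every unmap,
the domain is merely marked dirty when the allocator wraps, and a
single full-domain flush is issued before the first reuse. The toy C
program below (not kernel code; all names and the fixed-size ring are
illustrative, and the !iommu_fullflush check is omitted) models just
that pattern:

#include <stdbool.h>
#include <stdio.h>

#define APERTURE_PAGES 8	/* tiny aperture for illustration */

struct toy_domain {
	unsigned int next_bit;	/* next free slot in the address ring */
	bool need_flush;	/* set when freed addresses may be reused */
};

/* stand-in for iommu_flush_tlb(): one full-domain IO/TLB flush */
static void toy_flush_tlb(struct toy_domain *dom)
{
	printf("  -> flushing whole IO/TLB\n");
	dom->need_flush = false;
}

/* stand-in for dma_ops_alloc_addresses(): a wrap marks the domain dirty */
static unsigned int toy_alloc_address(struct toy_domain *dom)
{
	if (dom->next_bit >= APERTURE_PAGES) {
		dom->next_bit = 0;
		dom->need_flush = true;	/* old addresses come back into play */
	}
	return dom->next_bit++;
}

/* stand-in for __map_single(): flush lazily, only before reuse */
static void toy_map_single(struct toy_domain *dom)
{
	unsigned int addr = toy_alloc_address(dom);

	if (dom->need_flush)
		toy_flush_tlb(dom);
	printf("mapped IO page %u\n", addr);
}

int main(void)
{
	struct toy_domain dom = { 0, false };

	/* 12 mappings on an 8-page ring: exactly one flush, at the wrap */
	for (int i = 0; i < 12; ++i)
		toy_map_single(&dom);
	return 0;
}

Running it maps pages 0 through 7, wraps, flushes once, and continues
with pages 0 through 3: one flush where flush-on-unmap would have cost
one per unmapped buffer.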
@@ -203,6 +203,14 @@ static int iommu_flush_pages(struct amd_iommu *iommu, u16 domid,
 	return 0;
 }
 
+/* Flush the whole IO/TLB for a given protection domain */
+static void iommu_flush_tlb(struct amd_iommu *iommu, u16 domid)
+{
+	u64 address = CMD_INV_IOMMU_ALL_PAGES_ADDRESS;
+
+	iommu_queue_inv_iommu_pages(iommu, address, domid, 0, 1);
+}
+
 /****************************************************************************
  *
  * The functions below are used the create the page table mappings for
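For reference: iommu_queue_inv_iommu_pages() is the driver's existing
primitive for queueing an INVALIDATE_IOMMU_PAGES command. Per the AMD
IOMMU specification, issuing it with the all-ones page address
(CMD_INV_IOMMU_ALL_PAGES_ADDRESS) and the size bit set (the final 1
argument; the preceding 0 is the PDE bit) asks the hardware to drop
every translation cached for the domain, so one queued command empties
the whole IO/TLB for that protection domain.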
@@ -386,14 +394,18 @@ static unsigned long dma_ops_alloc_addresses(struct device *dev,
 					PAGE_SIZE) >> PAGE_SHIFT;
 	limit = limit < size ? limit : size;
 
-	if (dom->next_bit >= limit)
+	if (dom->next_bit >= limit) {
 		dom->next_bit = 0;
+		dom->need_flush = true;
+	}
 
 	address = iommu_area_alloc(dom->bitmap, limit, dom->next_bit, pages,
 				   0 , boundary_size, 0);
-	if (address == -1)
+	if (address == -1) {
 		address = iommu_area_alloc(dom->bitmap, limit, 0, pages,
 				0, boundary_size, 0);
+		dom->need_flush = true;
+	}
 
 	if (likely(address != -1)) {
 		dom->next_bit = address + pages;
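Both new branches set need_flush for the same reason: whether next_bit
wrapped past the limit or the fallback search restarted at bit 0, the
allocator can now return addresses that were freed without a flush,
and stale translations for them may still sit in the IO/TLB. The flush
itself happens lazily in __map_single below, before the freshly
allocated address is handed to the device.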
@@ -553,6 +565,8 @@ static struct dma_ops_domain *dma_ops_domain_alloc(struct amd_iommu *iommu,
 	dma_dom->bitmap[0] = 1;
 	dma_dom->next_bit = 0;
 
+	dma_dom->need_flush = false;
+
 	/* Intialize the exclusion range if necessary */
 	if (iommu->exclusion_start &&
 	    iommu->exclusion_start < dma_dom->aperture_size) {
@@ -795,7 +809,10 @@ static dma_addr_t __map_single(struct device *dev,
 	}
 	address += offset;
 
-	if (unlikely(iommu_has_npcache(iommu)))
+	if (unlikely(dma_dom->need_flush && !iommu_fullflush)) {
+		iommu_flush_tlb(iommu, dma_dom->domain.id);
+		dma_dom->need_flush = false;
+	} else if (unlikely(iommu_has_npcache(iommu)))
 		iommu_flush_pages(iommu, dma_dom->domain.id, address, size);
 
 out:
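Two cases meet in the map path: in the default lazy mode a dirty
domain gets one full flush via the new iommu_flush_tlb() and the flag
is cleared; otherwise an IOMMU that caches non-present entries
(iommu_has_npcache()) still needs the newly mapped range flushed, as
before. When the user boots with iommu=fullflush, the lazy branch is
skipped entirely and the next hunk keeps the old behaviour instead:
__unmap_single flushes on every unmap.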
@@ -829,7 +846,8 @@ static void __unmap_single(struct amd_iommu *iommu,
 
 	dma_ops_free_addresses(dma_dom, dma_addr, pages);
 
-	iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
+	if (iommu_fullflush)
+		iommu_flush_pages(iommu, dma_dom->domain.id, dma_addr, size);
 }
 
 /*
...
@@ -995,6 +995,11 @@ int __init amd_iommu_init(void)
 	else
 		printk("disabled\n");
 
+	if (iommu_fullflush)
+		printk(KERN_INFO "AMD IOMMU: IO/TLB flush on unmap enabled\n");
+	else
+		printk(KERN_INFO "AMD IOMMU: Lazy IO/TLB flushing enabled\n");
+
 out:
 	return ret;
 
@@ -1057,7 +1062,7 @@ void __init amd_iommu_detect(void)
 static int __init parse_amd_iommu_options(char *str)
 {
 	for (; *str; ++str) {
-		if (strcmp(str, "isolate") == 0)
+		if (strncmp(str, "isolate", 7) == 0)
 			amd_iommu_isolate = 1;
 	}
 
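The switch from strcmp() to strncmp() matters because the surrounding
loop advances str one character at a time: an exact match only fires
when "isolate" is the final text of the option string, while the
7-character prefix match also recognises it when further text follows.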
...
@@ -196,6 +196,9 @@ struct dma_ops_domain {
 	 * just calculate its address in constant time.
 	 */
 	u64 **pte_pages;
+
+	/* This will be set to true when TLB needs to be flushed */
+	bool need_flush;
 };
 
 /*