Commit 6a059658 authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  intel-iommu: fix superpage support in pfn_to_dma_pte()
  intel-iommu: set iommu_superpage on VM domains to lowest common denominator
  intel-iommu: fix return value of iommu_unmap() API
  MAINTAINERS: Update VT-d entry for drivers/pci -> drivers/iommu move
  intel-iommu: Export a flag indicating that the IOMMU is used for iGFX.
  intel-iommu: Workaround IOTLB hang on Ironlake GPU
  intel-iommu: Fix AB-BA lockdep report
parents 15cc9101 4399c8bf
@@ -3313,7 +3313,7 @@ M:	David Woodhouse <dwmw2@infradead.org>
 L:	iommu@lists.linux-foundation.org
 T:	git git://git.infradead.org/iommu-2.6.git
 S:	Supported
-F:	drivers/pci/intel-iommu.c
+F:	drivers/iommu/intel-iommu.c
 F:	include/linux/intel-iommu.h
 
 INTEL IOP-ADMA DMA DRIVER
...
@@ -306,6 +306,11 @@ static inline bool dma_pte_present(struct dma_pte *pte)
 	return (pte->val & 3) != 0;
 }
 
+static inline bool dma_pte_superpage(struct dma_pte *pte)
+{
+	return (pte->val & (1 << 7));
+}
+
 static inline int first_pte_in_page(struct dma_pte *pte)
 {
 	return !((unsigned long)pte & ~VTD_PAGE_MASK);
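[Note: illustration, not part of the diff.] In the VT-d second-level page-table format, bit 7 of a PTE is the large-page flag, so the new dma_pte_superpage() helper is simply a test of that bit. A minimal standalone sketch of the same check:

	/* Illustration only: bit 7 of a VT-d PTE marks a superpage mapping. */
	#include <stdbool.h>
	#include <stdint.h>

	#define DMA_PTE_LARGE_PAGE (1ULL << 7)

	static bool pte_is_superpage(uint64_t pte_val)
	{
		return (pte_val & DMA_PTE_LARGE_PAGE) != 0;
	}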
@@ -404,6 +409,9 @@ static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
 
+int intel_iommu_gfx_mapped;
+EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
+
 #define DUMMY_DEVICE_DOMAIN_INFO ((struct device_domain_info *)(-1))
 static DEFINE_SPINLOCK(device_domain_lock);
 static LIST_HEAD(device_domain_list);
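[Note: illustration, not part of the diff.] The exported intel_iommu_gfx_mapped flag lets the graphics driver discover whether the integrated GPU is actually translated by the IOMMU. A hedged usage sketch; the consumer function shown here is hypothetical:

	/* Hypothetical consumer of the exported flag. */
	extern int intel_iommu_gfx_mapped;

	static bool gfx_is_behind_iommu(void)
	{
		/* Zero when the IOMMU bypasses graphics (dmar_map_gfx == 0)
		 * or when graphics is not covered by an enabled DRHD unit. */
		return intel_iommu_gfx_mapped != 0;
	}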
@@ -577,17 +585,18 @@ static void domain_update_iommu_snooping(struct dmar_domain *domain)
 
 static void domain_update_iommu_superpage(struct dmar_domain *domain)
 {
-	int i, mask = 0xf;
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu = NULL;
+	int mask = 0xf;
 
 	if (!intel_iommu_superpage) {
 		domain->iommu_superpage = 0;
 		return;
 	}
 
-	domain->iommu_superpage = 4; /* 1TiB */
-
-	for_each_set_bit(i, &domain->iommu_bmp, g_num_of_iommus) {
-		mask |= cap_super_page_val(g_iommus[i]->cap);
+	/* set iommu_superpage to the smallest common denominator */
+	for_each_active_iommu(iommu, drhd) {
+		mask &= cap_super_page_val(iommu->cap);
 		if (!mask) {
 			break;
 		}
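[Note: worked example, not part of the diff.] cap_super_page_val() yields a per-IOMMU bitmask of supported superpage sizes, so switching from |= to &= keeps only the sizes that every active unit supports; the old OR could claim a size that some unit in the domain's path cannot handle. With illustrative capability values:

	/* Illustration only: smallest common denominator of superpage caps. */
	static unsigned int common_superpage_mask(void)
	{
		unsigned int mask = 0xf;             /* start from all sizes */
		unsigned int caps[2] = { 0x3, 0x1 }; /* A: 2MiB+1GiB, B: 2MiB only */
		int i;

		for (i = 0; i < 2; i++)
			mask &= caps[i];
		return mask; /* 0x1 -> domain->iommu_superpage = fls(mask) = 1 (2MiB) */
	}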
@@ -730,29 +739,23 @@ static void free_context_table(struct intel_iommu *iommu)
 }
 
 static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
-				      unsigned long pfn, int large_level)
+				      unsigned long pfn, int target_level)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	struct dma_pte *parent, *pte = NULL;
 	int level = agaw_to_level(domain->agaw);
-	int offset, target_level;
+	int offset;
 
 	BUG_ON(!domain->pgd);
 	BUG_ON(addr_width < BITS_PER_LONG && pfn >> addr_width);
 	parent = domain->pgd;
 
-	/* Search pte */
-	if (!large_level)
-		target_level = 1;
-	else
-		target_level = large_level;
-
 	while (level > 0) {
 		void *tmp_page;
 
 		offset = pfn_level_offset(pfn, level);
 		pte = &parent[offset];
-		if (!large_level && (pte->val & DMA_PTE_LARGE_PAGE))
+		if (!target_level && (dma_pte_superpage(pte) || !dma_pte_present(pte)))
 			break;
 		if (level == target_level)
 			break;
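[Note: illustration, not part of the diff.] Renaming the parameter makes the calling convention explicit: a positive target_level asks the walk to descend (allocating tables as needed) to exactly that level, while target_level == 0 is a pure lookup that now also stops on a superpage entry or a hole instead of descending, and allocating, below it:

	/* Hypothetical call sites showing the new convention:
	 *
	 *   pfn_to_dma_pte(domain, pfn, 0);  lookup only: stop at whatever
	 *                                    is mapped, superpage or hole
	 *   pfn_to_dma_pte(domain, pfn, 1);  descend/allocate to a 4KiB PTE
	 *   pfn_to_dma_pte(domain, pfn, 2);  descend/allocate to a 2MiB PTE
	 */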
@@ -816,13 +819,14 @@ static struct dma_pte *dma_pfn_level_pte(struct dmar_domain *domain,
 }
 
 /* clear last level pte, a tlb flush should be followed */
-static void dma_pte_clear_range(struct dmar_domain *domain,
+static int dma_pte_clear_range(struct dmar_domain *domain,
 				unsigned long start_pfn,
 				unsigned long last_pfn)
 {
 	int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT;
 	unsigned int large_page = 1;
 	struct dma_pte *first_pte, *pte;
+	int order;
 
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
@@ -846,6 +850,9 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 				   (void *)pte - (void *)first_pte);
 
 	} while (start_pfn && start_pfn <= last_pfn);
+
+	order = (large_page - 1) * 9;
+	return order;
 }
 
 /* free page table pages. last level pte should already be cleared */
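[Note: worked arithmetic, not part of the diff.] Each VT-d page-table level resolves 9 address bits (512 entries per 4KiB table), so a leaf found at level large_page covers 2^((large_page - 1) * 9) 4KiB pages; that exponent is the order returned:

	/* Illustration only: leaf level -> order of 4KiB pages covered. */
	static int level_to_order(unsigned int large_page)
	{
		return (large_page - 1) * 9;	/* 9 translation bits per level */
	}
	/* level 1 -> order 0  (one 4KiB page)
	 * level 2 -> order 9  (512 pages = 2MiB)
	 * level 3 -> order 18 (1GiB) */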
@@ -3226,9 +3233,6 @@ static void __init init_no_remapping_devices(void)
 		}
 	}
 
-	if (dmar_map_gfx)
-		return;
-
 	for_each_drhd_unit(drhd) {
 		int i;
 		if (drhd->ignored || drhd->include_all)
@@ -3242,7 +3246,11 @@ static void __init init_no_remapping_devices(void)
 		if (i < drhd->devices_cnt)
 			continue;
 
-		/* bypass IOMMU if it is just for gfx devices */
+		/* This IOMMU has *only* gfx devices. Either bypass it or
+		   set the gfx_mapped flag, as appropriate */
+		if (dmar_map_gfx) {
+			intel_iommu_gfx_mapped = 1;
+		} else {
 		drhd->ignored = 1;
 		for (i = 0; i < drhd->devices_cnt; i++) {
 			if (!drhd->devices[i])
@@ -3250,6 +3258,7 @@ static void __init init_no_remapping_devices(void)
 			drhd->devices[i]->dev.archdata.iommu = DUMMY_DEVICE_DOMAIN_INFO;
 		}
 	}
+	}
 }
 
 #ifdef CONFIG_SUSPEND
@@ -3568,6 +3577,8 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 			found = 1;
 	}
 
+	spin_unlock_irqrestore(&device_domain_lock, flags);
+
 	if (found == 0) {
 		unsigned long tmp_flags;
 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
@@ -3584,8 +3595,6 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
 		spin_unlock_irqrestore(&iommu->lock, tmp_flags);
 	}
 
-	spin_unlock_irqrestore(&device_domain_lock, flags);
-
 }
 
 static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
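[Note: schematic, not part of the diff.] Releasing device_domain_lock before taking domain->iommu_lock removes the nesting that lockdep flagged: other paths in the driver take the same pair of locks in the opposite order, which is the classic AB-BA pattern:

	/* AB-BA inversion (schematic, not driver code):
	 *   CPU0                          CPU1
	 *   lock(device_domain_lock)      lock(iommu_lock)
	 *   lock(iommu_lock)   ...waits   lock(device_domain_lock) ...waits
	 * With the unlock moved up, this function never holds both locks
	 * at once, so the cycle cannot form. */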
@@ -3739,6 +3748,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
 		vm_domain_exit(dmar_domain);
 		return -ENOMEM;
 	}
+	domain_update_iommu_cap(dmar_domain);
 	domain->priv = dmar_domain;
 
 	return 0;
@@ -3864,14 +3874,15 @@ static int intel_iommu_unmap(struct iommu_domain *domain,
 {
 	struct dmar_domain *dmar_domain = domain->priv;
 	size_t size = PAGE_SIZE << gfp_order;
+	int order;
 
-	dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
-			    (iova + size - 1) >> VTD_PAGE_SHIFT);
+	order = dma_pte_clear_range(dmar_domain, iova >> VTD_PAGE_SHIFT,
+				    (iova + size - 1) >> VTD_PAGE_SHIFT);
 
 	if (dmar_domain->max_addr == iova + size)
 		dmar_domain->max_addr = iova;
 
-	return gfp_order;
+	return order;
 }
 
 static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
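[Note: illustration, not part of the diff.] With dma_pte_clear_range() now reporting what it actually cleared, iommu_unmap() can return the true order: when the IOVA sits inside a superpage, even a 4KiB unmap request tears down the whole superpage, and the caller needs to learn that rather than getting its own gfp_order echoed back. A hedged caller sketch against the iommu API of this era:

	/* Hypothetical caller: ask to unmap one 4KiB page (gfp_order 0).
	 * If that IOVA was backed by a 2MiB superpage, the whole superpage
	 * is cleared and the function now returns order 9, not 0. */
	int order = iommu_unmap(domain, iova, 0);
	size_t unmapped = PAGE_SIZE << order;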
@@ -3950,6 +3961,10 @@ static void __devinit quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 	if (!(ggc & GGC_MEMORY_VT_ENABLED)) {
 		printk(KERN_INFO "DMAR: BIOS has allocated no shadow GTT; disabling IOMMU for graphics\n");
 		dmar_map_gfx = 0;
+	} else if (dmar_map_gfx) {
+		/* we have to ensure the gfx device is idle before we flush */
+		printk(KERN_INFO "DMAR: Disabling batched IOTLB flush on Ironlake\n");
+		intel_iommu_strict = 1;
 	}
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x0040, quirk_calpella_no_shadow_gtt);
...
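[Note: schematic, not part of the diff.] Setting intel_iommu_strict = 1 trades the default batched (deferred) IOTLB invalidation for a synchronous flush on every unmap; on Ironlake the flush can hang unless the GPU is idle, so deferring it to an arbitrary later point is unsafe:

	/* Strict vs. batched invalidation (schematic, not driver code):
	 * batched: unmap(); unmap(); ... -> one deferred IOTLB flush later
	 * strict:  unmap() -> flush; unmap() -> flush; ...
	 * The quirk forces strict mode, keeping each flush adjacent to its
	 * unmap instead of happening at an unpredictable later time. */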