Commit b09a75fc authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6: (23 commits)
  intel-iommu: Disable PMRs after we enable translation, not before
  intel-iommu: Kill DMAR_BROKEN_GFX_WA option.
  intel-iommu: Fix integer wrap on 32 bit kernels
  intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()
  intel-iommu: Limit DOMAIN_MAX_PFN to fit in an 'unsigned long'
  intel-iommu: Fix kernel hang if interrupt remapping disabled in BIOS
  intel-iommu: Disallow interrupt remapping if not all ioapics covered
  intel-iommu: include linux/dmi.h to use dmi_ routines
  pci/dmar: correct off-by-one error in dmar_fault()
  intel-iommu: Cope with yet another BIOS screwup causing crashes
  intel-iommu: iommu init error path bug fixes
  intel-iommu: Mark functions with __init
  USB: Work around BIOS bugs by quiescing USB controllers earlier
  ia64: IOMMU passthrough mode shouldn't trigger swiotlb init
  intel-iommu: make domain_add_dev_info() call domain_context_mapping()
  intel-iommu: Unify hardware and software passthrough support
  intel-iommu: Cope with broken HP DC7900 BIOS
  iommu=pt is a valid early param
  intel-iommu: double kfree()
  intel-iommu: Kill pointless intel_unmap_single() function
  ...

Fixed up a trivial conflict in the include lines of drivers/pci/intel-iommu.c
parents cf63ff5f b94996c9
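
Two of the commits in this series, "intel-iommu: Fix integer wrap on 32 bit kernels" and "intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()", address the same class of bug: a pfn loop whose increment can wrap an unsigned long back to zero. A minimal standalone sketch of the pattern follows; the function names and the demo harness are illustrative, not the kernel code.

#include <stdio.h>
#include <limits.h>

/* Buggy shape: when last_pfn == ULONG_MAX, "pfn <= last_pfn" is always
 * true and pfn++ silently wraps to 0, so the loop walks forever (the
 * iteration cap below exists only so this demo terminates). */
static unsigned long walk_buggy(unsigned long pfn, unsigned long last_pfn)
{
        unsigned long n = 0;
        while (pfn <= last_pfn && n < 10) {
                n++;
                pfn++;          /* ULONG_MAX + 1 == 0 */
        }
        return n;
}

/* Fixed shape, mirroring the style of the kernel fix: treat a wrap to
 * zero as an explicit second termination condition. */
static unsigned long walk_fixed(unsigned long pfn, unsigned long last_pfn)
{
        unsigned long n = 0;
        do {
                n++;
                pfn++;
        } while (pfn && pfn <= last_pfn);       /* pfn == 0 means it wrapped */
        return n;
}

int main(void)
{
        printf("buggy: %lu iterations (artificially capped)\n",
               walk_buggy(ULONG_MAX - 2, ULONG_MAX));
        printf("fixed: %lu iterations\n",
               walk_fixed(ULONG_MAX - 2, ULONG_MAX));   /* 3 */
        return 0;
}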
@@ -56,11 +56,7 @@ Graphics Problems?
 ------------------
 If you encounter issues with graphics devices, you can try adding
 option intel_iommu=igfx_off to turn off the integrated graphics engine.
-
-If it happens to be a PCI device included in the INCLUDE_ALL Engine,
-then try enabling CONFIG_DMAR_GFX_WA to setup a 1-1 map. We hear
-graphics drivers may be in process of using DMA api's in the near
-future and at that time this option can be yanked out.
+If this fixes anything, please ensure you file a bug reporting the problem.
 
 Some exceptions to IOVA
 -----------------------
...
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
 void __init pci_swiotlb_init(void)
 {
-        if (!iommu_detected || iommu_pass_through) {
+        if (!iommu_detected) {
 #ifdef CONFIG_IA64_GENERIC
                 swiotlb = 1;
                 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
...
@@ -1901,7 +1901,7 @@ config DMAR_DEFAULT_ON
 config DMAR_BROKEN_GFX_WA
         def_bool n
         prompt "Workaround broken graphics drivers (going away soon)"
-        depends on DMAR
+        depends on DMAR && BROKEN
         ---help---
           Current Graphics drivers tend to use physical address
           for DMA and avoid using DMA APIs. Setting this config
...
@@ -46,8 +46,7 @@ void __init pci_swiotlb_init(void)
 {
         /* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-        if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-            iommu_pass_through)
+        if (!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN)
                 swiotlb = 1;
 #endif
         if (swiotlb_force)
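
Both pci_swiotlb_init() hunks above come from "intel-iommu: Unify hardware and software passthrough support": iommu=pt no longer forces swiotlb on, because pass-through is now handled inside the IOMMU driver itself (hardware pass-through context entries where supported, a software 1:1 identity domain otherwise). Below is a standalone paraphrase of the resulting x86 bootstrap test; the helper name is made up, though the variable names mirror the kernel's.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DMA32_PFN (1UL << (32 - 12))        /* 4 GiB with 4 KiB pages */

/* Paraphrase of the fixed test: swiotlb remains only a fallback for
 * machines with >4 GiB of RAM and no usable hardware IOMMU. */
static bool need_swiotlb(bool iommu_detected, bool no_iommu,
                         unsigned long max_pfn)
{
        return !iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN;
}

int main(void)
{
        /* big box, no IOMMU -> bounce buffers needed */
        printf("%d\n", need_swiotlb(false, false, 2 * MAX_DMA32_PFN)); /* 1 */
        /* IOMMU present -> no swiotlb, even when booted with iommu=pt */
        printf("%d\n", need_swiotlb(true, false, 2 * MAX_DMA32_PFN));  /* 0 */
        return 0;
}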
...
@@ -577,9 +577,6 @@ int __init dmar_table_init(void)
                 printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
-#ifdef CONFIG_INTR_REMAP
-        parse_ioapics_under_ir();
-#endif
-
         return 0;
 }
@@ -639,20 +636,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
         iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
 
+        if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+                /* Promote an attitude of violence to a BIOS engineer today */
+                WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+                     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+                     drhd->reg_base_addr,
+                     dmi_get_system_info(DMI_BIOS_VENDOR),
+                     dmi_get_system_info(DMI_BIOS_VERSION),
+                     dmi_get_system_info(DMI_PRODUCT_VERSION));
+                goto err_unmap;
+        }
+
 #ifdef CONFIG_DMAR
         agaw = iommu_calculate_agaw(iommu);
         if (agaw < 0) {
                 printk(KERN_ERR
                        "Cannot get a valid agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
-                goto error;
+                goto err_unmap;
         }
         msagaw = iommu_calculate_max_sagaw(iommu);
         if (msagaw < 0) {
                 printk(KERN_ERR
                        "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
                        iommu->seq_id);
-                goto error;
+                goto err_unmap;
         }
 #endif
         iommu->agaw = agaw;
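
The new sanity check above ("intel-iommu: Cope with yet another BIOS screwup causing crashes") relies on the fact that a read of a nonexistent or mis-reported MMIO region typically returns all ones, so cap == ecap == ~0 is a reliable "this DMAR unit isn't really there" signature. A standalone sketch of the same test; the sample register values below are made up for the demo.

#include <stdio.h>
#include <stdint.h>

/* All-ones is what a read of absent MMIO usually returns, so it serves
 * as a "broken BIOS described hardware that isn't there" signature. */
static int dmar_unit_looks_real(uint64_t cap, uint64_t ecap)
{
        return !(cap == (uint64_t)-1 && ecap == (uint64_t)-1);
}

int main(void)
{
        printf("%d\n", dmar_unit_looks_real(UINT64_MAX, UINT64_MAX));   /* 0 */
        printf("%d\n", dmar_unit_looks_real(0x00000000c0000020ULL,
                                            0x0000000000f0101aULL));    /* 1 */
        return 0;
}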
@@ -672,7 +680,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         }
 
         ver = readl(iommu->reg + DMAR_VER_REG);
-        pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+        pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
                 (unsigned long long)drhd->reg_base_addr,
                 DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
                 (unsigned long long)iommu->cap,
@@ -682,7 +690,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
         drhd->iommu = iommu;
         return 0;
 
- error:
+ err_unmap:
+        iounmap(iommu->reg);
+ error:
         kfree(iommu);
         return -1;
 }
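
The relabelled exit path above ("intel-iommu: iommu init error path bug fixes") follows the usual kernel idiom: each failure site jumps to the label that unwinds exactly what has been set up so far, with labels stacked in reverse order of acquisition, so the mapping of iommu->reg is no longer leaked on the agaw failures. A minimal standalone illustration of the idiom, with plain malloc standing in for kzalloc/ioremap:

#include <stdlib.h>
#include <stdio.h>

static int demo_init(void)
{
        char *obj, *mapping;

        obj = malloc(64);               /* stands in for kzalloc(iommu) */
        if (!obj)
                goto error;

        mapping = malloc(4096);         /* stands in for ioremap() */
        if (!mapping)
                goto err_free;          /* undo only what succeeded */

        /* ... later failures would jump past nothing they didn't acquire ... */

        free(mapping);
        free(obj);
        return 0;

err_free:                               /* unwind in reverse order */
        free(obj);
error:
        return -1;
}

int main(void)
{
        printf("demo_init: %d\n", demo_init());
        return 0;
}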
@@ -1219,7 +1230,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
                                 source_id, guest_addr);
 
                 fault_index++;
-                if (fault_index > cap_num_fault_regs(iommu->cap))
+                if (fault_index >= cap_num_fault_regs(iommu->cap))
                         fault_index = 0;
                 spin_lock_irqsave(&iommu->register_lock, flag);
         }
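
The one-character change above ("pci/dmar: correct off-by-one error in dmar_fault()") is the classic ring-index wrap bug: with N fault registers the valid indices are 0..N-1, so the wrap test must fire at N, not N+1. A standalone demo, where NUM_FAULT_REGS is an arbitrary stand-in for cap_num_fault_regs():

#include <stdio.h>

#define NUM_FAULT_REGS 4        /* stand-in for cap_num_fault_regs(cap) */

int main(void)
{
        int fault_index = 0;

        for (int i = 0; i < 10; i++) {
                printf("servicing fault register %d\n", fault_index);
                fault_index++;
                /* With '>' instead of '>=', fault_index would reach
                 * NUM_FAULT_REGS and index a register that doesn't
                 * exist for one iteration before wrapping to 0. */
                if (fault_index >= NUM_FAULT_REGS)
                        fault_index = 0;
        }
        return 0;
}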
@@ -1312,3 +1323,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 
         return 0;
 }
+
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+        struct acpi_table_dmar *dmar;
+        dmar = (struct acpi_table_dmar *)dmar_tbl;
+
+        return dmar->flags & 0x1;
+}
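
dmar_ir_support() above tests bit 0 of the ACPI DMAR table's flags field, which is how firmware advertises interrupt remapping support. A tiny standalone illustration of the same test; the macro name and the flags value below are made up for the demo.

#include <stdio.h>
#include <stdint.h>

#define DMAR_FLAG_INTR_REMAP 0x1        /* bit 0 of the DMAR table flags */

int main(void)
{
        uint8_t flags = 0x1;            /* pretend firmware-reported flags */

        printf("interrupt remapping %ssupported by platform\n",
               (flags & DMAR_FLAG_INTR_REMAP) ? "" : "not ");
        return 0;
}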
...
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
         if (disable_intremap)
                 return 0;
 
+        if (!dmar_ir_support())
+                return 0;
+
         for_each_drhd_unit(drhd) {
                 struct intel_iommu *iommu = drhd->iommu;
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
         struct dmar_drhd_unit *drhd;
         int setup = 0;
 
+        if (parse_ioapics_under_ir() != 1) {
+                printk(KERN_INFO "Not enabling interrupt remapping\n");
+                return -1;
+        }
+
         for_each_drhd_unit(drhd) {
                 struct intel_iommu *iommu = drhd->iommu;
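
The guard added above implements "intel-iommu: Disallow interrupt remapping if not all ioapics covered": parse_ioapics_under_ir() only returns 1 when every I/O APIC in the system sits under some DRHD scope, since remapping a subset would leave the rest able to deliver unremapped interrupts. A toy version of the all-or-nothing check, with a plain array standing in for the parsed ACPI structures:

#include <stdio.h>
#include <stdbool.h>

/* Toy stand-in: which of the system's I/O APICs appear in some DRHD
 * device scope.  The real code walks the ACPI DMAR structures. */
static bool ioapic_under_ir[] = { true, true, false };

static int all_ioapics_under_ir(void)
{
        for (unsigned i = 0;
             i < sizeof(ioapic_under_ir) / sizeof(ioapic_under_ir[0]); i++)
                if (!ioapic_under_ir[i])
                        return 0;       /* one uncovered -> refuse */
        return 1;
}

int main(void)
{
        if (all_ioapics_under_ir() != 1)
                printf("not enabling interrupt remapping\n");
        return 0;
}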
...
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-        spin_lock_init(&iovad->iova_alloc_lock);
         spin_lock_init(&iovad->iova_rbtree_lock);
         iovad->rbroot = RB_ROOT;
         iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
         unsigned long limit_pfn,
         bool size_aligned)
 {
-        unsigned long flags;
         struct iova *new_iova;
         int ret;
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
         if (size_aligned)
                 size = __roundup_pow_of_two(size);
 
-        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
         ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
                         new_iova, size_aligned);
-        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 
         if (ret) {
                 free_iova_mem(new_iova);
                 return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
         struct iova *iova;
         unsigned int overlap = 0;
 
-        spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-        spin_lock(&iovad->iova_rbtree_lock);
+        spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
         for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
                 if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
                         iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
         iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
 
-        spin_unlock(&iovad->iova_rbtree_lock);
-        spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+        spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
         return iova;
 }
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
         unsigned long flags;
         struct rb_node *node;
 
-        spin_lock_irqsave(&from->iova_alloc_lock, flags);
-        spin_lock(&from->iova_rbtree_lock);
+        spin_lock_irqsave(&from->iova_rbtree_lock, flags);
         for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
                 struct iova *iova = container_of(node, struct iova, node);
                 struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
                         printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
                                 iova->pfn_lo, iova->pfn_lo);
         }
-        spin_unlock(&from->iova_rbtree_lock);
-        spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+        spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
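
The iova.c hunks above all implement one change: iova_alloc_lock was only ever taken immediately around iova_rbtree_lock and protected no state of its own, so the two nested critical sections collapse into a single irq-safe lock (and alloc_iova() loses its now-unused flags variable). A standalone pthread sketch of the before/after shape; the struct and helper are illustrative, and the program builds with -pthread.

#include <pthread.h>
#include <stdio.h>

struct demo_domain {
        pthread_mutex_t rbtree_lock;    /* the one lock that remains */
        long nodes;                     /* stand-in for the rbtree */
};

/* Before: two nested locks guarding the same section --
 *     lock(alloc_lock); lock(rbtree_lock);
 *     ...walk and update tree...
 *     unlock(rbtree_lock); unlock(alloc_lock);
 * After: one lock covers the whole allocate-and-insert path. */
static void demo_reserve(struct demo_domain *d)
{
        pthread_mutex_lock(&d->rbtree_lock);
        d->nodes++;                     /* tree walk + insert happen here */
        pthread_mutex_unlock(&d->rbtree_lock);
}

int main(void)
{
        struct demo_domain d = { PTHREAD_MUTEX_INITIALIZER, 0 };

        demo_reserve(&d);
        printf("nodes: %ld\n", d.nodes);
        return 0;
}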
@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
 
+extern int dmar_ir_support(void);
+
 #endif
@@ -28,7 +28,6 @@ struct iova {
 /* holds all the iova translations for a domain */
 struct iova_domain {
-        spinlock_t      iova_alloc_lock;/* Lock to protect iova allocation */
         spinlock_t      iova_rbtree_lock; /* Lock to protect update of rbtree */
         struct rb_root  rbroot;         /* iova domain rbtree root */
         struct rb_node  *cached32_node; /* Save last alloced node */
...