Commit 8c256155 authored by Linus Torvalds

Merge tag 'iommu-fixes-v4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU fixes from Joerg Roedel:
 "Three patches queued up:

   - Fix for ARM-SMMU to add a missing iommu-ops callback which is
     required by common iommu code

   - Fix for the rockchip iommu where the wrong MMUs got the commands

   - A regression fix for the Intel VT-d driver.  The regression only
     showed up on X58 chipsets with more than one iommu.  These chipsets
     seem to require that QI is enabled on all IOMMUs before it can be
     used"

* tag 'iommu-fixes-v4.7-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Enable QI on all IOMMUs before setting root entry
  iommu/rockchip: Fix zap cache during device attach
  iommu/arm-smmu: Wire up map_sg for arm-smmu-v3
parents 7af9a170 a4c34ff1
@@ -1941,6 +1941,7 @@ static struct iommu_ops arm_smmu_ops = {
 	.attach_dev		= arm_smmu_attach_dev,
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
+	.map_sg			= default_iommu_map_sg,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
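For context, default_iommu_map_sg is the generic helper in the IOMMU core that implements the map_sg operation on top of a driver's single-range ->map callback, which is why wiring it up is a one-line change. Below is a hypothetical, simplified sketch of that pattern in plain C; the struct and callback names are invented for illustration and do not match the kernel's actual types:

#include <stddef.h>
#include <stdint.h>

/* Invented stand-ins for a scatterlist entry and an IOMMU domain. */
struct sg_entry {
	uint64_t phys;    /* physical address of this chunk */
	size_t   length;  /* chunk length in bytes */
};

struct demo_domain {
	int    (*map)(struct demo_domain *dom, uint64_t iova,
		      uint64_t phys, size_t size, int prot);
	size_t (*unmap)(struct demo_domain *dom, uint64_t iova, size_t size);
};

/*
 * Map each scatter-gather entry contiguously starting at iova using the
 * driver's single-range map callback; undo everything if one entry fails.
 */
size_t demo_map_sg(struct demo_domain *dom, uint64_t iova,
		   const struct sg_entry *sg, unsigned int nents, int prot)
{
	size_t mapped = 0;

	for (unsigned int i = 0; i < nents; i++) {
		if (dom->map(dom, iova + mapped, sg[i].phys,
			     sg[i].length, prot))
			goto out_err;
		mapped += sg[i].length;
	}
	return mapped;

out_err:
	dom->unmap(dom, iova, mapped);	/* roll back the partial mapping */
	return 0;
}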
@@ -3222,11 +3222,6 @@ static int __init init_dmars(void)
 			}
 		}
 
-		iommu_flush_write_buffer(iommu);
-		iommu_set_root_entry(iommu);
-		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
-		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-
 		if (!ecap_pass_through(iommu->ecap))
 			hw_pass_through = 0;
 #ifdef CONFIG_INTEL_IOMMU_SVM
@@ -3235,6 +3230,18 @@ static int __init init_dmars(void)
 #endif
 	}
 
+	/*
+	 * Now that qi is enabled on all iommus, set the root entry and flush
+	 * caches. This is required on some Intel X58 chipsets, otherwise the
+	 * flush_context function will loop forever and the boot hangs.
+	 */
+	for_each_active_iommu(iommu, drhd) {
+		iommu_flush_write_buffer(iommu);
+		iommu_set_root_entry(iommu);
+		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
+		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
+	}
+
 	if (iommu_pass_through)
 		iommu_identity_mapping |= IDENTMAP_ALL;
 
@@ -815,7 +815,7 @@ static int rk_iommu_attach_device(struct iommu_domain *domain,
 	dte_addr = virt_to_phys(rk_domain->dt);
 	for (i = 0; i < iommu->num_mmu; i++) {
 		rk_iommu_write(iommu->bases[i], RK_MMU_DTE_ADDR, dte_addr);
-		rk_iommu_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
+		rk_iommu_base_command(iommu->bases[i], RK_MMU_CMD_ZAP_CACHE);
 		rk_iommu_write(iommu->bases[i], RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
 	}
 