Commit c7c4e130 authored by Linus Torvalds

Merge tag 'iommu-fixes-v6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:

 - Intel VT-d Fixes:
     - Allocate local memory for PRQ page
     - Fix WARN_ON in iommu probe path
     - Fix wrong use of pasid config

 - AMD IOMMU Fixes:
     - Lock inversion fix
     - Log message severity fix
     - Disable SNP when v2 page-tables are used

 - Mediatek driver:
     - Fix module autoloading

* tag 'iommu-fixes-v6.9-rc3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/amd: Change log message severity
  iommu/vt-d: Fix WARN_ON in iommu probe path
  iommu/vt-d: Allocate local memory for page request queue
  iommu/vt-d: Fix wrong use of pasid config
  iommu: mtk: fix module autoloading
  iommu/amd: Do not enable SNP when V2 page table is enabled
  iommu/amd: Fix possible irq lock inversion dependency issue
parents b3812ff0 b8246a2a
drivers/iommu/amd/init.c
@@ -3232,28 +3232,29 @@ static void iommu_snp_enable(void)
                 return;
 
         /*
          * The SNP support requires that IOMMU must be enabled, and is
-         * not configured in the passthrough mode.
+         * configured with V1 page table (DTE[Mode] = 0 is not supported).
          */
         if (no_iommu || iommu_default_passthrough()) {
-                pr_err("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
-                cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
-                return;
+                pr_warn("SNP: IOMMU disabled or configured in passthrough mode, SNP cannot be supported.\n");
+                goto disable_snp;
+        }
+
+        if (amd_iommu_pgtable != AMD_IOMMU_V1) {
+                pr_warn("SNP: IOMMU is configured with V2 page table mode, SNP cannot be supported.\n");
+                goto disable_snp;
         }
 
         amd_iommu_snp_en = check_feature(FEATURE_SNP);
         if (!amd_iommu_snp_en) {
-                pr_err("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
-                cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
-                return;
+                pr_warn("SNP: IOMMU SNP feature not enabled, SNP cannot be supported.\n");
+                goto disable_snp;
         }
 
         pr_info("IOMMU SNP support enabled.\n");
+        return;
 
-        /* Enforce IOMMU v1 pagetable when SNP is enabled. */
-        if (amd_iommu_pgtable != AMD_IOMMU_V1) {
-                pr_warn("Forcing use of AMD IOMMU v1 page table due to SNP.\n");
-                amd_iommu_pgtable = AMD_IOMMU_V1;
-        }
+disable_snp:
+        cc_platform_clear(CC_ATTR_HOST_SEV_SNP);
 #endif
 }
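Note on this hunk: the old code silently forced the v1 page table when SNP was active; the fix inverts the policy and instead disables SNP when the administrator selected v2 (amd_iommu=pgtbl_v2), funneling every failed precondition through the single disable_snp label. Clearing the attribute matters because later host SNP bring-up keys off it. A minimal sketch of that consumer side, assuming a hypothetical snp_rmp_setup() purely for illustration:

#include <linux/init.h>
#include <linux/cc_platform.h>

/* Hypothetical consumer, not in the patch: host SNP setup checks the
 * platform attribute that iommu_snp_enable() clears on failure, so
 * the goto disable_snp path above is what actually vetoes SNP here. */
static int __init snp_rmp_setup(void)
{
        if (!cc_platform_has(CC_ATTR_HOST_SEV_SNP))
                return 0;       /* attribute was cleared: skip SNP bring-up */

        /* ... allocate and program the RMP table (elided) ... */
        return 0;
}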
drivers/iommu/amd/iommu.c
@@ -1692,26 +1692,29 @@ int amd_iommu_complete_ppr(struct pci_dev *pdev, u32 pasid,
 
 static u16 domain_id_alloc(void)
 {
+        unsigned long flags;
         int id;
 
-        spin_lock(&pd_bitmap_lock);
+        spin_lock_irqsave(&pd_bitmap_lock, flags);
         id = find_first_zero_bit(amd_iommu_pd_alloc_bitmap, MAX_DOMAIN_ID);
         BUG_ON(id == 0);
         if (id > 0 && id < MAX_DOMAIN_ID)
                 __set_bit(id, amd_iommu_pd_alloc_bitmap);
         else
                 id = 0;
-        spin_unlock(&pd_bitmap_lock);
+        spin_unlock_irqrestore(&pd_bitmap_lock, flags);
 
         return id;
 }
 
 static void domain_id_free(int id)
 {
-        spin_lock(&pd_bitmap_lock);
+        unsigned long flags;
+
+        spin_lock_irqsave(&pd_bitmap_lock, flags);
         if (id > 0 && id < MAX_DOMAIN_ID)
                 __clear_bit(id, amd_iommu_pd_alloc_bitmap);
-        spin_unlock(&pd_bitmap_lock);
+        spin_unlock_irqrestore(&pd_bitmap_lock, flags);
 }
 
 static void free_gcr3_tbl_level1(u64 *tbl)
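This is the "possible irq lock inversion" fix from the summary: pd_bitmap_lock was taken with plain spin_lock() from some paths while other paths can hold it with interrupts disabled, which lockdep flags as a potential inversion. Converting every acquisition to the irqsave variant makes the lock uniformly IRQ-safe. A self-contained sketch of the pattern, where example_lock and the helper are illustrative stand-ins rather than the driver's code:

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/bitmap.h>

static DEFINE_SPINLOCK(example_lock);  /* stand-in for pd_bitmap_lock */

static int alloc_id_irqsafe(unsigned long *bitmap, unsigned long max)
{
        unsigned long flags;
        int id;

        /* irqsave disables local interrupts while the lock is held and
         * restores the previous state afterwards, so the lock may be
         * taken from any context without inversion reports. */
        spin_lock_irqsave(&example_lock, flags);
        id = find_first_zero_bit(bitmap, max);
        if (id < max)
                __set_bit(id, bitmap);
        else
                id = -1;
        spin_unlock_irqrestore(&example_lock, flags);

        return id;
}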
drivers/iommu/intel/iommu.c
@@ -4299,9 +4299,11 @@ static struct iommu_device *intel_iommu_probe_device(struct device *dev)
         }
 
         dev_iommu_priv_set(dev, info);
-        ret = device_rbtree_insert(iommu, info);
-        if (ret)
-                goto free;
+        if (pdev && pci_ats_supported(pdev)) {
+                ret = device_rbtree_insert(iommu, info);
+                if (ret)
+                        goto free;
+        }
 
         if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev)) {
                 ret = intel_pasid_alloc_table(dev);
@@ -4336,7 +4338,8 @@ static void intel_iommu_release_device(struct device *dev)
         struct intel_iommu *iommu = info->iommu;
 
         mutex_lock(&iommu->iopf_lock);
-        device_rbtree_remove(info);
+        if (dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev)))
+                device_rbtree_remove(info);
         mutex_unlock(&iommu->iopf_lock);
 
         if (sm_supported(iommu) && !dev_is_real_dma_subdevice(dev) &&
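This rbtree maps requester IDs back to devices for the page-fault path, and only ATS-capable devices can generate such faults, so inserting every device triggered the WARN_ON named in the summary. Note the symmetry requirement: release must remove the entry under exactly the condition probe inserted it. One hedged way to keep the two call sites from drifting is a shared predicate; dev_needs_rbtree() below is hypothetical and not part of the patch:

#include <linux/pci.h>
#include <linux/pci-ats.h>

/* Hypothetical helper: a single predicate used by both probe and
 * release guarantees the insert/remove conditions never diverge. */
static bool dev_needs_rbtree(struct device *dev)
{
        return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
}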
drivers/iommu/intel/perfmon.c
@@ -438,7 +438,7 @@ static int iommu_pmu_assign_event(struct iommu_pmu *iommu_pmu,
         iommu_pmu_set_filter(domain, event->attr.config1,
                              IOMMU_PMU_FILTER_DOMAIN, idx,
                              event->attr.config1);
-        iommu_pmu_set_filter(pasid, event->attr.config1,
+        iommu_pmu_set_filter(pasid, event->attr.config2,
                              IOMMU_PMU_FILTER_PASID, idx,
                              event->attr.config1);
         iommu_pmu_set_filter(ats, event->attr.config2,
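The "wrong use of pasid config" fix is this one word: the PASID filter value was read from config1 while userspace supplies it in config2, so PASID filtering programmed garbage into the hardware. PMU format attributes are the contract here; the declaration below is illustrative only, with a made-up bit range rather than the driver's real layout:

#include <linux/perf_event.h>

/* Illustrative only: the string names which perf_event_attr field
 * (and bits) userspace must use for the value, so the kernel side
 * has to read back that same field when programming the filter. */
PMU_FORMAT_ATTR(filter_example, "config2:0-15");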
drivers/iommu/intel/svm.c
@@ -66,7 +66,7 @@ int intel_svm_enable_prq(struct intel_iommu *iommu)
         struct page *pages;
         int irq, ret;
 
-        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
+        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO, PRQ_ORDER);
         if (!pages) {
                 pr_warn("IOMMU: %s: Failed to allocate page request queue\n",
                         iommu->name);
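The page request queue is written by the IOMMU hardware and drained by an interrupt handler tied to that IOMMU, so allocating it on the IOMMU's own NUMA node avoids remote-memory traffic on every page request. A minimal sketch of the node-local allocation idiom; the helper name is illustrative:

#include <linux/gfp.h>
#include <linux/mm.h>

/* alloc_pages_node() honours the caller-supplied node, while plain
 * alloc_pages() allocates on the node of whichever CPU happens to
 * run the init path, which may be remote to the device's memory. */
static void *alloc_queue_on_node(int node, unsigned int order)
{
        struct page *pages = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);

        return pages ? page_address(pages) : NULL;
}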
drivers/iommu/mtk_iommu.c
@@ -1790,6 +1790,7 @@ static const struct of_device_id mtk_iommu_of_ids[] = {
         { .compatible = "mediatek,mt8365-m4u", .data = &mt8365_data},
         {}
 };
+MODULE_DEVICE_TABLE(of, mtk_iommu_of_ids);
 
 static struct platform_driver mtk_iommu_driver = {
         .probe  = mtk_iommu_probe,
drivers/iommu/mtk_iommu_v1.c
@@ -600,6 +600,7 @@ static const struct of_device_id mtk_iommu_v1_of_ids[] = {
         { .compatible = "mediatek,mt2701-m4u", },
         {}
 };
+MODULE_DEVICE_TABLE(of, mtk_iommu_v1_of_ids);
 
 static const struct component_master_ops mtk_iommu_v1_com_ops = {
         .bind = mtk_iommu_v1_bind,
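Both MediaTek hunks are the same one-line autoloading fix: without MODULE_DEVICE_TABLE(of, ...) the compatible strings never reach the module's alias table, so udev cannot modprobe the driver when the matching device-tree node shows up and the IOMMU only comes up if loaded by hand. A minimal sketch of the plumbing, with a hypothetical vendor string:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id example_of_ids[] = {
        { .compatible = "vendor,example-iommu" },       /* hypothetical */
        { /* sentinel */ }
};
/* Emits "alias: of:N*T*Cvendor,example-iommu" into the module's
 * .modinfo section; udev matches it against the uevent MODALIAS
 * string and modprobes the module automatically. */
MODULE_DEVICE_TABLE(of, example_of_ids);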