Commit bd37cdf8 authored by Linus Torvalds

Merge tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu fixes from Joerg Roedel:
 "A couple of Intel VT-d fixes:

   - Make Intel SVM code 64-bit only. The code uses pgd_t* and the IOMMU
     only supports long-mode page-table formats, so it's broken on 32-bit
     anyway.

   - Make sure GFX quirks for Intel VT-d are not applied to untrusted
     devices. Such devices might otherwise gain full memory access.

   - Identity mapping setup fix.

   - Fix ACS enabling when Intel IOMMU is off and untrusted devices are
     detected.

   - Two smaller fixes for coherency and IO page-table setup"

* tag 'iommu-fixes-v5.8-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu:
  iommu/vt-d: Fix misuse of iommu_domain_identity_map()
  iommu/vt-d: Update scalable mode paging structure coherency
  iommu/vt-d: Enable PCI ACS for platform opt in hint
  iommu/vt-d: Don't apply gfx quirks to untrusted devices
  iommu/vt-d: Set U/S bit in first level page table by default
  iommu/vt-d: Make Intel SVM code 64-bit only
parents 6a6c9b22 48f0bcfb
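
In short, the gfx-quirk hardening in the diff below follows a single pattern: every quirk first bails out if the device sits on an untrusted, external-facing PCI link. A condensed, self-contained sketch of that pattern follows; the guard mirrors the risky_device() helper added in the diff, while the quirk function name and the device ID are hypothetical, for illustration only:

#include <linux/pci.h>

/* Mirrors the risky_device() helper from the diff: a quirk must not run
 * for a device on an untrusted (external-facing) PCI link, because quirks
 * such as disabling the IOMMU for graphics would let a malicious device
 * gain full memory access. */
static bool example_risky_device(struct pci_dev *pdev)
{
	if (pdev->untrusted) {
		pci_info(pdev, "Skipping IOMMU quirk on untrusted PCI link\n");
		return true;
	}

	return false;
}

/* Hypothetical quirk: each gfx quirk in the diff gains this early return. */
static void example_quirk_iommu(struct pci_dev *dev)
{
	if (example_risky_device(dev))
		return;

	/* ... apply the quirk only for trusted, platform-internal devices ... */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x1234, example_quirk_iommu);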
@@ -211,7 +211,7 @@ config INTEL_IOMMU_DEBUGFS
 
 config INTEL_IOMMU_SVM
 	bool "Support for Shared Virtual Memory with Intel IOMMU"
-	depends on INTEL_IOMMU && X86
+	depends on INTEL_IOMMU && X86_64
 	select PCI_PASID
 	select PCI_PRI
 	select MMU_NOTIFIER
...
@@ -898,7 +898,8 @@ int __init detect_intel_iommu(void)
 	if (!ret)
 		ret = dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
 					   &validate_drhd_cb);
-	if (!ret && !no_iommu && !iommu_detected && !dmar_disabled) {
+	if (!ret && !no_iommu && !iommu_detected &&
+	    (!dmar_disabled || dmar_platform_optin())) {
 		iommu_detected = 1;
 		/* Make sure ACS will be enabled */
 		pci_request_acs();
...
@@ -612,6 +612,12 @@ struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	return g_iommus[iommu_id];
 }
 
+static inline bool iommu_paging_structure_coherency(struct intel_iommu *iommu)
+{
+	return sm_supported(iommu) ?
+			ecap_smpwc(iommu->ecap) : ecap_coherent(iommu->ecap);
+}
+
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	struct dmar_drhd_unit *drhd;
@@ -623,7 +629,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 
 	for_each_domain_iommu(i, domain) {
 		found = true;
-		if (!ecap_coherent(g_iommus[i]->ecap)) {
+		if (!iommu_paging_structure_coherency(g_iommus[i])) {
 			domain->iommu_coherency = 0;
 			break;
 		}
@@ -634,7 +640,7 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 	/* No hardware attached; use lowest common denominator */
 	rcu_read_lock();
 	for_each_active_iommu(iommu, drhd) {
-		if (!ecap_coherent(iommu->ecap)) {
+		if (!iommu_paging_structure_coherency(iommu)) {
 			domain->iommu_coherency = 0;
 			break;
 		}
@@ -921,7 +927,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
 			pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 			if (domain_use_first_level(domain))
-				pteval |= DMA_FL_PTE_XD;
+				pteval |= DMA_FL_PTE_XD | DMA_FL_PTE_US;
 			if (cmpxchg64(&pte->val, 0ULL, pteval))
 				/* Someone else set it while we were thinking; use theirs. */
 				free_pgtable_page(tmp_page);
@@ -1951,7 +1957,6 @@ static inline void
 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
 {
 	context->hi |= pasid & ((1 << 20) - 1);
-	context->hi |= (1 << 20);
 }
 
 /*
@@ -2095,7 +2100,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 
 	context_set_fault_enable(context);
 	context_set_present(context);
-	domain_flush_cache(domain, context, sizeof(*context));
+	if (!ecap_coherent(iommu->ecap))
+		clflush_cache_range(context, sizeof(*context));
 
 	/*
 	 * It's a non-present to present mapping. If hardware doesn't cache
@@ -2243,7 +2249,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
 
 	attr = prot & (DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP);
 	if (domain_use_first_level(domain))
-		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD;
+		attr |= DMA_FL_PTE_PRESENT | DMA_FL_PTE_XD | DMA_FL_PTE_US;
 
 	if (!sg) {
 		sg_res = nr_pages;
@@ -2695,7 +2701,9 @@ static int __init si_domain_init(int hw)
 				    end >> agaw_to_width(si_domain->agaw)))
 				continue;
 
-			ret = iommu_domain_identity_map(si_domain, start, end);
+			ret = iommu_domain_identity_map(si_domain,
+					mm_to_dma_pfn(start >> PAGE_SHIFT),
+					mm_to_dma_pfn(end >> PAGE_SHIFT));
 			if (ret)
 				return ret;
 		}
@@ -6021,6 +6029,23 @@ intel_iommu_domain_set_attr(struct iommu_domain *domain,
 	return ret;
 }
 
+/*
+ * Check that the device does not live on an external facing PCI port that is
+ * marked as untrusted. Such devices should not be able to apply quirks and
+ * thus not be able to bypass the IOMMU restrictions.
+ */
+static bool risky_device(struct pci_dev *pdev)
+{
+	if (pdev->untrusted) {
+		pci_info(pdev,
+			 "Skipping IOMMU quirk for dev [%04X:%04X] on untrusted PCI link\n",
+			 pdev->vendor, pdev->device);
+		pci_info(pdev, "Please check with your BIOS/Platform vendor about this\n");
+		return true;
+	}
+	return false;
+}
+
 const struct iommu_ops intel_iommu_ops = {
 	.capable		= intel_iommu_capable,
 	.domain_alloc		= intel_iommu_domain_alloc,
@@ -6060,6 +6085,9 @@ const struct iommu_ops intel_iommu_ops = {
 
 static void quirk_iommu_igfx(struct pci_dev *dev)
 {
+	if (risky_device(dev))
+		return;
+
 	pci_info(dev, "Disabling IOMMU for graphics on this chipset\n");
 	dmar_map_gfx = 0;
 }
@@ -6101,6 +6129,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x163D, quirk_iommu_igfx);
 
 static void quirk_iommu_rwbf(struct pci_dev *dev)
 {
+	if (risky_device(dev))
+		return;
+
 	/*
 	 * Mobile 4 Series Chipset neglects to set RWBF capability,
 	 * but needs it. Same seems to hold for the desktop versions.
@@ -6131,6 +6162,9 @@ static void quirk_calpella_no_shadow_gtt(struct pci_dev *dev)
 {
 	unsigned short ggc;
 
+	if (risky_device(dev))
+		return;
+
 	if (pci_read_config_word(dev, GGC, &ggc))
 		return;
@@ -6164,6 +6198,12 @@ static void __init check_tylersburg_isoch(void)
 	pdev = pci_get_device(PCI_VENDOR_ID_INTEL, 0x3a3e, NULL);
 	if (!pdev)
 		return;
+
+	if (risky_device(pdev)) {
+		pci_dev_put(pdev);
+		return;
+	}
+
 	pci_dev_put(pdev);
 
 	/* System Management Registers. Might be hidden, in which case
@@ -6173,6 +6213,11 @@ static void __init check_tylersburg_isoch(void)
 	if (!pdev)
 		return;
 
+	if (risky_device(pdev)) {
+		pci_dev_put(pdev);
+		return;
+	}
+
 	if (pci_read_config_dword(pdev, 0x188, &vtisochctrl)) {
 		pci_dev_put(pdev);
 		return;
...
@@ -41,6 +41,7 @@
 #define DMA_PTE_SNP		BIT_ULL(11)
 
 #define DMA_FL_PTE_PRESENT	BIT_ULL(0)
+#define DMA_FL_PTE_US		BIT_ULL(2)
 #define DMA_FL_PTE_XD		BIT_ULL(63)
 
 #define ADDR_WIDTH_5LEVEL	(57)
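
A final note on the coherency change: in scalable mode, coherency of the IOMMU's paging-structure walks is reported by ECAP.SMPWC rather than the legacy ECAP.C bit, which is why the new iommu_paging_structure_coherency() helper above picks the bit based on sm_supported(). A rough sketch of the consumer side of that decision; write_entry_coherently() is a hypothetical wrapper for illustration, while iommu_paging_structure_coherency() and clflush_cache_range() are the helpers actually used in the diff:

/* If the IOMMU cannot snoop CPU caches while walking its paging
 * structures (ECAP.SMPWC clear in scalable mode, ECAP.C clear in
 * legacy mode), any context or page-table entry written by software
 * must be flushed out of the CPU caches explicitly. */
static void write_entry_coherently(struct intel_iommu *iommu,
				   void *entry, size_t size)
{
	if (!iommu_paging_structure_coherency(iommu))
		clflush_cache_range(entry, size);
}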