Commit b09a75fc authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6: (23 commits)
  intel-iommu: Disable PMRs after we enable translation, not before
  intel-iommu: Kill DMAR_BROKEN_GFX_WA option.
  intel-iommu: Fix integer wrap on 32 bit kernels
  intel-iommu: Fix integer overflow in dma_pte_{clear_range,free_pagetable}()
  intel-iommu: Limit DOMAIN_MAX_PFN to fit in an 'unsigned long'
  intel-iommu: Fix kernel hang if interrupt remapping disabled in BIOS
  intel-iommu: Disallow interrupt remapping if not all ioapics covered
  intel-iommu: include linux/dmi.h to use dmi_ routines
  pci/dmar: correct off-by-one error in dmar_fault()
  intel-iommu: Cope with yet another BIOS screwup causing crashes
  intel-iommu: iommu init error path bug fixes
  intel-iommu: Mark functions with __init
  USB: Work around BIOS bugs by quiescing USB controllers earlier
  ia64: IOMMU passthrough mode shouldn't trigger swiotlb init
  intel-iommu: make domain_add_dev_info() call domain_context_mapping()
  intel-iommu: Unify hardware and software passthrough support
  intel-iommu: Cope with broken HP DC7900 BIOS
  iommu=pt is a valid early param
  intel-iommu: double kfree()
  intel-iommu: Kill pointless intel_unmap_single() function
  ...

Fixed up trivial include lines conflict in drivers/pci/intel-iommu.c
parents cf63ff5f b94996c9
@@ -56,11 +56,7 @@ Graphics Problems?
 ------------------
 If you encounter issues with graphics devices, you can try adding
 option intel_iommu=igfx_off to turn off the integrated graphics engine.
+If this fixes anything, please ensure you file a bug reporting the problem.
-If it happens to be a PCI device included in the INCLUDE_ALL Engine,
-then try enabling CONFIG_DMAR_GFX_WA to setup a 1-1 map. We hear
-graphics drivers may be in process of using DMA api's in the near
-future and at that time this option can be yanked out.
 Some exceptions to IOVA
 -----------------------
...
@@ -46,7 +46,7 @@ void __init swiotlb_dma_init(void)
 void __init pci_swiotlb_init(void)
 {
-	if (!iommu_detected || iommu_pass_through) {
+	if (!iommu_detected) {
 #ifdef CONFIG_IA64_GENERIC
 		swiotlb = 1;
 		printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
...
@@ -1901,7 +1901,7 @@ config DMAR_DEFAULT_ON
 config DMAR_BROKEN_GFX_WA
 	def_bool n
 	prompt "Workaround broken graphics drivers (going away soon)"
-	depends on DMAR
+	depends on DMAR && BROKEN
 	---help---
 	  Current Graphics drivers tend to use physical address
 	  for DMA and avoid using DMA APIs. Setting this config
...
@@ -46,8 +46,7 @@ void __init pci_swiotlb_init(void)
 {
 	/* don't initialize swiotlb if iommu=off (no_iommu=1) */
 #ifdef CONFIG_X86_64
-	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN) ||
-	    iommu_pass_through)
+	if ((!iommu_detected && !no_iommu && max_pfn > MAX_DMA32_PFN))
 		swiotlb = 1;
 #endif
 	if (swiotlb_force)
...
@@ -577,9 +577,6 @@ int __init dmar_table_init(void)
 	printk(KERN_INFO PREFIX "No ATSR found\n");
 #endif
-#ifdef CONFIG_INTR_REMAP
-	parse_ioapics_under_ir();
-#endif
 	return 0;
 }
@@ -639,20 +636,31 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 	iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+	if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+		/* Promote an attitude of violence to a BIOS engineer today */
+		WARN(1, "Your BIOS is broken; DMAR reported at address %llx returns all ones!\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     drhd->reg_base_addr,
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		goto err_unmap;
+	}
 #ifdef CONFIG_DMAR
 	agaw = iommu_calculate_agaw(iommu);
 	if (agaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 	msagaw = iommu_calculate_max_sagaw(iommu);
 	if (msagaw < 0) {
 		printk(KERN_ERR
 			"Cannot get a valid max agaw for iommu (seq_id = %d)\n",
 			iommu->seq_id);
-		goto error;
+		goto err_unmap;
 	}
 #endif
 	iommu->agaw = agaw;
@@ -672,7 +680,7 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	}
 	ver = readl(iommu->reg + DMAR_VER_REG);
-	pr_debug("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
+	pr_info("IOMMU %llx: ver %d:%d cap %llx ecap %llx\n",
 		(unsigned long long)drhd->reg_base_addr,
 		DMAR_VER_MAJOR(ver), DMAR_VER_MINOR(ver),
 		(unsigned long long)iommu->cap,
@@ -682,7 +690,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
 	drhd->iommu = iommu;
 	return 0;
-error:
+err_unmap:
+	iounmap(iommu->reg);
+error:
 	kfree(iommu);
 	return -1;
 }
@@ -1219,7 +1230,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 				source_id, guest_addr);
 		fault_index++;
-		if (fault_index > cap_num_fault_regs(iommu->cap))
+		if (fault_index >= cap_num_fault_regs(iommu->cap))
 			fault_index = 0;
 		spin_lock_irqsave(&iommu->register_lock, flag);
 	}
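The hunk above is the "correct off-by-one error in dmar_fault()" change from the shortlog: fault_index cycles through the fault recording registers, whose valid indices run from 0 to cap_num_fault_regs()-1, so the wrap test has to fire at the count itself rather than one past it. A minimal user-space sketch of the corrected wrap logic (illustrative only, not kernel code; NUM_FAULT_REGS is a made-up stand-in for cap_num_fault_regs(iommu->cap)):

#include <assert.h>
#include <stdio.h>

#define NUM_FAULT_REGS 8	/* stand-in for cap_num_fault_regs() */

static unsigned int advance(unsigned int fault_index)
{
	fault_index++;
	if (fault_index >= NUM_FAULT_REGS)	/* the old code used '>' here */
		fault_index = 0;
	return fault_index;
}

int main(void)
{
	unsigned int idx = 0;

	for (int i = 0; i < 3 * NUM_FAULT_REGS; i++) {
		idx = advance(idx);
		assert(idx < NUM_FAULT_REGS);	/* never points past the last register */
	}
	printf("index always stays in [0, %d)\n", NUM_FAULT_REGS);
	return 0;
}

With the old '>' test the index could reach NUM_FAULT_REGS and read one register past the end before wrapping.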
@@ -1312,3 +1323,13 @@ int dmar_reenable_qi(struct intel_iommu *iommu)
 	return 0;
 }
+/*
+ * Check interrupt remapping support in DMAR table description.
+ */
+int dmar_ir_support(void)
+{
+	struct acpi_table_dmar *dmar;
+	dmar = (struct acpi_table_dmar *)dmar_tbl;
+	return dmar->flags & 0x1;
+}
@@ -38,6 +38,7 @@
 #include <linux/intel-iommu.h>
 #include <linux/sysdev.h>
 #include <linux/tboot.h>
+#include <linux/dmi.h>
 #include <asm/cacheflush.h>
 #include <asm/iommu.h>
 #include "pci.h"
@@ -56,8 +57,14 @@
 #define MAX_AGAW_WIDTH 64
-#define DOMAIN_MAX_ADDR(gaw) ((((u64)1) << gaw) - 1)
-#define DOMAIN_MAX_PFN(gaw)  ((((u64)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_PFN(gaw)  ((((uint64_t)1) << (gaw-VTD_PAGE_SHIFT)) - 1)
+#define __DOMAIN_MAX_ADDR(gaw) ((((uint64_t)1) << gaw) - 1)
+
+/* We limit DOMAIN_MAX_PFN to fit in an unsigned long, and DOMAIN_MAX_ADDR
+   to match. That way, we can use 'unsigned long' for PFNs with impunity. */
+#define DOMAIN_MAX_PFN(gaw)	((unsigned long) min_t(uint64_t, \
+				__DOMAIN_MAX_PFN(gaw), (unsigned long)-1))
+#define DOMAIN_MAX_ADDR(gaw)	(((uint64_t)__DOMAIN_MAX_PFN(gaw)) << VTD_PAGE_SHIFT)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
 #define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
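This is the "Limit DOMAIN_MAX_PFN to fit in an 'unsigned long'" change from the shortlog: with a large guest address width, __DOMAIN_MAX_PFN() no longer fits in a 32-bit unsigned long, so DOMAIN_MAX_PFN() clamps it to (unsigned long)-1 and DOMAIN_MAX_ADDR() is derived from the clamped PFN. A small user-space sketch of the same clamping (illustrative only; uint32_t stands in for a 32-bit kernel's unsigned long, and VTD_PAGE_SHIFT is taken as 12 as in the driver):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define VTD_PAGE_SHIFT 12	/* 4KiB VT-d pages */

/* Mirror of DOMAIN_MAX_PFN(gaw), with uint32_t playing the role of a
 * 32-bit kernel's unsigned long. */
static uint32_t domain_max_pfn(int gaw)
{
	uint64_t max_pfn = (((uint64_t)1) << (gaw - VTD_PAGE_SHIFT)) - 1;
	uint32_t ulong_max = (uint32_t)-1;

	return max_pfn < ulong_max ? (uint32_t)max_pfn : ulong_max;
}

int main(void)
{
	/* a 39-bit width gives a 27-bit PFN, which fits; a 48-bit width
	 * gives a 36-bit PFN, which would overflow and so gets clamped */
	printf("gaw=39 -> max pfn 0x%" PRIx32 "\n", domain_max_pfn(39));
	printf("gaw=48 -> max pfn 0x%" PRIx32 " (clamped)\n", domain_max_pfn(48));
	return 0;
}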
@@ -252,7 +259,8 @@ static inline int first_pte_in_page(struct dma_pte *pte)
  * 2. It maps to each iommu if successful.
  * 3. Each iommu mapps to this domain if successful.
  */
-struct dmar_domain *si_domain;
+static struct dmar_domain *si_domain;
+static int hw_pass_through = 1;
 /* devices under the same p2p bridge are owned in one domain */
 #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
@@ -728,7 +736,7 @@ static struct dma_pte *pfn_to_dma_pte(struct dmar_domain *domain,
 			return NULL;
 		domain_flush_cache(domain, tmp_page, VTD_PAGE_SIZE);
-		pteval = (virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
+		pteval = ((uint64_t)virt_to_dma_pfn(tmp_page) << VTD_PAGE_SHIFT) | DMA_PTE_READ | DMA_PTE_WRITE;
 		if (cmpxchg64(&pte->val, 0ULL, pteval)) {
 			/* Someone else set it while we were thinking; use theirs. */
 			free_pgtable_page(tmp_page);
@@ -778,9 +786,10 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 	/* we don't need lock here; nobody else touches the iova range */
-	while (start_pfn <= last_pfn) {
+	do {
 		first_pte = pte = dma_pfn_level_pte(domain, start_pfn, 1);
 		if (!pte) {
 			start_pfn = align_to_level(start_pfn + 1, 2);
@@ -794,7 +803,8 @@ static void dma_pte_clear_range(struct dmar_domain *domain,
 		domain_flush_cache(domain, first_pte,
 				   (void *)pte - (void *)first_pte);
-	}
+	} while (start_pfn && start_pfn <= last_pfn);
 }
 /* free page table pages. last level pte should already be cleared */
@@ -810,6 +820,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 	BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width);
 	BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width);
+	BUG_ON(start_pfn > last_pfn);
 	/* We don't need lock here; nobody else touches the iova range */
 	level = 2;
@@ -820,7 +831,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 		if (tmp + level_size(level) - 1 > last_pfn)
 			return;
-		while (tmp + level_size(level) - 1 <= last_pfn) {
+		do {
 			first_pte = pte = dma_pfn_level_pte(domain, tmp, level);
 			if (!pte) {
 				tmp = align_to_level(tmp + 1, level + 1);
@@ -839,7 +850,7 @@ static void dma_pte_free_pagetable(struct dmar_domain *domain,
 			domain_flush_cache(domain, first_pte,
 					   (void *)pte - (void *)first_pte);
-		}
+		} while (tmp && tmp + level_size(level) - 1 <= last_pfn);
 		level++;
 	}
 	/* free pgd */
@@ -1158,6 +1169,8 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 	pr_debug("Number of Domains supportd <%ld>\n", ndomains);
 	nlongs = BITS_TO_LONGS(ndomains);
+	spin_lock_init(&iommu->lock);
 	/* TBD: there might be 64K domains,
 	 * consider other allocation for future chip
 	 */
@@ -1170,12 +1183,9 @@ static int iommu_init_domains(struct intel_iommu *iommu)
 			GFP_KERNEL);
 	if (!iommu->domains) {
 		printk(KERN_ERR "Allocating domain array failed\n");
+		kfree(iommu->domain_ids);
 		return -ENOMEM;
 	}
-	spin_lock_init(&iommu->lock);
 	/*
 	 * if Caching mode is set, then invalid translations are tagged
 	 * with domainid 0. Hence we need to pre-allocate it.
@@ -1195,6 +1205,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 	int i;
 	unsigned long flags;
+	if ((iommu->domains) && (iommu->domain_ids)) {
 	i = find_first_bit(iommu->domain_ids, cap_ndoms(iommu->cap));
 	for (; i < cap_ndoms(iommu->cap); ) {
 		domain = iommu->domains[i];
@@ -1212,6 +1223,7 @@ void free_dmar_iommu(struct intel_iommu *iommu)
 		i = find_next_bit(iommu->domain_ids,
 			cap_ndoms(iommu->cap), i+1);
 	}
+	}
 	if (iommu->gcmd & DMA_GCMD_TE)
 		iommu_disable_translation(iommu);
@@ -1310,7 +1322,6 @@ static void iommu_detach_domain(struct dmar_domain *domain,
 }
 static struct iova_domain reserved_iova_list;
-static struct lock_class_key reserved_alloc_key;
 static struct lock_class_key reserved_rbtree_key;
 static void dmar_init_reserved_ranges(void)
@@ -1321,8 +1332,6 @@ static void dmar_init_reserved_ranges(void)
 	init_iova_domain(&reserved_iova_list, DMA_32BIT_PFN);
-	lockdep_set_class(&reserved_iova_list.iova_alloc_lock,
-			  &reserved_alloc_key);
 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
 			  &reserved_rbtree_key);
@@ -1959,13 +1968,34 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
 	struct dmar_domain *domain;
 	int ret;
+	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
+	if (!domain)
+		return -ENOMEM;
+	/* For _hardware_ passthrough, don't bother. But for software
+	   passthrough, we do it anyway -- it may indicate a memory
+	   range which is reserved in E820, so which didn't get set
+	   up to start with in si_domain */
+	if (domain == si_domain && hw_pass_through) {
+		printk("Ignoring identity map for HW passthrough device %s [0x%Lx - 0x%Lx]\n",
+		       pci_name(pdev), start, end);
+		return 0;
+	}
 	printk(KERN_INFO
 	       "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
 	       pci_name(pdev), start, end);
-	domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-	if (!domain)
-		return -ENOMEM;
+	if (end >> agaw_to_width(domain->agaw)) {
+		WARN(1, "Your BIOS is broken; RMRR exceeds permitted address width (%d bits)\n"
+		     "BIOS vendor: %s; Ver: %s; Product Version: %s\n",
+		     agaw_to_width(domain->agaw),
+		     dmi_get_system_info(DMI_BIOS_VENDOR),
+		     dmi_get_system_info(DMI_BIOS_VERSION),
+		     dmi_get_system_info(DMI_PRODUCT_VERSION));
+		ret = -EIO;
+		goto error;
+	}
 	ret = iommu_domain_identity_map(domain, start, end);
 	if (ret)
@@ -2017,23 +2047,6 @@ static inline void iommu_prepare_isa(void)
 }
 #endif /* !CONFIG_DMAR_FLPY_WA */
-/* Initialize each context entry as pass through.*/
-static int __init init_context_pass_through(void)
-{
-	struct pci_dev *pdev = NULL;
-	struct dmar_domain *domain;
-	int ret;
-	for_each_pci_dev(pdev) {
-		domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
-		ret = domain_context_mapping(domain, pdev,
-					     CONTEXT_TT_PASS_THROUGH);
-		if (ret)
-			return ret;
-	}
-	return 0;
-}
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 static int __init si_domain_work_fn(unsigned long start_pfn,
@@ -2048,7 +2061,7 @@ static int __init si_domain_work_fn(unsigned long start_pfn,
 }
-static int si_domain_init(void)
+static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
@@ -2075,6 +2088,9 @@ static int si_domain_init(void)
 	si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
+	if (hw)
+		return 0;
 	for_each_online_node(nid) {
 		work_with_active_regions(nid, si_domain_work_fn, &ret);
 		if (ret)
@@ -2101,15 +2117,23 @@ static int identity_mapping(struct pci_dev *pdev)
 }
 static int domain_add_dev_info(struct dmar_domain *domain,
-			       struct pci_dev *pdev)
+			       struct pci_dev *pdev,
+			       int translation)
 {
 	struct device_domain_info *info;
 	unsigned long flags;
+	int ret;
 	info = alloc_devinfo_mem();
 	if (!info)
 		return -ENOMEM;
+	ret = domain_context_mapping(domain, pdev, translation);
+	if (ret) {
+		free_devinfo_mem(info);
+		return ret;
+	}
 	info->segment = pci_domain_nr(pdev->bus);
 	info->bus = pdev->bus->number;
 	info->devfn = pdev->devfn;
@@ -2166,27 +2190,25 @@ static int iommu_should_identity_map(struct pci_dev *pdev, int startup)
 	return 1;
 }
-static int iommu_prepare_static_identity_mapping(void)
+static int __init iommu_prepare_static_identity_mapping(int hw)
 {
 	struct pci_dev *pdev = NULL;
 	int ret;
-	ret = si_domain_init();
+	ret = si_domain_init(hw);
 	if (ret)
 		return -EFAULT;
 	for_each_pci_dev(pdev) {
 		if (iommu_should_identity_map(pdev, 1)) {
-			printk(KERN_INFO "IOMMU: identity mapping for device %s\n",
-			       pci_name(pdev));
+			printk(KERN_INFO "IOMMU: %s identity mapping for device %s\n",
+			       hw ? "hardware" : "software", pci_name(pdev));
-			ret = domain_context_mapping(si_domain, pdev,
+			ret = domain_add_dev_info(si_domain, pdev,
+						  hw ? CONTEXT_TT_PASS_THROUGH :
						     CONTEXT_TT_MULTI_LEVEL);
 			if (ret)
 				return ret;
-			ret = domain_add_dev_info(si_domain, pdev);
-			if (ret)
-				return ret;
 		}
 	}
@@ -2200,14 +2222,6 @@ int __init init_dmars(void)
 	struct pci_dev *pdev;
 	struct intel_iommu *iommu;
 	int i, ret;
-	int pass_through = 1;
-	/*
-	 * In case pass through can not be enabled, iommu tries to use identity
-	 * mapping.
-	 */
-	if (iommu_pass_through)
-		iommu_identity_mapping = 1;
 	/*
 	 * for each drhd
@@ -2235,7 +2249,6 @@ int __init init_dmars(void)
 	deferred_flush = kzalloc(g_num_of_iommus *
 		sizeof(struct deferred_flush_tables), GFP_KERNEL);
 	if (!deferred_flush) {
-		kfree(g_iommus);
 		ret = -ENOMEM;
 		goto error;
 	}
@@ -2262,13 +2275,7 @@ int __init init_dmars(void)
 			goto error;
 		}
 		if (!ecap_pass_through(iommu->ecap))
-			pass_through = 0;
-	}
-	if (iommu_pass_through)
-		if (!pass_through) {
-			printk(KERN_INFO
-			       "Pass Through is not supported by hardware.\n");
-			iommu_pass_through = 0;
-		}
+			hw_pass_through = 0;
+	}
 	/*
@@ -2324,30 +2331,24 @@ int __init init_dmars(void)
 		}
 	}
-	/*
-	 * If pass through is set and enabled, context entries of all pci
-	 * devices are intialized by pass through translation type.
-	 */
-	if (iommu_pass_through) {
-		ret = init_context_pass_through();
-		if (ret) {
-			printk(KERN_ERR "IOMMU: Pass through init failed.\n");
-			iommu_pass_through = 0;
-		}
-	}
+	if (iommu_pass_through)
+		iommu_identity_mapping = 1;
+#ifdef CONFIG_DMAR_BROKEN_GFX_WA
+	else
+		iommu_identity_mapping = 2;
+#endif
 	/*
 	 * If pass through is not set or not enabled, setup context entries for
 	 * identity mappings for rmrr, gfx, and isa and may fall back to static
 	 * identity mapping if iommu_identity_mapping is set.
 	 */
-	if (!iommu_pass_through) {
-#ifdef CONFIG_DMAR_BROKEN_GFX_WA
-		if (!iommu_identity_mapping)
-			iommu_identity_mapping = 2;
-#endif
-		if (iommu_identity_mapping)
-			iommu_prepare_static_identity_mapping();
+	if (iommu_identity_mapping) {
+		ret = iommu_prepare_static_identity_mapping(hw_pass_through);
+		if (ret) {
+			printk(KERN_CRIT "Failed to setup IOMMU pass-through\n");
+			goto error;
+		}
+	}
 	/*
 	 * For each rmrr
 	 * for each dev attached to rmrr
@@ -2380,7 +2381,6 @@ int __init init_dmars(void)
 	}
 	iommu_prepare_isa();
-	}
 	/*
 	 * for each drhd
@@ -2404,11 +2404,12 @@ int __init init_dmars(void)
 		iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
-		iommu_disable_protect_mem_regions(iommu);
 		ret = iommu_enable_translation(iommu);
 		if (ret)
 			goto error;
+		iommu_disable_protect_mem_regions(iommu);
 	}
 	return 0;
@@ -2455,8 +2456,7 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	return iova;
 }
-static struct dmar_domain *
-get_valid_domain_for_dev(struct pci_dev *pdev)
+static struct dmar_domain *__get_valid_domain_for_dev(struct pci_dev *pdev)
 {
 	struct dmar_domain *domain;
 	int ret;
@@ -2484,6 +2484,18 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
 	return domain;
 }
+static inline struct dmar_domain *get_valid_domain_for_dev(struct pci_dev *dev)
+{
+	struct device_domain_info *info;
+	/* No lock here, assumes no domain exit in normal case */
+	info = dev->dev.archdata.iommu;
+	if (likely(info))
+		return info->domain;
+	return __get_valid_domain_for_dev(dev);
+}
 static int iommu_dummy(struct pci_dev *pdev)
 {
 	return pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
@@ -2526,10 +2538,10 @@ static int iommu_no_mapping(struct device *dev)
 	 */
 	if (iommu_should_identity_map(pdev, 0)) {
 		int ret;
-		ret = domain_add_dev_info(si_domain, pdev);
-		if (ret)
-			return 0;
-		ret = domain_context_mapping(si_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
+		ret = domain_add_dev_info(si_domain, pdev,
+					  hw_pass_through ?
+					  CONTEXT_TT_PASS_THROUGH :
+					  CONTEXT_TT_MULTI_LEVEL);
 		if (!ret) {
 			printk(KERN_INFO "64bit %s uses identity mapping\n",
 			       pci_name(pdev));
@@ -2638,10 +2650,9 @@ static void flush_unmaps(void)
 			unsigned long mask;
 			struct iova *iova = deferred_flush[i].iova[j];
-			mask = (iova->pfn_hi - iova->pfn_lo + 1) << PAGE_SHIFT;
-			mask = ilog2(mask >> VTD_PAGE_SHIFT);
+			mask = ilog2(mm_to_dma_pfn(iova->pfn_hi - iova->pfn_lo + 1));
 			iommu_flush_dev_iotlb(deferred_flush[i].domain[j],
-					iova->pfn_lo << PAGE_SHIFT, mask);
+					(uint64_t)iova->pfn_lo << PAGE_SHIFT, mask);
 			__free_iova(&deferred_flush[i].domain[j]->iovad, iova);
 		}
 		deferred_flush[i].next = 0;
@@ -2734,12 +2745,6 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	}
 }
-static void intel_unmap_single(struct device *dev, dma_addr_t dev_addr, size_t size,
-			       int dir)
-{
-	intel_unmap_page(dev, dev_addr, size, dir, NULL);
-}
 static void *intel_alloc_coherent(struct device *hwdev, size_t size,
 				  dma_addr_t *dma_handle, gfp_t flags)
 {
@@ -2772,7 +2777,7 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	size = PAGE_ALIGN(size);
 	order = get_order(size);
-	intel_unmap_single(hwdev, dma_handle, size, DMA_BIDIRECTIONAL);
+	intel_unmap_page(hwdev, dma_handle, size, DMA_BIDIRECTIONAL, NULL);
 	free_pages((unsigned long)vaddr, order);
 }
@@ -2808,11 +2813,18 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	/* free page tables */
 	dma_pte_free_pagetable(domain, start_pfn, last_pfn);
+	if (intel_iommu_strict) {
 		iommu_flush_iotlb_psi(iommu, domain->id, start_pfn,
-			      (last_pfn - start_pfn + 1));
+				      last_pfn - start_pfn + 1);
 		/* free iova */
 		__free_iova(&domain->iovad, iova);
+	} else {
+		add_unmap(domain, iova);
+		/*
+		 * queue up the release of the unmap to save the 1/6th of the
+		 * cpu used up by the iotlb flush operation...
+		 */
+	}
 }
 static int intel_nontranslate_map_sg(struct device *hddev,
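The intel_unmap_sg() hunk above mirrors the existing unmap-page path: with intel_iommu_strict the IOTLB is invalidated and the IOVA freed immediately, otherwise add_unmap() queues the release so one batched flush covers many unmaps. A rough user-space sketch of that strict-versus-deferred trade-off (illustrative only; BATCH, pending[] and do_flush() are invented for the example, not driver symbols):

#include <stdbool.h>
#include <stdio.h>

#define BATCH 16

static int pending[BATCH];
static int queue_len;

static void do_flush(void)
{
	/* one expensive invalidation covers everything queued so far */
	printf("flushing %d queued unmaps\n", queue_len);
	queue_len = 0;
}

static void release(int handle, bool strict)
{
	if (strict) {
		/* immediate: nothing stale can remain cached */
		printf("flushing handle %d now\n", handle);
		return;
	}
	/* deferred: amortize the flush cost over many unmaps */
	pending[queue_len++] = handle;
	if (queue_len == BATCH)
		do_flush();
}

int main(void)
{
	for (int i = 0; i < 20; i++)
		release(i, false);
	do_flush();	/* timer-driven in the driver */
	return 0;
}

The deferred path accepts a short window in which stale translations may remain cached in exchange for far cheaper invalidation, which is why strict mode is opt-in.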
@@ -3056,8 +3068,8 @@ static int init_iommu_hw(void)
 					   DMA_CCMD_GLOBAL_INVL);
 		iommu->flush.flush_iotlb(iommu, 0, 0, 0,
 					 DMA_TLB_GLOBAL_FLUSH);
-		iommu_disable_protect_mem_regions(iommu);
 		iommu_enable_translation(iommu);
+		iommu_disable_protect_mem_regions(iommu);
 	}
 	return 0;
@@ -3205,7 +3217,7 @@ int __init intel_iommu_init(void)
 	 * Check the need for DMA-remapping initialization now.
 	 * Above initialization will also be used by Interrupt-remapping.
 	 */
-	if (no_iommu || (swiotlb && !iommu_pass_through) || dmar_disabled)
+	if (no_iommu || swiotlb || dmar_disabled)
 		return -ENODEV;
 	iommu_init_mempool();
@@ -3227,14 +3239,7 @@ int __init intel_iommu_init(void)
 	init_timer(&unmap_timer);
 	force_iommu = 1;
-	if (!iommu_pass_through) {
-		printk(KERN_INFO
-		       "Multi-level page-table translation for DMAR.\n");
-		dma_ops = &intel_dma_ops;
-	} else
-		printk(KERN_INFO
-		       "DMAR: Pass through translation for DMAR.\n");
+	dma_ops = &intel_dma_ops;
 	init_iommu_sysfs();
@@ -3517,7 +3522,6 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 	struct intel_iommu *iommu;
 	int addr_width;
 	u64 end;
-	int ret;
 	/* normally pdev is not mapped */
 	if (unlikely(domain_context_mapped(pdev))) {
@@ -3549,12 +3553,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
 		return -EFAULT;
 	}
-	ret = domain_add_dev_info(dmar_domain, pdev);
-	if (ret)
-		return ret;
-	ret = domain_context_mapping(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
-	return ret;
+	return domain_add_dev_info(dmar_domain, pdev, CONTEXT_TT_MULTI_LEVEL);
 }
 static void intel_iommu_detach_device(struct iommu_domain *domain,
...
@@ -603,6 +603,9 @@ int __init intr_remapping_supported(void)
 	if (disable_intremap)
 		return 0;
+	if (!dmar_ir_support())
+		return 0;
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
@@ -618,6 +621,11 @@ int __init enable_intr_remapping(int eim)
 	struct dmar_drhd_unit *drhd;
 	int setup = 0;
+	if (parse_ioapics_under_ir() != 1) {
+		printk(KERN_INFO "Not enable interrupt remapping\n");
+		return -1;
+	}
 	for_each_drhd_unit(drhd) {
 		struct intel_iommu *iommu = drhd->iommu;
...
@@ -22,7 +22,6 @@
 void
 init_iova_domain(struct iova_domain *iovad, unsigned long pfn_32bit)
 {
-	spin_lock_init(&iovad->iova_alloc_lock);
 	spin_lock_init(&iovad->iova_rbtree_lock);
 	iovad->rbroot = RB_ROOT;
 	iovad->cached32_node = NULL;
@@ -205,7 +204,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	unsigned long limit_pfn,
 	bool size_aligned)
 {
-	unsigned long flags;
 	struct iova *new_iova;
 	int ret;
@@ -219,11 +217,9 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (size_aligned)
 		size = __roundup_pow_of_two(size);
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
 	if (ret) {
 		free_iova_mem(new_iova);
 		return NULL;
@@ -381,8 +377,7 @@ reserve_iova(struct iova_domain *iovad,
 	struct iova *iova;
 	unsigned int overlap = 0;
-	spin_lock_irqsave(&iovad->iova_alloc_lock, flags);
-	spin_lock(&iovad->iova_rbtree_lock);
+	spin_lock_irqsave(&iovad->iova_rbtree_lock, flags);
 	for (node = rb_first(&iovad->rbroot); node; node = rb_next(node)) {
 		if (__is_range_overlap(node, pfn_lo, pfn_hi)) {
 			iova = container_of(node, struct iova, node);
@@ -402,8 +397,7 @@ reserve_iova(struct iova_domain *iovad,
 	iova = __insert_new_range(iovad, pfn_lo, pfn_hi);
 finish:
-	spin_unlock(&iovad->iova_rbtree_lock);
-	spin_unlock_irqrestore(&iovad->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&iovad->iova_rbtree_lock, flags);
 	return iova;
 }
@@ -420,8 +414,7 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 	unsigned long flags;
 	struct rb_node *node;
-	spin_lock_irqsave(&from->iova_alloc_lock, flags);
-	spin_lock(&from->iova_rbtree_lock);
+	spin_lock_irqsave(&from->iova_rbtree_lock, flags);
 	for (node = rb_first(&from->rbroot); node; node = rb_next(node)) {
 		struct iova *iova = container_of(node, struct iova, node);
 		struct iova *new_iova;
@@ -430,6 +423,5 @@ copy_reserved_iova(struct iova_domain *from, struct iova_domain *to)
 			printk(KERN_ERR "Reserve iova range %lx@%lx failed\n",
 				iova->pfn_lo, iova->pfn_lo);
 	}
-	spin_unlock(&from->iova_rbtree_lock);
-	spin_unlock_irqrestore(&from->iova_alloc_lock, flags);
+	spin_unlock_irqrestore(&from->iova_rbtree_lock, flags);
 }
@@ -360,4 +360,6 @@ extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
 extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
+extern int dmar_ir_support(void);
 #endif
@@ -28,7 +28,6 @@ struct iova {
 /* holds all the iova translations for a domain */
 struct iova_domain {
-	spinlock_t	iova_alloc_lock;/* Lock to protect iova allocation */
 	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
 	struct rb_root	rbroot;		/* iova domain rbtree root */
 	struct rb_node	*cached32_node; /* Save last alloced node */
...