Commit be98eb2c authored by Linus Torvalds's avatar Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  Intel-IOMMU, intr-remap: source-id checking
  Intel-IOMMU, intr-remap: set the whole 128bits of irte when modify/free it
  IOMMU Identity Mapping Support (drivers/pci/intel_iommu.c)
parents cf5434e8 f007e99c
...@@ -1414,6 +1414,9 @@ int setup_ioapic_entry(int apic_id, int irq, ...@@ -1414,6 +1414,9 @@ int setup_ioapic_entry(int apic_id, int irq,
irte.vector = vector; irte.vector = vector;
irte.dest_id = IRTE_DEST(destination); irte.dest_id = IRTE_DEST(destination);
/* Set source-id of interrupt request */
set_ioapic_sid(&irte, apic_id);
modify_irte(irq, &irte); modify_irte(irq, &irte);
ir_entry->index2 = (index >> 15) & 0x1; ir_entry->index2 = (index >> 15) & 0x1;
...@@ -3290,6 +3293,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms ...@@ -3290,6 +3293,9 @@ static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_ms
irte.vector = cfg->vector; irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest); irte.dest_id = IRTE_DEST(dest);
/* Set source-id of interrupt request */
set_msi_sid(&irte, pdev);
modify_irte(irq, &irte); modify_irte(irq, &irte);
msg->address_hi = MSI_ADDR_BASE_HI; msg->address_hi = MSI_ADDR_BASE_HI;
......
...@@ -39,6 +39,7 @@ ...@@ -39,6 +39,7 @@
#include <linux/sysdev.h> #include <linux/sysdev.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/iommu.h> #include <asm/iommu.h>
#include <asm/e820.h>
#include "pci.h" #include "pci.h"
#define ROOT_SIZE VTD_PAGE_SIZE #define ROOT_SIZE VTD_PAGE_SIZE
...@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte) ...@@ -217,6 +218,14 @@ static inline bool dma_pte_present(struct dma_pte *pte)
return (pte->val & 3) != 0; return (pte->val & 3) != 0;
} }
/*
 * This domain is a statically identity mapping domain.
 *	1. This domain creates a static 1:1 mapping to all usable memory.
 *	2. It maps to each iommu if successful.
 *	3. Each iommu maps to this domain if successful.
 */
struct dmar_domain *si_domain;
/* devices under the same p2p bridge are owned in one domain */ /* devices under the same p2p bridge are owned in one domain */
#define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0) #define DOMAIN_FLAG_P2P_MULTIPLE_DEVICES (1 << 0)
...@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte) ...@@ -225,6 +234,9 @@ static inline bool dma_pte_present(struct dma_pte *pte)
*/ */
#define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1) #define DOMAIN_FLAG_VIRTUAL_MACHINE (1 << 1)
/* si_domain contains multiple devices */
#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 2)
struct dmar_domain { struct dmar_domain {
int id; /* domain id */ int id; /* domain id */
unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/ unsigned long iommu_bmp; /* bitmap of iommus this domain uses*/
...@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu) ...@@ -435,12 +447,14 @@ int iommu_calculate_agaw(struct intel_iommu *iommu)
return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH); return __iommu_calculate_agaw(iommu, DEFAULT_DOMAIN_ADDRESS_WIDTH);
} }
/* in native case, each domain is related to only one iommu */ /* This functionin only returns single iommu in a domain */
static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain) static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
{ {
int iommu_id; int iommu_id;
/* si_domain and vm domain should not get here. */
BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE); BUG_ON(domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE);
BUG_ON(domain->flags & DOMAIN_FLAG_STATIC_IDENTITY);
iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus); iommu_id = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
if (iommu_id < 0 || iommu_id >= g_num_of_iommus) if (iommu_id < 0 || iommu_id >= g_num_of_iommus)
...@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu) ...@@ -1189,48 +1203,71 @@ void free_dmar_iommu(struct intel_iommu *iommu)
free_context_table(iommu); free_context_table(iommu);
} }
static struct dmar_domain * iommu_alloc_domain(struct intel_iommu *iommu) static struct dmar_domain *alloc_domain(void)
{ {
unsigned long num;
unsigned long ndomains;
struct dmar_domain *domain; struct dmar_domain *domain;
unsigned long flags;
domain = alloc_domain_mem(); domain = alloc_domain_mem();
if (!domain) if (!domain)
return NULL; return NULL;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long));
domain->flags = 0;
return domain;
}
static int iommu_attach_domain(struct dmar_domain *domain,
struct intel_iommu *iommu)
{
int num;
unsigned long ndomains;
unsigned long flags;
ndomains = cap_ndoms(iommu->cap); ndomains = cap_ndoms(iommu->cap);
spin_lock_irqsave(&iommu->lock, flags); spin_lock_irqsave(&iommu->lock, flags);
num = find_first_zero_bit(iommu->domain_ids, ndomains); num = find_first_zero_bit(iommu->domain_ids, ndomains);
if (num >= ndomains) { if (num >= ndomains) {
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
free_domain_mem(domain);
printk(KERN_ERR "IOMMU: no free domain ids\n"); printk(KERN_ERR "IOMMU: no free domain ids\n");
return NULL; return -ENOMEM;
} }
set_bit(num, iommu->domain_ids);
domain->id = num; domain->id = num;
memset(&domain->iommu_bmp, 0, sizeof(unsigned long)); set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, &domain->iommu_bmp); set_bit(iommu->seq_id, &domain->iommu_bmp);
domain->flags = 0;
iommu->domains[num] = domain; iommu->domains[num] = domain;
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
return domain; return 0;
} }
static void iommu_free_domain(struct dmar_domain *domain) static void iommu_detach_domain(struct dmar_domain *domain,
struct intel_iommu *iommu)
{ {
unsigned long flags; unsigned long flags;
struct intel_iommu *iommu; int num, ndomains;
int found = 0;
iommu = domain_get_iommu(domain);
spin_lock_irqsave(&iommu->lock, flags); spin_lock_irqsave(&iommu->lock, flags);
clear_bit(domain->id, iommu->domain_ids); ndomains = cap_ndoms(iommu->cap);
num = find_first_bit(iommu->domain_ids, ndomains);
for (; num < ndomains; ) {
if (iommu->domains[num] == domain) {
found = 1;
break;
}
num = find_next_bit(iommu->domain_ids,
cap_ndoms(iommu->cap), num+1);
}
if (found) {
clear_bit(num, iommu->domain_ids);
clear_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = NULL;
}
spin_unlock_irqrestore(&iommu->lock, flags); spin_unlock_irqrestore(&iommu->lock, flags);
} }
...@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width) ...@@ -1350,6 +1387,8 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
static void domain_exit(struct dmar_domain *domain) static void domain_exit(struct dmar_domain *domain)
{ {
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
u64 end; u64 end;
/* Domain 0 is reserved, so dont process it */ /* Domain 0 is reserved, so dont process it */
...@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain) ...@@ -1368,7 +1407,10 @@ static void domain_exit(struct dmar_domain *domain)
/* free page tables */ /* free page tables */
dma_pte_free_pagetable(domain, 0, end); dma_pte_free_pagetable(domain, 0, end);
iommu_free_domain(domain); for_each_active_iommu(iommu, drhd)
if (test_bit(iommu->seq_id, &domain->iommu_bmp))
iommu_detach_domain(domain, iommu);
free_domain_mem(domain); free_domain_mem(domain);
} }
...@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, ...@@ -1408,7 +1450,8 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
id = domain->id; id = domain->id;
pgd = domain->pgd; pgd = domain->pgd;
if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) { if (domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
domain->flags & DOMAIN_FLAG_STATIC_IDENTITY) {
int found = 0; int found = 0;
/* find an available domain id for this device in iommu */ /* find an available domain id for this device in iommu */
...@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment, ...@@ -1433,6 +1476,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain, int segment,
} }
set_bit(num, iommu->domain_ids); set_bit(num, iommu->domain_ids);
set_bit(iommu->seq_id, &domain->iommu_bmp);
iommu->domains[num] = domain; iommu->domains[num] = domain;
id = num; id = num;
} }
...@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ...@@ -1675,6 +1719,7 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
unsigned long flags; unsigned long flags;
int bus = 0, devfn = 0; int bus = 0, devfn = 0;
int segment; int segment;
int ret;
domain = find_domain(pdev); domain = find_domain(pdev);
if (domain) if (domain)
...@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ...@@ -1707,6 +1752,10 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
} }
} }
domain = alloc_domain();
if (!domain)
goto error;
/* Allocate new domain for the device */ /* Allocate new domain for the device */
drhd = dmar_find_matched_drhd_unit(pdev); drhd = dmar_find_matched_drhd_unit(pdev);
if (!drhd) { if (!drhd) {
...@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ...@@ -1716,9 +1765,11 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
} }
iommu = drhd->iommu; iommu = drhd->iommu;
domain = iommu_alloc_domain(iommu); ret = iommu_attach_domain(domain, iommu);
if (!domain) if (ret) {
domain_exit(domain);
goto error; goto error;
}
if (domain_init(domain, gaw)) { if (domain_init(domain, gaw)) {
domain_exit(domain); domain_exit(domain);
...@@ -1792,6 +1843,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw) ...@@ -1792,6 +1843,8 @@ static struct dmar_domain *get_domain_for_dev(struct pci_dev *pdev, int gaw)
return find_domain(pdev); return find_domain(pdev);
} }
static int iommu_identity_mapping;
static int iommu_prepare_identity_map(struct pci_dev *pdev, static int iommu_prepare_identity_map(struct pci_dev *pdev,
unsigned long long start, unsigned long long start,
unsigned long long end) unsigned long long end)
...@@ -1804,6 +1857,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev, ...@@ -1804,6 +1857,9 @@ static int iommu_prepare_identity_map(struct pci_dev *pdev,
printk(KERN_INFO printk(KERN_INFO
"IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n", "IOMMU: Setting identity map for device %s [0x%Lx - 0x%Lx]\n",
pci_name(pdev), start, end); pci_name(pdev), start, end);
if (iommu_identity_mapping)
domain = si_domain;
else
/* page table init */ /* page table init */
domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH); domain = get_domain_for_dev(pdev, DEFAULT_DOMAIN_ADDRESS_WIDTH);
if (!domain) if (!domain)
...@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void) ...@@ -1952,7 +2008,110 @@ static int __init init_context_pass_through(void)
return 0; return 0;
} }
static int __init init_dmars(void) static int md_domain_init(struct dmar_domain *domain, int guest_width);
static int si_domain_init(void)
{
struct dmar_drhd_unit *drhd;
struct intel_iommu *iommu;
int ret = 0;
si_domain = alloc_domain();
if (!si_domain)
return -EFAULT;
for_each_active_iommu(iommu, drhd) {
ret = iommu_attach_domain(si_domain, iommu);
if (ret) {
domain_exit(si_domain);
return -EFAULT;
}
}
if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
domain_exit(si_domain);
return -EFAULT;
}
si_domain->flags = DOMAIN_FLAG_STATIC_IDENTITY;
return 0;
}
static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev);
/*
 * Return 1 if @pdev is currently owned by the static identity domain,
 * 0 otherwise (including when identity mapping is disabled).
 */
static int identity_mapping(struct pci_dev *pdev)
{
	struct device_domain_info *info;

	if (likely(!iommu_identity_mapping))
		return 0;

	/* Guard against a failed or absent si_domain_init(). */
	if (!si_domain)
		return 0;

	/*
	 * NOTE(review): the list is walked without device_domain_lock, as in
	 * the original; entries are only added/removed under that lock.
	 */
	list_for_each_entry(info, &si_domain->devices, link)
		if (info->dev == pdev)
			return 1;

	return 0;
}
/*
 * Bind @pdev to @domain: allocate a device_domain_info record, fill in the
 * device's identity, and publish it on both the per-domain and the global
 * device lists under device_domain_lock.
 *
 * Returns 0 on success, -ENOMEM if the record cannot be allocated.
 */
static int domain_add_dev_info(struct dmar_domain *domain,
			       struct pci_dev *pdev)
{
	struct device_domain_info *devinfo;
	unsigned long flags;

	devinfo = alloc_devinfo_mem();
	if (!devinfo)
		return -ENOMEM;

	/* Record the device's identity and its owning domain. */
	devinfo->segment = pci_domain_nr(pdev->bus);
	devinfo->bus = pdev->bus->number;
	devinfo->devfn = pdev->devfn;
	devinfo->dev = pdev;
	devinfo->domain = domain;

	spin_lock_irqsave(&device_domain_lock, flags);
	list_add(&devinfo->link, &domain->devices);
	list_add(&devinfo->global, &device_domain_list);
	pdev->dev.archdata.iommu = devinfo;
	spin_unlock_irqrestore(&device_domain_lock, flags);

	return 0;
}
/*
 * Build the static 1:1 mapping: initialize si_domain, then, for every PCI
 * device, identity-map each usable (E820_RAM) memory range and attach the
 * device to si_domain.
 *
 * Returns 0 on success or a negative errno on the first failure.
 */
static int iommu_prepare_static_identity_mapping(void)
{
	struct pci_dev *pdev = NULL;
	int ret, i;

	ret = si_domain_init();
	if (ret)
		return -EFAULT;

	printk(KERN_INFO "IOMMU: Setting identity map:\n");
	for_each_pci_dev(pdev) {
		/* 1:1-map every usable RAM range for this device. */
		for (i = 0; i < e820.nr_map; i++) {
			struct e820entry *ei = &e820.map[i];

			if (ei->type != E820_RAM)
				continue;

			ret = iommu_prepare_identity_map(pdev, ei->addr,
							 ei->addr + ei->size);
			if (ret) {
				printk(KERN_INFO "1:1 mapping to one domain failed.\n");
				return -EFAULT;
			}
		}
		ret = domain_add_dev_info(si_domain, pdev);
		if (ret)
			return ret;
	}

	return 0;
}
int __init init_dmars(void)
{ {
struct dmar_drhd_unit *drhd; struct dmar_drhd_unit *drhd;
struct dmar_rmrr_unit *rmrr; struct dmar_rmrr_unit *rmrr;
...@@ -1961,6 +2120,13 @@ static int __init init_dmars(void) ...@@ -1961,6 +2120,13 @@ static int __init init_dmars(void)
int i, ret; int i, ret;
int pass_through = 1; int pass_through = 1;
/*
* In case pass through can not be enabled, iommu tries to use identity
* mapping.
*/
if (iommu_pass_through)
iommu_identity_mapping = 1;
/* /*
* for each drhd * for each drhd
* allocate root * allocate root
...@@ -2090,9 +2256,12 @@ static int __init init_dmars(void) ...@@ -2090,9 +2256,12 @@ static int __init init_dmars(void)
/* /*
* If pass through is not set or not enabled, setup context entries for * If pass through is not set or not enabled, setup context entries for
* identity mappings for rmrr, gfx, and isa. * identity mappings for rmrr, gfx, and isa and may fall back to static
* identity mapping if iommu_identity_mapping is set.
*/ */
if (!iommu_pass_through) { if (!iommu_pass_through) {
if (iommu_identity_mapping)
iommu_prepare_static_identity_mapping();
/* /*
* For each rmrr * For each rmrr
* for each dev attached to rmrr * for each dev attached to rmrr
...@@ -2107,6 +2276,7 @@ static int __init init_dmars(void) ...@@ -2107,6 +2276,7 @@ static int __init init_dmars(void)
* endfor * endfor
* endfor * endfor
*/ */
printk(KERN_INFO "IOMMU: Setting RMRR:\n");
for_each_rmrr_units(rmrr) { for_each_rmrr_units(rmrr) {
for (i = 0; i < rmrr->devices_cnt; i++) { for (i = 0; i < rmrr->devices_cnt; i++) {
pdev = rmrr->devices[i]; pdev = rmrr->devices[i];
...@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev) ...@@ -2248,6 +2418,52 @@ get_valid_domain_for_dev(struct pci_dev *pdev)
return domain; return domain;
} }
/* Devices marked with the dummy cookie bypass DMAR translation entirely. */
static int iommu_dummy(struct pci_dev *pdev)
{
	void *cookie = pdev->dev.archdata.iommu;

	return cookie == DUMMY_DEVICE_DOMAIN_INFO;
}
/*
 * Decide whether @pdev bypasses DMAR translation for this DMA operation.
 *
 * Returns non-zero when the device should NOT be translated (it is either
 * a dummy device or identity-mapped), and 0 when it must go through the
 * normal map/unmap path.  As a side effect, devices migrate in or out of
 * si_domain based on their DMA mask: a 32-bit-limited device is evicted
 * from the identity domain, while a 64-bit-capable device not yet in any
 * domain is opportunistically added to it.
 */
static int iommu_no_mapping(struct pci_dev *pdev)
{
	if (!iommu_identity_mapping)
		return iommu_dummy(pdev);

	if (identity_mapping(pdev)) {
		/* Already identity-mapped: keep it only if 64-bit capable. */
		if (pdev->dma_mask > DMA_BIT_MASK(32))
			return 1;

		/*
		 * 32 bit DMA is removed from si_domain and fall back
		 * to non-identity mapping.
		 */
		domain_remove_one_dev_info(si_domain, pdev);
		printk(KERN_INFO "32bit %s uses non-identity mapping\n",
		       pci_name(pdev));
		return 0;
	}

	/*
	 * In case of a detached 64 bit DMA device from vm, the device
	 * is put into si_domain for identity mapping.
	 */
	if (pdev->dma_mask > DMA_BIT_MASK(32) &&
	    !domain_add_dev_info(si_domain, pdev)) {
		printk(KERN_INFO "64bit %s uses identity mapping\n",
		       pci_name(pdev));
		return 1;
	}

	return iommu_dummy(pdev);
}
static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
size_t size, int dir, u64 dma_mask) size_t size, int dir, u64 dma_mask)
{ {
...@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr, ...@@ -2260,7 +2476,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
struct intel_iommu *iommu; struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO)
if (iommu_no_mapping(pdev))
return paddr; return paddr;
domain = get_valid_domain_for_dev(pdev); domain = get_valid_domain_for_dev(pdev);
...@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, ...@@ -2401,8 +2618,9 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
struct iova *iova; struct iova *iova;
struct intel_iommu *iommu; struct intel_iommu *iommu;
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) if (iommu_no_mapping(pdev))
return; return;
domain = find_domain(pdev); domain = find_domain(pdev);
BUG_ON(!domain); BUG_ON(!domain);
...@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, ...@@ -2492,7 +2710,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
struct scatterlist *sg; struct scatterlist *sg;
struct intel_iommu *iommu; struct intel_iommu *iommu;
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) if (iommu_no_mapping(pdev))
return; return;
domain = find_domain(pdev); domain = find_domain(pdev);
...@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne ...@@ -2553,7 +2771,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
struct intel_iommu *iommu; struct intel_iommu *iommu;
BUG_ON(dir == DMA_NONE); BUG_ON(dir == DMA_NONE);
if (pdev->dev.archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO) if (iommu_no_mapping(pdev))
return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir); return intel_nontranslate_map_sg(hwdev, sglist, nelems, dir);
domain = get_valid_domain_for_dev(pdev); domain = get_valid_domain_for_dev(pdev);
...@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void) ...@@ -2951,31 +3169,6 @@ int __init intel_iommu_init(void)
return 0; return 0;
} }
static int vm_domain_add_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev)
{
struct device_domain_info *info;
unsigned long flags;
info = alloc_devinfo_mem();
if (!info)
return -ENOMEM;
info->segment = pci_domain_nr(pdev->bus);
info->bus = pdev->bus->number;
info->devfn = pdev->devfn;
info->dev = pdev;
info->domain = domain;
spin_lock_irqsave(&device_domain_lock, flags);
list_add(&info->link, &domain->devices);
list_add(&info->global, &device_domain_list);
pdev->dev.archdata.iommu = info;
spin_unlock_irqrestore(&device_domain_lock, flags);
return 0;
}
static void iommu_detach_dependent_devices(struct intel_iommu *iommu, static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
struct pci_dev *pdev) struct pci_dev *pdev)
{ {
...@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu, ...@@ -3003,7 +3196,7 @@ static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
} }
} }
static void vm_domain_remove_one_dev_info(struct dmar_domain *domain, static void domain_remove_one_dev_info(struct dmar_domain *domain,
struct pci_dev *pdev) struct pci_dev *pdev)
{ {
struct device_domain_info *info; struct device_domain_info *info;
...@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void) ...@@ -3136,7 +3329,7 @@ static struct dmar_domain *iommu_alloc_vm_domain(void)
return domain; return domain;
} }
static int vm_domain_init(struct dmar_domain *domain, int guest_width) static int md_domain_init(struct dmar_domain *domain, int guest_width)
{ {
int adjust_width; int adjust_width;
...@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain) ...@@ -3227,7 +3420,7 @@ static int intel_iommu_domain_init(struct iommu_domain *domain)
"intel_iommu_domain_init: dmar_domain == NULL\n"); "intel_iommu_domain_init: dmar_domain == NULL\n");
return -ENOMEM; return -ENOMEM;
} }
if (vm_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) { if (md_domain_init(dmar_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
printk(KERN_ERR printk(KERN_ERR
"intel_iommu_domain_init() failed\n"); "intel_iommu_domain_init() failed\n");
vm_domain_exit(dmar_domain); vm_domain_exit(dmar_domain);
...@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, ...@@ -3262,8 +3455,9 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
old_domain = find_domain(pdev); old_domain = find_domain(pdev);
if (old_domain) { if (old_domain) {
if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE) if (dmar_domain->flags & DOMAIN_FLAG_VIRTUAL_MACHINE ||
vm_domain_remove_one_dev_info(old_domain, pdev); dmar_domain->flags & DOMAIN_FLAG_STATIC_IDENTITY)
domain_remove_one_dev_info(old_domain, pdev);
else else
domain_remove_dev_info(old_domain); domain_remove_dev_info(old_domain);
} }
...@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain, ...@@ -3285,7 +3479,7 @@ static int intel_iommu_attach_device(struct iommu_domain *domain,
return -EFAULT; return -EFAULT;
} }
ret = vm_domain_add_dev_info(dmar_domain, pdev); ret = domain_add_dev_info(dmar_domain, pdev);
if (ret) if (ret)
return ret; return ret;
...@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain, ...@@ -3299,7 +3493,7 @@ static void intel_iommu_detach_device(struct iommu_domain *domain,
struct dmar_domain *dmar_domain = domain->priv; struct dmar_domain *dmar_domain = domain->priv;
struct pci_dev *pdev = to_pci_dev(dev); struct pci_dev *pdev = to_pci_dev(dev);
vm_domain_remove_one_dev_info(dmar_domain, pdev); domain_remove_one_dev_info(dmar_domain, pdev);
} }
static int intel_iommu_map_range(struct iommu_domain *domain, static int intel_iommu_map_range(struct iommu_domain *domain,
......
...@@ -10,6 +10,8 @@ ...@@ -10,6 +10,8 @@
#include <linux/intel-iommu.h> #include <linux/intel-iommu.h>
#include "intr_remapping.h" #include "intr_remapping.h"
#include <acpi/acpi.h> #include <acpi/acpi.h>
#include <asm/pci-direct.h>
#include "pci.h"
static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
static int ir_ioapic_num; static int ir_ioapic_num;
...@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified) ...@@ -314,7 +316,8 @@ int modify_irte(int irq, struct irte *irte_modified)
index = irq_iommu->irte_index + irq_iommu->sub_handle; index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index]; irte = &iommu->ir_table->base[index];
set_64bit((unsigned long *)irte, irte_modified->low); set_64bit((unsigned long *)&irte->low, irte_modified->low);
set_64bit((unsigned long *)&irte->high, irte_modified->high);
__iommu_flush_cache(iommu, irte, sizeof(*irte)); __iommu_flush_cache(iommu, irte, sizeof(*irte));
rc = qi_flush_iec(iommu, index, 0); rc = qi_flush_iec(iommu, index, 0);
...@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev) ...@@ -369,12 +372,32 @@ struct intel_iommu *map_dev_to_ir(struct pci_dev *dev)
return drhd->iommu; return drhd->iommu;
} }
/*
 * Zero the whole IRTE range owned by @irq_iommu and flush the interrupt
 * entry cache.  Sub-handles share their parent's range, so only the parent
 * (sub_handle == 0) performs the clear.
 *
 * Returns the qi_flush_iec() result, or 0 for a sub-handle.
 */
static int clear_entries(struct irq_2_iommu *irq_iommu)
{
	struct intel_iommu *iommu;
	struct irte *irte;
	int index, nr, i;

	if (irq_iommu->sub_handle)
		return 0;

	iommu = irq_iommu->iommu;
	index = irq_iommu->irte_index + irq_iommu->sub_handle;

	/* irte_mask encodes the power-of-two size of the allocated range. */
	nr = 1 << irq_iommu->irte_mask;
	irte = iommu->ir_table->base + index;
	for (i = 0; i < nr; i++, irte++) {
		/* Clear both 64-bit halves of the 128-bit entry. */
		set_64bit((unsigned long *)&irte->low, 0);
		set_64bit((unsigned long *)&irte->high, 0);
	}

	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
int free_irte(int irq) int free_irte(int irq)
{ {
int rc = 0; int rc = 0;
int index, i;
struct irte *irte;
struct intel_iommu *iommu;
struct irq_2_iommu *irq_iommu; struct irq_2_iommu *irq_iommu;
unsigned long flags; unsigned long flags;
...@@ -385,16 +408,7 @@ int free_irte(int irq) ...@@ -385,16 +408,7 @@ int free_irte(int irq)
return -1; return -1;
} }
iommu = irq_iommu->iommu; rc = clear_entries(irq_iommu);
index = irq_iommu->irte_index + irq_iommu->sub_handle;
irte = &iommu->ir_table->base[index];
if (!irq_iommu->sub_handle) {
for (i = 0; i < (1 << irq_iommu->irte_mask); i++)
set_64bit((unsigned long *)(irte + i), 0);
rc = qi_flush_iec(iommu, index, irq_iommu->irte_mask);
}
irq_iommu->iommu = NULL; irq_iommu->iommu = NULL;
irq_iommu->irte_index = 0; irq_iommu->irte_index = 0;
...@@ -406,6 +420,91 @@ int free_irte(int irq) ...@@ -406,6 +420,91 @@ int free_irte(int irq)
return rc; return rc;
} }
/*
* source validation type
*/
#define SVT_NO_VERIFY 0x0 /* no verification is required */
#define SVT_VERIFY_SID_SQ	0x1  /* verify using SID and SQ fields */
#define SVT_VERIFY_BUS 0x2 /* verify bus of request-id */
/*
* source-id qualifier
*/
#define SQ_ALL_16 0x0 /* verify all 16 bits of request-id */
#define SQ_13_IGNORE_1 0x1 /* verify most significant 13 bits, ignore
* the third least significant bit
*/
#define SQ_13_IGNORE_2 0x2 /* verify most significant 13 bits, ignore
* the second and third least significant bits
*/
#define SQ_13_IGNORE_3 0x3 /* verify most significant 13 bits, ignore
* the least three significant bits
*/
/*
 * Fill the source-id validation fields of an IRTE so the hardware can
 * verify the source ids of interrupt requests.
 *
 * @svt: source validation type (one of the SVT_* values above)
 * @sq:  source-id qualifier (one of the SQ_* values above); only
 *       meaningful for SVT_VERIFY_SID_SQ
 * @sid: 16-bit requester id ((bus << 8) | devfn) to verify against
 */
static void set_irte_sid(struct irte *irte, unsigned int svt,
			 unsigned int sq, unsigned int sid)
{
	irte->svt = svt;
	irte->sq = sq;
	irte->sid = sid;
}
int set_ioapic_sid(struct irte *irte, int apic)
{
int i;
u16 sid = 0;
if (!irte)
return -1;
for (i = 0; i < MAX_IO_APICS; i++) {
if (ir_ioapic[i].id == apic) {
sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
break;
}
}
if (sid == 0) {
pr_warning("Failed to set source-id of IOAPIC (%d)\n", apic);
return -1;
}
set_irte_sid(irte, 1, 0, sid);
return 0;
}
/*
 * Program the IRTE source-id fields for an MSI/MSI-X capable device,
 * accounting for how the request-id appears on the PCIe fabric:
 *  - a native PCIe device (or Root Complex integrated device) uses its
 *    own requester id verbatim;
 *  - a device behind a PCIe-to-PCI/PCI-X bridge is seen with the bridge's
 *    bus number, so only the bus is verified;
 *  - a device behind a legacy PCI bridge is seen with the bridge's own
 *    requester id.
 * Returns 0 on success, -1 on NULL arguments.
 */
int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
	struct pci_dev *bridge;

	if (!irte || !dev)
		return -1;

	/* PCIe device or Root Complex integrated PCI device */
	if (dev->is_pcie || !dev->bus->parent) {
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (dev->bus->number << 8) | dev->devfn);
		return 0;
	}

	bridge = pci_find_upstream_pcie_bridge(dev);
	if (!bridge)
		return 0;	/* no upstream PCIe bridge found: leave IRTE as-is */

	if (bridge->is_pcie)	/* PCIe-to-PCI/PCIX bridge */
		set_irte_sid(irte, SVT_VERIFY_BUS, SQ_ALL_16,
			     (bridge->bus->number << 8) | dev->bus->number);
	else			/* legacy PCI bridge */
		set_irte_sid(irte, SVT_VERIFY_SID_SQ, SQ_ALL_16,
			     (bridge->bus->number << 8) | bridge->devfn);

	return 0;
}
static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode) static void iommu_set_intr_remapping(struct intel_iommu *iommu, int mode)
{ {
u64 addr; u64 addr;
...@@ -612,6 +711,35 @@ int __init enable_intr_remapping(int eim) ...@@ -612,6 +711,35 @@ int __init enable_intr_remapping(int eim)
return -1; return -1;
} }
/*
 * Record one IOAPIC from a DMAR device-scope entry into ir_ioapic[].
 *
 * The scope carries a starting bus number plus a path of (dev, fn) pairs
 * leading from that bus, through any intermediate bridges, down to the
 * IOAPIC itself.  The loop walks all but the last path element, reading
 * each bridge's secondary bus number to follow the topology; after the
 * loop, @bus is the IOAPIC's bus and @path points at its (dev, fn).
 *
 * NOTE(review): assumes count >= 1, i.e. the scope contains at least one
 * path entry — guaranteed by the DMAR table layout for IOAPIC scopes.
 * No bounds check on ir_ioapic_num vs MAX_IO_APICS is done here; the
 * caller is expected to enforce that limit.
 */
static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
				      struct intel_iommu *iommu)
{
	struct acpi_dmar_pci_path *path;
	u8 bus;
	int count;

	bus = scope->bus;
	/* The path array immediately follows the scope header. */
	path = (struct acpi_dmar_pci_path *)(scope + 1);
	count = (scope->length - sizeof(struct acpi_dmar_device_scope))
		/ sizeof(struct acpi_dmar_pci_path);

	while (--count > 0) {
		/*
		 * Access PCI directly because the PCI subsystem
		 * isn't initialized yet at this point of boot.
		 */
		bus = read_pci_config_byte(bus, path->dev, path->fn,
					   PCI_SECONDARY_BUS);
		path++;
	}

	ir_ioapic[ir_ioapic_num].bus = bus;
	ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->dev, path->fn);
	ir_ioapic[ir_ioapic_num].iommu = iommu;
	ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
	ir_ioapic_num++;
}
static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
struct intel_iommu *iommu) struct intel_iommu *iommu)
{ {
...@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header, ...@@ -636,9 +764,7 @@ static int ir_parse_ioapic_scope(struct acpi_dmar_header *header,
" 0x%Lx\n", scope->enumeration_id, " 0x%Lx\n", scope->enumeration_id,
drhd->address); drhd->address);
ir_ioapic[ir_ioapic_num].iommu = iommu; ir_parse_one_ioapic_scope(scope, iommu);
ir_ioapic[ir_ioapic_num].id = scope->enumeration_id;
ir_ioapic_num++;
} }
start += scope->length; start += scope->length;
} }
......
...@@ -3,6 +3,8 @@ ...@@ -3,6 +3,8 @@
struct ioapic_scope { struct ioapic_scope {
struct intel_iommu *iommu; struct intel_iommu *iommu;
unsigned int id; unsigned int id;
unsigned int bus; /* PCI bus number */
unsigned int devfn; /* PCI devfn number */
}; };
#define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0) #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
...@@ -126,6 +126,8 @@ extern int free_irte(int irq); ...@@ -126,6 +126,8 @@ extern int free_irte(int irq);
extern int irq_remapped(int irq); extern int irq_remapped(int irq);
extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev); extern struct intel_iommu *map_dev_to_ir(struct pci_dev *dev);
extern struct intel_iommu *map_ioapic_to_ir(int apic); extern struct intel_iommu *map_ioapic_to_ir(int apic);
extern int set_ioapic_sid(struct irte *irte, int apic);
extern int set_msi_sid(struct irte *irte, struct pci_dev *dev);
#else #else
static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count) static inline int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
{ {
...@@ -156,6 +158,15 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic) ...@@ -156,6 +158,15 @@ static inline struct intel_iommu *map_ioapic_to_ir(int apic)
{ {
return NULL; return NULL;
} }
static inline int set_ioapic_sid(struct irte *irte, int apic)
{
return 0;
}
static inline int set_msi_sid(struct irte *irte, struct pci_dev *dev)
{
return 0;
}
#define irq_remapped(irq) (0) #define irq_remapped(irq) (0)
#define enable_intr_remapping(mode) (-1) #define enable_intr_remapping(mode) (-1)
#define disable_intr_remapping() (0) #define disable_intr_remapping() (0)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment