Commit ca1ee219 authored by Linus Torvalds

Merge git://git.infradead.org/iommu-2.6

* git://git.infradead.org/iommu-2.6:
  intel-iommu: Fix address wrap on 32-bit kernel.
  intel-iommu: Enable DMAR on 32-bit kernel.
  intel-iommu: fix PCI device detach from virtual machine
  intel-iommu: VT-d page table to support snooping control bit
  iommu: Add domain_has_cap iommu_ops
  intel-iommu: Snooping control support

Fixed trivial conflicts in arch/x86/Kconfig and drivers/pci/intel-iommu.c
parents 3cc50ac0 afeeb7ce
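
The series merged here introduces a generic capability query, iommu_domain_has_cap(), alongside the new IOMMU_CACHE mapping flag and the IOMMU_CAP_CACHE_COHERENCY capability. As a minimal, hypothetical sketch (not part of this merge), a caller that already owns an iommu_domain, for example a device-assignment path, could use the query to decide whether to request snooped (cache-coherent) mappings; the iommu_map_range() call is assumed from the existing IOMMU API of this kernel:

#include <linux/iommu.h>

/*
 * Hypothetical caller sketch, not part of this merge: request a
 * snooped (cache-coherent) mapping only when the domain's IOMMUs
 * can enforce it, as reported by the new capability query.
 */
static int map_coherent_region(struct iommu_domain *domain,
			       unsigned long iova, phys_addr_t paddr,
			       size_t size)
{
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (iommu_domain_has_cap(domain, IOMMU_CAP_CACHE_COHERENCY))
		prot |= IOMMU_CACHE;	/* VT-d sets DMA_PTE_SNP for this */

	/* iommu_map_range() is assumed from the existing IOMMU API */
	return iommu_map_range(domain, iova, paddr, size, prot);
}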
@@ -1837,8 +1837,8 @@ config PCI_MMCONFIG
 config DMAR
 	bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
-	depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
-	---help---
+	depends on PCI_MSI && ACPI && EXPERIMENTAL
+	help
 	  DMA remapping (DMAR) devices support enables independent address
 	  translations for Direct Memory Access (DMA) from devices.
 	  These DMA remapping devices are reported via ACPI tables
......
@@ -1928,6 +1928,12 @@ static phys_addr_t amd_iommu_iova_to_phys(struct iommu_domain *dom,
 	return paddr;
 }
 
+static int amd_iommu_domain_has_cap(struct iommu_domain *domain,
+				    unsigned long cap)
+{
+	return 0;
+}
+
 static struct iommu_ops amd_iommu_ops = {
 	.domain_init = amd_iommu_domain_init,
 	.domain_destroy = amd_iommu_domain_destroy,
@@ -1936,5 +1942,6 @@ static struct iommu_ops amd_iommu_ops = {
 	.map = amd_iommu_map_range,
 	.unmap = amd_iommu_unmap_range,
 	.iova_to_phys = amd_iommu_iova_to_phys,
+	.domain_has_cap = amd_iommu_domain_has_cap,
 };
@@ -98,3 +98,10 @@ phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 	return iommu_ops->iova_to_phys(domain, iova);
 }
 EXPORT_SYMBOL_GPL(iommu_iova_to_phys);
+
+int iommu_domain_has_cap(struct iommu_domain *domain,
+			 unsigned long cap)
+{
+	return iommu_ops->domain_has_cap(domain, cap);
+}
+EXPORT_SYMBOL_GPL(iommu_domain_has_cap);
@@ -164,7 +164,8 @@ static inline void context_clear_entry(struct context_entry *context)
  * 1: writable
  * 2-6: reserved
  * 7: super page
- * 8-11: available
+ * 8-10: available
+ * 11: snoop behavior
  * 12-63: Host physcial address
  */
 struct dma_pte {
@@ -186,6 +187,11 @@ static inline void dma_set_pte_writable(struct dma_pte *pte)
 	pte->val |= DMA_PTE_WRITE;
 }
 
+static inline void dma_set_pte_snp(struct dma_pte *pte)
+{
+	pte->val |= DMA_PTE_SNP;
+}
+
 static inline void dma_set_pte_prot(struct dma_pte *pte, unsigned long prot)
 {
 	pte->val = (pte->val & ~3) | (prot & 3);
@@ -231,6 +237,7 @@ struct dmar_domain {
 	int	flags;		/* flags to find out type of domain */
 
 	int	iommu_coherency;/* indicate coherency of iommu access */
+	int	iommu_snooping; /* indicate snooping control feature*/
 	int	iommu_count;	/* reference count of iommu */
 	spinlock_t iommu_lock;	/* protect iommu set in domain */
 	u64	max_addr;	/* maximum mapped address */
@@ -421,7 +428,6 @@ static struct intel_iommu *domain_get_iommu(struct dmar_domain *domain)
 	return g_iommus[iommu_id];
 }
 
-/* "Coherency" capability may be different across iommus */
 static void domain_update_iommu_coherency(struct dmar_domain *domain)
 {
 	int i;
@@ -438,6 +444,29 @@ static void domain_update_iommu_coherency(struct dmar_domain *domain)
 	}
 }
 
+static void domain_update_iommu_snooping(struct dmar_domain *domain)
+{
+	int i;
+
+	domain->iommu_snooping = 1;
+
+	i = find_first_bit(&domain->iommu_bmp, g_num_of_iommus);
+	for (; i < g_num_of_iommus; ) {
+		if (!ecap_sc_support(g_iommus[i]->ecap)) {
+			domain->iommu_snooping = 0;
+			break;
+		}
+		i = find_next_bit(&domain->iommu_bmp, g_num_of_iommus, i+1);
+	}
+}
+
+/* Some capabilities may be different across iommus */
+static void domain_update_iommu_cap(struct dmar_domain *domain)
+{
+	domain_update_iommu_coherency(domain);
+	domain_update_iommu_snooping(domain);
+}
+
 static struct intel_iommu *device_to_iommu(u8 bus, u8 devfn)
 {
 	struct dmar_drhd_unit *drhd = NULL;
@@ -689,15 +718,17 @@ static void dma_pte_clear_one(struct dmar_domain *domain, u64 addr)
 static void dma_pte_clear_range(struct dmar_domain *domain, u64 start, u64 end)
 {
 	int addr_width = agaw_to_width(domain->agaw);
+	int npages;
 
 	start &= (((u64)1) << addr_width) - 1;
 	end &= (((u64)1) << addr_width) - 1;
 
 	/* in case it's partial page */
 	start = PAGE_ALIGN(start);
 	end &= PAGE_MASK;
+	npages = (end - start) / VTD_PAGE_SIZE;
 
 	/* we don't need lock here, nobody else touches the iova range */
-	while (start < end) {
+	while (npages--) {
 		dma_pte_clear_one(domain, start);
 		start += VTD_PAGE_SIZE;
 	}
@@ -1241,6 +1272,11 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
 	else
 		domain->iommu_coherency = 0;
 
+	if (ecap_sc_support(iommu->ecap))
+		domain->iommu_snooping = 1;
+	else
+		domain->iommu_snooping = 0;
+
 	domain->iommu_count = 1;
 
 	/* always allocate the top pgd */
@@ -1369,7 +1405,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	spin_lock_irqsave(&domain->iommu_lock, flags);
 	if (!test_and_set_bit(iommu->seq_id, &domain->iommu_bmp)) {
 		domain->iommu_count++;
-		domain_update_iommu_coherency(domain);
+		domain_update_iommu_cap(domain);
 	}
 	spin_unlock_irqrestore(&domain->iommu_lock, flags);
 	return 0;
@@ -1469,6 +1505,8 @@ domain_page_mapping(struct dmar_domain *domain, dma_addr_t iova,
 		BUG_ON(dma_pte_addr(pte));
 		dma_set_pte_addr(pte, start_pfn << VTD_PAGE_SHIFT);
 		dma_set_pte_prot(pte, prot);
+		if (prot & DMA_PTE_SNP)
+			dma_set_pte_snp(pte);
 		domain_flush_cache(domain, pte, sizeof(*pte));
 		start_pfn++;
 		index++;
@@ -2119,7 +2157,7 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 error:
 	if (iova)
 		__free_iova(&domain->iovad, iova);
-	printk(KERN_ERR"Device %s request: %lx@%llx dir %d --- failed\n",
+	printk(KERN_ERR"Device %s request: %zx@%llx dir %d --- failed\n",
 		pci_name(pdev), size, (unsigned long long)paddr, dir);
 	return 0;
 }
@@ -2218,7 +2256,7 @@ static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	size = aligned_size((u64)dev_addr, size);
 
-	pr_debug("Device %s unmapping: %lx@%llx\n",
+	pr_debug("Device %s unmapping: %zx@%llx\n",
 		pci_name(pdev), size, (unsigned long long)start_addr);
 
 	/* clear the whole page */
@@ -2282,8 +2320,6 @@ static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
 	free_pages((unsigned long)vaddr, order);
 }
 
-#define SG_ENT_VIRT_ADDRESS(sg)	(sg_virt((sg)))
-
 static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 			   int nelems, enum dma_data_direction dir,
 			   struct dma_attrs *attrs)
@@ -2294,7 +2330,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	unsigned long start_addr;
 	struct iova *iova;
 	size_t size = 0;
-	void *addr;
+	phys_addr_t addr;
 	struct scatterlist *sg;
 	struct intel_iommu *iommu;
@@ -2310,7 +2346,7 @@ static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
 	if (!iova)
 		return;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
@@ -2337,7 +2373,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 	for_each_sg(sglist, sg, nelems, i) {
 		BUG_ON(!sg_page(sg));
-		sg->dma_address = virt_to_bus(SG_ENT_VIRT_ADDRESS(sg));
+		sg->dma_address = page_to_phys(sg_page(sg)) + sg->offset;
 		sg->dma_length = sg->length;
 	}
 	return nelems;
@@ -2346,7 +2382,7 @@ static int intel_nontranslate_map_sg(struct device *hddev,
 static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
 			enum dma_data_direction dir, struct dma_attrs *attrs)
 {
-	void *addr;
+	phys_addr_t addr;
 	int i;
 	struct pci_dev *pdev = to_pci_dev(hwdev);
 	struct dmar_domain *domain;
@@ -2370,8 +2406,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	iommu = domain_get_iommu(domain);
 
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size += aligned_size((u64)addr, sg->length);
 	}
@@ -2394,8 +2429,7 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	start_addr = iova->pfn_lo << PAGE_SHIFT;
 	offset = 0;
 	for_each_sg(sglist, sg, nelems, i) {
-		addr = SG_ENT_VIRT_ADDRESS(sg);
-		addr = (void *)virt_to_phys(addr);
+		addr = page_to_phys(sg_page(sg)) + sg->offset;
 		size = aligned_size((u64)addr, sg->length);
 		ret = domain_page_mapping(domain, start_addr + offset,
 			((u64)addr) & PAGE_MASK,
@@ -2628,6 +2662,33 @@ static int vm_domain_add_dev_info(struct dmar_domain *domain,
 	return 0;
 }
 
+static void iommu_detach_dependent_devices(struct intel_iommu *iommu,
+					   struct pci_dev *pdev)
+{
+	struct pci_dev *tmp, *parent;
+
+	if (!iommu || !pdev)
+		return;
+
+	/* dependent device detach */
+	tmp = pci_find_upstream_pcie_bridge(pdev);
+	/* Secondary interface's bus number and devfn 0 */
+	if (tmp) {
+		parent = pdev->bus->self;
+		while (parent != tmp) {
+			iommu_detach_dev(iommu, parent->bus->number,
+				parent->devfn);
+			parent = parent->bus->self;
+		}
+		if (tmp->is_pcie) /* this is a PCIE-to-PCI bridge */
+			iommu_detach_dev(iommu,
+				tmp->subordinate->number, 0);
+		else /* this is a legacy PCI bridge */
+			iommu_detach_dev(iommu,
+				tmp->bus->number, tmp->devfn);
+	}
+}
+
 static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 					  struct pci_dev *pdev)
 {
@@ -2653,6 +2714,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 		spin_unlock_irqrestore(&device_domain_lock, flags);
 
 		iommu_detach_dev(iommu, info->bus, info->devfn);
+		iommu_detach_dependent_devices(iommu, pdev);
 		free_devinfo_mem(info);
 
 		spin_lock_irqsave(&device_domain_lock, flags);
@@ -2676,7 +2738,7 @@ static void vm_domain_remove_one_dev_info(struct dmar_domain *domain,
 		spin_lock_irqsave(&domain->iommu_lock, tmp_flags);
 		clear_bit(iommu->seq_id, &domain->iommu_bmp);
 		domain->iommu_count--;
-		domain_update_iommu_coherency(domain);
+		domain_update_iommu_cap(domain);
 		spin_unlock_irqrestore(&domain->iommu_lock, tmp_flags);
 	}
@@ -2702,15 +2764,16 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
 		iommu = device_to_iommu(info->bus, info->devfn);
 		iommu_detach_dev(iommu, info->bus, info->devfn);
+		iommu_detach_dependent_devices(iommu, info->dev);
 
 		/* clear this iommu in iommu_bmp, update iommu count
-		 * and coherency
+		 * and capabilities
 		 */
 		spin_lock_irqsave(&domain->iommu_lock, flags2);
 		if (test_and_clear_bit(iommu->seq_id,
 				       &domain->iommu_bmp)) {
 			domain->iommu_count--;
-			domain_update_iommu_coherency(domain);
+			domain_update_iommu_cap(domain);
 		}
 		spin_unlock_irqrestore(&domain->iommu_lock, flags2);
@@ -2933,6 +2996,8 @@ static int intel_iommu_map_range(struct iommu_domain *domain,
 		prot |= DMA_PTE_READ;
 	if (iommu_prot & IOMMU_WRITE)
 		prot |= DMA_PTE_WRITE;
+	if ((iommu_prot & IOMMU_CACHE) && dmar_domain->iommu_snooping)
+		prot |= DMA_PTE_SNP;
 
 	max_addr = (iova & VTD_PAGE_MASK) + VTD_PAGE_ALIGN(size);
 	if (dmar_domain->max_addr < max_addr) {
@@ -2986,6 +3051,17 @@ static phys_addr_t intel_iommu_iova_to_phys(struct iommu_domain *domain,
 	return phys;
 }
 
+static int intel_iommu_domain_has_cap(struct iommu_domain *domain,
+				      unsigned long cap)
+{
+	struct dmar_domain *dmar_domain = domain->priv;
+
+	if (cap == IOMMU_CAP_CACHE_COHERENCY)
+		return dmar_domain->iommu_snooping;
+
+	return 0;
+}
+
 static struct iommu_ops intel_iommu_ops = {
 	.domain_init = intel_iommu_domain_init,
 	.domain_destroy = intel_iommu_domain_destroy,
@@ -2994,6 +3070,7 @@ static struct iommu_ops intel_iommu_ops = {
 	.map = intel_iommu_map_range,
 	.unmap = intel_iommu_unmap_range,
 	.iova_to_phys = intel_iommu_iova_to_phys,
+	.domain_has_cap = intel_iommu_domain_has_cap,
 };
 
 static void __devinit quirk_iommu_rwbf(struct pci_dev *dev)
......
@@ -11,6 +11,7 @@
 #define DMA_PTE_READ (1)
 #define DMA_PTE_WRITE (2)
+#define DMA_PTE_SNP (1 << 11)
 
 struct intel_iommu;
 struct dmar_domain;
......
@@ -123,7 +123,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 #define ecap_eim_support(e) ((e >> 4) & 0x1)
 #define ecap_ir_support(e) ((e >> 3) & 0x1)
 #define ecap_max_handle_mask(e) ((e >> 20) & 0xf)
+#define ecap_sc_support(e) ((e >> 7) & 0x1) /* Snooping Control */
 
 /* IOTLB_REG */
 #define DMA_TLB_FLUSH_GRANU_OFFSET 60
......
@@ -21,6 +21,7 @@
 #define IOMMU_READ (1)
 #define IOMMU_WRITE (2)
+#define IOMMU_CACHE (4) /* DMA cache coherency */
 
 struct device;
@@ -28,6 +29,8 @@ struct iommu_domain {
 	void *priv;
 };
 
+#define IOMMU_CAP_CACHE_COHERENCY	0x1
+
 struct iommu_ops {
 	int (*domain_init)(struct iommu_domain *domain);
 	void (*domain_destroy)(struct iommu_domain *domain);
@@ -39,6 +42,8 @@ struct iommu_ops {
 		      size_t size);
 	phys_addr_t (*iova_to_phys)(struct iommu_domain *domain,
 				    unsigned long iova);
+	int (*domain_has_cap)(struct iommu_domain *domain,
+			      unsigned long cap);
 };
 
 #ifdef CONFIG_IOMMU_API
@@ -57,6 +62,8 @@ extern void iommu_unmap_range(struct iommu_domain *domain, unsigned long iova,
 			      size_t size);
 extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 				      unsigned long iova);
+extern int iommu_domain_has_cap(struct iommu_domain *domain,
+				unsigned long cap);
 
 #else /* CONFIG_IOMMU_API */
@@ -107,6 +114,12 @@ static inline phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain,
 	return 0;
 }
 
+static inline int domain_has_cap(struct iommu_domain *domain,
+				 unsigned long cap)
+{
+	return 0;
+}
+
 #endif /* CONFIG_IOMMU_API */
 
 #endif /* __LINUX_IOMMU_H */