Commit 2cd83ba5 authored by Linus Torvalds

Merge tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio

Pull IOMMU updates from Alex Williamson:
 "As Joerg mentioned[1], he's out on paternity leave through the end of
  the year and I'm filling in for him in the interim:

   - Enforce MSI multiple IRQ alignment in AMD IOMMU

   - VT-d PASID error handling fixes

   - Add r8a7795 IPMMU support

   - Manage runtime PM links on exynos at {add,remove}_device callbacks

   - Fix Mediatek driver name to avoid conflict

   - Add terminate support to qcom fault handler

   - 64-bit IOVA optimizations

   - Simplify IOVA domain destruction, better use of rcache, and skip
     anchor nodes on copy

   - Convert to IOMMU TLB sync API in io-pgtable-arm{-v7s}

   - Drop command queue lock when waiting for CMD_SYNC completion on ARM
     SMMU implementations supporting MSI to cacheable memory

   - ipmmu-vmsa cleanup inspired by missed IOTLB sync callbacks

   - Fix sleeping lock with preemption disabled for RT

   - Dual MMU support for TI DRA7xx DSPs

   - Optional flush option on IOVA allocation, avoiding overhead when the
     caller can try other options (see the sketch after this quoted
     message)

  [1] https://lkml.org/lkml/2017/10/22/72"
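
The IOVA allocator rework summarized above changes two entry points that recur
throughout the diff below: init_iova_domain() loses its per-domain 32-bit PFN
limit, and alloc_iova_fast() gains a flush_rcache flag so a cheap first attempt
does not flush the per-CPU range caches. What follows is a minimal sketch of a
caller after this series; the domain, the function names and the EXAMPLE_*
macros are illustrative placeholders (the real drivers define their own
IOVA_PFN helpers), and only the two library signatures reflect the actual API.

	/* Hypothetical caller of the reworked IOVA API (sketch only). */
	#include <linux/iova.h>
	#include <linux/dma-mapping.h>

	#define EXAMPLE_IOVA_START_PFN	(1)
	#define EXAMPLE_IOVA_PFN(addr)	((addr) >> PAGE_SHIFT)

	static struct iova_domain example_iovad;

	static void example_domain_init(void)
	{
		/* No more pfn_32bit argument; the 4GB boundary is tracked internally. */
		init_iova_domain(&example_iovad, PAGE_SIZE, EXAMPLE_IOVA_START_PFN);
	}

	static unsigned long example_alloc_iova(unsigned long pages, u64 dma_mask)
	{
		unsigned long pfn = 0;

		/* Cheap attempt below 4GB first: do not flush the rcaches on failure. */
		if (dma_mask > DMA_BIT_MASK(32))
			pfn = alloc_iova_fast(&example_iovad, pages,
					      EXAMPLE_IOVA_PFN(DMA_BIT_MASK(32)), false);

		/* Retry with the full mask, allowing an rcache flush before giving up. */
		if (!pfn)
			pfn = alloc_iova_fast(&example_iovad, pages,
					      EXAMPLE_IOVA_PFN(dma_mask), true);

		return pfn;
	}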

* tag 'iommu-v4.15-rc1' of git://github.com/awilliam/linux-vfio: (54 commits)
  iommu/iova: Use raw_cpu_ptr() instead of get_cpu_ptr() for ->fq
  iommu/mediatek: Fix driver name
  iommu/ipmmu-vmsa: Hook up r8a7795 DT matching code
  iommu/ipmmu-vmsa: Allow two bit SL0
  iommu/ipmmu-vmsa: Make IMBUSCTR setup optional
  iommu/ipmmu-vmsa: Write IMCTR twice
  iommu/ipmmu-vmsa: IPMMU device is 40-bit bus master
  iommu/ipmmu-vmsa: Make use of IOMMU_OF_DECLARE()
  iommu/ipmmu-vmsa: Enable multi context support
  iommu/ipmmu-vmsa: Add optional root device feature
  iommu/ipmmu-vmsa: Introduce features, break out alias
  iommu/ipmmu-vmsa: Unify ipmmu_ops
  iommu/ipmmu-vmsa: Clean up struct ipmmu_vmsa_iommu_priv
  iommu/ipmmu-vmsa: Simplify group allocation
  iommu/ipmmu-vmsa: Unify domain alloc/free
  iommu/ipmmu-vmsa: Fix return value check in ipmmu_find_group_dma()
  iommu/vt-d: Clear pasid table entry when memory unbound
  iommu/vt-d: Clear Page Request Overflow fault bit
  iommu/vt-d: Missing checks for pasid tables if allocation fails
  iommu/amd: Limit the IOVA page range to the specified addresses
  ...
parents 670ffccb 56f19441
@@ -155,8 +155,7 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
 	order = __ffs(tegra->domain->pgsize_bitmap);
 	init_iova_domain(&tegra->carveout.domain, 1UL << order,
-			 carveout_start >> order,
-			 carveout_end >> order);
+			 carveout_start >> order);
 	tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
 	tegra->carveout.limit = carveout_end >> tegra->carveout.shift;
@@ -198,8 +198,7 @@ static int host1x_probe(struct platform_device *pdev)
 	order = __ffs(host->domain->pgsize_bitmap);
 	init_iova_domain(&host->iova, 1UL << order,
-			 geometry->aperture_start >> order,
-			 geometry->aperture_end >> order);
+			 geometry->aperture_start >> order);
 	host->iova_end = geometry->aperture_end;
 }
@@ -63,7 +63,6 @@
 /* IO virtual address start page frame number */
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))

 /* Reserved IOVA ranges */
 #define MSI_RANGE_START		(0xfee00000)
@@ -1547,10 +1546,11 @@ static unsigned long dma_ops_alloc_iova(struct device *dev,
 	if (dma_mask > DMA_BIT_MASK(32))
 		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
-				      IOVA_PFN(DMA_BIT_MASK(32)));
+				      IOVA_PFN(DMA_BIT_MASK(32)), false);

 	if (!pfn)
-		pfn = alloc_iova_fast(&dma_dom->iovad, pages, IOVA_PFN(dma_mask));
+		pfn = alloc_iova_fast(&dma_dom->iovad, pages,
+				      IOVA_PFN(dma_mask), true);

 	return (pfn << PAGE_SHIFT);
 }
@@ -1788,8 +1788,7 @@ static struct dma_ops_domain *dma_ops_domain_alloc(void)
 	if (!dma_dom->domain.pt_root)
 		goto free_dma_dom;

-	init_iova_domain(&dma_dom->iovad, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&dma_dom->iovad, PAGE_SIZE, IOVA_START_PFN);

 	if (init_iova_flush_queue(&dma_dom->iovad, iova_domain_flush_tlb, NULL))
 		goto free_dma_dom;
@@ -2383,11 +2382,9 @@ static void __unmap_single(struct dma_ops_domain *dma_dom,
 			   size_t size,
 			   int dir)
 {
-	dma_addr_t flush_addr;
 	dma_addr_t i, start;
 	unsigned int pages;

-	flush_addr = dma_addr;
 	pages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr &= PAGE_MASK;
 	start = dma_addr;
@@ -2696,8 +2693,7 @@ static int init_reserved_iova_ranges(void)
 	struct pci_dev *pdev = NULL;
 	struct iova *val;

-	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE,
-			 IOVA_START_PFN, DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_ranges, PAGE_SIZE, IOVA_START_PFN);

 	lockdep_set_class(&reserved_iova_ranges.iova_rbtree_lock,
 			  &reserved_rbtree_key);
@@ -3155,7 +3151,7 @@ static void amd_iommu_apply_resv_region(struct device *dev,
 	unsigned long start, end;

 	start = IOVA_PFN(region->start);
-	end = IOVA_PFN(region->start + region->length);
+	end = IOVA_PFN(region->start + region->length - 1);

 	WARN_ON_ONCE(reserve_iova(&dma_dom->iovad, start, end) == NULL);
 }
@@ -3663,11 +3659,11 @@ static struct irq_remap_table *get_irq_table(u16 devid, bool ioapic)
 	return table;
 }

-static int alloc_irq_index(u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count, bool align)
 {
 	struct irq_remap_table *table;
+	int index, c, alignment = 1;
 	unsigned long flags;
-	int index, c;
 	struct amd_iommu *iommu = amd_iommu_rlookup_table[devid];

 	if (!iommu)
@@ -3677,16 +3673,21 @@ static int alloc_irq_index(u16 devid, int count)
 	if (!table)
 		return -ENODEV;

+	if (align)
+		alignment = roundup_pow_of_two(count);
+
 	spin_lock_irqsave(&table->lock, flags);

 	/* Scan table for free entries */
-	for (c = 0, index = table->min_index;
-	     index < MAX_IRQS_PER_TABLE;
-	     ++index) {
-		if (!iommu->irte_ops->is_allocated(table, index))
+	for (index = ALIGN(table->min_index, alignment), c = 0;
+	     index < MAX_IRQS_PER_TABLE;) {
+		if (!iommu->irte_ops->is_allocated(table, index)) {
 			c += 1;
-		else
+		} else {
 			c = 0;
+			index = ALIGN(index + 1, alignment);
+			continue;
+		}

 		if (c == count) {
 			for (; c != 0; --c)
@@ -3695,6 +3696,8 @@ static int alloc_irq_index(u16 devid, int count)
 			index -= count - 1;
 			goto out;
 		}
+
+		index++;
 	}

 	index = -ENOSPC;
@@ -4099,7 +4102,9 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
 		else
 			ret = -ENOMEM;
 	} else {
-		index = alloc_irq_index(devid, nr_irqs);
+		bool align = (info->type == X86_IRQ_ALLOC_TYPE_MSI);
+
+		index = alloc_irq_index(devid, nr_irqs, align);
 	}
 	if (index < 0) {
 		pr_warn("Failed to allocate IRTE\n");
@@ -59,6 +59,7 @@
 #define ARM_MMU500_ACTLR_CPRE		(1 << 1)

 #define ARM_MMU500_ACR_CACHE_LOCK	(1 << 26)
+#define ARM_MMU500_ACR_S2CRB_TLBEN	(1 << 10)
 #define ARM_MMU500_ACR_SMTNMB_TLBEN	(1 << 8)

 #define TLB_LOOP_TIMEOUT		1000000	/* 1s! */
@@ -119,14 +120,6 @@ enum arm_smmu_implementation {
 	CAVIUM_SMMUV2,
 };

-/* Until ACPICA headers cover IORT rev. C */
-#ifndef ACPI_IORT_SMMU_CORELINK_MMU401
-#define ACPI_IORT_SMMU_CORELINK_MMU401	0x4
-#endif
-#ifndef ACPI_IORT_SMMU_CAVIUM_THUNDERX
-#define ACPI_IORT_SMMU_CAVIUM_THUNDERX	0x5
-#endif
-
 struct arm_smmu_s2cr {
 	struct iommu_group		*group;
 	int				count;
@@ -250,6 +243,7 @@ enum arm_smmu_domain_stage {
 struct arm_smmu_domain {
 	struct arm_smmu_device		*smmu;
 	struct io_pgtable_ops		*pgtbl_ops;
+	const struct iommu_gather_ops	*tlb_ops;
 	struct arm_smmu_cfg		cfg;
 	enum arm_smmu_domain_stage	stage;
 	struct mutex			init_mutex; /* Protects smmu pointer */
@@ -735,7 +729,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	enum io_pgtable_fmt fmt;
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	const struct iommu_gather_ops *tlb_ops;

 	mutex_lock(&smmu_domain->init_mutex);
 	if (smmu_domain->smmu)
@@ -813,7 +806,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			ias = min(ias, 32UL);
 			oas = min(oas, 32UL);
 		}
-		tlb_ops = &arm_smmu_s1_tlb_ops;
+		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
 		break;
 	case ARM_SMMU_DOMAIN_NESTED:
 		/*
@@ -833,9 +826,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 			oas = min(oas, 40UL);
 		}
 		if (smmu->version == ARM_SMMU_V2)
-			tlb_ops = &arm_smmu_s2_tlb_ops_v2;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
 		else
-			tlb_ops = &arm_smmu_s2_tlb_ops_v1;
+			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
 		break;
 	default:
 		ret = -EINVAL;
@@ -863,7 +856,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 		.pgsize_bitmap	= smmu->pgsize_bitmap,
 		.ias		= ias,
 		.oas		= oas,
-		.tlb		= tlb_ops,
+		.tlb		= smmu_domain->tlb_ops,
 		.iommu_dev	= smmu->dev,
 	};
@@ -1259,6 +1252,14 @@ static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova,
 	return ops->unmap(ops, iova, size);
 }

+static void arm_smmu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	if (smmu_domain->tlb_ops)
+		smmu_domain->tlb_ops->tlb_sync(smmu_domain);
+}
+
 static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
 					      dma_addr_t iova)
 {
@@ -1562,6 +1563,8 @@ static struct iommu_ops arm_smmu_ops = {
 	.map			= arm_smmu_map,
 	.unmap			= arm_smmu_unmap,
 	.map_sg			= default_iommu_map_sg,
+	.flush_iotlb_all	= arm_smmu_iotlb_sync,
+	.iotlb_sync		= arm_smmu_iotlb_sync,
 	.iova_to_phys		= arm_smmu_iova_to_phys,
 	.add_device		= arm_smmu_add_device,
 	.remove_device		= arm_smmu_remove_device,
@@ -1606,7 +1609,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
 		 * Allow unmatched Stream IDs to allocate bypass
 		 * TLB entries for reduced latency.
 		 */
-		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN;
+		reg |= ARM_MMU500_ACR_SMTNMB_TLBEN | ARM_MMU500_ACR_S2CRB_TLBEN;
 		writel_relaxed(reg, gr0_base + ARM_SMMU_GR0_sACR);
 	}
@@ -292,18 +292,7 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 		/* ...then finally give it a kicking to make sure it fits */
 		base_pfn = max_t(unsigned long, base_pfn,
				 domain->geometry.aperture_start >> order);
-		end_pfn = min_t(unsigned long, end_pfn,
-				domain->geometry.aperture_end >> order);
 	}

-	/*
-	 * PCI devices may have larger DMA masks, but still prefer allocating
-	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
-	 * apply to the typical platform device, so for those we may as well
-	 * leave the cache limit at the top of their range to save an rb_last()
-	 * traversal on every allocation.
-	 */
-	if (dev && dev_is_pci(dev))
-		end_pfn &= DMA_BIT_MASK(32) >> order;
-
 	/* start_pfn is always nonzero for an already-initialised domain */
 	if (iovad->start_pfn) {
@@ -312,16 +301,11 @@ int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
 			pr_warn("Incompatible range for DMA domain\n");
 			return -EFAULT;
 		}
-
-		/*
-		 * If we have devices with different DMA masks, move the free
-		 * area cache limit down for the benefit of the smaller one.
-		 */
-		iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);

 		return 0;
 	}

-	init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
+	init_iova_domain(iovad, 1UL << order, base_pfn);
 	if (!dev)
 		return 0;
@@ -386,10 +370,12 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
 	/* Try to get PCI devices a SAC address */
 	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
-		iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);
+		iova = alloc_iova_fast(iovad, iova_len,
+				       DMA_BIT_MASK(32) >> shift, false);

 	if (!iova)
-		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);
+		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift,
+				       true);

 	return (dma_addr_t)iova << shift;
 }
@@ -801,13 +801,16 @@ int __init dmar_dev_scope_init(void)
 				dmar_free_pci_notify_info(info);
 			}
 		}
-		bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
 	}

 	return dmar_dev_scope_status;
 }

+void dmar_register_bus_notifier(void)
+{
+	bus_register_notifier(&pci_bus_type, &dmar_pci_bus_nb);
+}
+
 int __init dmar_table_init(void)
 {
@@ -1676,7 +1679,8 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
 		raw_spin_lock_irqsave(&iommu->register_lock, flag);
 	}

-	writel(DMA_FSTS_PFO | DMA_FSTS_PPF, iommu->reg + DMAR_FSTS_REG);
+	writel(DMA_FSTS_PFO | DMA_FSTS_PPF | DMA_FSTS_PRO,
+	       iommu->reg + DMAR_FSTS_REG);

 unlock_exit:
 	raw_spin_unlock_irqrestore(&iommu->register_lock, flag);
@@ -263,6 +263,7 @@ struct exynos_iommu_domain {
 struct sysmmu_drvdata {
 	struct device *sysmmu;		/* SYSMMU controller device */
 	struct device *master;		/* master device (owner) */
+	struct device_link *link;	/* runtime PM link to master */
 	void __iomem *sfrbase;		/* our registers */
 	struct clk *clk;		/* SYSMMU's clock */
 	struct clk *aclk;		/* SYSMMU's aclk clock */
@@ -1250,6 +1251,8 @@ static struct iommu_group *get_device_iommu_group(struct device *dev)
 static int exynos_iommu_add_device(struct device *dev)
 {
+	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+	struct sysmmu_drvdata *data;
 	struct iommu_group *group;

 	if (!has_sysmmu(dev))
@@ -1260,6 +1263,15 @@ static int exynos_iommu_add_device(struct device *dev)
 	if (IS_ERR(group))
 		return PTR_ERR(group);

+	list_for_each_entry(data, &owner->controllers, owner_node) {
+		/*
+		 * SYSMMU will be runtime activated via device link
+		 * (dependency) to its master device, so there are no
+		 * direct calls to pm_runtime_get/put in this driver.
+		 */
+		data->link = device_link_add(dev, data->sysmmu,
+					     DL_FLAG_PM_RUNTIME);
+	}
 	iommu_group_put(group);

 	return 0;
@@ -1268,6 +1280,7 @@ static int exynos_iommu_add_device(struct device *dev)
 static void exynos_iommu_remove_device(struct device *dev)
 {
 	struct exynos_iommu_owner *owner = dev->archdata.iommu;
+	struct sysmmu_drvdata *data;

 	if (!has_sysmmu(dev))
 		return;
@@ -1283,6 +1296,9 @@ static void exynos_iommu_remove_device(struct device *dev)
 		}
 	}
 	iommu_group_remove_device(dev);
+
+	list_for_each_entry(data, &owner->controllers, owner_node)
+		device_link_del(data->link);
 }

 static int exynos_iommu_of_xlate(struct device *dev,
@@ -1316,13 +1332,6 @@ static int exynos_iommu_of_xlate(struct device *dev,
 	list_add_tail(&data->owner_node, &owner->controllers);
 	data->master = dev;

-	/*
-	 * SYSMMU will be runtime activated via device link (dependency) to its
-	 * master device, so there are no direct calls to pm_runtime_get/put
-	 * in this driver.
-	 */
-	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);
-
 	return 0;
 }
@@ -82,8 +82,6 @@
 #define IOVA_START_PFN		(1)
 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
-#define DMA_32BIT_PFN		IOVA_PFN(DMA_BIT_MASK(32))
-#define DMA_64BIT_PFN		IOVA_PFN(DMA_BIT_MASK(64))

 /* page table handling */
 #define LEVEL_STRIDE		(9)
@@ -1878,8 +1876,7 @@ static int dmar_init_reserved_ranges(void)
 	struct iova *iova;
 	int i;

-	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN,
-			 DMA_32BIT_PFN);
+	init_iova_domain(&reserved_iova_list, VTD_PAGE_SIZE, IOVA_START_PFN);

 	lockdep_set_class(&reserved_iova_list.iova_rbtree_lock,
			  &reserved_rbtree_key);
@@ -1938,8 +1935,7 @@ static int domain_init(struct dmar_domain *domain, struct intel_iommu *iommu,
 	unsigned long sagaw;
 	int err;

-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			 DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);

 	err = init_iova_flush_queue(&domain->iovad,
				    iommu_flush_iova, iova_entry_free);
@@ -2058,7 +2054,7 @@ static int domain_context_mapping_one(struct dmar_domain *domain,
 	if (context_copied(context)) {
 		u16 did_old = context_domain_id(context);

-		if (did_old >= 0 && did_old < cap_ndoms(iommu->cap)) {
+		if (did_old < cap_ndoms(iommu->cap)) {
 			iommu->flush.flush_context(iommu, did_old,
						   (((u16)bus) << 8) | devfn,
						   DMA_CCMD_MASK_NOBIT,
@@ -3473,11 +3469,12 @@ static unsigned long intel_alloc_iova(struct device *dev,
		 * from higher range
		 */
		iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
-					   IOVA_PFN(DMA_BIT_MASK(32)));
+					   IOVA_PFN(DMA_BIT_MASK(32)), false);
		if (iova_pfn)
			return iova_pfn;
	}
-	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages, IOVA_PFN(dma_mask));
+	iova_pfn = alloc_iova_fast(&domain->iovad, nrpages,
+				   IOVA_PFN(dma_mask), true);
	if (unlikely(!iova_pfn)) {
		pr_err("Allocating %ld-page iova for %s failed",
		       nrpages, dev_name(dev));
@@ -4752,6 +4749,16 @@ int __init intel_iommu_init(void)
		goto out_free_dmar;
	}

+	up_write(&dmar_global_lock);
+
+	/*
+	 * The bus notifier takes the dmar_global_lock, so lockdep will
+	 * complain later when we register it under the lock.
+	 */
+	dmar_register_bus_notifier();
+
+	down_write(&dmar_global_lock);
+
	if (no_iommu || dmar_disabled) {
		/*
		 * We exit the function here to ensure IOMMU's remapping and
@@ -4897,8 +4904,7 @@ static int md_domain_init(struct dmar_domain *domain, int guest_width)
 {
	int adjust_width;

-	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN,
-			 DMA_32BIT_PFN);
+	init_iova_domain(&domain->iovad, VTD_PAGE_SIZE, IOVA_START_PFN);
	domain_reserve_special_ranges(domain);

	/* calculate AGAW */
@@ -292,7 +292,7 @@ int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_
 	int pasid_max;
 	int ret;

-	if (WARN_ON(!iommu))
+	if (WARN_ON(!iommu || !iommu->pasid_table))
		return -EINVAL;

	if (dev_is_pci(dev)) {
@@ -458,6 +458,8 @@ int intel_svm_unbind_mm(struct device *dev, int pasid)
			kfree_rcu(sdev, rcu);

			if (list_empty(&svm->devs)) {
+				svm->iommu->pasid_table[svm->pasid].val = 0;
+				wmb();

				idr_remove(&svm->iommu->pasid_idr, svm->pasid);
				if (svm->mm)
@@ -660,16 +660,11 @@ static int arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			 size_t size)
 {
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
-	size_t unmapped;

	if (WARN_ON(upper_32_bits(iova)))
		return 0;

-	unmapped = __arm_v7s_unmap(data, iova, size, 1, data->pgd);
-	if (unmapped)
-		io_pgtable_tlb_sync(&data->iop);
-
-	return unmapped;
+	return __arm_v7s_unmap(data, iova, size, 1, data->pgd);
 }

 static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
@@ -609,7 +609,6 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
 {
-	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
@@ -617,11 +616,7 @@ static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

-	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
-	if (unmapped)
-		io_pgtable_tlb_sync(&data->iop);
-
-	return unmapped;
+	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
 }

 static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
@@ -392,6 +392,11 @@ static size_t mtk_iommu_unmap(struct iommu_domain *domain,
	return unmapsz;
 }

+static void mtk_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+	mtk_iommu_tlb_sync(mtk_iommu_get_m4u_data());
+}
+
 static phys_addr_t mtk_iommu_iova_to_phys(struct iommu_domain *domain,
					  dma_addr_t iova)
 {
@@ -491,6 +496,8 @@ static struct iommu_ops mtk_iommu_ops = {
	.map		= mtk_iommu_map,
	.unmap		= mtk_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
+	.flush_iotlb_all = mtk_iommu_iotlb_sync,
+	.iotlb_sync	= mtk_iommu_iotlb_sync,
	.iova_to_phys	= mtk_iommu_iova_to_phys,
	.add_device	= mtk_iommu_add_device,
	.remove_device	= mtk_iommu_remove_device,
@@ -708,7 +708,7 @@ static struct platform_driver mtk_iommu_driver = {
	.probe	= mtk_iommu_probe,
	.remove	= mtk_iommu_remove,
	.driver	= {
-		.name = "mtk-iommu",
+		.name = "mtk-iommu-v1",
		.of_match_table = mtk_iommu_of_ids,
		.pm = &mtk_iommu_pm_ops,
	}
@@ -28,18 +28,27 @@ struct iotlb_entry {
	u32 endian, elsz, mixed;
 };

+/**
+ * struct omap_iommu_device - omap iommu device data
+ * @pgtable:	page table used by an omap iommu attached to a domain
+ * @iommu_dev:	pointer to store an omap iommu instance attached to a domain
+ */
+struct omap_iommu_device {
+	u32 *pgtable;
+	struct omap_iommu *iommu_dev;
+};
+
 /**
  * struct omap_iommu_domain - omap iommu domain
- * @pgtable:	the page table
- * @iommu_dev:	an omap iommu device attached to this domain. only a single
- *		iommu device can be attached for now.
+ * @num_iommus: number of iommus in this domain
+ * @iommus:	omap iommu device data for all iommus in this domain
  * @dev:	Device using this domain.
  * @lock:	domain lock, should be taken when attaching/detaching
  * @domain:	generic domain handle used by iommu core code
  */
 struct omap_iommu_domain {
-	u32 *pgtable;
-	struct omap_iommu *iommu_dev;
+	u32 num_iommus;
+	struct omap_iommu_device *iommus;
	struct device *dev;
	spinlock_t lock;
	struct iommu_domain domain;
@@ -97,17 +106,6 @@ struct iotlb_lock {
	short vict;
 };

-/**
- * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
- * @dev: iommu client device
- */
-static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
-{
-	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
-
-	return arch_data->iommu_dev;
-}
-
 /*
  * MMU Register offsets
  */
@@ -66,6 +66,7 @@ struct qcom_iommu_ctx {
	void __iomem		*base;
	bool			 secure_init;
	u8			 asid;      /* asid and ctx bank # are 1:1 */
+	struct iommu_domain	*domain;
 };

 struct qcom_iommu_domain {
@@ -194,12 +195,15 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
	fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
	iova = iommu_readq(ctx, ARM_SMMU_CB_FAR);

-	dev_err_ratelimited(ctx->dev,
-			    "Unhandled context fault: fsr=0x%x, "
-			    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
-			    fsr, iova, fsynr, ctx->asid);
+	if (!report_iommu_fault(ctx->domain, ctx->dev, iova, 0)) {
+		dev_err_ratelimited(ctx->dev,
+				    "Unhandled context fault: fsr=0x%x, "
+				    "iova=0x%016llx, fsynr=0x%x, cb=%d\n",
+				    fsr, iova, fsynr, ctx->asid);
+	}

	iommu_writel(ctx, ARM_SMMU_CB_FSR, fsr);
+	iommu_writel(ctx, ARM_SMMU_CB_RESUME, RESUME_TERMINATE);

	return IRQ_HANDLED;
 }
@@ -274,12 +278,14 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
		/* SCTLR */
		reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_AFE | SCTLR_TRE |
-			SCTLR_M | SCTLR_S1_ASIDPNE;
+			SCTLR_M | SCTLR_S1_ASIDPNE | SCTLR_CFCFG;

		if (IS_ENABLED(CONFIG_BIG_ENDIAN))
			reg |= SCTLR_E;

		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, reg);
+
+		ctx->domain = domain;
	}

	mutex_unlock(&qcom_domain->init_mutex);
@@ -395,6 +401,8 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
		/* Disable the context bank: */
		iommu_writel(ctx, ARM_SMMU_CB_SCTLR, 0);
+
+		ctx->domain = NULL;
	}
	pm_runtime_put_sync(qcom_iommu->dev);
@@ -443,6 +451,19 @@ static size_t qcom_iommu_unmap(struct iommu_domain *domain, unsigned long iova,
	return ret;
 }

+static void qcom_iommu_iotlb_sync(struct iommu_domain *domain)
+{
+	struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
+	struct io_pgtable *pgtable = container_of(qcom_domain->pgtbl_ops,
+						  struct io_pgtable, ops);
+	if (!qcom_domain->pgtbl_ops)
+		return;
+
+	pm_runtime_get_sync(qcom_domain->iommu->dev);
+	qcom_iommu_tlb_sync(pgtable->cookie);
+	pm_runtime_put_sync(qcom_domain->iommu->dev);
+}
+
 static phys_addr_t qcom_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t iova)
 {
@@ -570,6 +591,8 @@ static const struct iommu_ops qcom_iommu_ops = {
	.map		= qcom_iommu_map,
	.unmap		= qcom_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
+	.flush_iotlb_all = qcom_iommu_iotlb_sync,
+	.iotlb_sync	= qcom_iommu_iotlb_sync,
	.iova_to_phys	= qcom_iommu_iova_to_phys,
	.add_device	= qcom_iommu_add_device,
	.remove_device	= qcom_iommu_remove_device,
...@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep) ...@@ -39,8 +39,7 @@ void scif_rma_ep_init(struct scif_endpt *ep)
struct scif_endpt_rma_info *rma = &ep->rma_info; struct scif_endpt_rma_info *rma = &ep->rma_info;
mutex_init(&rma->rma_lock); mutex_init(&rma->rma_lock);
init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN, init_iova_domain(&rma->iovad, PAGE_SIZE, SCIF_IOVA_START_PFN);
SCIF_DMA_64BIT_PFN);
spin_lock_init(&rma->tc_lock); spin_lock_init(&rma->tc_lock);
mutex_init(&rma->mmn_lock); mutex_init(&rma->mmn_lock);
INIT_LIST_HEAD(&rma->reg_list); INIT_LIST_HEAD(&rma->reg_list);
......
@@ -112,6 +112,7 @@ static inline bool dmar_rcu_check(void)
 extern int dmar_table_init(void);
 extern int dmar_dev_scope_init(void);
+extern void dmar_register_bus_notifier(void);
 extern int dmar_parse_dev_scope(void *start, void *end, int *cnt,
				struct dmar_dev_scope **devices, u16 segment);
 extern void *dmar_alloc_dev_scope(void *start, void *end, int *cnt);
@@ -212,6 +212,7 @@
 #define DMA_FSTS_IQE (1 << 4)
 #define DMA_FSTS_ICE (1 << 5)
 #define DMA_FSTS_ITE (1 << 6)
+#define DMA_FSTS_PRO (1 << 7)
 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)

 /* FRCD_REG, 32 bits access */
@@ -70,10 +70,12 @@ struct iova_fq {
 struct iova_domain {
	spinlock_t	iova_rbtree_lock; /* Lock to protect update of rbtree */
	struct rb_root	rbroot;		/* iova domain rbtree root */
-	struct rb_node	*cached32_node; /* Save last alloced node */
+	struct rb_node	*cached_node;	/* Save last alloced node */
+	struct rb_node	*cached32_node; /* Save last 32-bit alloced node */
	unsigned long	granule;	/* pfn granularity for this domain */
	unsigned long	start_pfn;	/* Lower limit for this domain */
	unsigned long	dma_32bit_pfn;
+	struct iova	anchor;		/* rbtree lookup anchor */
	struct iova_rcache rcaches[IOVA_RANGE_CACHE_MAX_SIZE];	/* IOVA range caches */

	iova_flush_cb	flush_cb;	/* Call-Back function to flush IOMMU
@@ -148,12 +150,12 @@ void queue_iova(struct iova_domain *iovad,
		unsigned long pfn, unsigned long pages,
		unsigned long data);
 unsigned long alloc_iova_fast(struct iova_domain *iovad, unsigned long size,
-			      unsigned long limit_pfn);
+			      unsigned long limit_pfn, bool flush_rcache);
 struct iova *reserve_iova(struct iova_domain *iovad, unsigned long pfn_lo,
			  unsigned long pfn_hi);
 void copy_reserved_iova(struct iova_domain *from, struct iova_domain *to);
 void init_iova_domain(struct iova_domain *iovad, unsigned long granule,
-		      unsigned long start_pfn, unsigned long pfn_32bit);
+		      unsigned long start_pfn);
 int init_iova_flush_queue(struct iova_domain *iovad,
			  iova_flush_cb flush_cb, iova_entry_dtor entry_dtor);
 struct iova *find_iova(struct iova_domain *iovad, unsigned long pfn);
@@ -210,7 +212,8 @@ static inline void queue_iova(struct iova_domain *iovad,
 static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
					    unsigned long size,
-					    unsigned long limit_pfn)
+					    unsigned long limit_pfn,
+					    bool flush_rcache)
 {
	return 0;
 }
@@ -229,8 +232,7 @@ static inline void copy_reserved_iova(struct iova_domain *from,
 static inline void init_iova_domain(struct iova_domain *iovad,
				    unsigned long granule,
-				    unsigned long start_pfn,
-				    unsigned long pfn_32bit)
+				    unsigned long start_pfn)
 {
 }