Commit 9a9952bb authored by Linus Torvalds

Merge tag 'iommu-updates-v4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull iommu updates from Joerg Roedel:
 "This time the IOMMU updates are mostly cleanups or fixes.  No big new
  features or drivers this time.  In particular the changes include:

   - Bigger cleanup of the Domain<->IOMMU data structures and the code
     that manages them in the Intel VT-d driver.  This makes the code
     easier to understand and maintain, and also easier to keep the data
     structures in sync.  It is also a preparation step to make use of
     default domains from the IOMMU core in the Intel VT-d driver.

   - Fixes for a couple of DMA-API misuses in ARM IOMMU drivers, namely
     in the ARM and Tegra SMMU drivers.

   - Fix for a potential buffer overflow in the OMAP iommu driver's
     debug code.

   - A couple of smaller fixes and cleanups in various drivers

   - One small new feature: Report domain-id usage in the Intel VT-d
     driver to make it easier to detect bugs where these are leaked"

* tag 'iommu-updates-v4.3' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (83 commits)
  iommu/vt-d: Really use upper context table when necessary
  x86/vt-d: Fix documentation of DRHD
  iommu/fsl: Really fix init section(s) content
  iommu/io-pgtable-arm: Unmap and free table when overwriting with block
  iommu/io-pgtable-arm: Move init-fn declarations to io-pgtable.h
  iommu/msm: Use BUG_ON instead of if () BUG()
  iommu/vt-d: Access iomem correctly
  iommu/vt-d: Make two functions static
  iommu/vt-d: Use BUG_ON instead of if () BUG()
  iommu/vt-d: Return false instead of 0 in irq_remapping_cap()
  iommu/amd: Use BUG_ON instead of if () BUG()
  iommu/amd: Make a symbol static
  iommu/amd: Simplify allocation in irq_remapping_alloc()
  iommu/tegra-smmu: Parameterize number of TLB lines
  iommu/tegra-smmu: Factor out tegra_smmu_set_pde()
  iommu/tegra-smmu: Extract tegra_smmu_pte_get_use()
  iommu/tegra-smmu: Use __GFP_ZERO to allocate zeroed pages
  iommu/tegra-smmu: Remove PageReserved manipulation
  iommu/tegra-smmu: Convert to use DMA API
  iommu/tegra-smmu: smmu_flush_ptc() wants device addresses
  ...
parents e81b594c 4ad79562
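
Several commits in this series (iommu/msm, iommu/vt-d, iommu/amd) apply the same mechanical cleanup, replacing an open-coded `if () BUG()` with `BUG_ON()`. A minimal before/after sketch of the pattern, using the `glx` check from the amd_iommu.c hunk further below:

    /* Before: the condition and the crash are spelled out separately. */
    if (domain->glx != 0)
        BUG();

    /* After: BUG_ON() states the invariant in one line. */
    BUG_ON(domain->glx != 0);
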
@@ -43,6 +43,12 @@ conditions.
 ** System MMU optional properties:
+- dma-coherent  : Present if page table walks made by the SMMU are
+                  cache coherent with the CPU.
+
+                  NOTE: this only applies to the SMMU itself, not
+                  masters connected upstream of the SMMU.
+
 - calxeda,smmu-secure-config-access : Enable proper handling of buggy
                   implementations that always use secure access to
                   SMMU configuration registers. In this case non-secure
...
@@ -8,6 +8,11 @@ Required properties:
 - ti,hwmods    : Name of the hwmod associated with the IOMMU instance
 - reg          : Address space for the configuration registers
 - interrupts   : Interrupt specifier for the IOMMU instance
+- #iommu-cells : Should be 0. OMAP IOMMUs are all "single-master" devices,
+                 and needs no additional data in the pargs specifier. Please
+                 also refer to the generic bindings document for more info
+                 on this property,
+                 Documentation/devicetree/bindings/iommu/iommu.txt
 
 Optional properties:
 - ti,#tlb-entries : Number of entries in the translation look-aside buffer.
@@ -18,6 +23,7 @@ Optional properties:
 Example:
     /* OMAP3 ISP MMU */
     mmu_isp: mmu@480bd400 {
+        #iommu-cells = <0>;
         compatible = "ti,omap2-iommu";
         reg = <0x480bd400 0x80>;
         interrupts = <24>;
...
@@ -23,7 +23,8 @@ config IOMMU_IO_PGTABLE
 config IOMMU_IO_PGTABLE_LPAE
     bool "ARMv7/v8 Long Descriptor Format"
     select IOMMU_IO_PGTABLE
-    depends on ARM || ARM64 || COMPILE_TEST
+    # SWIOTLB guarantees a dma_to_phys() implementation
+    depends on ARM || ARM64 || (COMPILE_TEST && SWIOTLB)
     help
       Enable support for the ARM long descriptor pagetable format.
       This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page
...
@@ -1835,8 +1835,8 @@ static void free_gcr3_table(struct protection_domain *domain)
         free_gcr3_tbl_level2(domain->gcr3_tbl);
     else if (domain->glx == 1)
         free_gcr3_tbl_level1(domain->gcr3_tbl);
-    else if (domain->glx != 0)
-        BUG();
+    else
+        BUG_ON(domain->glx != 0);
 
     free_page((unsigned long)domain->gcr3_tbl);
 }
@@ -3947,11 +3947,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
     if (ret < 0)
         return ret;
 
-    ret = -ENOMEM;
-    data = kzalloc(sizeof(*data), GFP_KERNEL);
-    if (!data)
-        goto out_free_parent;
-
     if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
         if (get_irq_table(devid, true))
             index = info->ioapic_pin;
@@ -3962,7 +3957,6 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
     }
     if (index < 0) {
         pr_warn("Failed to allocate IRTE\n");
-        kfree(data);
         goto out_free_parent;
     }
@@ -3974,17 +3968,18 @@ static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
             goto out_free_data;
         }
 
-        if (i > 0) {
-            data = kzalloc(sizeof(*data), GFP_KERNEL);
-            if (!data)
-                goto out_free_data;
-        }
+        ret = -ENOMEM;
+        data = kzalloc(sizeof(*data), GFP_KERNEL);
+        if (!data)
+            goto out_free_data;
 
         irq_data->hwirq = (devid << 16) + i;
         irq_data->chip_data = data;
         irq_data->chip = &amd_ir_chip;
         irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
         irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
     }
 
     return 0;
 
 out_free_data:
...
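
The irq_remapping_alloc() hunks above drop the special-cased allocation before the loop and instead allocate one chip_data per interrupt inside it, so the error path no longer needs a separate kfree(). A reduced sketch of the resulting control flow (the surrounding setup and the out_free_data unwinding are elided; the names follow the diff):

    for (i = 0; i < nr_irqs; i++) {
        struct amd_ir_data *data;   /* per-IRQ chip data, as in the hunk */

        ret = -ENOMEM;
        data = kzalloc(sizeof(*data), GFP_KERNEL);
        if (!data)
            goto out_free_data;     /* frees data from earlier iterations */

        irq_data->hwirq = (devid << 16) + i;
        irq_data->chip_data = data;
    }
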
@@ -154,7 +154,7 @@ bool amd_iommu_iotlb_sup __read_mostly = true;
 u32 amd_iommu_max_pasid __read_mostly = ~0;
 
 bool amd_iommu_v2_present __read_mostly;
-bool amd_iommu_pc_present __read_mostly;
+static bool amd_iommu_pc_present __read_mostly;
 
 bool amd_iommu_force_isolation __read_mostly;
...
@@ -356,8 +356,8 @@ static void free_pasid_states(struct device_state *dev_state)
         free_pasid_states_level2(dev_state->states);
     else if (dev_state->pasid_levels == 1)
         free_pasid_states_level1(dev_state->states);
-    else if (dev_state->pasid_levels != 0)
-        BUG();
+    else
+        BUG_ON(dev_state->pasid_levels != 0);
 
     free_page((unsigned long)dev_state->states);
 }
...
@@ -118,6 +118,7 @@
 #define ARM_SMMU_IRQ_CTRL           0x50
 #define IRQ_CTRL_EVTQ_IRQEN         (1 << 2)
+#define IRQ_CTRL_PRIQ_IRQEN         (1 << 1)
 #define IRQ_CTRL_GERROR_IRQEN       (1 << 0)
 
 #define ARM_SMMU_IRQ_CTRLACK        0x54
@@ -173,14 +174,14 @@
 #define ARM_SMMU_PRIQ_IRQ_CFG2      0xdc
 
 /* Common MSI config fields */
-#define MSI_CFG0_SH_SHIFT           60
-#define MSI_CFG0_SH_NSH             (0UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_OSH             (2UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_SH_ISH             (3UL << MSI_CFG0_SH_SHIFT)
-#define MSI_CFG0_MEMATTR_SHIFT      56
-#define MSI_CFG0_MEMATTR_DEVICE_nGnRE   (0x1 << MSI_CFG0_MEMATTR_SHIFT)
 #define MSI_CFG0_ADDR_SHIFT         2
 #define MSI_CFG0_ADDR_MASK          0x3fffffffffffUL
+#define MSI_CFG2_SH_SHIFT           4
+#define MSI_CFG2_SH_NSH             (0UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_OSH             (2UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_SH_ISH             (3UL << MSI_CFG2_SH_SHIFT)
+#define MSI_CFG2_MEMATTR_SHIFT      0
+#define MSI_CFG2_MEMATTR_DEVICE_nGnRE   (0x1 << MSI_CFG2_MEMATTR_SHIFT)
 
 #define Q_IDX(q, p)                 ((p) & ((1 << (q)->max_n_shift) - 1))
 #define Q_WRP(q, p)                 ((p) & (1 << (q)->max_n_shift))
@@ -1330,33 +1331,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
     arm_smmu_cmdq_issue_cmd(smmu, &cmd);
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-    struct arm_smmu_domain *smmu_domain = cookie;
-    struct arm_smmu_device *smmu = smmu_domain->smmu;
-    unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-    if (smmu->features & ARM_SMMU_FEAT_COHERENCY) {
-        dsb(ishst);
-    } else {
-        dma_addr_t dma_addr;
-        struct device *dev = smmu->dev;
-
-        dma_addr = dma_map_page(dev, virt_to_page(addr), offset, size,
-                                DMA_TO_DEVICE);
-
-        if (dma_mapping_error(dev, dma_addr))
-            dev_err(dev, "failed to flush pgtable at %p\n", addr);
-        else
-            dma_unmap_page(dev, dma_addr, size, DMA_TO_DEVICE);
-    }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
     .tlb_flush_all  = arm_smmu_tlb_inv_context,
     .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
     .tlb_sync       = arm_smmu_tlb_sync,
-    .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 /* IOMMU API */
@@ -1531,6 +1509,7 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
         .ias        = ias,
         .oas        = oas,
         .tlb        = &arm_smmu_gather_ops,
+        .iommu_dev  = smmu->dev,
     };
 
     pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
@@ -2053,9 +2032,17 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
     int ret;
     struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 
-    /* Calculate the L1 size, capped to the SIDSIZE */
-    size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
-    size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+    /*
+     * If we can resolve everything with a single L2 table, then we
+     * just need a single L1 descriptor. Otherwise, calculate the L1
+     * size, capped to the SIDSIZE.
+     */
+    if (smmu->sid_bits < STRTAB_SPLIT) {
+        size = 0;
+    } else {
+        size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
+        size = min(size, smmu->sid_bits - STRTAB_SPLIT);
+    }
     cfg->num_l1_ents = 1 << size;
 
     size += STRTAB_SPLIT;
@@ -2198,6 +2185,7 @@ static int arm_smmu_write_reg_sync(struct arm_smmu_device *smmu, u32 val,
 static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
 {
     int ret, irq;
+    u32 irqen_flags = IRQ_CTRL_EVTQ_IRQEN | IRQ_CTRL_GERROR_IRQEN;
 
     /* Disable IRQs first */
     ret = arm_smmu_write_reg_sync(smmu, 0, ARM_SMMU_IRQ_CTRL,
@@ -2252,13 +2240,13 @@ static int arm_smmu_setup_irqs(struct arm_smmu_device *smmu)
             if (IS_ERR_VALUE(ret))
                 dev_warn(smmu->dev,
                          "failed to enable priq irq\n");
+            else
+                irqen_flags |= IRQ_CTRL_PRIQ_IRQEN;
         }
     }
 
     /* Enable interrupt generation on the SMMU */
-    ret = arm_smmu_write_reg_sync(smmu,
-                                  IRQ_CTRL_EVTQ_IRQEN |
-                                  IRQ_CTRL_GERROR_IRQEN,
+    ret = arm_smmu_write_reg_sync(smmu, irqen_flags,
                                   ARM_SMMU_IRQ_CTRL, ARM_SMMU_IRQ_CTRLACK);
     if (ret)
         dev_warn(smmu->dev, "failed to enable irqs\n");
@@ -2540,12 +2528,12 @@ static int arm_smmu_device_probe(struct arm_smmu_device *smmu)
     case IDR5_OAS_44_BIT:
         smmu->oas = 44;
         break;
+    default:
+        dev_info(smmu->dev,
+                 "unknown output address size. Truncating to 48-bit\n");
+        /* Fallthrough */
     case IDR5_OAS_48_BIT:
         smmu->oas = 48;
-        break;
-    default:
-        dev_err(smmu->dev, "unknown output address size!\n");
-        return -ENXIO;
     }
 
     /* Set the DMA mask for our table walker */
...
@@ -37,6 +37,7 @@
 #include <linux/iopoll.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_address.h>
 #include <linux/pci.h>
 #include <linux/platform_device.h>
 #include <linux/slab.h>
@@ -607,34 +608,10 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
     }
 }
 
-static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie)
-{
-    struct arm_smmu_domain *smmu_domain = cookie;
-    struct arm_smmu_device *smmu = smmu_domain->smmu;
-    unsigned long offset = (unsigned long)addr & ~PAGE_MASK;
-
-    /* Ensure new page tables are visible to the hardware walker */
-    if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) {
-        dsb(ishst);
-    } else {
-        /*
-         * If the SMMU can't walk tables in the CPU caches, treat them
-         * like non-coherent DMA since we need to flush the new entries
-         * all the way out to memory. There's no possibility of
-         * recursion here as the SMMU table walker will not be wired
-         * through another SMMU.
-         */
-        dma_map_page(smmu->dev, virt_to_page(addr), offset, size,
-                     DMA_TO_DEVICE);
-    }
-}
-
 static struct iommu_gather_ops arm_smmu_gather_ops = {
     .tlb_flush_all  = arm_smmu_tlb_inv_context,
     .tlb_add_flush  = arm_smmu_tlb_inv_range_nosync,
     .tlb_sync       = arm_smmu_tlb_sync,
-    .flush_pgtable  = arm_smmu_flush_pgtable,
 };
 
 static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
@@ -898,6 +875,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
         .ias        = ias,
         .oas        = oas,
         .tlb        = &arm_smmu_gather_ops,
+        .iommu_dev  = smmu->dev,
     };
 
     smmu_domain->smmu = smmu;
@@ -1532,6 +1510,7 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
     unsigned long size;
     void __iomem *gr0_base = ARM_SMMU_GR0(smmu);
     u32 id;
+    bool cttw_dt, cttw_reg;
 
     dev_notice(smmu->dev, "probing hardware configuration...\n");
     dev_notice(smmu->dev, "SMMUv%d with:\n", smmu->version);
@@ -1571,10 +1550,22 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu)
         dev_notice(smmu->dev, "\taddress translation ops\n");
     }
 
-    if (id & ID0_CTTW) {
+    /*
+     * In order for DMA API calls to work properly, we must defer to what
+     * the DT says about coherency, regardless of what the hardware claims.
+     * Fortunately, this also opens up a workaround for systems where the
+     * ID register value has ended up configured incorrectly.
+     */
+    cttw_dt = of_dma_is_coherent(smmu->dev->of_node);
+    cttw_reg = !!(id & ID0_CTTW);
+    if (cttw_dt)
         smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK;
-        dev_notice(smmu->dev, "\tcoherent table walk\n");
-    }
+    if (cttw_dt || cttw_reg)
+        dev_notice(smmu->dev, "\t%scoherent table walk\n",
+                   cttw_dt ? "" : "non-");
+    if (cttw_dt != cttw_reg)
+        dev_notice(smmu->dev,
+                   "\t(IDR0.CTTW overridden by dma-coherent property)\n");
 
     if (id & ID0_SMS) {
         u32 smr, sid, mask;
...
@@ -1068,7 +1068,7 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
     if (intel_iommu_enabled)
         iommu->iommu_dev = iommu_device_create(NULL, iommu,
                                                intel_iommu_groups,
-                                               iommu->name);
+                                               "%s", iommu->name);
 
     return 0;
...
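
The dmar.c one-liner above is a format-string hardening fix: iommu->name was being passed in the printf-style format position of iommu_device_create(). If a name ever contained a '%', it would be interpreted as a conversion specifier. A generic illustration of the bug class, with plain printf() standing in for the kernel helper:

    #include <stdio.h>

    void log_name(const char *name)
    {
        printf(name);           /* bad: name is used as a format string */
        printf("%s", name);     /* good: name is treated as data only */
    }
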
@@ -41,7 +41,6 @@ struct pamu_isr_data {
 
 static struct paace *ppaact;
 static struct paace *spaact;
-static struct ome *omt __initdata;
 
 /*
  * Table for matching compatible strings, for device tree
@@ -50,7 +49,7 @@ static struct ome *omt __initdata;
  * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0"
  * string would be used.
  */
-static const struct of_device_id guts_device_ids[] __initconst = {
+static const struct of_device_id guts_device_ids[] = {
     { .compatible = "fsl,qoriq-device-config-1.0", },
     { .compatible = "fsl,qoriq-device-config-2.0", },
     {}
@@ -599,7 +598,7 @@ u32 get_stash_id(u32 stash_dest_hint, u32 vcpu)
  * Memory accesses to QMAN and BMAN private memory need not be coherent, so
  * clear the PAACE entry coherency attribute for them.
  */
-static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
+static void setup_qbman_paace(struct paace *ppaace, int paace_type)
 {
     switch (paace_type) {
     case QMAN_PAACE:
@@ -629,7 +628,7 @@ static void __init setup_qbman_paace(struct paace *ppaace, int paace_type)
  * this table to translate device transaction to appropriate corenet
  * transaction.
  */
-static void __init setup_omt(struct ome *omt)
+static void setup_omt(struct ome *omt)
 {
     struct ome *ome;
@@ -666,7 +665,7 @@ static void __init setup_omt(struct ome *omt)
  * Get the maximum number of PAACT table entries
  * and subwindows supported by PAMU
  */
-static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
+static void get_pamu_cap_values(unsigned long pamu_reg_base)
 {
     u32 pc_val;
@@ -676,9 +675,9 @@ static void __init get_pamu_cap_values(unsigned long pamu_reg_base)
 }
 
 /* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
-static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
-                                 phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
-                                 phys_addr_t omt_phys)
+static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
+                          phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
+                          phys_addr_t omt_phys)
 {
     u32 *pc;
     struct pamu_mmap_regs *pamu_regs;
@@ -720,7 +719,7 @@ static int __init setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu
 }
 
 /* Enable all device LIODNS */
-static void __init setup_liodns(void)
+static void setup_liodns(void)
 {
     int i, len;
     struct paace *ppaace;
@@ -846,7 +845,7 @@ struct ccsr_law {
 /*
  * Create a coherence subdomain for a given memory block.
  */
-static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
+static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 {
     struct device_node *np;
     const __be32 *iprop;
@@ -988,7 +987,7 @@ static int __init create_csd(phys_addr_t phys, size_t size, u32 csd_port_id)
 static const struct {
     u32 svr;
     u32 port_id;
-} port_id_map[] __initconst = {
+} port_id_map[] = {
     {(SVR_P2040 << 8) | 0x10, 0xFF000000},  /* P2040 1.0 */
     {(SVR_P2040 << 8) | 0x11, 0xFF000000},  /* P2040 1.1 */
    {(SVR_P2041 << 8) | 0x10, 0xFF000000},  /* P2041 1.0 */
@@ -1006,7 +1005,7 @@ static const struct {
 
 #define SVR_SECURITY    0x80000 /* The Security (E) bit */
 
-static int __init fsl_pamu_probe(struct platform_device *pdev)
+static int fsl_pamu_probe(struct platform_device *pdev)
 {
     struct device *dev = &pdev->dev;
     void __iomem *pamu_regs = NULL;
@@ -1022,6 +1021,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
     int irq;
     phys_addr_t ppaact_phys;
     phys_addr_t spaact_phys;
+    struct ome *omt;
     phys_addr_t omt_phys;
     size_t mem_size = 0;
     unsigned int order = 0;
@@ -1200,7 +1200,7 @@ static int __init fsl_pamu_probe(struct platform_device *pdev)
     return ret;
 }
 
-static struct platform_driver fsl_of_pamu_driver __initdata = {
+static struct platform_driver fsl_of_pamu_driver = {
     .driver = {
         .name = "fsl-of-pamu",
     },
...
... (one large diff collapsed by the viewer)
@@ -384,7 +384,7 @@ static int set_msi_sid(struct irte *irte, struct pci_dev *dev)
 
 static int iommu_load_old_irte(struct intel_iommu *iommu)
 {
-    struct irte *old_ir_table;
+    struct irte __iomem *old_ir_table;
     phys_addr_t irt_phys;
     unsigned int i;
     size_t size;
@@ -413,7 +413,7 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
         return -ENOMEM;
 
     /* Copy data over */
-    memcpy(iommu->ir_table->base, old_ir_table, size);
+    memcpy_fromio(iommu->ir_table->base, old_ir_table, size);
 
     __iommu_flush_cache(iommu, iommu->ir_table->base, size);
@@ -426,6 +426,8 @@ static int iommu_load_old_irte(struct intel_iommu *iommu)
             bitmap_set(iommu->ir_table->bitmap, i, 1);
     }
 
+    iounmap(old_ir_table);
+
     return 0;
 }
...
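
The intel_irq_remapping.c hunks above fix two iomem mistakes at once: the old interrupt remapping table inherited from the previous kernel is an ioremapped device mapping, so it has to be copied with memcpy_fromio() rather than memcpy(), and the mapping has to be released with iounmap() once the copy is done. A reduced sketch of the corrected access pattern (how irt_phys and size are obtained is elided here):

    struct irte __iomem *old_ir_table;

    old_ir_table = ioremap_cache(irt_phys, size);
    if (!old_ir_table)
        return -ENOMEM;

    /* memcpy() on an __iomem pointer is not portable; use the accessor. */
    memcpy_fromio(iommu->ir_table->base, old_ir_table, size);

    iounmap(old_ir_table);
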
@@ -26,6 +26,8 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 
+#include <asm/barrier.h>
+
 #include "io-pgtable.h"
 
 #define ARM_LPAE_MAX_ADDR_BITS      48
@@ -200,20 +202,97 @@ typedef u64 arm_lpae_iopte;
 
 static bool selftest_running = false;
 
+static dma_addr_t __arm_lpae_dma_addr(struct device *dev, void *pages)
+{
+    return phys_to_dma(dev, virt_to_phys(pages));
+}
+
+static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
+                                    struct io_pgtable_cfg *cfg)
+{
+    struct device *dev = cfg->iommu_dev;
+    dma_addr_t dma;
+    void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);
+
+    if (!pages)
+        return NULL;
+
+    if (!selftest_running) {
+        dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
+        if (dma_mapping_error(dev, dma))
+            goto out_free;
+        /*
+         * We depend on the IOMMU being able to work with any physical
+         * address directly, so if the DMA layer suggests it can't by
+         * giving us back some translation, that bodes very badly...
+         */
+        if (dma != __arm_lpae_dma_addr(dev, pages))
+            goto out_unmap;
+    }
+
+    return pages;
+
+out_unmap:
+    dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
+    dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
+out_free:
+    free_pages_exact(pages, size);
+    return NULL;
+}
+
+static void __arm_lpae_free_pages(void *pages, size_t size,
+                                  struct io_pgtable_cfg *cfg)
+{
+    struct device *dev = cfg->iommu_dev;
+
+    if (!selftest_running)
+        dma_unmap_single(dev, __arm_lpae_dma_addr(dev, pages),
+                         size, DMA_TO_DEVICE);
+    free_pages_exact(pages, size);
+}
+
+static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
+                               struct io_pgtable_cfg *cfg)
+{
+    struct device *dev = cfg->iommu_dev;
+
+    *ptep = pte;
+
+    if (!selftest_running)
+        dma_sync_single_for_device(dev, __arm_lpae_dma_addr(dev, ptep),
                                   sizeof(pte), DMA_TO_DEVICE);
+}
+
+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
+                            unsigned long iova, size_t size, int lvl,
+                            arm_lpae_iopte *ptep);
+
 static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
                              unsigned long iova, phys_addr_t paddr,
                              arm_lpae_iopte prot, int lvl,
                              arm_lpae_iopte *ptep)
 {
     arm_lpae_iopte pte = prot;
+    struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
-    /* We require an unmap first */
     if (iopte_leaf(*ptep, lvl)) {
+        /* We require an unmap first */
         WARN_ON(!selftest_running);
         return -EEXIST;
+    } else if (iopte_type(*ptep, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
+        /*
+         * We need to unmap and free the old table before
+         * overwriting it with a block entry.
+         */
+        arm_lpae_iopte *tblp;
+        size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
+
+        tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
+        if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
+            return -EINVAL;
     }
 
-    if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+    if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
         pte |= ARM_LPAE_PTE_NS;
 
     if (lvl == ARM_LPAE_MAX_LEVELS - 1)
@@ -224,8 +303,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
     pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
     pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
 
-    *ptep = pte;
-    data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
+    __arm_lpae_set_pte(ptep, pte, cfg);
     return 0;
 }
@@ -234,14 +312,14 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
                           int lvl, arm_lpae_iopte *ptep)
 {
     arm_lpae_iopte *cptep, pte;
-    void *cookie = data->iop.cookie;
     size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
+    struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
     /* Find our entry at the current level */
     ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
 
     /* If we can install a leaf entry at this level, then do so */
-    if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
+    if (size == block_size && (size & cfg->pgsize_bitmap))
         return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
 
     /* We can't allocate tables at the final level */
@@ -251,18 +329,15 @@ static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
     /* Grab a pointer to the next level */
     pte = *ptep;
     if (!pte) {
-        cptep = alloc_pages_exact(1UL << data->pg_shift,
-                                  GFP_ATOMIC | __GFP_ZERO);
+        cptep = __arm_lpae_alloc_pages(1UL << data->pg_shift,
+                                       GFP_ATOMIC, cfg);
         if (!cptep)
             return -ENOMEM;
 
-        data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
-                                         cookie);
         pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
-        if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
+        if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
             pte |= ARM_LPAE_PTE_NSTABLE;
-        *ptep = pte;
-        data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+        __arm_lpae_set_pte(ptep, pte, cfg);
     } else {
         cptep = iopte_deref(pte, data);
     }
@@ -309,7 +384,7 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
 {
     struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
     arm_lpae_iopte *ptep = data->pgd;
-    int lvl = ARM_LPAE_START_LVL(data);
+    int ret, lvl = ARM_LPAE_START_LVL(data);
     arm_lpae_iopte prot;
 
     /* If no access, then nothing to do */
@@ -317,7 +392,14 @@ static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
         return 0;
 
     prot = arm_lpae_prot_to_pte(data, iommu_prot);
-    return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+    ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
+    /*
+     * Synchronise all PTE updates for the new mapping before there's
+     * a chance for anything to kick off a table walk for the new iova.
+     */
+    wmb();
+
+    return ret;
 }
 
 static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
@@ -347,7 +429,7 @@ static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
         __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
     }
 
-    free_pages_exact(start, table_size);
+    __arm_lpae_free_pages(start, table_size, &data->iop.cfg);
 }
 
 static void arm_lpae_free_pgtable(struct io_pgtable *iop)
@@ -366,8 +448,7 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
     unsigned long blk_start, blk_end;
     phys_addr_t blk_paddr;
     arm_lpae_iopte table = 0;
-    void *cookie = data->iop.cookie;
-    const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
+    struct io_pgtable_cfg *cfg = &data->iop.cfg;
 
     blk_start = iova & ~(blk_size - 1);
     blk_end = blk_start + blk_size;
@@ -393,10 +474,9 @@ static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
         }
     }
 
-    *ptep = table;
-    tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+    __arm_lpae_set_pte(ptep, table, cfg);
     iova &= ~(blk_size - 1);
-    tlb->tlb_add_flush(iova, blk_size, true, cookie);
+    cfg->tlb->tlb_add_flush(iova, blk_size, true, data->iop.cookie);
     return size;
 }
@@ -418,13 +498,12 @@ static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
 
     /* If the size matches this level, we're in the right place */
     if (size == blk_size) {
-        *ptep = 0;
-        tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
+        __arm_lpae_set_pte(ptep, 0, &data->iop.cfg);
 
         if (!iopte_leaf(pte, lvl)) {
             /* Also flush any partial walks */
             tlb->tlb_add_flush(iova, size, false, cookie);
-            tlb->tlb_sync(data->iop.cookie);
+            tlb->tlb_sync(cookie);
             ptep = iopte_deref(pte, data);
             __arm_lpae_free_pgtable(data, lvl + 1, ptep);
         } else {
@@ -640,11 +719,12 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
     cfg->arm_lpae_s1_cfg.mair[1] = 0;
 
     /* Looking good; allocate a pgd */
-    data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+    data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
     if (!data->pgd)
         goto out_free_data;
 
-    cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+    /* Ensure the empty pgd is visible before any actual TTBR write */
+    wmb();
 
     /* TTBRs */
     cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
@@ -728,11 +808,12 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
     cfg->arm_lpae_s2_cfg.vtcr = reg;
 
     /* Allocate pgd pages */
-    data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
+    data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
     if (!data->pgd)
         goto out_free_data;
 
-    cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);
+    /* Ensure the empty pgd is visible before any actual TTBR write */
+    wmb();
 
     /* VTTBR */
     cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
@@ -818,16 +899,10 @@ static void dummy_tlb_sync(void *cookie)
     WARN_ON(cookie != cfg_cookie);
 }
 
-static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-    WARN_ON(cookie != cfg_cookie);
-}
-
 static struct iommu_gather_ops dummy_tlb_ops __initdata = {
     .tlb_flush_all  = dummy_tlb_flush_all,
     .tlb_add_flush  = dummy_tlb_add_flush,
     .tlb_sync       = dummy_tlb_sync,
-    .flush_pgtable  = dummy_flush_pgtable,
 };
 
 static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
...
@@ -24,11 +24,6 @@
 
 #include "io-pgtable.h"
 
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
-extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
-
 static const struct io_pgtable_init_fns *
 io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] =
 {
...
@@ -17,8 +17,9 @@ enum io_pgtable_fmt {
  *
  * @tlb_flush_all: Synchronously invalidate the entire TLB context.
  * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range.
- * @tlb_sync:      Ensure any queue TLB invalidation has taken effect.
- * @flush_pgtable: Ensure page table updates are visible to the IOMMU.
+ * @tlb_sync:      Ensure any queued TLB invalidation has taken effect, and
+ *                 any corresponding page table updates are visible to the
+ *                 IOMMU.
  *
  * Note that these can all be called in atomic context and must therefore
  * not block.
@@ -28,7 +29,6 @@ struct iommu_gather_ops {
     void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf,
                           void *cookie);
     void (*tlb_sync)(void *cookie);
-    void (*flush_pgtable)(void *ptr, size_t size, void *cookie);
 };
 
 /**
@@ -41,6 +41,8 @@ struct iommu_gather_ops {
  * @ias:           Input address (iova) size, in bits.
  * @oas:           Output address (paddr) size, in bits.
  * @tlb:           TLB management callbacks for this set of tables.
+ * @iommu_dev:     The device representing the DMA configuration for the
+ *                 page table walker.
  */
 struct io_pgtable_cfg {
     #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0)    /* Set NS bit in PTEs */
@@ -49,6 +51,7 @@ struct io_pgtable_cfg {
     unsigned int        ias;
     unsigned int        oas;
     const struct iommu_gather_ops   *tlb;
+    struct device       *iommu_dev;
 
     /* Low-level data specific to the table format */
     union {
@@ -140,4 +143,9 @@ struct io_pgtable_init_fns {
     void (*free)(struct io_pgtable *iop);
 };
 
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns;
+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns;
+
 #endif /* __IO_PGTABLE_H */
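
With flush_pgtable gone from iommu_gather_ops, the new iommu_dev field is how a caller tells the io-pgtable code which device's DMA configuration governs table walks; the allocator then uses the regular DMA API (dma_map_single()/dma_sync_single_for_device()) against that device, as the io-pgtable-arm.c hunks above show. A sketch of how a driver fills in the config, mirroring the arm-smmu hunks:

    struct io_pgtable_cfg pgtbl_cfg = {
        .pgsize_bitmap = smmu->pgsize_bitmap,   /* driver-specific */
        .ias           = ias,
        .oas           = oas,
        .tlb           = &arm_smmu_gather_ops,
        .iommu_dev     = smmu->dev,   /* device used for the DMA API calls */
    };

    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
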
@@ -283,24 +283,10 @@ static void ipmmu_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
     /* The hardware doesn't support selective TLB flush. */
 }
 
-static void ipmmu_flush_pgtable(void *ptr, size_t size, void *cookie)
-{
-    unsigned long offset = (unsigned long)ptr & ~PAGE_MASK;
-    struct ipmmu_vmsa_domain *domain = cookie;
-
-    /*
-     * TODO: Add support for coherent walk through CCI with DVM and remove
-     * cache handling.
-     */
-    dma_map_page(domain->mmu->dev, virt_to_page(ptr), offset, size,
-                 DMA_TO_DEVICE);
-}
-
 static struct iommu_gather_ops ipmmu_gather_ops = {
     .tlb_flush_all = ipmmu_tlb_flush_all,
     .tlb_add_flush = ipmmu_tlb_add_flush,
     .tlb_sync = ipmmu_tlb_flush_all,
-    .flush_pgtable = ipmmu_flush_pgtable,
 };
 
 /* -----------------------------------------------------------------------------
@@ -327,6 +313,11 @@ static int ipmmu_domain_init_context(struct ipmmu_vmsa_domain *domain)
     domain->cfg.ias = 32;
     domain->cfg.oas = 40;
     domain->cfg.tlb = &ipmmu_gather_ops;
+    /*
+     * TODO: Add support for coherent walk through CCI with DVM and remove
+     * cache handling. For now, delegate it to the io-pgtable code.
+     */
+    domain->cfg.iommu_dev = domain->mmu->dev;
 
     domain->iop = alloc_io_pgtable_ops(ARM_32_LPAE_S1, &domain->cfg,
                                        domain);
...
@@ -84,7 +84,7 @@ void set_irq_remapping_broken(void)
 bool irq_remapping_cap(enum irq_remap_cap cap)
 {
     if (!remap_ops || disable_irq_post)
-        return 0;
+        return false;
 
     return (remap_ops->capability & (1 << cap));
 }
...
@@ -106,8 +106,8 @@ static int __flush_iotlb(struct iommu_domain *domain)
 #endif
 
     list_for_each_entry(ctx_drvdata, &priv->list_attached, attached_elm) {
-        if (!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent)
-            BUG();
+        BUG_ON(!ctx_drvdata->pdev || !ctx_drvdata->pdev->dev.parent);
 
         iommu_drvdata = dev_get_drvdata(ctx_drvdata->pdev->dev.parent);
         BUG_ON(!iommu_drvdata);
...
@@ -141,10 +141,12 @@ struct iommu_ops *of_iommu_configure(struct device *dev,
     struct iommu_ops *ops = NULL;
     int idx = 0;
 
-    if (dev_is_pci(dev)) {
-        dev_err(dev, "IOMMU is currently not supported for PCI\n");
+    /*
+     * We can't do much for PCI devices without knowing how
+     * device IDs are wired up from the PCI bus to the IOMMU.
+     */
+    if (dev_is_pci(dev))
         return NULL;
-    }
 
     /*
      * We don't currently walk up the tree looking for a parent IOMMU.
...
@@ -14,6 +14,7 @@
 #include <linux/io.h>
 #include <linux/slab.h>
 #include <linux/uaccess.h>
+#include <linux/pm_runtime.h>
 #include <linux/debugfs.h>
 #include <linux/platform_data/iommu-omap.h>
 
@@ -29,6 +30,59 @@ static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
     return !obj->domain;
 }
 
+#define pr_reg(name)                                                \
+    do {                                                            \
+        ssize_t bytes;                                              \
+        const char *str = "%20s: %08x\n";                           \
+        const int maxcol = 32;                                      \
+        bytes = snprintf(p, maxcol, str, __stringify(name),         \
+                         iommu_read_reg(obj, MMU_##name));          \
+        p += bytes;                                                 \
+        len -= bytes;                                               \
+        if (len < maxcol)                                           \
+            goto out;                                               \
+    } while (0)
+
+static ssize_t
+omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
+{
+    char *p = buf;
+
+    pr_reg(REVISION);
+    pr_reg(IRQSTATUS);
+    pr_reg(IRQENABLE);
+    pr_reg(WALKING_ST);
+    pr_reg(CNTL);
+    pr_reg(FAULT_AD);
+    pr_reg(TTB);
+    pr_reg(LOCK);
+    pr_reg(LD_TLB);
+    pr_reg(CAM);
+    pr_reg(RAM);
+    pr_reg(GFLUSH);
+    pr_reg(FLUSH_ENTRY);
+    pr_reg(READ_CAM);
+    pr_reg(READ_RAM);
+    pr_reg(EMU_FAULT_AD);
+out:
+    return p - buf;
+}
+
+static ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf,
+                                   ssize_t bytes)
+{
+    if (!obj || !buf)
+        return -EINVAL;
+
+    pm_runtime_get_sync(obj->dev);
+    bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
+    pm_runtime_put_sync(obj->dev);
+
+    return bytes;
+}
+
 static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
                                size_t count, loff_t *ppos)
 {
@@ -55,34 +109,71 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
     return bytes;
 }
-static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
-                              size_t count, loff_t *ppos)
+static int
+__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
 {
-    struct omap_iommu *obj = file->private_data;
-    char *p, *buf;
-    ssize_t bytes, rest;
+    int i;
+    struct iotlb_lock saved;
+    struct cr_regs tmp;
+    struct cr_regs *p = crs;
+
+    pm_runtime_get_sync(obj->dev);
+    iotlb_lock_get(obj, &saved);
+
+    for_each_iotlb_cr(obj, num, i, tmp) {
+        if (!iotlb_cr_valid(&tmp))
+            continue;
+        *p++ = tmp;
+    }
+
+    iotlb_lock_set(obj, &saved);
+    pm_runtime_put_sync(obj->dev);
+
+    return p - crs;
+}
+
+static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
+                             struct seq_file *s)
+{
+    return seq_printf(s, "%08x %08x %01x\n", cr->cam, cr->ram,
+                      (cr->cam & MMU_CAM_P) ? 1 : 0);
+}
+
+static size_t omap_dump_tlb_entries(struct omap_iommu *obj, struct seq_file *s)
+{
+    int i, num;
+    struct cr_regs *cr;
+
+    num = obj->nr_tlb_entries;
+
+    cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
+    if (!cr)
+        return 0;
+
+    num = __dump_tlb_entries(obj, cr, num);
+    for (i = 0; i < num; i++)
+        iotlb_dump_cr(obj, cr + i, s);
+    kfree(cr);
+
+    return 0;
+}
+
+static int debug_read_tlb(struct seq_file *s, void *data)
+{
+    struct omap_iommu *obj = s->private;
 
     if (is_omap_iommu_detached(obj))
         return -EPERM;
 
-    buf = kmalloc(count, GFP_KERNEL);
-    if (!buf)
-        return -ENOMEM;
-    p = buf;
-
     mutex_lock(&iommu_debug_lock);
 
-    p += sprintf(p, "%8s %8s\n", "cam:", "ram:");
-    p += sprintf(p, "-----------------------------------------\n");
-    rest = count - (p - buf);
-    p += omap_dump_tlb_entries(obj, p, rest);
-
-    bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
+    seq_printf(s, "%8s %8s\n", "cam:", "ram:");
+    seq_puts(s, "-----------------------------------------\n");
+    omap_dump_tlb_entries(obj, s);
 
     mutex_unlock(&iommu_debug_lock);
-    kfree(buf);
 
-    return bytes;
+    return 0;
 }
 
 static void dump_ioptable(struct seq_file *s)
@@ -154,10 +245,10 @@ static int debug_read_pagetable(struct seq_file *s, void *data)
         .open = simple_open,                    \
         .read = debug_read_##name,              \
         .llseek = generic_file_llseek,          \
-    };
+    }
 
 DEBUG_FOPS_RO(regs);
-DEBUG_FOPS_RO(tlb);
+DEBUG_SEQ_FOPS_RO(tlb);
 DEBUG_SEQ_FOPS_RO(pagetable);
 
 #define __DEBUG_ADD_FILE(attr, mode)                \
...
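
Converting the TLB dump from a hand-rolled read() over a kmalloc'd buffer to the seq_file interface is what removes the potential overflow called out in the merge message: seq_file grows its buffer as needed, so no sprintf() arithmetic against a fixed-size allocation remains. For reference, a minimal read-only debugfs attribute in the same style (struct my_dev and all names here are hypothetical, not from the driver):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    struct my_dev {
        int nr_entries;
    };

    static int myattr_show(struct seq_file *s, void *unused)
    {
        struct my_dev *md = s->private;   /* set via debugfs_create_file() */

        /* seq_printf() handles all buffer sizing internally. */
        seq_printf(s, "entries: %d\n", md->nr_entries);
        return 0;
    }

    static int myattr_open(struct inode *inode, struct file *file)
    {
        return single_open(file, myattr_show, inode->i_private);
    }

    static const struct file_operations myattr_fops = {
        .open    = myattr_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
    };
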
@@ -12,7 +12,6 @@
  */
 
 #include <linux/err.h>
-#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/interrupt.h>
 #include <linux/ioport.h>
@@ -38,11 +37,6 @@
 #define to_iommu(dev)   \
     ((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))
 
-#define for_each_iotlb_cr(obj, n, __i, cr)                              \
-    for (__i = 0;                                                       \
-         (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);       \
-         __i++)
-
 /* bitmap of the page sizes currently supported */
 #define OMAP_IOMMU_PGSIZES  (SZ_4K | SZ_64K | SZ_1M | SZ_16M)
 
@@ -72,11 +66,6 @@ struct omap_iommu_domain {
 #define MMU_LOCK_VICT(x)    \
     ((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)
 
-struct iotlb_lock {
-    short base;
-    short vict;
-};
-
 static struct platform_driver omap_iommu_driver;
 static struct kmem_cache *iopte_cachep;
@@ -213,14 +202,6 @@ static void iommu_disable(struct omap_iommu *obj)
 /*
  * TLB operations
  */
-static inline int iotlb_cr_valid(struct cr_regs *cr)
-{
-    if (!cr)
-        return -EINVAL;
-
-    return cr->cam & MMU_CAM_V;
-}
-
 static u32 iotlb_cr_to_virt(struct cr_regs *cr)
 {
     u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
@@ -260,7 +241,7 @@ static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
     return status;
 }
 
-static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
 {
     u32 val;
 
@@ -268,10 +249,9 @@ static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
     l->base = MMU_LOCK_BASE(val);
     l->vict = MMU_LOCK_VICT(val);
 }
 
-static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
 {
     u32 val;
 
@@ -297,7 +277,7 @@ static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
 }
 
 /* only used in iotlb iteration for-loop */
-static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
 {
     struct cr_regs cr;
     struct iotlb_lock l;
@@ -468,129 +448,6 @@ static void flush_iotlb_all(struct omap_iommu *obj)
     pm_runtime_put_sync(obj->dev);
 }
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-
-#define pr_reg(name)                                                \
-    do {                                                            \
-        ssize_t bytes;                                              \
-        const char *str = "%20s: %08x\n";                           \
-        const int maxcol = 32;                                      \
-        bytes = snprintf(p, maxcol, str, __stringify(name),         \
-                         iommu_read_reg(obj, MMU_##name));          \
-        p += bytes;                                                 \
-        len -= bytes;                                               \
-        if (len < maxcol)                                           \
-            goto out;                                               \
-    } while (0)
-
-static ssize_t
-omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
-{
-    char *p = buf;
-
-    pr_reg(REVISION);
-    pr_reg(IRQSTATUS);
-    pr_reg(IRQENABLE);
-    pr_reg(WALKING_ST);
-    pr_reg(CNTL);
-    pr_reg(FAULT_AD);
-    pr_reg(TTB);
-    pr_reg(LOCK);
-    pr_reg(LD_TLB);
-    pr_reg(CAM);
-    pr_reg(RAM);
-    pr_reg(GFLUSH);
-    pr_reg(FLUSH_ENTRY);
-    pr_reg(READ_CAM);
-    pr_reg(READ_RAM);
-    pr_reg(EMU_FAULT_AD);
-out:
-    return p - buf;
-}
-
-ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-    if (!obj || !buf)
-        return -EINVAL;
-
-    pm_runtime_get_sync(obj->dev);
-    bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
-    pm_runtime_put_sync(obj->dev);
-
-    return bytes;
-}
-
-static int
-__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
-{
-    int i;
-    struct iotlb_lock saved;
-    struct cr_regs tmp;
-    struct cr_regs *p = crs;
-
-    pm_runtime_get_sync(obj->dev);
-    iotlb_lock_get(obj, &saved);
-
-    for_each_iotlb_cr(obj, num, i, tmp) {
-        if (!iotlb_cr_valid(&tmp))
-            continue;
-        *p++ = tmp;
-    }
-
-    iotlb_lock_set(obj, &saved);
-    pm_runtime_put_sync(obj->dev);
-
-    return p - crs;
-}
-
-/**
- * iotlb_dump_cr - Dump an iommu tlb entry into buf
- * @obj:    target iommu
- * @cr:     contents of cam and ram register
- * @buf:    output buffer
- **/
-static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
-                             char *buf)
-{
-    char *p = buf;
-
-    /* FIXME: Need more detail analysis of cam/ram */
-    p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
-                 (cr->cam & MMU_CAM_P) ? 1 : 0);
-
-    return p - buf;
-}
-
-/**
- * omap_dump_tlb_entries - dump cr arrays to given buffer
- * @obj:    target iommu
- * @buf:    output buffer
- **/
-size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
-{
-    int i, num;
-    struct cr_regs *cr;
-    char *p = buf;
-
-    num = bytes / sizeof(*cr);
-    num = min(obj->nr_tlb_entries, num);
-
-    cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
-    if (!cr)
-        return 0;
-
-    num = __dump_tlb_entries(obj, cr, num);
-    for (i = 0; i < num; i++)
-        p += iotlb_dump_cr(obj, cr + i, p);
-    kfree(cr);
-
-    return p - buf;
-}
-
-#endif /* CONFIG_OMAP_IOMMU_DEBUG */
-
 /*
  * H/W pagetable operations
  */
...@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data) ...@@ -930,14 +787,14 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
if (!iopgd_is_table(*iopgd)) { if (!iopgd_is_table(*iopgd)) {
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n", dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:px%08x\n",
obj->name, errs, da, iopgd, *iopgd); obj->name, errs, da, iopgd, *iopgd);
return IRQ_NONE; return IRQ_NONE;
} }
iopte = iopte_offset(iopgd, da); iopte = iopte_offset(iopgd, da);
dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n", dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
obj->name, errs, da, iopgd, *iopgd, iopte, *iopte); obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);
return IRQ_NONE; return IRQ_NONE;
} }
@@ -963,9 +820,8 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
	struct device *dev;
	struct omap_iommu *obj;

-	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
-				 (void *)name,
-				 device_match_by_alias);
+	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
+				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);
@@ -1089,7 +945,6 @@ static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,dra7-iommu" },
	{},
};
-MODULE_DEVICE_TABLE(of, omap_iommu_of_match);

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
@@ -1121,7 +976,7 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1148,7 +1003,7 @@ static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1199,7 +1054,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
@@ -1220,7 +1075,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
@@ -1237,16 +1092,12 @@ static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
-	if (!omap_domain) {
-		pr_err("kzalloc failed\n");
+	if (!omap_domain)
		goto out;
-	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
-	if (!omap_domain->pgtable) {
-		pr_err("kzalloc failed\n");
+	if (!omap_domain->pgtable)
		goto fail_nomem;
-	}

	/*
	 * should never fail, but please keep this around to ensure
@@ -1285,7 +1136,7 @@ static void omap_iommu_domain_free(struct iommu_domain *domain)
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
@@ -1302,7 +1153,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
@@ -1310,7 +1161,7 @@ static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
@@ -1405,20 +1256,5 @@ static int __init omap_iommu_init(void)
	return platform_driver_register(&omap_iommu_driver);
}
-/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);
+/* must be ready before omap3isp is probed */
-
-static void __exit omap_iommu_exit(void)
-{
-	kmem_cache_destroy(iopte_cachep);
-	platform_driver_unregister(&omap_iommu_driver);
-
-	omap_iommu_debugfs_exit();
-}
-module_exit(omap_iommu_exit);
-
-MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
-MODULE_ALIAS("platform:omap-iommu");
-MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
-MODULE_LICENSE("GPL v2");
@@ -13,16 +13,18 @@
#ifndef _OMAP_IOMMU_H
#define _OMAP_IOMMU_H

+#include <linux/bitops.h>
+
+#define for_each_iotlb_cr(obj, n, __i, cr)				\
+	for (__i = 0;							\
+	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
+	     __i++)
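Note: the new iterator reads one CAM/RAM pair per TLB slot via __iotlb_read_cr() and hands it to the loop body — the comma expression makes the register read a side effect of the loop condition. A sketch of its use, mirroring __dump_tlb_entries() in omap-iommu.c above:

	struct cr_regs cr;
	int i;

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		if (!iotlb_cr_valid(&cr))
			continue;	/* skip empty slots */
		pr_debug("slot %d: cam=%08x ram=%08x\n", i, cr.cam, cr.ram);
	}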
struct iotlb_entry {
	u32 da;
	u32 pa;
	u32 pgsz, prsvd, valid;
-	union {
-		u16 ap;
-		struct {
-			u32 endian, elsz, mixed;
-		};
-	};
+	u32 endian, elsz, mixed;
};

struct omap_iommu {
@@ -49,20 +51,13 @@ struct omap_iommu {
};
struct cr_regs {
-	union {
-		struct {
-			u16 cam_l;
-			u16 cam_h;
-		};
-		u32 cam;
-	};
-	union {
-		struct {
-			u16 ram_l;
-			u16 ram_h;
-		};
-		u32 ram;
-	};
+	u32 cam;
+	u32 ram;
+};
+
+struct iotlb_lock {
+	short base;
+	short vict;
};

/**
@@ -103,11 +98,11 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
 * MMU Register bit definitions
 */
/* IRQSTATUS & IRQENABLE */
-#define MMU_IRQ_MULTIHITFAULT	(1 << 4)
-#define MMU_IRQ_TABLEWALKFAULT	(1 << 3)
-#define MMU_IRQ_EMUMISS		(1 << 2)
-#define MMU_IRQ_TRANSLATIONFAULT	(1 << 1)
-#define MMU_IRQ_TLBMISS		(1 << 0)
+#define MMU_IRQ_MULTIHITFAULT	BIT(4)
+#define MMU_IRQ_TABLEWALKFAULT	BIT(3)
+#define MMU_IRQ_EMUMISS		BIT(2)
+#define MMU_IRQ_TRANSLATIONFAULT	BIT(1)
+#define MMU_IRQ_TLBMISS		BIT(0)
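Note: BIT() comes from the linux/bitops.h include added above and expands to an unsigned long constant, so the conversion preserves each mask's value while widening its type:

	#define BIT(nr)		(1UL << (nr))	/* include/linux/bitops.h */

	/* (1 << 4) is a signed int, BIT(4) an unsigned long; both equal
	 * 0x10, which is what matters when testing bits in a u32 read */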
#define __MMU_IRQ_FAULT		\
	(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
@@ -119,16 +114,16 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT		1
#define MMU_CNTL_MASK		(7 << MMU_CNTL_SHIFT)
-#define MMU_CNTL_EML_TLB	(1 << 3)
-#define MMU_CNTL_TWL_EN		(1 << 2)
-#define MMU_CNTL_MMU_EN		(1 << 1)
+#define MMU_CNTL_EML_TLB	BIT(3)
+#define MMU_CNTL_TWL_EN		BIT(2)
+#define MMU_CNTL_MMU_EN		BIT(1)

/* CAM */
#define MMU_CAM_VATAG_SHIFT	12
#define MMU_CAM_VATAG_MASK \
	((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
-#define MMU_CAM_P		(1 << 3)
-#define MMU_CAM_V		(1 << 2)
+#define MMU_CAM_P		BIT(3)
+#define MMU_CAM_V		BIT(2)
#define MMU_CAM_PGSZ_MASK	3
#define MMU_CAM_PGSZ_1M		(0 << 0)
#define MMU_CAM_PGSZ_64K	(1 << 0)
@@ -141,9 +136,9 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
	((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)

#define MMU_RAM_ENDIAN_SHIFT	9
-#define MMU_RAM_ENDIAN_MASK	(1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_MASK	BIT(MMU_RAM_ENDIAN_SHIFT)
#define MMU_RAM_ENDIAN_LITTLE	(0 << MMU_RAM_ENDIAN_SHIFT)
-#define MMU_RAM_ENDIAN_BIG	(1 << MMU_RAM_ENDIAN_SHIFT)
+#define MMU_RAM_ENDIAN_BIG	BIT(MMU_RAM_ENDIAN_SHIFT)

#define MMU_RAM_ELSZ_SHIFT	7
#define MMU_RAM_ELSZ_MASK	(3 << MMU_RAM_ELSZ_SHIFT)
@@ -152,7 +147,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_RAM_ELSZ_32		(2 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_ELSZ_NONE	(3 << MMU_RAM_ELSZ_SHIFT)
#define MMU_RAM_MIXED_SHIFT	6
-#define MMU_RAM_MIXED_MASK	(1 << MMU_RAM_MIXED_SHIFT)
+#define MMU_RAM_MIXED_MASK	BIT(MMU_RAM_MIXED_SHIFT)
#define MMU_RAM_MIXED		MMU_RAM_MIXED_MASK

#define MMU_GP_REG_BUS_ERR_BACK_EN	0x1
@@ -190,12 +185,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/*
 * global functions
 */
-#ifdef CONFIG_OMAP_IOMMU_DEBUG
-extern ssize_t
-omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
-extern size_t
-omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
+struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n);
+void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l);
+void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l);

+#ifdef CONFIG_OMAP_IOMMU_DEBUG
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
@@ -222,4 +217,12 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
	__raw_writel(val, obj->regbase + offs);
}

+static inline int iotlb_cr_valid(struct cr_regs *cr)
+{
+	if (!cr)
+		return -EINVAL;
+
+	return cr->cam & MMU_CAM_V;
+}
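Note one subtlety of the helper just added: it mixes an error code with a bit-test result. -EINVAL is nonzero, so a caller that treats the return purely as a boolean would see a NULL cr as "valid"; every in-tree caller passes the address of a local struct cr_regs, so the NULL check is defensive only. A sketch of the intended usage:

	struct cr_regs cr = __iotlb_read_cr(obj, 0);

	if (iotlb_cr_valid(&cr))	/* &cr is never NULL here */
		pr_debug("TLB slot 0 holds a valid entry\n");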
#endif /* _OMAP_IOMMU_H */

@@ -10,25 +10,30 @@
 * published by the Free Software Foundation.
 */

+#ifndef _OMAP_IOPGTABLE_H
+#define _OMAP_IOPGTABLE_H
+
+#include <linux/bitops.h>
+
/*
 * "L2 table" address mask and size definitions.
 */
#define IOPGD_SHIFT		20
-#define IOPGD_SIZE		(1UL << IOPGD_SHIFT)
+#define IOPGD_SIZE		BIT(IOPGD_SHIFT)
#define IOPGD_MASK		(~(IOPGD_SIZE - 1))

/*
 * "section" address mask and size definitions.
 */
#define IOSECTION_SHIFT		20
-#define IOSECTION_SIZE		(1UL << IOSECTION_SHIFT)
+#define IOSECTION_SIZE		BIT(IOSECTION_SHIFT)
#define IOSECTION_MASK		(~(IOSECTION_SIZE - 1))

/*
 * "supersection" address mask and size definitions.
 */
#define IOSUPER_SHIFT		24
-#define IOSUPER_SIZE		(1UL << IOSUPER_SHIFT)
+#define IOSUPER_SIZE		BIT(IOSUPER_SHIFT)
#define IOSUPER_MASK		(~(IOSUPER_SIZE - 1))

#define PTRS_PER_IOPGD		(1UL << (32 - IOPGD_SHIFT))
@@ -38,14 +43,14 @@
/*
 * "small page" address mask and size definitions.
 */
#define IOPTE_SHIFT		12
-#define IOPTE_SIZE		(1UL << IOPTE_SHIFT)
+#define IOPTE_SIZE		BIT(IOPTE_SHIFT)
#define IOPTE_MASK		(~(IOPTE_SIZE - 1))

/*
 * "large page" address mask and size definitions.
 */
#define IOLARGE_SHIFT		16
-#define IOLARGE_SIZE		(1UL << IOLARGE_SHIFT)
+#define IOLARGE_SIZE		BIT(IOLARGE_SHIFT)
#define IOLARGE_MASK		(~(IOLARGE_SIZE - 1))

#define PTRS_PER_IOPTE		(1UL << (IOPGD_SHIFT - IOPTE_SHIFT))
@@ -69,16 +74,16 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)
/*
 * some descriptor attributes.
 */
-#define IOPGD_TABLE		(1 << 0)
-#define IOPGD_SECTION		(2 << 0)
-#define IOPGD_SUPER		(1 << 18 | 2 << 0)
+#define IOPGD_TABLE		(1)
+#define IOPGD_SECTION		(2)
+#define IOPGD_SUPER		(BIT(18) | IOPGD_SECTION)

#define iopgd_is_table(x)	(((x) & 3) == IOPGD_TABLE)
#define iopgd_is_section(x)	(((x) & (1 << 18 | 3)) == IOPGD_SECTION)
#define iopgd_is_super(x)	(((x) & (1 << 18 | 3)) == IOPGD_SUPER)

-#define IOPTE_SMALL		(2 << 0)
-#define IOPTE_LARGE		(1 << 0)
+#define IOPTE_SMALL		(2)
+#define IOPTE_LARGE		(1)

#define iopte_is_small(x)	(((x) & 2) == IOPTE_SMALL)
#define iopte_is_large(x)	(((x) & 3) == IOPTE_LARGE)
@@ -93,3 +98,5 @@ static inline phys_addr_t omap_iommu_translate(u32 d, u32 va, u32 mask)

/* to find an entry in the second-level page table. */
#define iopte_index(da)		(((da) >> IOPTE_SHIFT) & (PTRS_PER_IOPTE - 1))
#define iopte_offset(iopgd, da)	(iopgd_page_vaddr(iopgd) + iopte_index(da))
+
+#endif /* _OMAP_IOPGTABLE_H */
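A worked example of the two-level walk these constants describe, for a device address da = 0x12345678 (iopgd_index() is defined earlier in this header):

	/* first level:  da >> 20               = 0x123 (1 MiB section)    */
	/* second level: (da >> 12) & (256 - 1) = 0x45  (4 KiB small page) */
	/*
	 * PTRS_PER_IOPGD = 1 << (32 - 20) = 4096 first-level entries,
	 * PTRS_PER_IOPTE = 1 << (20 - 12) =  256 second-level entries,
	 * so 4096 sections x 256 small pages cover the 32-bit DA space.
	 */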
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>

-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra114-mc.h>

#include "mc.h"
@@ -914,20 +912,6 @@ static const struct tegra_smmu_swgroup tegra114_swgroups[] = {
	{ .name = "tsec", .swgroup = TEGRA_SWGROUP_TSEC, .reg = 0x294 },
};

-static void tegra114_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra114_smmu_ops = {
-	.flush_dcache = tegra114_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra114_smmu_soc = {
	.clients = tegra114_mc_clients,
	.num_clients = ARRAY_SIZE(tegra114_mc_clients),
@@ -935,8 +919,8 @@ static const struct tegra_smmu_soc tegra114_smmu_soc = {
	.num_swgroups = ARRAY_SIZE(tegra114_swgroups),
	.supports_round_robin_arbitration = false,
	.supports_request_limit = false,
+	.num_tlb_lines = 32,
	.num_asids = 4,
-	.ops = &tegra114_smmu_ops,
};

const struct tegra_mc_soc tegra114_mc_soc = {
...
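Note: the removed flush_dcache callbacks are the DMA-API misuse called out in the merge description — they invoked ARM-specific primitives (__cpuc_flush_dcache_area(), outer_flush_range()) directly from a portable driver. After the "Convert to use DMA API" change in this series, pagetable pages are mapped for the SMMU and synced with streaming DMA calls, which select the correct cache maintenance per platform. A minimal sketch of the pattern, with smmu->dev, pt_page and pte_offset as illustrative names rather than the driver's exact code:

	dma_addr_t pt_dma;

	/* map the pagetable page once, so the SMMU can walk it */
	pt_dma = dma_map_page(smmu->dev, pt_page, 0, SZ_4K, DMA_TO_DEVICE);
	if (dma_mapping_error(smmu->dev, pt_dma))
		return -ENOMEM;

	/* after the CPU updates a PTE, push the write out to the device */
	dma_sync_single_range_for_device(smmu->dev, pt_dma, pte_offset,
					 sizeof(u32), DMA_TO_DEVICE);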
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>

-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra124-mc.h>

#include "mc.h"
@@ -1002,20 +1000,6 @@ static const struct tegra_smmu_swgroup tegra124_swgroups[] = {
};

#ifdef CONFIG_ARCH_TEGRA_124_SOC
-static void tegra124_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra124_smmu_ops = {
-	.flush_dcache = tegra124_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra124_smmu_soc = {
	.clients = tegra124_mc_clients,
	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1024,7 +1008,6 @@ static const struct tegra_smmu_soc tegra124_smmu_soc = {
	.supports_round_robin_arbitration = true,
	.supports_request_limit = true,
	.num_asids = 128,
-	.ops = &tegra124_smmu_ops,
};

const struct tegra_mc_soc tegra124_mc_soc = {
@@ -1040,18 +1023,6 @@ const struct tegra_mc_soc tegra124_mc_soc = {
#endif /* CONFIG_ARCH_TEGRA_124_SOC */

#ifdef CONFIG_ARCH_TEGRA_132_SOC
-static void tegra132_flush_dcache(struct page *page, unsigned long offset,
-				  size_t size)
-{
-	void *virt = page_address(page) + offset;
-
-	__flush_dcache_area(virt, size);
-}
-
-static const struct tegra_smmu_ops tegra132_smmu_ops = {
-	.flush_dcache = tegra132_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra132_smmu_soc = {
	.clients = tegra124_mc_clients,
	.num_clients = ARRAY_SIZE(tegra124_mc_clients),
@@ -1059,8 +1030,8 @@ static const struct tegra_smmu_soc tegra132_smmu_soc = {
	.num_swgroups = ARRAY_SIZE(tegra124_swgroups),
	.supports_round_robin_arbitration = true,
	.supports_request_limit = true,
+	.num_tlb_lines = 32,
	.num_asids = 128,
-	.ops = &tegra132_smmu_ops,
};

const struct tegra_mc_soc tegra132_mc_soc = {
...
@@ -9,8 +9,6 @@
#include <linux/of.h>
#include <linux/mm.h>

-#include <asm/cacheflush.h>
-
#include <dt-bindings/memory/tegra30-mc.h>

#include "mc.h"
@@ -936,20 +934,6 @@ static const struct tegra_smmu_swgroup tegra30_swgroups[] = {
	{ .name = "isp", .swgroup = TEGRA_SWGROUP_ISP, .reg = 0x258 },
};

-static void tegra30_flush_dcache(struct page *page, unsigned long offset,
-				 size_t size)
-{
-	phys_addr_t phys = page_to_phys(page) + offset;
-	void *virt = page_address(page) + offset;
-
-	__cpuc_flush_dcache_area(virt, size);
-	outer_flush_range(phys, phys + size);
-}
-
-static const struct tegra_smmu_ops tegra30_smmu_ops = {
-	.flush_dcache = tegra30_flush_dcache,
-};
-
static const struct tegra_smmu_soc tegra30_smmu_soc = {
	.clients = tegra30_mc_clients,
	.num_clients = ARRAY_SIZE(tegra30_mc_clients),
@@ -957,8 +941,8 @@ static const struct tegra_smmu_soc tegra30_smmu_soc = {
	.num_swgroups = ARRAY_SIZE(tegra30_swgroups),
	.supports_round_robin_arbitration = false,
	.supports_request_limit = false,
+	.num_tlb_lines = 16,
	.num_asids = 4,
-	.ops = &tegra30_smmu_ops,
};

const struct tegra_mc_soc tegra30_mc_soc = {
...
@@ -344,7 +344,7 @@ struct intel_iommu {
#ifdef CONFIG_INTEL_IOMMU
	unsigned long *domain_ids;	/* bitmap of domains */
-	struct dmar_domain **domains;	/* ptr to domains */
+	struct dmar_domain ***domains;	/* ptr to domains */
	spinlock_t lock;		/* protect context, domain ids */
	struct root_entry *root_entry;	/* virtual address */
...
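Note: the extra level of indirection turns the per-IOMMU domain table into a two-level array — 16-bit domain-ids are split 8/8, and a 256-entry second-level chunk is allocated only once some id in that range is used. A lookup sketch modelled on the VT-d helper (names approximate, not a verbatim quote of the driver):

static struct dmar_domain *get_iommu_domain(struct intel_iommu *iommu,
					    u16 did)
{
	struct dmar_domain **domains;
	int idx = did >> 8;		/* first-level slot, 0..255 */

	domains = iommu->domains[idx];
	if (!domains)			/* chunk never allocated */
		return NULL;

	return domains[did & 0xff];	/* second-level slot */
}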
@@ -51,11 +51,6 @@ struct tegra_smmu_swgroup {
	unsigned int reg;
};

-struct tegra_smmu_ops {
-	void (*flush_dcache)(struct page *page, unsigned long offset,
-			     size_t size);
-};
-
struct tegra_smmu_soc {
	const struct tegra_mc_client *clients;
	unsigned int num_clients;
@@ -66,9 +61,8 @@ struct tegra_smmu_soc {
	bool supports_round_robin_arbitration;
	bool supports_request_limit;

+	unsigned int num_tlb_lines;
	unsigned int num_asids;

-	const struct tegra_smmu_ops *ops;
};

struct tegra_mc;
...
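Note: num_tlb_lines lets the SMMU driver program its TLB configuration from per-SoC data (16 lines on Tegra30, 32 on Tegra114/132 as set above) instead of one hard-coded constant. A hypothetical sketch of how probe code might consume it — register and helper names here are illustrative, not the driver's actual API:

static void smmu_setup_tlb(struct tegra_smmu *smmu)
{
	u32 value;

	/* enable only as many TLB lines as this SoC implements */
	value = SMMU_TLB_CONFIG_HIT_UNDER_MISS |
		SMMU_TLB_CONFIG_ACTIVE_LINES(smmu->soc->num_tlb_lines);
	smmu_writel(smmu, value, SMMU_TLB_CONFIG);
}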