Commit 8d140667 authored by Linus Torvalds

Merge tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu

Pull IOMMU updates from Joerg Roedel:
 "This time with:

   - A new IOMMU-API call: iommu_map_sg() to map multiple non-contiguous
     pages into an IO address space with only one API call.  This allows
     certain optimizations in the IOMMU driver.

   - DMAR device hotplug in the Intel VT-d driver.  It is now possible
     to hotplug the IOMMU itself.

   - A new IOMMU driver for the Rockchip ARM platform.

   - Couple of cleanups and improvements in the OMAP IOMMU driver.

   - Nesting support for the ARM-SMMU driver.

   - Various other small cleanups and improvements.

  Please note that this time some branches were also pulled into other
  trees, like the DRI and the Tegra tree.  The VT-d branch was also
  pulled into tip/x86/apic.

  Some patches for the AMD IOMMUv2 driver are not in the IOMMU tree but
  were merged by Andrew (or finally ended up in the DRI tree)"

* tag 'iommu-updates-v3.19' of git://git.kernel.org/pub/scm/linux/kernel/git/joro/iommu: (42 commits)
  iommu: Decouple iommu_map_sg from CPU page size
  iommu/vt-d: Fix an off-by-one bug in __domain_mapping()
  pci, ACPI, iommu: Enhance pci_root to support DMAR device hotplug
  iommu/vt-d: Enhance intel-iommu driver to support DMAR unit hotplug
  iommu/vt-d: Enhance error recovery in function intel_enable_irq_remapping()
  iommu/vt-d: Enhance intel_irq_remapping driver to support DMAR unit hotplug
  iommu/vt-d: Search for ACPI _DSM method for DMAR hotplug
  iommu/vt-d: Implement DMAR unit hotplug framework
  iommu/vt-d: Dynamically allocate and free seq_id for DMAR units
  iommu/vt-d: Introduce helper function dmar_walk_resources()
  iommu/arm-smmu: add support for DOMAIN_ATTR_NESTING attribute
  iommu/arm-smmu: Play nice on non-ARM/SMMU systems
  iommu/amd: remove compiler warning due to IOMMU_CAP_NOEXEC
  iommu/arm-smmu: add IOMMU_CAP_NOEXEC to the ARM SMMU driver
  iommu: add capability IOMMU_CAP_NOEXEC
  iommu/arm-smmu: change IOMMU_EXEC to IOMMU_NOEXEC
  iommu/amd: Fix accounting of device_state
  x86/vt-d: Fix incorrect bit operations in setting values
  iommu/rockchip: Allow to compile with COMPILE_TEST
  iommu/ipmmu-vmsa: Return proper error if devm_request_irq fails
  ...
parents 87c779ba 76771c93
Rockchip IOMMU
==============
A Rockchip DRM IOMMU translates I/O virtual addresses to physical addresses for
its master device. Each slave device is bound to a single master device, and
shares its clocks, power domain and IRQ.
Required properties:
- compatible : Should be "rockchip,iommu"
- reg : Address space for the configuration registers
- interrupts : Interrupt specifier for the IOMMU instance
- interrupt-names : Interrupt name for the IOMMU instance
- #iommu-cells : Should be <0>. This indicates the iommu is a
"single-master" device, and needs no additional information
to associate with its master device. See:
Documentation/devicetree/bindings/iommu/iommu.txt
Example:
vopl_mmu: iommu@ff940300 {
compatible = "rockchip,iommu";
reg = <0xff940300 0x100>;
interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "vopl_mmu";
#iommu-cells = <0>;
};
...@@ -33,6 +33,7 @@ ...@@ -33,6 +33,7 @@
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/pci-acpi.h> #include <linux/pci-acpi.h>
#include <linux/pci-aspm.h> #include <linux/pci-aspm.h>
#include <linux/dmar.h>
#include <linux/acpi.h> #include <linux/acpi.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/dmi.h> #include <linux/dmi.h>
...@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device, ...@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
struct acpi_pci_root *root; struct acpi_pci_root *root;
acpi_handle handle = device->handle; acpi_handle handle = device->handle;
int no_aspm = 0, clear_aspm = 0; int no_aspm = 0, clear_aspm = 0;
bool hotadd = system_state != SYSTEM_BOOTING;
root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
if (!root) if (!root)
...@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device, ...@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
device->driver_data = root; device->driver_data = root;
if (hotadd && dmar_device_add(handle)) {
result = -ENXIO;
goto end;
}
pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
acpi_device_name(device), acpi_device_bid(device), acpi_device_name(device), acpi_device_bid(device),
root->segment, &root->secondary); root->segment, &root->secondary);
...@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device, ...@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
root->segment, (unsigned int)root->secondary.start); root->segment, (unsigned int)root->secondary.start);
device->driver_data = NULL; device->driver_data = NULL;
result = -ENODEV; result = -ENODEV;
goto end; goto remove_dmar;
} }
if (clear_aspm) { if (clear_aspm) {
...@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device, ...@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
if (device->wakeup.flags.run_wake) if (device->wakeup.flags.run_wake)
device_set_run_wake(root->bus->bridge, true); device_set_run_wake(root->bus->bridge, true);
if (system_state != SYSTEM_BOOTING) { if (hotadd) {
pcibios_resource_survey_bus(root->bus); pcibios_resource_survey_bus(root->bus);
pci_assign_unassigned_root_bus_resources(root->bus); pci_assign_unassigned_root_bus_resources(root->bus);
} }
...@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device, ...@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
pci_unlock_rescan_remove(); pci_unlock_rescan_remove();
return 1; return 1;
remove_dmar:
if (hotadd)
dmar_device_remove(handle);
end: end:
kfree(root); kfree(root);
return result; return result;
...@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device) ...@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
pci_remove_root_bus(root->bus); pci_remove_root_bus(root->bus);
dmar_device_remove(device->handle);
pci_unlock_rescan_remove(); pci_unlock_rescan_remove();
kfree(root); kfree(root);
......
...@@ -144,14 +144,27 @@ config OMAP_IOMMU ...@@ -144,14 +144,27 @@ config OMAP_IOMMU
select IOMMU_API select IOMMU_API
config OMAP_IOMMU_DEBUG config OMAP_IOMMU_DEBUG
tristate "Export OMAP IOMMU internals in DebugFS" bool "Export OMAP IOMMU internals in DebugFS"
depends on OMAP_IOMMU && DEBUG_FS depends on OMAP_IOMMU && DEBUG_FS
help ---help---
Select this to see extensive information about Select this to see extensive information about
the internal state of OMAP IOMMU in debugfs. the internal state of OMAP IOMMU in debugfs.
Say N unless you know you need this. Say N unless you know you need this.
config ROCKCHIP_IOMMU
bool "Rockchip IOMMU Support"
depends on ARM
depends on ARCH_ROCKCHIP || COMPILE_TEST
select IOMMU_API
select ARM_DMA_USE_IOMMU
help
Support for IOMMUs found on Rockchip rk32xx SOCs.
These IOMMUs allow virtualization of the address space used by most
cores within the multimedia subsystem.
Say Y here if you are using a Rockchip SoC that includes an IOMMU
device.
config TEGRA_IOMMU_GART config TEGRA_IOMMU_GART
bool "Tegra GART IOMMU Support" bool "Tegra GART IOMMU Support"
depends on ARCH_TEGRA_2x_SOC depends on ARCH_TEGRA_2x_SOC
......
...@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o ...@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
......
...@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap) ...@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
return true; return true;
case IOMMU_CAP_INTR_REMAP: case IOMMU_CAP_INTR_REMAP:
return (irq_remapping_enabled == 1); return (irq_remapping_enabled == 1);
case IOMMU_CAP_NOEXEC:
return false;
} }
return false; return false;
......
...@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state) ...@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)
static void put_pasid_state(struct pasid_state *pasid_state) static void put_pasid_state(struct pasid_state *pasid_state)
{ {
if (atomic_dec_and_test(&pasid_state->count)) { if (atomic_dec_and_test(&pasid_state->count))
put_device_state(pasid_state->device_state);
wake_up(&pasid_state->wq); wake_up(&pasid_state->wq);
}
} }
static void put_pasid_state_wait(struct pasid_state *pasid_state) static void put_pasid_state_wait(struct pasid_state *pasid_state)
...@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state) ...@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)
prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);
if (atomic_dec_and_test(&pasid_state->count)) if (!atomic_dec_and_test(&pasid_state->count))
put_device_state(pasid_state->device_state);
else
schedule(); schedule();
finish_wait(&pasid_state->wq, &wait); finish_wait(&pasid_state->wq, &wait);
......
...@@ -404,9 +404,16 @@ struct arm_smmu_cfg { ...@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
enum arm_smmu_domain_stage {
ARM_SMMU_DOMAIN_S1 = 0,
ARM_SMMU_DOMAIN_S2,
ARM_SMMU_DOMAIN_NESTED,
};
struct arm_smmu_domain { struct arm_smmu_domain {
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct arm_smmu_cfg cfg; struct arm_smmu_cfg cfg;
enum arm_smmu_domain_stage stage;
spinlock_t lock; spinlock_t lock;
}; };
...@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, ...@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
if (smmu_domain->smmu) if (smmu_domain->smmu)
goto out_unlock; goto out_unlock;
if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
/* /*
* We will likely want to change this if/when KVM gets * Mapping the requested stage onto what we support is surprisingly
* involved. * complicated, mainly because the spec allows S1+S2 SMMUs without
* support for nested translation. That means we end up with the
* following table:
*
* Requested Supported Actual
* S1 N S1
* S1 S1+S2 S1
* S1 S2 S2
* S1 S1 S1
* N N N
* N S1+S2 S2
* N S2 S2
* N S1 S1
*
* Note that you can't actually request stage-2 mappings.
*/ */
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1:
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
start = smmu->num_s2_context_banks; start = smmu->num_s2_context_banks;
} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { break;
cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; case ARM_SMMU_DOMAIN_NESTED:
start = smmu->num_s2_context_banks; /*
} else { * We will likely want to change this if/when KVM gets
* involved.
*/
case ARM_SMMU_DOMAIN_S2:
cfg->cbar = CBAR_TYPE_S2_TRANS; cfg->cbar = CBAR_TYPE_S2_TRANS;
start = 0; start = 0;
break;
default:
ret = -EINVAL;
goto out_unlock;
} }
ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
...@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, ...@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
unsigned long pfn, int prot, int stage) unsigned long pfn, int prot, int stage)
{ {
pte_t *pte, *start; pte_t *pte, *start;
pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;
if (pmd_none(*pmd)) { if (pmd_none(*pmd)) {
/* Allocate a new set of tables */ /* Allocate a new set of tables */
...@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, ...@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
pteval |= ARM_SMMU_PTE_MEMATTR_NC; pteval |= ARM_SMMU_PTE_MEMATTR_NC;
} }
if (prot & IOMMU_NOEXEC)
pteval |= ARM_SMMU_PTE_XN;
/* If no access, create a faulting entry to avoid TLB fills */ /* If no access, create a faulting entry to avoid TLB fills */
if (prot & IOMMU_EXEC) if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_XN;
else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
pteval &= ~ARM_SMMU_PTE_PAGE; pteval &= ~ARM_SMMU_PTE_PAGE;
pteval |= ARM_SMMU_PTE_SH_IS; pteval |= ARM_SMMU_PTE_SH_IS;
...@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) ...@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
return true; return true;
case IOMMU_CAP_INTR_REMAP: case IOMMU_CAP_INTR_REMAP:
return true; /* MSIs are just memory writes */ return true; /* MSIs are just memory writes */
case IOMMU_CAP_NOEXEC:
return true;
default: default:
return false; return false;
} }
...@@ -1644,6 +1681,40 @@ static void arm_smmu_remove_device(struct device *dev) ...@@ -1644,6 +1681,40 @@ static void arm_smmu_remove_device(struct device *dev)
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
} }
static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
switch (attr) {
case DOMAIN_ATTR_NESTING:
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
return 0;
default:
return -ENODEV;
}
}
static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
enum iommu_attr attr, void *data)
{
struct arm_smmu_domain *smmu_domain = domain->priv;
switch (attr) {
case DOMAIN_ATTR_NESTING:
if (smmu_domain->smmu)
return -EPERM;
if (*(int *)data)
smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
return 0;
default:
return -ENODEV;
}
}
static const struct iommu_ops arm_smmu_ops = { static const struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable, .capable = arm_smmu_capable,
.domain_init = arm_smmu_domain_init, .domain_init = arm_smmu_domain_init,
...@@ -1656,6 +1727,8 @@ static const struct iommu_ops arm_smmu_ops = { ...@@ -1656,6 +1727,8 @@ static const struct iommu_ops arm_smmu_ops = {
.iova_to_phys = arm_smmu_iova_to_phys, .iova_to_phys = arm_smmu_iova_to_phys,
.add_device = arm_smmu_add_device, .add_device = arm_smmu_add_device,
.remove_device = arm_smmu_remove_device, .remove_device = arm_smmu_remove_device,
.domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr,
.pgsize_bitmap = (SECTION_SIZE | .pgsize_bitmap = (SECTION_SIZE |
ARM_SMMU_PTE_CONT_SIZE | ARM_SMMU_PTE_CONT_SIZE |
PAGE_SIZE), PAGE_SIZE),
...@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = { ...@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void) static int __init arm_smmu_init(void)
{ {
struct device_node *np;
int ret; int ret;
/*
* Play nice with systems that don't have an ARM SMMU by checking that
* an ARM SMMU exists in the system before proceeding with the driver
* and IOMMU bus operation registration.
*/
np = of_find_matching_node(NULL, arm_smmu_of_match);
if (!np)
return 0;
of_node_put(np);
ret = platform_driver_register(&arm_smmu_driver); ret = platform_driver_register(&arm_smmu_driver);
if (ret) if (ret)
return ret; return ret;
......
This diff is collapsed.
This diff is collapsed.
This diff is collapsed.
...@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, ...@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
{ {
struct scatterlist *s; struct scatterlist *s;
size_t mapped = 0; size_t mapped = 0;
unsigned int i; unsigned int i, min_pagesz;
int ret; int ret;
if (unlikely(domain->ops->pgsize_bitmap == 0UL))
return 0;
min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
for_each_sg(sg, s, nents, i) { for_each_sg(sg, s, nents, i) {
phys_addr_t phys = page_to_phys(sg_page(s)); phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
/* We are mapping on page boundarys, so offset must be 0 */ /*
if (s->offset) * We are mapping on IOMMU page boundaries, so offset within
* the page must be 0. However, the IOMMU may support pages
* smaller than PAGE_SIZE, so s->offset may still represent
* an offset of that boundary within the CPU page.
*/
if (!IS_ALIGNED(s->offset, min_pagesz))
goto out_err; goto out_err;
ret = iommu_map(domain, iova + mapped, phys, s->length, prot); ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
......
...@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev) ...@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
dev_name(&pdev->dev), mmu); dev_name(&pdev->dev), mmu);
if (ret < 0) { if (ret < 0) {
dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
return irq; return ret;
} }
ipmmu_device_reset(mmu); ipmmu_device_reset(mmu);
......
...@@ -73,7 +73,6 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata) ...@@ -73,7 +73,6 @@ static int __enable_clocks(struct msm_iommu_drvdata *drvdata)
static void __disable_clocks(struct msm_iommu_drvdata *drvdata) static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
{ {
if (drvdata->clk)
clk_disable(drvdata->clk); clk_disable(drvdata->clk);
clk_disable(drvdata->pclk); clk_disable(drvdata->pclk);
} }
......
...@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev) ...@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
struct clk *iommu_clk; struct clk *iommu_clk;
struct clk *iommu_pclk; struct clk *iommu_pclk;
struct msm_iommu_drvdata *drvdata; struct msm_iommu_drvdata *drvdata;
struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
void __iomem *regs_base; void __iomem *regs_base;
int ret, irq, par; int ret, irq, par;
...@@ -224,7 +224,6 @@ static int msm_iommu_probe(struct platform_device *pdev) ...@@ -224,7 +224,6 @@ static int msm_iommu_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, drvdata); platform_set_drvdata(pdev, drvdata);
if (iommu_clk)
clk_disable(iommu_clk); clk_disable(iommu_clk);
clk_disable(iommu_pclk); clk_disable(iommu_pclk);
...@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev) ...@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
static int msm_iommu_ctx_probe(struct platform_device *pdev) static int msm_iommu_ctx_probe(struct platform_device *pdev)
{ {
struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
struct msm_iommu_drvdata *drvdata; struct msm_iommu_drvdata *drvdata;
struct msm_iommu_ctx_drvdata *ctx_drvdata; struct msm_iommu_ctx_drvdata *ctx_drvdata;
int i, ret; int i, ret;
...@@ -323,7 +322,6 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev) ...@@ -323,7 +322,6 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
SET_NSCFG(drvdata->base, mid, 3); SET_NSCFG(drvdata->base, mid, 3);
} }
if (drvdata->clk)
clk_disable(drvdata->clk); clk_disable(drvdata->clk);
clk_disable(drvdata->pclk); clk_disable(drvdata->pclk);
......
...@@ -10,45 +10,35 @@ ...@@ -10,45 +10,35 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#include <linux/module.h>
#include <linux/err.h> #include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/uaccess.h> #include <linux/uaccess.h>
#include <linux/platform_device.h>
#include <linux/debugfs.h> #include <linux/debugfs.h>
#include <linux/omap-iommu.h>
#include <linux/platform_data/iommu-omap.h> #include <linux/platform_data/iommu-omap.h>
#include "omap-iopgtable.h" #include "omap-iopgtable.h"
#include "omap-iommu.h" #include "omap-iommu.h"
#define MAXCOLUMN 100 /* for short messages */
static DEFINE_MUTEX(iommu_debug_lock); static DEFINE_MUTEX(iommu_debug_lock);
static struct dentry *iommu_debug_root; static struct dentry *iommu_debug_root;
static ssize_t debug_read_ver(struct file *file, char __user *userbuf, static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
size_t count, loff_t *ppos)
{ {
u32 ver = omap_iommu_arch_version(); return !obj->domain;
char buf[MAXCOLUMN], *p = buf;
p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
} }
static ssize_t debug_read_regs(struct file *file, char __user *userbuf, static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf; char *p, *buf;
ssize_t bytes; ssize_t bytes;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL); buf = kmalloc(count, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
...@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf, ...@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf; char *p, *buf;
ssize_t bytes, rest; ssize_t bytes, rest;
if (is_omap_iommu_detached(obj))
return -EPERM;
buf = kmalloc(count, GFP_KERNEL); buf = kmalloc(count, GFP_KERNEL);
if (!buf) if (!buf)
return -ENOMEM; return -ENOMEM;
...@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, ...@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
return bytes; return bytes;
} }
static ssize_t debug_write_pagetable(struct file *file, static void dump_ioptable(struct seq_file *s)
const char __user *userbuf, size_t count, loff_t *ppos)
{
struct iotlb_entry e;
struct cr_regs cr;
int err;
struct device *dev = file->private_data;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char buf[MAXCOLUMN], *p = buf;
count = min(count, sizeof(buf));
mutex_lock(&iommu_debug_lock);
if (copy_from_user(p, userbuf, count)) {
mutex_unlock(&iommu_debug_lock);
return -EFAULT;
}
sscanf(p, "%x %x", &cr.cam, &cr.ram);
if (!cr.cam || !cr.ram) {
mutex_unlock(&iommu_debug_lock);
return -EINVAL;
}
omap_iotlb_cr_to_e(&cr, &e);
err = omap_iopgtable_store_entry(obj, &e);
if (err)
dev_err(obj->dev, "%s: fail to store cr\n", __func__);
mutex_unlock(&iommu_debug_lock);
return count;
}
#define dump_ioptable_entry_one(lv, da, val) \
({ \
int __err = 0; \
ssize_t bytes; \
const int maxcol = 22; \
const char *str = "%d: %08x %08x\n"; \
bytes = snprintf(p, maxcol, str, lv, da, val); \
p += bytes; \
len -= bytes; \
if (len < maxcol) \
__err = -ENOMEM; \
__err; \
})
static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
{ {
int i; int i, j;
u32 *iopgd; u32 da;
char *p = buf; u32 *iopgd, *iopte;
struct omap_iommu *obj = s->private;
spin_lock(&obj->page_table_lock); spin_lock(&obj->page_table_lock);
iopgd = iopgd_offset(obj, 0); iopgd = iopgd_offset(obj, 0);
for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
int j, err;
u32 *iopte;
u32 da;
if (!*iopgd) if (!*iopgd)
continue; continue;
if (!(*iopgd & IOPGD_TABLE)) { if (!(*iopgd & IOPGD_TABLE)) {
da = i << IOPGD_SHIFT; da = i << IOPGD_SHIFT;
seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
err = dump_ioptable_entry_one(1, da, *iopgd);
if (err)
goto out;
continue; continue;
} }
iopte = iopte_offset(iopgd, 0); iopte = iopte_offset(iopgd, 0);
for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
if (!*iopte) if (!*iopte)
continue; continue;
da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
err = dump_ioptable_entry_one(2, da, *iopgd); seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
if (err)
goto out;
} }
} }
out:
spin_unlock(&obj->page_table_lock);
return p - buf; spin_unlock(&obj->page_table_lock);
} }
static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, static int debug_read_pagetable(struct seq_file *s, void *data)
size_t count, loff_t *ppos)
{ {
struct device *dev = file->private_data; struct omap_iommu *obj = s->private;
struct omap_iommu *obj = dev_to_omap_iommu(dev);
char *p, *buf;
size_t bytes;
buf = (char *)__get_free_page(GFP_KERNEL);
if (!buf)
return -ENOMEM;
p = buf;
p += sprintf(p, "L: %8s %8s\n", "da:", "pa:"); if (is_omap_iommu_detached(obj))
p += sprintf(p, "-----------------------------------------\n"); return -EPERM;
mutex_lock(&iommu_debug_lock); mutex_lock(&iommu_debug_lock);
bytes = PAGE_SIZE - (p - buf); seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
p += dump_ioptable(obj, p, bytes); seq_puts(s, "--------------------------\n");
dump_ioptable(s);
bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
mutex_unlock(&iommu_debug_lock); mutex_unlock(&iommu_debug_lock);
free_page((unsigned long)buf);
return bytes; return 0;
} }
#define DEBUG_FOPS(name) \ #define DEBUG_SEQ_FOPS_RO(name) \
static int debug_open_##name(struct inode *inode, struct file *file) \
{ \
return single_open(file, debug_read_##name, inode->i_private); \
} \
\
static const struct file_operations debug_##name##_fops = { \ static const struct file_operations debug_##name##_fops = { \
.open = simple_open, \ .open = debug_open_##name, \
.read = debug_read_##name, \ .read = seq_read, \
.write = debug_write_##name, \ .llseek = seq_lseek, \
.llseek = generic_file_llseek, \ .release = single_release, \
}; }
#define DEBUG_FOPS_RO(name) \ #define DEBUG_FOPS_RO(name) \
static const struct file_operations debug_##name##_fops = { \ static const struct file_operations debug_##name##_fops = { \
...@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, ...@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
.llseek = generic_file_llseek, \ .llseek = generic_file_llseek, \
}; };
DEBUG_FOPS_RO(ver);
DEBUG_FOPS_RO(regs); DEBUG_FOPS_RO(regs);
DEBUG_FOPS_RO(tlb); DEBUG_FOPS_RO(tlb);
DEBUG_FOPS(pagetable); DEBUG_SEQ_FOPS_RO(pagetable);
#define __DEBUG_ADD_FILE(attr, mode) \ #define __DEBUG_ADD_FILE(attr, mode) \
{ \ { \
struct dentry *dent; \ struct dentry *dent; \
dent = debugfs_create_file(#attr, mode, parent, \ dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
dev, &debug_##attr##_fops); \ obj, &debug_##attr##_fops); \
if (!dent) \ if (!dent) \
return -ENOMEM; \ goto err; \
} }
#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) #define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
static int iommu_debug_register(struct device *dev, void *data) void omap_iommu_debugfs_add(struct omap_iommu *obj)
{ {
struct platform_device *pdev = to_platform_device(dev); struct dentry *d;
struct omap_iommu *obj = platform_get_drvdata(pdev);
struct omap_iommu_arch_data *arch_data;
struct dentry *d, *parent;
if (!obj || !obj->dev)
return -EINVAL;
arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
if (!arch_data)
return -ENOMEM;
arch_data->iommu_dev = obj;
dev->archdata.iommu = arch_data; if (!iommu_debug_root)
return;
d = debugfs_create_dir(obj->name, iommu_debug_root); obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
if (!d) if (!obj->debug_dir)
goto nomem; return;
parent = d;
d = debugfs_create_u8("nr_tlb_entries", 400, parent, d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
(u8 *)&obj->nr_tlb_entries); (u8 *)&obj->nr_tlb_entries);
if (!d) if (!d)
goto nomem; return;
DEBUG_ADD_FILE_RO(ver);
DEBUG_ADD_FILE_RO(regs); DEBUG_ADD_FILE_RO(regs);
DEBUG_ADD_FILE_RO(tlb); DEBUG_ADD_FILE_RO(tlb);
DEBUG_ADD_FILE(pagetable); DEBUG_ADD_FILE_RO(pagetable);
return 0; return;
nomem: err:
kfree(arch_data); debugfs_remove_recursive(obj->debug_dir);
return -ENOMEM;
} }
static int iommu_debug_unregister(struct device *dev, void *data) void omap_iommu_debugfs_remove(struct omap_iommu *obj)
{ {
if (!dev->archdata.iommu) if (!obj->debug_dir)
return 0; return;
kfree(dev->archdata.iommu);
dev->archdata.iommu = NULL; debugfs_remove_recursive(obj->debug_dir);
return 0;
} }
static int __init iommu_debug_init(void) void __init omap_iommu_debugfs_init(void)
{ {
struct dentry *d; iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
int err; if (!iommu_debug_root)
pr_err("can't create debugfs dir\n");
d = debugfs_create_dir("iommu", NULL);
if (!d)
return -ENOMEM;
iommu_debug_root = d;
err = omap_foreach_iommu_device(d, iommu_debug_register);
if (err)
goto err_out;
return 0;
err_out:
debugfs_remove_recursive(iommu_debug_root);
return err;
} }
module_init(iommu_debug_init)
static void __exit iommu_debugfs_exit(void) void __exit omap_iommu_debugfs_exit(void)
{ {
debugfs_remove_recursive(iommu_debug_root); debugfs_remove(iommu_debug_root);
omap_foreach_iommu_device(NULL, iommu_debug_unregister);
} }
module_exit(iommu_debugfs_exit)
MODULE_DESCRIPTION("omap iommu: debugfs interface");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");
This diff is collapsed.
...@@ -10,9 +10,8 @@ ...@@ -10,9 +10,8 @@
* published by the Free Software Foundation. * published by the Free Software Foundation.
*/ */
#if defined(CONFIG_ARCH_OMAP1) #ifndef _OMAP_IOMMU_H
#error "iommu for this processor not implemented yet" #define _OMAP_IOMMU_H
#endif
struct iotlb_entry { struct iotlb_entry {
u32 da; u32 da;
...@@ -30,10 +29,9 @@ struct omap_iommu { ...@@ -30,10 +29,9 @@ struct omap_iommu {
const char *name; const char *name;
void __iomem *regbase; void __iomem *regbase;
struct device *dev; struct device *dev;
void *isr_priv;
struct iommu_domain *domain; struct iommu_domain *domain;
struct dentry *debug_dir;
unsigned int refcount;
spinlock_t iommu_lock; /* global for this whole object */ spinlock_t iommu_lock; /* global for this whole object */
/* /*
...@@ -67,34 +65,6 @@ struct cr_regs { ...@@ -67,34 +65,6 @@ struct cr_regs {
}; };
}; };
/* architecture specific functions */
struct iommu_functions {
unsigned long version;
int (*enable)(struct omap_iommu *obj);
void (*disable)(struct omap_iommu *obj);
void (*set_twl)(struct omap_iommu *obj, bool on);
u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
struct iotlb_entry *e);
int (*cr_valid)(struct cr_regs *cr);
u32 (*cr_to_virt)(struct cr_regs *cr);
void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
char *buf);
u32 (*get_pte_attr)(struct iotlb_entry *e);
void (*save_ctx)(struct omap_iommu *obj);
void (*restore_ctx)(struct omap_iommu *obj);
ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
};
#ifdef CONFIG_IOMMU_API
/** /**
* dev_to_omap_iommu() - retrieves an omap iommu object from a user device * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
* @dev: iommu client device * @dev: iommu client device
...@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) ...@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
return arch_data->iommu_dev; return arch_data->iommu_dev;
} }
#endif
/* /*
* MMU Register offsets * MMU Register offsets
...@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) ...@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* /*
* MMU Register bit definitions * MMU Register bit definitions
*/ */
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
/* CAM */
#define MMU_CAM_VATAG_SHIFT 12 #define MMU_CAM_VATAG_SHIFT 12
#define MMU_CAM_VATAG_MASK \ #define MMU_CAM_VATAG_MASK \
((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
...@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) ...@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_CAM_PGSZ_4K (2 << 0) #define MMU_CAM_PGSZ_4K (2 << 0)
#define MMU_CAM_PGSZ_16M (3 << 0) #define MMU_CAM_PGSZ_16M (3 << 0)
/* RAM */
#define MMU_RAM_PADDR_SHIFT 12 #define MMU_RAM_PADDR_SHIFT 12
#define MMU_RAM_PADDR_MASK \ #define MMU_RAM_PADDR_MASK \
((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
...@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) ...@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 #define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
#define get_cam_va_mask(pgsz) \
(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/* /*
* utilities for super page(16MB, 1MB, 64KB and 4KB) * utilities for super page(16MB, 1MB, 64KB and 4KB)
*/ */
...@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev) ...@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
/* /*
* global functions * global functions
*/ */
extern u32 omap_iommu_arch_version(void); #ifdef CONFIG_OMAP_IOMMU_DEBUG
extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
extern int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
extern void omap_iommu_save_ctx(struct device *dev);
extern void omap_iommu_restore_ctx(struct device *dev);
extern int omap_foreach_iommu_device(void *data,
int (*fn)(struct device *, void *));
extern int omap_install_iommu_arch(const struct iommu_functions *ops);
extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
extern ssize_t extern ssize_t
omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
extern size_t extern size_t
omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
void omap_iommu_debugfs_init(void);
void omap_iommu_debugfs_exit(void);
void omap_iommu_debugfs_add(struct omap_iommu *obj);
void omap_iommu_debugfs_remove(struct omap_iommu *obj);
#else
static inline void omap_iommu_debugfs_init(void) { }
static inline void omap_iommu_debugfs_exit(void) { }
static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
#endif
/* /*
* register accessors * register accessors
*/ */
...@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs) ...@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
{ {
__raw_writel(val, obj->regbase + offs); __raw_writel(val, obj->regbase + offs);
} }
#endif /* _OMAP_IOMMU_H */
/*
* omap iommu: omap2/3 architecture specific functions
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
* Paul Mundt and Toshihiro Kobayashi
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/omap-iommu.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/platform_data/iommu-omap.h>
#include "omap-iommu.h"
/*
* omap2 architecture specific register bit definitions
*/
#define IOMMU_ARCH_VERSION 0x00000011
/* IRQSTATUS & IRQENABLE */
#define MMU_IRQ_MULTIHITFAULT (1 << 4)
#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
#define MMU_IRQ_EMUMISS (1 << 2)
#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
#define MMU_IRQ_TLBMISS (1 << 0)
#define __MMU_IRQ_FAULT \
(MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
#define MMU_IRQ_MASK \
(__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
/* MMU_CNTL */
#define MMU_CNTL_SHIFT 1
#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
#define MMU_CNTL_EML_TLB (1 << 3)
#define MMU_CNTL_TWL_EN (1 << 2)
#define MMU_CNTL_MMU_EN (1 << 1)
/*
 * get_cam_va_mask() - virtual-address mask for a CAM entry of page size
 * @pgsz (one of the MMU_CAM_PGSZ_* codes); evaluates to 0 for an unknown
 * size code.  @pgsz may be evaluated several times, so pass a
 * side-effect-free expression.
 */
#define get_cam_va_mask(pgsz) \
	(((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
	 ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
	 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
	 ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
/* IOMMU errors */
#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
/*
 * __iommu_set_twl - enable the MMU and, when @on, its table walk logic.
 *
 * Selects the matching IRQ mask first (TWL faults vs. TLB-miss faults),
 * then rewrites the MMU_CNTL enable bits while preserving the rest of
 * the register.
 */
static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 cntl = iommu_read_reg(obj, MMU_CNTL);

	iommu_write_reg(obj, on ? MMU_IRQ_TWL_MASK : MMU_IRQ_TLB_MISS_MASK,
			MMU_IRQENABLE);

	cntl &= ~MMU_CNTL_MASK;
	cntl |= MMU_CNTL_MMU_EN;
	if (on)
		cntl |= MMU_CNTL_TWL_EN;

	iommu_write_reg(obj, cntl, MMU_CNTL);
}
/*
 * omap2_iommu_enable - bring the MMU up with obj->iopgd as its page table.
 *
 * Returns 0 on success, -EINVAL when the page-table pointer or its
 * physical address is missing or not 16K-aligned (a hardware requirement
 * for the TTB register).
 */
static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 rev, pgd_pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pgd_pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pgd_pa, SZ_16K))
		return -EINVAL;

	rev = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (rev >> 4) & 0xf, rev & 0xf);

	iommu_write_reg(obj, pgd_pa, MMU_TTB);

	/* Optionally report bus errors back to the initiator. */
	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}
/*
 * omap2_iommu_disable - switch the MMU off by clearing the MMU_CNTL
 * enable bits, leaving the remaining control bits untouched.
 */
static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 cntl = iommu_read_reg(obj, MMU_CNTL);

	iommu_write_reg(obj, cntl & ~MMU_CNTL_MASK, MMU_CNTL);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}
/*
 * omap2_iommu_set_twl - arch callback for toggling table walk logic.
 *
 * NOTE(review): @on is ignored and TWL is always switched off here —
 * this matches the visible call, but confirm against the hardware docs
 * whether omap2/3 genuinely never wants TWL enabled via this path.
 */
static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
{
	__iommu_set_twl(obj, false);
}
/*
 * omap2_iommu_fault_isr - decode and acknowledge pending MMU faults.
 * @obj: iommu whose IRQ status is examined
 * @ra:  out-parameter; receives the faulting device address (0 when no
 *       fault is pending)
 *
 * Returns a bitmask of OMAP_IOMMU_ERR_* codes, or 0 when no enabled
 * fault bit is set.  The handled status bits are written back to
 * MMU_IRQSTATUS to clear them.
 */
static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
{
	/* Translation from MMU IRQ status bits to generic error bits. */
	static const struct {
		u32 irq;
		u32 err;
	} fault_map[] = {
		{ MMU_IRQ_TLBMISS,		OMAP_IOMMU_ERR_TLB_MISS },
		{ MMU_IRQ_TRANSLATIONFAULT,	OMAP_IOMMU_ERR_TRANS_FAULT },
		{ MMU_IRQ_EMUMISS,		OMAP_IOMMU_ERR_EMU_MISS },
		{ MMU_IRQ_TABLEWALKFAULT,	OMAP_IOMMU_ERR_TBLWALK_FAULT },
		{ MMU_IRQ_MULTIHITFAULT,	OMAP_IOMMU_ERR_MULTIHIT_FAULT },
	};
	u32 stat, errs = 0;
	int i;

	stat = iommu_read_reg(obj, MMU_IRQSTATUS) & MMU_IRQ_MASK;
	if (!stat) {
		*ra = 0;
		return 0;
	}

	*ra = iommu_read_reg(obj, MMU_FAULT_AD);

	for (i = 0; i < (int)(sizeof(fault_map) / sizeof(fault_map[0])); i++)
		if (stat & fault_map[i].irq)
			errs |= fault_map[i].err;

	/* Acknowledge (clear) the bits we just handled. */
	iommu_write_reg(obj, stat, MMU_IRQSTATUS);

	return errs;
}
/*
 * omap2_tlb_read_cr - latch the CAM/RAM words of the currently selected
 * TLB entry into @cr via the MMU_READ_CAM/MMU_READ_RAM registers.  Which
 * entry is selected is set up by the caller — TODO confirm (lock/victim
 * registers are not touched here).
 */
static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}
/*
 * omap2_tlb_load_cr - program a TLB entry from @cr, forcing the valid
 * bit on in the CAM word.  CAM is written before RAM; keep this order.
 */
static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);
}
/*
 * omap2_cr_to_virt - extract the virtual (device) address from a CAM word
 * by masking off the page-offset bits for the entry's page size.
 *
 * Note: the original computed get_cam_va_mask(cr->cam & pgsz); since pgsz
 * is itself masked out of cr->cam, that expression reduces to pgsz.
 */
static u32 omap2_cr_to_virt(struct cr_regs *cr)
{
	u32 pgsz = cr->cam & MMU_CAM_PGSZ_MASK;

	return cr->cam & get_cam_va_mask(pgsz);
}
/*
 * omap2_alloc_cr - build a CAM/RAM register pair for TLB entry @e.
 *
 * Returns a kmalloc'd cr_regs the caller must free, or an ERR_PTR:
 * -EINVAL when @e->da is not aligned to @e->pgsz, -ENOMEM on allocation
 * failure.
 */
static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *regs;

	/* The device address must be aligned to the requested page size. */
	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	regs = kmalloc(sizeof(*regs), GFP_KERNEL);
	if (!regs)
		return ERR_PTR(-ENOMEM);

	/* Pack the entry fields into the hardware CAM/RAM layout. */
	regs->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	regs->ram = e->pa | e->endian | e->elsz | e->mixed;

	return regs;
}
/* Nonzero when the CAM word's valid bit (MMU_CAM_V) is set. */
static inline int omap2_cr_valid(struct cr_regs *cr)
{
	return cr->cam & MMU_CAM_V;
}
/*
 * omap2_get_pte_attr - derive page-table attribute bits from entry @e.
 *
 * Packs the mixed, endian and element-size fields, then shifts the
 * result 6 bits higher for page sizes other than 4K and 64K.
 */
static u32 omap2_get_pte_attr(struct iotlb_entry *e)
{
	u32 attr = (e->mixed << 5) | e->endian | (e->elsz >> 3);

	if ((e->pgsz != MMU_CAM_PGSZ_4K) && (e->pgsz != MMU_CAM_PGSZ_64K))
		attr <<= 6;

	return attr;
}
/*
 * omap2_dump_cr - format one CAM/RAM pair (plus its preserved bit) into
 * @buf; returns the number of bytes written.
 */
static ssize_t
omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
{
	/* FIXME: Need more detail analysis of cam/ram */
	return sprintf(buf, "%08x %08x %01x\n", cr->cam, cr->ram,
		       (cr->cam & MMU_CAM_P) ? 1 : 0);
}
/*
 * pr_reg(name) - append "<name>: <value>" for register MMU_##name to the
 * output buffer.  Relies on locals 'p' (write cursor), 'len' (remaining
 * space) and 'obj' in the calling function, and jumps to the caller's
 * 'out' label once fewer than 'maxcol' bytes remain — only usable inside
 * functions shaped like omap2_iommu_dump_ctx() below.
 */
#define pr_reg(name)							\
	do {								\
		ssize_t bytes;						\
		const char *str = "%20s: %08x\n";			\
		const int maxcol = 32;					\
		bytes = snprintf(p, maxcol, str, __stringify(name),	\
				 iommu_read_reg(obj, MMU_##name));	\
		p += bytes;						\
		len -= bytes;						\
		if (len < maxcol)					\
			goto out;					\
	} while (0)
/*
 * omap2_iommu_dump_ctx - print the MMU register file into @buf.
 * @obj: target iommu
 * @buf: output buffer
 * @len: size of @buf in bytes
 *
 * Each pr_reg() appends one "name: value" line and bails out to the
 * 'out' label when less than one line of space remains, so the dump is
 * truncated rather than overrun.  Returns the number of bytes written.
 */
static ssize_t
omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
{
	char *p = buf;

	pr_reg(REVISION);
	pr_reg(IRQSTATUS);
	pr_reg(IRQENABLE);
	pr_reg(WALKING_ST);
	pr_reg(CNTL);
	pr_reg(FAULT_AD);
	pr_reg(TTB);
	pr_reg(LOCK);
	pr_reg(LD_TLB);
	pr_reg(CAM);
	pr_reg(RAM);
	pr_reg(GFLUSH);
	pr_reg(FLUSH_ENTRY);
	pr_reg(READ_CAM);
	pr_reg(READ_RAM);
	pr_reg(EMU_FAULT_AD);
out:
	return p - buf;
}
/*
 * omap2_iommu_save_ctx - snapshot the MMU register file into obj->ctx,
 * one 32-bit word per register offset.  BUG_ON sanity-checks that word 0
 * (the revision register) matches IOMMU_ARCH_VERSION.
 */
static void omap2_iommu_save_ctx(struct omap_iommu *obj)
{
	u32 *ctx = obj->ctx;
	int idx;

	for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
		ctx[idx] = iommu_read_reg(obj, idx * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx, ctx[idx]);
	}

	BUG_ON(ctx[0] != IOMMU_ARCH_VERSION);
}
/*
 * omap2_iommu_restore_ctx - write a previously saved obj->ctx snapshot
 * back into the MMU register file, in ascending register-offset order.
 * BUG_ON sanity-checks the saved revision word first.
 */
static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
{
	u32 *ctx = obj->ctx;
	int idx;

	for (idx = 0; idx < (MMU_REG_SIZE / sizeof(u32)); idx++) {
		iommu_write_reg(obj, ctx[idx], idx * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, idx, ctx[idx]);
	}

	BUG_ON(ctx[0] != IOMMU_ARCH_VERSION);
}
/*
 * omap2_cr_to_e - unpack a hardware CAM/RAM pair into the generic
 * iotlb_entry fields.  Inverse of the packing done in omap2_alloc_cr().
 */
static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	/* CAM word: virtual tag, page size and valid bit. */
	e->da = cr->cam & MMU_CAM_VATAG_MASK;
	e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
	e->valid = cr->cam & MMU_CAM_V;

	/* RAM word: physical address and access attributes. */
	e->pa = cr->ram & MMU_RAM_PADDR_MASK;
	e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
	e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
	e->mixed = cr->ram & MMU_RAM_MIXED;
}
/*
 * omap2/3-specific implementation of the omap-iommu arch callbacks;
 * registered with the core driver by omap2_iommu_init() below.
 */
static const struct iommu_functions omap2_iommu_ops = {
	.version	= IOMMU_ARCH_VERSION,
	.enable		= omap2_iommu_enable,
	.disable	= omap2_iommu_disable,
	.set_twl	= omap2_iommu_set_twl,
	.fault_isr	= omap2_iommu_fault_isr,
	.tlb_read_cr	= omap2_tlb_read_cr,
	.tlb_load_cr	= omap2_tlb_load_cr,
	.cr_to_e	= omap2_cr_to_e,
	.cr_to_virt	= omap2_cr_to_virt,
	.alloc_cr	= omap2_alloc_cr,
	.cr_valid	= omap2_cr_valid,
	.dump_cr	= omap2_dump_cr,
	.get_pte_attr	= omap2_get_pte_attr,
	.save_ctx	= omap2_iommu_save_ctx,
	.restore_ctx	= omap2_iommu_restore_ctx,
	.dump_ctx	= omap2_iommu_dump_ctx,
};
/* Module init: install the omap2/3 callbacks into the core omap-iommu driver. */
static int __init omap2_iommu_init(void)
{
	return omap_install_iommu_arch(&omap2_iommu_ops);
}
module_init(omap2_iommu_init);
/* Module exit: unregister the omap2/3 callbacks installed at init time. */
static void __exit omap2_iommu_exit(void)
{
	omap_uninstall_iommu_arch(&omap2_iommu_ops);
}
module_exit(omap2_iommu_exit);
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
MODULE_LICENSE("GPL v2");
This diff is collapsed.
...@@ -30,6 +30,12 @@ ...@@ -30,6 +30,12 @@
struct acpi_dmar_header; struct acpi_dmar_header;
#ifdef CONFIG_X86
# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
#else
# define DMAR_UNITS_SUPPORTED 64
#endif
/* DMAR Flags */ /* DMAR Flags */
#define DMAR_INTR_REMAP 0x1 #define DMAR_INTR_REMAP 0x1
#define DMAR_X2APIC_OPT_OUT 0x2 #define DMAR_X2APIC_OPT_OUT 0x2
...@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info, ...@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
/* Intel IOMMU detection */ /* Intel IOMMU detection */
extern int detect_intel_iommu(void); extern int detect_intel_iommu(void);
extern int enable_drhd_fault_handling(void); extern int enable_drhd_fault_handling(void);
extern int dmar_device_add(acpi_handle handle);
extern int dmar_device_remove(acpi_handle handle);
static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
{
return 0;
}
#ifdef CONFIG_INTEL_IOMMU #ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu; extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void); extern int intel_iommu_init(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */ #else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; } static inline int intel_iommu_init(void) { return -ENODEV; }
static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header)
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
#define dmar_check_one_atsr dmar_res_noop
#define dmar_release_one_atsr dmar_res_noop
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
{ {
return 0; return 0;
} }
static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header)
static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ {
return 0; return 0;
} }
static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) #endif /* CONFIG_INTEL_IOMMU */
#ifdef CONFIG_IRQ_REMAP
extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
#else /* CONFIG_IRQ_REMAP */
static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
{ return 0; }
#endif /* CONFIG_IRQ_REMAP */
#else /* CONFIG_DMAR_TABLE */
static inline int dmar_device_add(void *handle)
{
return 0;
}
static inline int dmar_device_remove(void *handle)
{ {
return 0; return 0;
} }
#endif /* CONFIG_INTEL_IOMMU */
#endif /* CONFIG_DMAR_TABLE */ #endif /* CONFIG_DMAR_TABLE */
......
...@@ -28,7 +28,7 @@ ...@@ -28,7 +28,7 @@
#define IOMMU_READ (1 << 0) #define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1) #define IOMMU_WRITE (1 << 1)
#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
#define IOMMU_EXEC (1 << 3) #define IOMMU_NOEXEC (1 << 3)
struct iommu_ops; struct iommu_ops;
struct iommu_group; struct iommu_group;
...@@ -62,6 +62,7 @@ enum iommu_cap { ...@@ -62,6 +62,7 @@ enum iommu_cap {
IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
transactions */ transactions */
IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
}; };
/* /*
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment