Commit 8f785154 authored by Robin Murphy's avatar Robin Murphy Committed by Will Deacon

iommu/arm-smmu: Implement of_xlate() for SMMUv3

Now that we can properly describe the mapping between PCI RIDs and
stream IDs via "iommu-map", and have it fed to the driver
automatically via of_xlate(), rework the SMMUv3 driver to benefit from
that, and get rid of the current misuse of the "iommus" binding.

Since having of_xlate wired up means that masters will now be given the
appropriate DMA ops, we also need to make sure that default domains work
properly. This necessitates dispensing with the "whole group at a time"
notion for attaching to a domain, as devices which share a group get
attached to the group's default domain one by one as they are initially
probed.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
parent dc87a98d
...@@ -30,6 +30,7 @@ ...@@ -30,6 +30,7 @@
#include <linux/msi.h> #include <linux/msi.h>
#include <linux/of.h> #include <linux/of.h>
#include <linux/of_address.h> #include <linux/of_address.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h> #include <linux/of_platform.h>
#include <linux/pci.h> #include <linux/pci.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
...@@ -610,12 +611,9 @@ struct arm_smmu_device { ...@@ -610,12 +611,9 @@ struct arm_smmu_device {
struct arm_smmu_strtab_cfg strtab_cfg; struct arm_smmu_strtab_cfg strtab_cfg;
}; };
/* SMMU private data for an IOMMU group */ /* SMMU private data for each master */
struct arm_smmu_group { struct arm_smmu_master_data {
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct arm_smmu_domain *domain;
int num_sids;
u32 *sids;
struct arm_smmu_strtab_ent ste; struct arm_smmu_strtab_ent ste;
}; };
...@@ -1555,20 +1553,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain) ...@@ -1555,20 +1553,6 @@ static int arm_smmu_domain_finalise(struct iommu_domain *domain)
return ret; return ret;
} }
/*
 * Look up the SMMU private data attached to @dev's IOMMU group.
 * Returns NULL if the device has no group (or the group carries no data).
 */
static struct arm_smmu_group *arm_smmu_group_get(struct device *dev)
{
struct iommu_group *group;
struct arm_smmu_group *smmu_group;
group = iommu_group_get(dev);
if (!group)
return NULL;
smmu_group = iommu_group_get_iommudata(group);
/* Drop the reference taken by iommu_group_get(); we only needed the data. */
iommu_group_put(group);
return smmu_group;
}
static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
{ {
__le64 *step; __le64 *step;
...@@ -1591,27 +1575,17 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid) ...@@ -1591,27 +1575,17 @@ static __le64 *arm_smmu_get_step_for_sid(struct arm_smmu_device *smmu, u32 sid)
return step; return step;
} }
static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) static int arm_smmu_install_ste_for_dev(struct iommu_fwspec *fwspec)
{ {
int i; int i;
struct arm_smmu_domain *smmu_domain = smmu_group->domain; struct arm_smmu_master_data *master = fwspec->iommu_priv;
struct arm_smmu_strtab_ent *ste = &smmu_group->ste; struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_device *smmu = smmu_group->smmu;
if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
ste->s1_cfg = &smmu_domain->s1_cfg;
ste->s2_cfg = NULL;
arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
} else {
ste->s1_cfg = NULL;
ste->s2_cfg = &smmu_domain->s2_cfg;
}
for (i = 0; i < smmu_group->num_sids; ++i) { for (i = 0; i < fwspec->num_ids; ++i) {
u32 sid = smmu_group->sids[i]; u32 sid = fwspec->ids[i];
__le64 *step = arm_smmu_get_step_for_sid(smmu, sid); __le64 *step = arm_smmu_get_step_for_sid(smmu, sid);
arm_smmu_write_strtab_ent(smmu, sid, step, ste); arm_smmu_write_strtab_ent(smmu, sid, step, &master->ste);
} }
return 0; return 0;
...@@ -1619,13 +1593,11 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group) ...@@ -1619,13 +1593,11 @@ static int arm_smmu_install_ste_for_group(struct arm_smmu_group *smmu_group)
static void arm_smmu_detach_dev(struct device *dev) static void arm_smmu_detach_dev(struct device *dev)
{ {
struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); struct arm_smmu_master_data *master = dev->iommu_fwspec->iommu_priv;
smmu_group->ste.bypass = true; master->ste.bypass = true;
if (arm_smmu_install_ste_for_group(smmu_group) < 0) if (arm_smmu_install_ste_for_dev(dev->iommu_fwspec) < 0)
dev_warn(dev, "failed to install bypass STE\n"); dev_warn(dev, "failed to install bypass STE\n");
smmu_group->domain = NULL;
} }
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
...@@ -1633,16 +1605,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -1633,16 +1605,20 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
int ret = 0; int ret = 0;
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_group *smmu_group = arm_smmu_group_get(dev); struct arm_smmu_master_data *master;
struct arm_smmu_strtab_ent *ste;
if (!smmu_group) if (!dev->iommu_fwspec)
return -ENOENT; return -ENOENT;
master = dev->iommu_fwspec->iommu_priv;
smmu = master->smmu;
ste = &master->ste;
/* Already attached to a different domain? */ /* Already attached to a different domain? */
if (smmu_group->domain && smmu_group->domain != smmu_domain) if (!ste->bypass)
arm_smmu_detach_dev(dev); arm_smmu_detach_dev(dev);
smmu = smmu_group->smmu;
mutex_lock(&smmu_domain->init_mutex); mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) { if (!smmu_domain->smmu) {
...@@ -1661,21 +1637,21 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -1661,21 +1637,21 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto out_unlock; goto out_unlock;
} }
/* Group already attached to this domain? */ ste->bypass = false;
if (smmu_group->domain) ste->valid = true;
goto out_unlock;
smmu_group->domain = smmu_domain;
/* if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
* FIXME: This should always be "false" once we have IOMMU-backed ste->s1_cfg = &smmu_domain->s1_cfg;
* DMA ops for all devices behind the SMMU. ste->s2_cfg = NULL;
*/ arm_smmu_write_ctx_desc(smmu, ste->s1_cfg);
smmu_group->ste.bypass = domain->type == IOMMU_DOMAIN_DMA; } else {
ste->s1_cfg = NULL;
ste->s2_cfg = &smmu_domain->s2_cfg;
}
ret = arm_smmu_install_ste_for_group(smmu_group); ret = arm_smmu_install_ste_for_dev(dev->iommu_fwspec);
if (ret < 0) if (ret < 0)
smmu_group->domain = NULL; ste->valid = false;
out_unlock: out_unlock:
mutex_unlock(&smmu_domain->init_mutex); mutex_unlock(&smmu_domain->init_mutex);
...@@ -1734,40 +1710,19 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova) ...@@ -1734,40 +1710,19 @@ arm_smmu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova)
return ret; return ret;
} }
static int __arm_smmu_get_pci_sid(struct pci_dev *pdev, u16 alias, void *sidp) static struct platform_driver arm_smmu_driver;
{
*(u32 *)sidp = alias;
return 0; /* Continue walking */
}
static void __arm_smmu_release_pci_iommudata(void *data) static int arm_smmu_match_node(struct device *dev, void *data)
{ {
kfree(data); return dev->of_node == data;
} }
static struct arm_smmu_device *arm_smmu_get_for_pci_dev(struct pci_dev *pdev) static struct arm_smmu_device *arm_smmu_get_by_node(struct device_node *np)
{ {
struct device_node *of_node; struct device *dev = driver_find_device(&arm_smmu_driver.driver, NULL,
struct platform_device *smmu_pdev; np, arm_smmu_match_node);
struct arm_smmu_device *smmu = NULL; put_device(dev);
struct pci_bus *bus = pdev->bus; return dev ? dev_get_drvdata(dev) : NULL;
/* Walk up to the root bus */
while (!pci_is_root_bus(bus))
bus = bus->parent;
/* Follow the "iommus" phandle from the host controller */
of_node = of_parse_phandle(bus->bridge->parent->of_node, "iommus", 0);
if (!of_node)
return NULL;
/* See if we can find an SMMU corresponding to the phandle */
smmu_pdev = of_find_device_by_node(of_node);
if (smmu_pdev)
smmu = platform_get_drvdata(smmu_pdev);
of_node_put(of_node);
return smmu;
} }
static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
...@@ -1780,94 +1735,74 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid) ...@@ -1780,94 +1735,74 @@ static bool arm_smmu_sid_in_range(struct arm_smmu_device *smmu, u32 sid)
return sid < limit; return sid < limit;
} }
static struct iommu_ops arm_smmu_ops;
static int arm_smmu_add_device(struct device *dev) static int arm_smmu_add_device(struct device *dev)
{ {
int i, ret; int i, ret;
u32 sid, *sids;
struct pci_dev *pdev;
struct iommu_group *group;
struct arm_smmu_group *smmu_group;
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct arm_smmu_master_data *master;
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct iommu_group *group;
/* We only support PCI, for now */ if (!fwspec || fwspec->ops != &arm_smmu_ops)
if (!dev_is_pci(dev))
return -ENODEV; return -ENODEV;
/*
pdev = to_pci_dev(dev); * We _can_ actually withstand dodgy bus code re-calling add_device()
group = iommu_group_get_for_dev(dev); * without an intervening remove_device()/of_xlate() sequence, but
if (IS_ERR(group)) * we're not going to do so quietly...
return PTR_ERR(group); */
if (WARN_ON_ONCE(fwspec->iommu_priv)) {
smmu_group = iommu_group_get_iommudata(group); master = fwspec->iommu_priv;
if (!smmu_group) { smmu = master->smmu;
smmu = arm_smmu_get_for_pci_dev(pdev);
if (!smmu) {
ret = -ENOENT;
goto out_remove_dev;
}
smmu_group = kzalloc(sizeof(*smmu_group), GFP_KERNEL);
if (!smmu_group) {
ret = -ENOMEM;
goto out_remove_dev;
}
smmu_group->ste.valid = true;
smmu_group->smmu = smmu;
iommu_group_set_iommudata(group, smmu_group,
__arm_smmu_release_pci_iommudata);
} else { } else {
smmu = smmu_group->smmu; smmu = arm_smmu_get_by_node(to_of_node(fwspec->iommu_fwnode));
if (!smmu)
return -ENODEV;
master = kzalloc(sizeof(*master), GFP_KERNEL);
if (!master)
return -ENOMEM;
master->smmu = smmu;
fwspec->iommu_priv = master;
} }
/* Assume SID == RID until firmware tells us otherwise */ /* Check the SIDs are in range of the SMMU and our stream table */
pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); for (i = 0; i < fwspec->num_ids; i++) {
for (i = 0; i < smmu_group->num_sids; ++i) { u32 sid = fwspec->ids[i];
/* If we already know about this SID, then we're done */
if (smmu_group->sids[i] == sid)
goto out_put_group;
}
/* Check the SID is in range of the SMMU and our stream table */ if (!arm_smmu_sid_in_range(smmu, sid))
if (!arm_smmu_sid_in_range(smmu, sid)) { return -ERANGE;
ret = -ERANGE;
goto out_remove_dev;
}
/* Ensure l2 strtab is initialised */
if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
ret = arm_smmu_init_l2_strtab(smmu, sid);
if (ret)
goto out_remove_dev;
}
/* Resize the SID array for the group */ /* Ensure l2 strtab is initialised */
smmu_group->num_sids++; if (smmu->features & ARM_SMMU_FEAT_2_LVL_STRTAB) {
sids = krealloc(smmu_group->sids, smmu_group->num_sids * sizeof(*sids), ret = arm_smmu_init_l2_strtab(smmu, sid);
GFP_KERNEL); if (ret)
if (!sids) { return ret;
smmu_group->num_sids--; }
ret = -ENOMEM;
goto out_remove_dev;
} }
/* Add the new SID */ group = iommu_group_get_for_dev(dev);
sids[smmu_group->num_sids - 1] = sid; if (!IS_ERR(group))
smmu_group->sids = sids; iommu_group_put(group);
out_put_group:
iommu_group_put(group);
return 0;
out_remove_dev: return PTR_ERR_OR_ZERO(group);
iommu_group_remove_device(dev);
iommu_group_put(group);
return ret;
} }
static void arm_smmu_remove_device(struct device *dev) static void arm_smmu_remove_device(struct device *dev)
{ {
struct iommu_fwspec *fwspec = dev->iommu_fwspec;
struct arm_smmu_master_data *master;
if (!fwspec || fwspec->ops != &arm_smmu_ops)
return;
master = fwspec->iommu_priv;
if (master && master->ste.valid)
arm_smmu_detach_dev(dev);
iommu_group_remove_device(dev); iommu_group_remove_device(dev);
kfree(master);
iommu_fwspec_free(dev);
} }
static int arm_smmu_domain_get_attr(struct iommu_domain *domain, static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
...@@ -1914,6 +1849,15 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain, ...@@ -1914,6 +1849,15 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
return ret; return ret;
} }
/*
 * of_xlate callback: record the stream ID from a single-cell "iommus"/
 * "iommu-map" specifier into @dev's iommu_fwspec.
 */
static int arm_smmu_of_xlate(struct device *dev, struct of_phandle_args *args)
{
/* We only support PCI, for now */
if (!dev_is_pci(dev))
return -ENODEV;
/* One u32 ID per specifier cell; appended to the device's fwspec. */
return iommu_fwspec_add_ids(dev, args->args, 1);
}
static struct iommu_ops arm_smmu_ops = { static struct iommu_ops arm_smmu_ops = {
.capable = arm_smmu_capable, .capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc, .domain_alloc = arm_smmu_domain_alloc,
...@@ -1928,6 +1872,7 @@ static struct iommu_ops arm_smmu_ops = { ...@@ -1928,6 +1872,7 @@ static struct iommu_ops arm_smmu_ops = {
.device_group = pci_device_group, .device_group = pci_device_group,
.domain_get_attr = arm_smmu_domain_get_attr, .domain_get_attr = arm_smmu_domain_get_attr,
.domain_set_attr = arm_smmu_domain_set_attr, .domain_set_attr = arm_smmu_domain_set_attr,
.of_xlate = arm_smmu_of_xlate,
.pgsize_bitmap = -1UL, /* Restricted during device attach */ .pgsize_bitmap = -1UL, /* Restricted during device attach */
}; };
...@@ -2662,7 +2607,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) ...@@ -2662,7 +2607,14 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, smmu); platform_set_drvdata(pdev, smmu);
/* Reset the device */ /* Reset the device */
return arm_smmu_device_reset(smmu, bypass); ret = arm_smmu_device_reset(smmu, bypass);
if (ret)
return ret;
/* And we're up. Go go go! */
of_iommu_set_ops(dev->of_node, &arm_smmu_ops);
pci_request_acs();
return bus_set_iommu(&pci_bus_type, &arm_smmu_ops);
} }
static int arm_smmu_device_remove(struct platform_device *pdev) static int arm_smmu_device_remove(struct platform_device *pdev)
...@@ -2690,22 +2642,14 @@ static struct platform_driver arm_smmu_driver = { ...@@ -2690,22 +2642,14 @@ static struct platform_driver arm_smmu_driver = {
static int __init arm_smmu_init(void) static int __init arm_smmu_init(void)
{ {
struct device_node *np; static bool registered;
int ret; int ret = 0;
np = of_find_matching_node(NULL, arm_smmu_of_match);
if (!np)
return 0;
of_node_put(np);
ret = platform_driver_register(&arm_smmu_driver);
if (ret)
return ret;
pci_request_acs();
return bus_set_iommu(&pci_bus_type, &arm_smmu_ops); if (!registered) {
ret = platform_driver_register(&arm_smmu_driver);
registered = !ret;
}
return ret;
} }
static void __exit arm_smmu_exit(void) static void __exit arm_smmu_exit(void)
...@@ -2716,6 +2660,20 @@ static void __exit arm_smmu_exit(void) ...@@ -2716,6 +2660,20 @@ static void __exit arm_smmu_exit(void)
subsys_initcall(arm_smmu_init); subsys_initcall(arm_smmu_init);
module_exit(arm_smmu_exit); module_exit(arm_smmu_exit);
/*
 * Early firmware-driven init (via IOMMU_OF_DECLARE): make sure the platform
 * driver is registered, then create the SMMU platform device for @np so it
 * probes before its master devices.
 */
static int __init arm_smmu_of_init(struct device_node *np)
{
int ret = arm_smmu_init();
if (ret)
return ret;
/* NOTE(review): parenting on platform_bus_type.dev_root assumes the
 * platform bus is already initialised at this point — confirm. */
if (!of_platform_device_create(np, NULL, platform_bus_type.dev_root))
return -ENODEV;
return 0;
}
IOMMU_OF_DECLARE(arm_smmuv3, "arm,smmu-v3", arm_smmu_of_init);
MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations"); MODULE_DESCRIPTION("IOMMU API for ARM architected SMMUv3 implementations");
MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>");
MODULE_LICENSE("GPL v2"); MODULE_LICENSE("GPL v2");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment