Commit 22bb7b41 authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu: Convert to a global static identity domain

Create a global static identity domain with its own
arm_smmu_attach_dev_identity(): the identity attach path gets a
dedicated implementation that simply calls
arm_smmu_master_install_s2crs() with the bypass parameters.

Remove ARM_SMMU_DOMAIN_BYPASS and all checks of IOMMU_DOMAIN_IDENTITY.
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/2-v2-c86cc8c2230e+160bb-smmu_newapi_jgg@nvidia.com
[will: Move duplicated autosuspend logic into a helper function]
Signed-off-by: Will Deacon <will@kernel.org>
parent ff0f8029
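
Why a single static domain suffices: an identity attach programs only the
S2CR bypass entries and touches no per-domain state, so nothing has to be
allocated or freed per group. The core-side contract for the new
iommu_ops::identity_domain hook, sketched below as a paraphrase (not the
upstream code; example_get_identity() is a hypothetical name, the real
logic lives in drivers/iommu/iommu.c):

/*
 * Paraphrased sketch of the core-side contract: when a driver
 * publishes a static identity domain, the core hands out that
 * singleton instead of calling domain_alloc() for
 * IOMMU_DOMAIN_IDENTITY requests.
 */
static struct iommu_domain *example_get_identity(struct device *dev)
{
        const struct iommu_ops *ops = dev_iommu_ops(dev);

        if (ops->identity_domain)               /* set by this patch */
                return ops->identity_domain;    /* shared, never freed */

        return NULL;    /* legacy path: allocate an IDENTITY domain */
}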
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.c
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.c
@@ -82,6 +82,23 @@ static inline void arm_smmu_rpm_put(struct arm_smmu_device *smmu)
 		pm_runtime_put_autosuspend(smmu->dev);
 }
 
+static void arm_smmu_rpm_use_autosuspend(struct arm_smmu_device *smmu)
+{
+	/*
+	 * Setup an autosuspend delay to avoid bouncing runpm state.
+	 * Otherwise, if a driver for a suspended consumer device
+	 * unmaps buffers, it will runpm resume/suspend for each one.
+	 *
+	 * For example, when used by a GPU device, when an application
+	 * or game exits, it can trigger unmapping 100s or 1000s of
+	 * buffers. With a runpm cycle for each buffer, that adds up
+	 * to 5-10sec worth of reprogramming the context bank, while
+	 * the system appears to be locked up to the user.
+	 */
+	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
+	pm_runtime_use_autosuspend(smmu->dev);
+}
+
 static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
 	return container_of(dom, struct arm_smmu_domain, domain);
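
The new helper centralizes the autosuspend setup so both attach paths can
share it. As background, the runtime-PM autosuspend pattern it enables
looks roughly like this on the consumer side (a sketch only;
example_unmap_one() is hypothetical, but the pm_runtime_* calls are the
standard kernel API already used by arm_smmu_rpm_put() above):

#include <linux/pm_runtime.h>

/*
 * Hypothetical caller illustrating the autosuspend pattern: with a
 * 20ms autosuspend delay configured, a burst of these calls costs
 * one resume/suspend cycle instead of one cycle per call.
 */
static int example_unmap_one(struct device *dev)
{
        int ret;

        ret = pm_runtime_resume_and_get(dev);   /* power up if suspended */
        if (ret < 0)
                return ret;

        /* ... reprogram the hardware for one buffer ... */

        pm_runtime_mark_last_busy(dev);         /* restart the delay timer */
        pm_runtime_put_autosuspend(dev);        /* suspend after the delay */
        return 0;
}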
@@ -624,12 +641,6 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;
 
-	if (domain->type == IOMMU_DOMAIN_IDENTITY) {
-		smmu_domain->stage = ARM_SMMU_DOMAIN_BYPASS;
-		smmu_domain->smmu = smmu;
-		goto out_unlock;
-	}
-
 	/*
 	 * Mapping the requested stage onto what we support is surprisingly
 	 * complicated, mainly because the spec allows S1+S2 SMMUs without
@@ -825,7 +836,7 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
 	int ret, irq;
 
-	if (!smmu || domain->type == IOMMU_DOMAIN_IDENTITY)
+	if (!smmu)
 		return;
 
 	ret = arm_smmu_rpm_get(smmu);
@@ -854,7 +865,7 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
 {
 	struct arm_smmu_domain *smmu_domain;
 
-	if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_IDENTITY) {
+	if (type != IOMMU_DOMAIN_UNMANAGED) {
 		if (using_legacy_binding || type != IOMMU_DOMAIN_DMA)
 			return NULL;
 	}
@@ -1145,32 +1156,45 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
 	}
 
 	/* Looks ok, so add the device to the domain */
-	arm_smmu_master_install_s2crs(cfg,
-				      smmu_domain->stage ==
-						      ARM_SMMU_DOMAIN_BYPASS ?
-					      S2CR_TYPE_BYPASS :
-					      S2CR_TYPE_TRANS,
+	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_TRANS,
 				      smmu_domain->cfg.cbndx, fwspec);
-
-	/*
-	 * Setup an autosuspend delay to avoid bouncing runpm state.
-	 * Otherwise, if a driver for a suspended consumer device
-	 * unmaps buffers, it will runpm resume/suspend for each one.
-	 *
-	 * For example, when used by a GPU device, when an application
-	 * or game exits, it can trigger unmapping 100s or 1000s of
-	 * buffers. With a runpm cycle for each buffer, that adds up
-	 * to 5-10sec worth of reprogramming the context bank, while
-	 * the system appears to be locked up to the user.
-	 */
-	pm_runtime_set_autosuspend_delay(smmu->dev, 20);
-	pm_runtime_use_autosuspend(smmu->dev);
-
+	arm_smmu_rpm_use_autosuspend(smmu);
 rpm_put:
 	arm_smmu_rpm_put(smmu);
 	return ret;
 }
 
+static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
+					struct device *dev)
+{
+	struct arm_smmu_master_cfg *cfg = dev_iommu_priv_get(dev);
+	struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
+	struct arm_smmu_device *smmu;
+	int ret;
+
+	if (!cfg)
+		return -ENODEV;
+	smmu = cfg->smmu;
+
+	ret = arm_smmu_rpm_get(smmu);
+	if (ret < 0)
+		return ret;
+
+	arm_smmu_master_install_s2crs(cfg, S2CR_TYPE_BYPASS, 0, fwspec);
+	arm_smmu_rpm_use_autosuspend(smmu);
+	arm_smmu_rpm_put(smmu);
+	return 0;
+}
+
+static const struct iommu_domain_ops arm_smmu_identity_ops = {
+	.attach_dev = arm_smmu_attach_dev_identity,
+};
+
+static struct iommu_domain arm_smmu_identity_domain = {
+	.type = IOMMU_DOMAIN_IDENTITY,
+	.ops = &arm_smmu_identity_ops,
+};
+
 static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
 			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
 			      int prot, gfp_t gfp, size_t *mapped)
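
Both attach paths now funnel into arm_smmu_master_install_s2crs(), added
earlier in this series. A condensed paraphrase of that helper is sketched
below (example_install_s2crs() is illustrative; the real helper also skips
entries that are already programmed). For bypass entries the context-bank
index is ignored by the hardware, which is why the identity path can pass
0 for cbndx:

/*
 * Paraphrase of arm_smmu_master_install_s2crs(): for each stream
 * mapping entry of this master, point the Stream-to-Context (S2CR)
 * register either at a context bank (S2CR_TYPE_TRANS) or straight
 * to memory (S2CR_TYPE_BYPASS, where cbndx is don't-care).
 */
static void example_install_s2crs(struct arm_smmu_master_cfg *cfg,
                                  enum arm_smmu_s2cr_type type, u8 cbndx,
                                  struct iommu_fwspec *fwspec)
{
        struct arm_smmu_device *smmu = cfg->smmu;
        int i, idx;

        for_each_cfg_sme(cfg, fwspec, i, idx) {
                smmu->s2crs[idx].type = type;
                smmu->s2crs[idx].privcfg = S2CR_PRIVCFG_DEFAULT;
                smmu->s2crs[idx].cbndx = cbndx;
                arm_smmu_write_s2cr(smmu, idx); /* commit to hardware */
        }
}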
@@ -1557,6 +1581,7 @@ static int arm_smmu_def_domain_type(struct device *dev)
 }
 
 static struct iommu_ops arm_smmu_ops = {
+	.identity_domain = &arm_smmu_identity_domain,
 	.capable = arm_smmu_capable,
 	.domain_alloc = arm_smmu_domain_alloc,
 	.probe_device = arm_smmu_probe_device,
--- a/drivers/iommu/arm/arm-smmu/arm-smmu.h
+++ b/drivers/iommu/arm/arm-smmu/arm-smmu.h
@@ -361,7 +361,6 @@ enum arm_smmu_domain_stage {
 	ARM_SMMU_DOMAIN_S1 = 0,
 	ARM_SMMU_DOMAIN_S2,
 	ARM_SMMU_DOMAIN_NESTED,
-	ARM_SMMU_DOMAIN_BYPASS,
 };
 
 struct arm_smmu_domain {