Commit 57365a04 authored by Robin Murphy, committed by Joerg Roedel

iommu: Move bus setup to IOMMU device registration

Move the bus setup to iommu_device_register(). This should allow
bus_iommu_probe() to be correctly replayed for multiple IOMMU instances,
and leave bus_set_iommu() as a glorified no-op to be cleaned up next.

At this point we can also handle cleanup better than just rolling back
the most-recently-touched bus upon failure - which may release devices
owned by other already-registered instances, and still leave devices on
other buses with dangling pointers to the failed instance. Now it's easy
to clean up the exact footprint of a given instance, no more, no less.
Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
Reviewed-by: Krishna Reddy <vdumpa@nvidia.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Tested-by: Matthew Rosato <mjrosato@linux.ibm.com> # s390
Tested-by: Niklas Schnelle <schnelle@linux.ibm.com> # s390
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/d342b6f27efb5ef3e93aacaa3012d25386d74866.1660572783.git.robin.murphy@arm.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent c13dbc1e
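
As an illustration of the new flow (a sketch, not part of the commit), here is roughly what a driver probe path looks like once this change is in place. The names my_iommu_data, my_iommu_ops and my_iommu_probe are hypothetical stand-ins for any IOMMU driver; the point is that a single iommu_device_register() call now performs the bus setup that previously needed a separate bus_set_iommu() call, and on failure the core unwinds only the devices it probed for this instance.

#include <linux/device.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>

/* Hypothetical per-instance driver state. */
struct my_iommu_data {
	struct iommu_device iommu;
};

/* Driver callbacks elided; any real set of iommu_ops would go here. */
static const struct iommu_ops my_iommu_ops;

static int my_iommu_probe(struct platform_device *pdev)
{
	struct my_iommu_data *data;

	data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/*
	 * Registering the instance now also does the bus setup: each bus
	 * in iommu_buses[] gets its iommu_ops set and bus_iommu_probe()
	 * replayed for it, so no bus_set_iommu() call is needed here.
	 * If probing fails, only the devices belonging to this instance
	 * are released, via remove_iommu_group() matching iommu_dev.
	 */
	return iommu_device_register(&data->iommu, &my_iommu_ops, &pdev->dev);
}

The diff below implements exactly that: iommu_device_register() walks iommu_buses[] itself, while bus_set_iommu() shrinks to little more than setting bus->iommu_ops.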
@@ -188,6 +188,14 @@ static int __init iommu_subsys_init(void)
 }
 subsys_initcall(iommu_subsys_init);
 
+static int remove_iommu_group(struct device *dev, void *data)
+{
+	if (dev->iommu && dev->iommu->iommu_dev == data)
+		iommu_release_device(dev);
+
+	return 0;
+}
+
 /**
  * iommu_device_register() - Register an IOMMU hardware instance
  * @iommu: IOMMU handle for the instance
@@ -199,9 +207,18 @@ subsys_initcall(iommu_subsys_init);
 int iommu_device_register(struct iommu_device *iommu,
 			  const struct iommu_ops *ops, struct device *hwdev)
 {
+	int err = 0;
+
 	/* We need to be able to take module references appropriately */
 	if (WARN_ON(is_module_address((unsigned long)ops) && !ops->owner))
 		return -EINVAL;
+	/*
+	 * Temporarily enforce global restriction to a single driver. This was
+	 * already the de-facto behaviour, since any possible combination of
+	 * existing drivers would compete for at least the PCI or platform bus.
+	 */
+	if (iommu_buses[0]->iommu_ops && iommu_buses[0]->iommu_ops != ops)
+		return -EBUSY;
 
 	iommu->ops = ops;
 	if (hwdev)
@@ -210,12 +227,22 @@ int iommu_device_register(struct iommu_device *iommu,
 	spin_lock(&iommu_device_lock);
 	list_add_tail(&iommu->list, &iommu_device_list);
 	spin_unlock(&iommu_device_lock);
-	return 0;
+
+	for (int i = 0; i < ARRAY_SIZE(iommu_buses) && !err; i++) {
+		iommu_buses[i]->iommu_ops = ops;
+		err = bus_iommu_probe(iommu_buses[i]);
+	}
+	if (err)
+		iommu_device_unregister(iommu);
+	return err;
 }
 EXPORT_SYMBOL_GPL(iommu_device_register);
 
 void iommu_device_unregister(struct iommu_device *iommu)
 {
+	for (int i = 0; i < ARRAY_SIZE(iommu_buses); i++)
+		bus_for_each_dev(iommu_buses[i], NULL, iommu, remove_iommu_group);
+
 	spin_lock(&iommu_device_lock);
 	list_del(&iommu->list);
 	spin_unlock(&iommu_device_lock);
@@ -1643,13 +1670,6 @@ static int probe_iommu_group(struct device *dev, void *data)
 	return ret;
 }
 
-static int remove_iommu_group(struct device *dev, void *data)
-{
-	iommu_release_device(dev);
-
-	return 0;
-}
-
 static int iommu_bus_notifier(struct notifier_block *nb,
 			      unsigned long action, void *data)
 {
@@ -1821,27 +1841,12 @@ int bus_iommu_probe(struct bus_type *bus)
  */
 int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops)
 {
-	int err;
-
-	if (ops == NULL) {
-		bus->iommu_ops = NULL;
-		return 0;
-	}
-
-	if (bus->iommu_ops != NULL)
+	if (bus->iommu_ops && ops && bus->iommu_ops != ops)
 		return -EBUSY;
 
 	bus->iommu_ops = ops;
 
-	/* Do IOMMU specific setup for this bus-type */
-	err = bus_iommu_probe(bus);
-	if (err) {
-		/* Clean up */
-		bus_for_each_dev(bus, NULL, NULL, remove_iommu_group);
-		bus->iommu_ops = NULL;
-	}
-
-	return err;
+	return 0;
 }
 EXPORT_SYMBOL_GPL(bus_set_iommu);