Commit cd221bd2 authored by Will Deacon, committed by Joerg Roedel

iommu/arm-smmu: Allow building as a module

By conditionally dropping support for the legacy binding and exporting
the newly introduced 'arm_smmu_impl_init()' function we can allow the
ARM SMMU driver to be built as a module.
Signed-off-by: Will Deacon <will@kernel.org>
Tested-by: John Garry <john.garry@huawei.com> # smmu v3
Reviewed-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7359572e
...@@ -356,7 +356,7 @@ config SPAPR_TCE_IOMMU ...@@ -356,7 +356,7 @@ config SPAPR_TCE_IOMMU
# ARM IOMMU support # ARM IOMMU support
config ARM_SMMU config ARM_SMMU
bool "ARM Ltd. System MMU (SMMU) Support" tristate "ARM Ltd. System MMU (SMMU) Support"
depends on (ARM64 || ARM) && MMU depends on (ARM64 || ARM) && MMU
select IOMMU_API select IOMMU_API
select IOMMU_IO_PGTABLE_LPAE select IOMMU_IO_PGTABLE_LPAE
...@@ -368,6 +368,18 @@ config ARM_SMMU ...@@ -368,6 +368,18 @@ config ARM_SMMU
Say Y here if your SoC includes an IOMMU device implementing Say Y here if your SoC includes an IOMMU device implementing
the ARM SMMU architecture. the ARM SMMU architecture.
config ARM_SMMU_LEGACY_DT_BINDINGS
bool "Support the legacy \"mmu-masters\" devicetree bindings"
depends on ARM_SMMU=y && OF
help
Support for the badly designed and deprecated "mmu-masters"
devicetree bindings. This allows some DMA masters to attach
to the SMMU but does not provide any support via the DMA API.
If you're lucky, you might be able to get VFIO up and running.
If you say Y here then you'll make me very sad. Instead, say N
and move your firmware to the utopian future that was 2016.
config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT config ARM_SMMU_DISABLE_BYPASS_BY_DEFAULT
bool "Default to disabling bypass on ARM SMMU v1 and v2" bool "Default to disabling bypass on ARM SMMU v1 and v2"
depends on ARM_SMMU depends on ARM_SMMU
......
...@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o ...@@ -14,7 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
obj-$(CONFIG_ARM_SMMU) += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
......
...@@ -126,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) ...@@ -126,6 +126,12 @@ static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
return container_of(dom, struct arm_smmu_domain, domain); return container_of(dom, struct arm_smmu_domain, domain);
} }
static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
#ifdef CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS
static int arm_smmu_bus_init(struct iommu_ops *ops);
static struct device_node *dev_get_dev_node(struct device *dev) static struct device_node *dev_get_dev_node(struct device *dev)
{ {
if (dev_is_pci(dev)) { if (dev_is_pci(dev)) {
...@@ -161,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data) ...@@ -161,9 +167,6 @@ static int __find_legacy_master_phandle(struct device *dev, void *data)
return err == -ENOENT ? 0 : err; return err == -ENOENT ? 0 : err;
} }
static struct platform_driver arm_smmu_driver;
static struct iommu_ops arm_smmu_ops;
static int arm_smmu_register_legacy_master(struct device *dev, static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu) struct arm_smmu_device **smmu)
{ {
...@@ -215,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev, ...@@ -215,6 +218,27 @@ static int arm_smmu_register_legacy_master(struct device *dev,
return err; return err;
} }
/*
 * With the legacy DT binding in play, we have no guarantees about
 * probe order, but then we're also not doing default domains, so we can
 * delay setting bus ops until we're sure every possible SMMU is ready,
 * and that way ensure that no add_device() calls get missed.
 */
/* Returns 0 when the legacy binding is unused; otherwise propagates the
 * result of arm_smmu_bus_init(). */
static int arm_smmu_legacy_bus_init(void)
{
if (using_legacy_binding)
return arm_smmu_bus_init(&arm_smmu_ops);
return 0;
}
/*
 * device_initcall_sync runs after all ordinary device initcalls, i.e.
 * after every built-in SMMU has had a chance to probe.  This initcall
 * only exists in built-in configurations, which matches the Kconfig
 * constraint ARM_SMMU_LEGACY_DT_BINDINGS depends on ARM_SMMU=y.
 */
device_initcall_sync(arm_smmu_legacy_bus_init);
#else
/*
 * Stub for CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS=n builds: with legacy
 * "mmu-masters" support compiled out, no legacy master can be
 * registered, so always report -ENODEV to the caller.
 */
static int arm_smmu_register_legacy_master(struct device *dev,
struct arm_smmu_device **smmu)
{
return -ENODEV;
}
#endif /* CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS */
static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end) static int __arm_smmu_alloc_bitmap(unsigned long *map, int start, int end)
{ {
int idx; int idx;
...@@ -1599,6 +1623,7 @@ static struct iommu_ops arm_smmu_ops = { ...@@ -1599,6 +1623,7 @@ static struct iommu_ops arm_smmu_ops = {
.get_resv_regions = arm_smmu_get_resv_regions, .get_resv_regions = arm_smmu_get_resv_regions,
.put_resv_regions = arm_smmu_put_resv_regions, .put_resv_regions = arm_smmu_put_resv_regions,
.pgsize_bitmap = -1UL, /* Restricted during device attach */ .pgsize_bitmap = -1UL, /* Restricted during device attach */
.owner = THIS_MODULE,
}; };
static void arm_smmu_device_reset(struct arm_smmu_device *smmu) static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
...@@ -1993,8 +2018,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev, ...@@ -1993,8 +2018,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev,
legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL); legacy_binding = of_find_property(dev->of_node, "mmu-masters", NULL);
if (legacy_binding && !using_generic_binding) { if (legacy_binding && !using_generic_binding) {
if (!using_legacy_binding) if (!using_legacy_binding) {
pr_notice("deprecated \"mmu-masters\" DT property in use; DMA API support unavailable\n"); pr_notice("deprecated \"mmu-masters\" DT property in use; %s support unavailable\n",
IS_ENABLED(CONFIG_ARM_SMMU_LEGACY_DT_BINDINGS) ? "DMA API" : "SMMU");
}
using_legacy_binding = true; using_legacy_binding = true;
} else if (!legacy_binding && !using_legacy_binding) { } else if (!legacy_binding && !using_legacy_binding) {
using_generic_binding = true; using_generic_binding = true;
...@@ -2028,7 +2055,6 @@ static int arm_smmu_bus_init(struct iommu_ops *ops) ...@@ -2028,7 +2055,6 @@ static int arm_smmu_bus_init(struct iommu_ops *ops)
#endif #endif
#ifdef CONFIG_PCI #ifdef CONFIG_PCI
if (!iommu_present(&pci_bus_type)) { if (!iommu_present(&pci_bus_type)) {
pci_request_acs();
err = bus_set_iommu(&pci_bus_type, ops); err = bus_set_iommu(&pci_bus_type, ops);
if (err) if (err)
goto err_reset_amba_ops; goto err_reset_amba_ops;
...@@ -2204,20 +2230,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev) ...@@ -2204,20 +2230,6 @@ static int arm_smmu_device_probe(struct platform_device *pdev)
return 0; return 0;
} }
/*
* With the legacy DT binding in play, though, we have no guarantees about
* probe order, but then we're also not doing default domains, so we can
* delay setting bus ops until we're sure every possible SMMU is ready,
* and that way ensure that no add_device() calls get missed.
*/
static int arm_smmu_legacy_bus_init(void)
{
if (using_legacy_binding)
return arm_smmu_bus_init(&arm_smmu_ops);
return 0;
}
device_initcall_sync(arm_smmu_legacy_bus_init);
static int arm_smmu_device_remove(struct platform_device *pdev) static int arm_smmu_device_remove(struct platform_device *pdev)
{ {
struct arm_smmu_device *smmu = platform_get_drvdata(pdev); struct arm_smmu_device *smmu = platform_get_drvdata(pdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment