Commit da55da5a authored by Jason Gunthorpe, committed by Joerg Roedel

iommu/arm-smmu-v3: Make the kunit into a module

It turns out kconfig has problems ensuring the SMMU module and the KUNIT
module are consistently y/m to allow linking. It will permit KUNIT to be a
module while SMMU is built in.

Also, Fedora apparently enables kunit on production kernels.

So, put the entire kunit in its own module using the
VISIBLE_IF_KUNIT/EXPORT_SYMBOL_IF_KUNIT machinery. This keeps it out of
vmlinux on Fedora and makes the kconfig work in the normal way. There is
no cost if kunit is disabled.

Fixes: 56e1a4cc ("iommu/arm-smmu-v3: Add unit tests for arm_smmu_write_entry")
Reported-by: Thorsten Leemhuis <linux@leemhuis.info>
Link: https://lore.kernel.org/all/aeea8546-5bce-4c51-b506-5d2008e52fef@leemhuis.info
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Thorsten Leemhuis <linux@leemhuis.info>
Acked-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/0-v1-24cba6c0f404+2ae-smmu_kunit_module_jgg@nvidia.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 278bd82c
...@@ -415,7 +415,7 @@ config ARM_SMMU_V3_SVA ...@@ -415,7 +415,7 @@ config ARM_SMMU_V3_SVA
and PRI. and PRI.
config ARM_SMMU_V3_KUNIT_TEST config ARM_SMMU_V3_KUNIT_TEST
bool "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS tristate "KUnit tests for arm-smmu-v3 driver" if !KUNIT_ALL_TESTS
depends on KUNIT depends on KUNIT
depends on ARM_SMMU_V3_SVA depends on ARM_SMMU_V3_SVA
default KUNIT_ALL_TESTS default KUNIT_ALL_TESTS
......
...@@ -2,5 +2,6 @@ ...@@ -2,5 +2,6 @@
obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
arm_smmu_v3-objs-y += arm-smmu-v3.o arm_smmu_v3-objs-y += arm-smmu-v3.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
arm_smmu_v3-objs := $(arm_smmu_v3-objs-y) arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
...@@ -185,6 +185,7 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, ...@@ -185,6 +185,7 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
*/ */
target->data[3] = cpu_to_le64(read_sysreg(mair_el1)); target->data[3] = cpu_to_le64(read_sysreg(mair_el1));
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm) static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
{ {
......
...@@ -463,3 +463,6 @@ static struct kunit_suite arm_smmu_v3_test_module = { ...@@ -463,3 +463,6 @@ static struct kunit_suite arm_smmu_v3_test_module = {
.test_cases = arm_smmu_v3_test_cases, .test_cases = arm_smmu_v3_test_cases,
}; };
kunit_test_suites(&arm_smmu_v3_test_module); kunit_test_suites(&arm_smmu_v3_test_module);
MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
MODULE_LICENSE("GPL v2");
...@@ -1007,6 +1007,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits) ...@@ -1007,6 +1007,7 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
if (cfg == STRTAB_STE_0_CFG_BYPASS) if (cfg == STRTAB_STE_0_CFG_BYPASS)
used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG); used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_ste_used);
/* /*
* Figure out if we can do a hitless update of entry to become target. Returns a * Figure out if we can do a hitless update of entry to become target. Returns a
...@@ -1141,6 +1142,7 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry, ...@@ -1141,6 +1142,7 @@ void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *entry,
entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS)); entry_set(writer, entry, target, 0, NUM_ENTRY_QWORDS));
} }
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_write_entry);
static void arm_smmu_sync_cd(struct arm_smmu_master *master, static void arm_smmu_sync_cd(struct arm_smmu_master *master,
int ssid, bool leaf) int ssid, bool leaf)
...@@ -1268,6 +1270,7 @@ void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits) ...@@ -1268,6 +1270,7 @@ void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits)
used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK); used_bits[1] &= ~cpu_to_le64(CTXDESC_CD_1_TTB0_MASK);
} }
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_get_cd_used);
static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer) static void arm_smmu_cd_writer_sync_entry(struct arm_smmu_entry_writer *writer)
{ {
...@@ -1332,6 +1335,7 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, ...@@ -1332,6 +1335,7 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
CTXDESC_CD_1_TTB0_MASK); CTXDESC_CD_1_TTB0_MASK);
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair); target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s1_cd);
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid) void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid)
{ {
...@@ -1515,6 +1519,7 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target) ...@@ -1515,6 +1519,7 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target)
STRTAB_STE_0_V | STRTAB_STE_0_V |
FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT)); FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_ABORT));
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_abort_ste);
VISIBLE_IF_KUNIT VISIBLE_IF_KUNIT
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
...@@ -1529,6 +1534,7 @@ void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, ...@@ -1529,6 +1534,7 @@ void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG, target->data[1] = cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
STRTAB_STE_1_SHCFG_INCOMING)); STRTAB_STE_1_SHCFG_INCOMING));
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
VISIBLE_IF_KUNIT VISIBLE_IF_KUNIT
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
...@@ -1580,6 +1586,7 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, ...@@ -1580,6 +1586,7 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0)); cpu_to_le64(FIELD_PREP(STRTAB_STE_2_S2VMID, 0));
} }
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
VISIBLE_IF_KUNIT VISIBLE_IF_KUNIT
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
...@@ -1627,6 +1634,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, ...@@ -1627,6 +1634,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr & target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s2_cfg.vttbr &
STRTAB_STE_3_S2TTB_MASK); STRTAB_STE_3_S2TTB_MASK);
} }
EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_s2_domain_ste);
/* /*
* This can safely directly manipulate the STE memory without a sync sequence * This can safely directly manipulate the STE memory without a sync sequence
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment