Commit 02252b3b authored by Zhen Lei, committed by Joerg Roedel

iommu/amd: Add support for IOMMU default DMA mode build options

Make IOMMU_DEFAULT_LAZY the default when the AMD_IOMMU config option is set,
which matches current behaviour.
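
As background (not part of this diff): the IOMMU core, from the earlier patch in
this series, seeds its runtime flush policy from this Kconfig choice. A rough
sketch of how drivers/iommu/iommu.c consumes it, paraphrased from memory rather
than quoted verbatim:

  /* Sketch only: how the core is expected to consume the Kconfig default. */
  static bool iommu_dma_strict __read_mostly =
          IS_ENABLED(CONFIG_IOMMU_DEFAULT_STRICT);

  void iommu_set_dma_strict(bool strict)
  {
          /* Drivers and the iommu.strict= boot option can override the build-time default. */
          iommu_dma_strict = strict;
  }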

For the "fullflush" param, just call iommu_set_dma_strict(true) directly.
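
On the kernel command line this keeps the old option working while pointing
users at the generic core switch, for example:

  amd_iommu=fullflush    # legacy: warns, then enables strict invalidation
  iommu.strict=1         # preferred generic option handled by the IOMMU core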

Since we already get a strict vs lazy mode print in iommu_subsys_init(), and a
deprecation warning is still printed when the "fullflush" param is passed, drop
the prints in amd_iommu_init_dma_ops().
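
For reference, the core print this relies on lives in iommu_subsys_init();
roughly along these lines (paraphrased, exact wording not guaranteed here):

  /* Paraphrased sketch of the single core-level report that replaces the per-driver prints. */
  pr_info("DMA domain TLB invalidation policy: %s mode\n",
          iommu_dma_strict ? "strict" : "lazy");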

Finally drop the global flag amd_iommu_unmap_flush, as it no longer has any
purpose.
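
With both the prints and the flag gone, the AMD hook reduces to the swiotlb
decision alone, as the final hunk below shows:

  static void __init amd_iommu_init_dma_ops(void)
  {
          /* Flush policy is now owned by the IOMMU core; only the swiotlb choice remains. */
          swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
  }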

[jpg: Rebase for relocated file and drop amd_iommu_unmap_flush]
Signed-off-by: Zhen Lei <thunder.leizhen@huawei.com>
Signed-off-by: John Garry <john.garry@huawei.com>
Link: https://lore.kernel.org/r/1626088340-5838-6-git-send-email-john.garry@huawei.com
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent d0e108b8
...
@@ -94,7 +94,7 @@ choice
 	prompt "IOMMU default DMA IOTLB invalidation mode"
 	depends on IOMMU_DMA
-	default IOMMU_DEFAULT_LAZY if INTEL_IOMMU
+	default IOMMU_DEFAULT_LAZY if (AMD_IOMMU || INTEL_IOMMU)
 	default IOMMU_DEFAULT_STRICT
 	help
 	  This option allows an IOMMU DMA IOTLB invalidation mode to be
...
@@ -779,12 +779,6 @@ extern u16 amd_iommu_last_bdf;
 /* allocation bitmap for domain ids */
 extern unsigned long *amd_iommu_pd_alloc_bitmap;
 
-/*
- * If true, the addresses will be flushed on unmap time, not when
- * they are reused
- */
-extern bool amd_iommu_unmap_flush;
-
 /* Smallest max PASID supported by any IOMMU in the system */
 extern u32 amd_iommu_max_pasid;
...
@@ -161,7 +161,6 @@ u16 amd_iommu_last_bdf;			/* largest PCI device id we have
 					   to handle */
 LIST_HEAD(amd_iommu_unity_map);		/* a list of required unity mappings
 					   we find in ACPI */
-bool amd_iommu_unmap_flush;		/* if true, flush on every unmap */
 LIST_HEAD(amd_iommu_list);		/* list of all AMD IOMMUs in the
 					   system */
...
@@ -3100,7 +3099,7 @@ static int __init parse_amd_iommu_options(char *str)
 	for (; *str; ++str) {
 		if (strncmp(str, "fullflush", 9) == 0) {
 			pr_warn("amd_iommu=fullflush deprecated; use iommu.strict=1 instead\n");
-			amd_iommu_unmap_flush = true;
+			iommu_set_dma_strict(true);
 		}
 		if (strncmp(str, "force_enable", 12) == 0)
 			amd_iommu_force_enable = true;
...
@@ -1775,12 +1775,6 @@ void amd_iommu_domain_update(struct protection_domain *domain)
 static void __init amd_iommu_init_dma_ops(void)
 {
 	swiotlb = (iommu_default_passthrough() || sme_me_mask) ? 1 : 0;
-
-	if (amd_iommu_unmap_flush)
-		pr_info("IO/TLB flush on unmap enabled\n");
-	else
-		pr_info("Lazy IO/TLB flushing enabled\n");
-	iommu_set_dma_strict(amd_iommu_unmap_flush);
 }
 
 int __init amd_iommu_init_api(void)
...