Commit ac4e52c6 authored by Will Deacon

Merge branch 'for-joerg/arm-smmu/updates' into for-joerg/arm-smmu/next

* for-joerg/arm-smmu/updates: (29 commits)
  iommu/arm-smmu-qcom: Register the TBU driver in qcom_smmu_impl_init
  iommu/arm-smmu-v3: Enable HTTU for stage1 with io-pgtable mapping
  iommu/arm-smmu-v3: Add support for dirty tracking in domain alloc
  iommu/io-pgtable-arm: Add read_and_clear_dirty() support
  iommu/arm-smmu-v3: Add feature detection for HTTU
  iommu/arm-smmu-v3: Add support for domain_alloc_user fn
  iommu/arm-smmu-qcom: record reason for deferring probe
  iommu/arm-smmu: Pretty-print context fault related regs
  iommu/arm-smmu-qcom-debug: Do not print for handled faults
  iommu/arm-smmu: Add CB prefix to register bitfields
  iommu/arm-smmu-v3: add missing MODULE_DESCRIPTION() macro
  iommu/arm-smmu-v3: Shrink the strtab l1_desc array
  iommu/arm-smmu-v3: Do not zero the strtab twice
  iommu/arm-smmu-v3: Allow setting a S1 domain to a PASID
  iommu/arm-smmu-v3: Allow a PASID to be set when RID is IDENTITY/BLOCKED
  iommu/arm-smmu-v3: Test the STE S1DSS functionality
  iommu/arm-smmu-v3: Allow IDENTITY/BLOCKED to be set while PASID is used
  iommu/arm-smmu-v3: Put the SVA mmu notifier in the smmu_domain
  iommu/arm-smmu-v3: Keep track of arm_smmu_master_domain for SVA
  iommu/arm-smmu-v3: Make SVA allocate a normal arm_smmu_domain
  ...
parents a4ce3999 0b4eeee2
@@ -394,6 +394,7 @@ config ARM_SMMU_V3
 	select IOMMU_API
 	select IOMMU_IO_PGTABLE_LPAE
 	select GENERIC_MSI_IRQ
+	select IOMMUFD_DRIVER if IOMMUFD
 	help
 	  Support for implementations of the ARM System MMU architecture
 	  version 3 providing translation support to a PCIe root complex.
...
 # SPDX-License-Identifier: GPL-2.0
 obj-$(CONFIG_ARM_SMMU_V3) += arm_smmu_v3.o
-arm_smmu_v3-objs-y += arm-smmu-v3.o
-arm_smmu_v3-objs-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
-arm_smmu_v3-objs := $(arm_smmu_v3-objs-y)
+arm_smmu_v3-y := arm-smmu-v3.o
+arm_smmu_v3-$(CONFIG_ARM_SMMU_V3_SVA) += arm-smmu-v3-sva.o
 obj-$(CONFIG_ARM_SMMU_V3_KUNIT_TEST) += arm-smmu-v3-test.o
...
@@ -13,103 +13,31 @@
 #include "arm-smmu-v3.h"
 #include "../../io-pgtable-arm.h"
 
-struct arm_smmu_mmu_notifier {
-	struct mmu_notifier		mn;
-	struct arm_smmu_ctx_desc	*cd;
-	bool				cleared;
-	refcount_t			refs;
-	struct list_head		list;
-	struct arm_smmu_domain		*domain;
-};
-
-#define mn_to_smmu(mn) container_of(mn, struct arm_smmu_mmu_notifier, mn)
-
-struct arm_smmu_bond {
-	struct mm_struct		*mm;
-	struct arm_smmu_mmu_notifier	*smmu_mn;
-	struct list_head		list;
-};
-
-#define sva_to_bond(handle) \
-	container_of(handle, struct arm_smmu_bond, sva)
-
 static DEFINE_MUTEX(sva_lock);
 
-static void
+static void __maybe_unused
 arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
 {
-	struct arm_smmu_master *master;
+	struct arm_smmu_master_domain *master_domain;
 	struct arm_smmu_cd target_cd;
 	unsigned long flags;
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+	list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
+		struct arm_smmu_master *master = master_domain->master;
 		struct arm_smmu_cd *cdptr;
 
-		/* S1 domains only support RID attachment right now */
-		cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
 		if (WARN_ON(!cdptr))
 			continue;
 
 		arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
-		arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
 					&target_cd);
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 }
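[Editor's note: the per-attachment element iterated above is defined in arm-smmu-v3.h, which this diff does not show. A minimal sketch of its likely shape, inferred only from the fields used here (master, devices_elm, ssid); the authoritative definition lives in the header:]

/* Sketch only; see arm-smmu-v3.h for the real definition. */
struct arm_smmu_master_domain {
	struct list_head devices_elm;	/* linked on smmu_domain->devices */
	struct arm_smmu_master *master;	/* the attached device */
	ioasid_t ssid;			/* PASID/SSID of this attachment */
};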
-/*
- * Check if the CPU ASID is available on the SMMU side. If a private context
- * descriptor is using it, try to replace it.
- */
-static struct arm_smmu_ctx_desc *
-arm_smmu_share_asid(struct mm_struct *mm, u16 asid)
-{
-	int ret;
-	u32 new_asid;
-	struct arm_smmu_ctx_desc *cd;
-	struct arm_smmu_device *smmu;
-	struct arm_smmu_domain *smmu_domain;
-
-	cd = xa_load(&arm_smmu_asid_xa, asid);
-	if (!cd)
-		return NULL;
-
-	if (cd->mm) {
-		if (WARN_ON(cd->mm != mm))
-			return ERR_PTR(-EINVAL);
-		/* All devices bound to this mm use the same cd struct. */
-		refcount_inc(&cd->refs);
-		return cd;
-	}
-
-	smmu_domain = container_of(cd, struct arm_smmu_domain, cd);
-	smmu = smmu_domain->smmu;
-
-	ret = xa_alloc(&arm_smmu_asid_xa, &new_asid, cd,
-		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
-	if (ret)
-		return ERR_PTR(-ENOSPC);
-	/*
-	 * Race with unmap: TLB invalidations will start targeting the new ASID,
-	 * which isn't assigned yet. We'll do an invalidate-all on the old ASID
-	 * later, so it doesn't matter.
-	 */
-	cd->asid = new_asid;
-	/*
-	 * Update ASID and invalidate CD in all associated masters. There will
-	 * be some overlap between use of both ASIDs, until we invalidate the
-	 * TLB.
-	 */
-	arm_smmu_update_s1_domain_cd_entry(smmu_domain);
-
-	/* Invalidate TLB entries previously associated with that context */
-	arm_smmu_tlb_inv_asid(smmu, asid);
-	xa_erase(&arm_smmu_asid_xa, asid);
-	return NULL;
-}
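[Editor's note: with arm_smmu_share_asid() gone, each domain simply owns a private ASID allocated from the shared xarray; the same pattern appears later in arm_smmu_sva_domain_alloc(). A standalone sketch of that allocation idiom (the helper name is hypothetical):]

/* Sketch of the xarray-based ASID allocation this series standardizes on. */
static int example_alloc_asid(struct arm_smmu_device *smmu,
			      struct arm_smmu_domain *smmu_domain)
{
	u32 asid;
	int ret;

	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
	if (ret)
		return ret;		/* ASID space exhausted */
	smmu_domain->cd.asid = asid;	/* owned until the domain is freed */
	return 0;
}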
 static u64 page_size_to_cd(void)
 {
 	static_assert(PAGE_SIZE == SZ_4K || PAGE_SIZE == SZ_16K ||
@@ -187,69 +115,6 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
 }
 EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_sva_cd);
-static struct arm_smmu_ctx_desc *arm_smmu_alloc_shared_cd(struct mm_struct *mm)
-{
-	u16 asid;
-	int err = 0;
-	struct arm_smmu_ctx_desc *cd;
-	struct arm_smmu_ctx_desc *ret = NULL;
-
-	/* Don't free the mm until we release the ASID */
-	mmgrab(mm);
-	asid = arm64_mm_context_get(mm);
-	if (!asid) {
-		err = -ESRCH;
-		goto out_drop_mm;
-	}
-
-	cd = kzalloc(sizeof(*cd), GFP_KERNEL);
-	if (!cd) {
-		err = -ENOMEM;
-		goto out_put_context;
-	}
-
-	refcount_set(&cd->refs, 1);
-
-	mutex_lock(&arm_smmu_asid_lock);
-	ret = arm_smmu_share_asid(mm, asid);
-	if (ret) {
-		mutex_unlock(&arm_smmu_asid_lock);
-		goto out_free_cd;
-	}
-
-	err = xa_insert(&arm_smmu_asid_xa, asid, cd, GFP_KERNEL);
-	mutex_unlock(&arm_smmu_asid_lock);
-
-	if (err)
-		goto out_free_asid;
-
-	cd->asid = asid;
-	cd->mm = mm;
-
-	return cd;
-
-out_free_asid:
-	arm_smmu_free_asid(cd);
-out_free_cd:
-	kfree(cd);
-out_put_context:
-	arm64_mm_context_put(mm);
-out_drop_mm:
-	mmdrop(mm);
-	return err < 0 ? ERR_PTR(err) : ret;
-}
-
-static void arm_smmu_free_shared_cd(struct arm_smmu_ctx_desc *cd)
-{
-	if (arm_smmu_free_asid(cd)) {
-		/* Unpin ASID */
-		arm64_mm_context_put(cd->mm);
-		mmdrop(cd->mm);
-		kfree(cd);
-	}
-}
 /*
  * Cloned from the MAX_TLBI_OPS in arch/arm64/include/asm/tlbflush.h, this
  * is used as a threshold to replace per-page TLBI commands to issue in the
@@ -264,8 +129,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 					      unsigned long start,
 					      unsigned long end)
 {
-	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
-	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
+	struct arm_smmu_domain *smmu_domain =
+		container_of(mn, struct arm_smmu_domain, mmu_notifier);
 	size_t size;
 
 	/*
@@ -282,62 +147,50 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
 		size = 0;
 	}
-	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_BTM)) {
-		if (!size)
-			arm_smmu_tlb_inv_asid(smmu_domain->smmu,
-					      smmu_mn->cd->asid);
-		else
-			arm_smmu_tlb_inv_range_asid(start, size,
-						    smmu_mn->cd->asid,
-						    PAGE_SIZE, false,
-						    smmu_domain);
-	}
+	if (!size)
+		arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
+	else
+		arm_smmu_tlb_inv_range_asid(start, size, smmu_domain->cd.asid,
+					    PAGE_SIZE, false, smmu_domain);
 
-	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
-				size);
+	arm_smmu_atc_inv_domain(smmu_domain, start, size);
 }
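[Editor's note: the size == 0 case above is the fallback described in the truncated comment before this hunk: when a range would take too many per-page TLBI commands, the caller zeroes size and a whole-ASID invalidation is issued instead. A sketch of that decision, assuming a MAX_TLBI_OPS-style threshold (the driver's actual constant is defined just above the cut):]

/* Hypothetical helper restating the fallback rule; not the driver's code. */
static size_t example_clamp_inv_range(unsigned long start, unsigned long end)
{
	size_t span = end - start;

	/* Too many per-page TLBIs? Signal "invalidate whole ASID" with 0. */
	if (span >= MAX_TLBI_OPS * PAGE_SIZE)
		return 0;
	return span;
}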
 static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
-	struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
-	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-	struct arm_smmu_master *master;
+	struct arm_smmu_domain *smmu_domain =
+		container_of(mn, struct arm_smmu_domain, mmu_notifier);
+	struct arm_smmu_master_domain *master_domain;
 	unsigned long flags;
 
-	mutex_lock(&sva_lock);
-	if (smmu_mn->cleared) {
-		mutex_unlock(&sva_lock);
-		return;
-	}
-
 	/*
 	 * DMA may still be running. Keep the cd valid to avoid C_BAD_CD events,
 	 * but disable translation.
 	 */
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+	list_for_each_entry(master_domain, &smmu_domain->devices,
+			    devices_elm) {
+		struct arm_smmu_master *master = master_domain->master;
 		struct arm_smmu_cd target;
 		struct arm_smmu_cd *cdptr;
 
-		cdptr = arm_smmu_get_cd_ptr(master, mm_get_enqcmd_pasid(mm));
+		cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
 		if (WARN_ON(!cdptr))
 			continue;
 
-		arm_smmu_make_sva_cd(&target, master, NULL, smmu_mn->cd->asid);
-		arm_smmu_write_cd_entry(master, mm_get_enqcmd_pasid(mm), cdptr,
+		arm_smmu_make_sva_cd(&target, master, NULL,
+				     smmu_domain->cd.asid);
+		arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
 					&target);
 	}
 	spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
-	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
-	arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
-
-	smmu_mn->cleared = true;
-	mutex_unlock(&sva_lock);
+	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
+	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
 
 static void arm_smmu_mmu_notifier_free(struct mmu_notifier *mn)
 {
-	kfree(mn_to_smmu(mn));
+	kfree(container_of(mn, struct arm_smmu_domain, mmu_notifier));
 }
 static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
@@ -346,127 +199,6 @@ static const struct mmu_notifier_ops arm_smmu_mmu_notifier_ops = {
 	.free_notifier			= arm_smmu_mmu_notifier_free,
 };
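[Editor's note: the notifier is now embedded directly in struct arm_smmu_domain, so every callback recovers the domain with container_of() and teardown is deferred through SRCU via mmu_notifier_put(). A generic sketch of this embedding pattern; the "my_" names are illustrative, not from this driver:]

/* Illustrative embedding pattern; not the driver's literal code. */
struct my_domain {
	struct mmu_notifier mmu_notifier;	/* embedded, not allocated */
};

static void my_free_notifier(struct mmu_notifier *mn)
{
	kfree(container_of(mn, struct my_domain, mmu_notifier));
}

static const struct mmu_notifier_ops my_mn_ops = {
	.free_notifier = my_free_notifier,
};

/* register: d->mmu_notifier.ops = &my_mn_ops;
 *           mmu_notifier_register(&d->mmu_notifier, mm);
 * teardown: mmu_notifier_put(&d->mmu_notifier);  frees via SRCU callback */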
-/* Allocate or get existing MMU notifier for this {domain, mm} pair */
-static struct arm_smmu_mmu_notifier *
-arm_smmu_mmu_notifier_get(struct arm_smmu_domain *smmu_domain,
-			  struct mm_struct *mm)
-{
-	int ret;
-	struct arm_smmu_ctx_desc *cd;
-	struct arm_smmu_mmu_notifier *smmu_mn;
-
-	list_for_each_entry(smmu_mn, &smmu_domain->mmu_notifiers, list) {
-		if (smmu_mn->mn.mm == mm) {
-			refcount_inc(&smmu_mn->refs);
-			return smmu_mn;
-		}
-	}
-
-	cd = arm_smmu_alloc_shared_cd(mm);
-	if (IS_ERR(cd))
-		return ERR_CAST(cd);
-
-	smmu_mn = kzalloc(sizeof(*smmu_mn), GFP_KERNEL);
-	if (!smmu_mn) {
-		ret = -ENOMEM;
-		goto err_free_cd;
-	}
-
-	refcount_set(&smmu_mn->refs, 1);
-	smmu_mn->cd = cd;
-	smmu_mn->domain = smmu_domain;
-	smmu_mn->mn.ops = &arm_smmu_mmu_notifier_ops;
-
-	ret = mmu_notifier_register(&smmu_mn->mn, mm);
-	if (ret) {
-		kfree(smmu_mn);
-		goto err_free_cd;
-	}
-
-	list_add(&smmu_mn->list, &smmu_domain->mmu_notifiers);
-	return smmu_mn;
-
-err_free_cd:
-	arm_smmu_free_shared_cd(cd);
-	return ERR_PTR(ret);
-}
-
-static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
-{
-	struct mm_struct *mm = smmu_mn->mn.mm;
-	struct arm_smmu_ctx_desc *cd = smmu_mn->cd;
-	struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-
-	if (!refcount_dec_and_test(&smmu_mn->refs))
-		return;
-
-	list_del(&smmu_mn->list);
-
-	/*
-	 * If we went through clear(), we've already invalidated, and no
-	 * new TLB entry can have been formed.
-	 */
-	if (!smmu_mn->cleared) {
-		arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
-		arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
-					0);
-	}
-
-	/* Frees smmu_mn */
-	mmu_notifier_put(&smmu_mn->mn);
-	arm_smmu_free_shared_cd(cd);
-}
-
-static int __arm_smmu_sva_bind(struct device *dev, ioasid_t pasid,
-			       struct mm_struct *mm)
-{
-	int ret;
-	struct arm_smmu_cd target;
-	struct arm_smmu_cd *cdptr;
-	struct arm_smmu_bond *bond;
-	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
-	struct arm_smmu_domain *smmu_domain;
-
-	if (!(domain->type & __IOMMU_DOMAIN_PAGING))
-		return -ENODEV;
-	smmu_domain = to_smmu_domain(domain);
-	if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
-		return -ENODEV;
-
-	if (!master || !master->sva_enabled)
-		return -ENODEV;
-
-	bond = kzalloc(sizeof(*bond), GFP_KERNEL);
-	if (!bond)
-		return -ENOMEM;
-
-	bond->mm = mm;
-
-	bond->smmu_mn = arm_smmu_mmu_notifier_get(smmu_domain, mm);
-	if (IS_ERR(bond->smmu_mn)) {
-		ret = PTR_ERR(bond->smmu_mn);
-		goto err_free_bond;
-	}
-
-	cdptr = arm_smmu_alloc_cd_ptr(master, mm_get_enqcmd_pasid(mm));
-	if (!cdptr) {
-		ret = -ENOMEM;
-		goto err_put_notifier;
-	}
-	arm_smmu_make_sva_cd(&target, master, mm, bond->smmu_mn->cd->asid);
-	arm_smmu_write_cd_entry(master, pasid, cdptr, &target);
-
-	list_add(&bond->list, &master->bonds);
-	return 0;
-
-err_put_notifier:
-	arm_smmu_mmu_notifier_put(bond->smmu_mn);
-err_free_bond:
-	kfree(bond);
-	return ret;
-}
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
 {
 	unsigned long reg, fld;
@@ -583,11 +315,6 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
 int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
 {
 	mutex_lock(&sva_lock);
-	if (!list_empty(&master->bonds)) {
-		dev_err(master->dev, "cannot disable SVA, device is bound\n");
-		mutex_unlock(&sva_lock);
-		return -EBUSY;
-	}
 	arm_smmu_master_sva_disable_iopf(master);
 	master->sva_enabled = false;
 	mutex_unlock(&sva_lock);
@@ -604,51 +331,51 @@ void arm_smmu_sva_notifier_synchronize(void)
 	mmu_notifier_synchronize();
 }
-void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
-				   struct device *dev, ioasid_t id)
-{
-	struct mm_struct *mm = domain->mm;
-	struct arm_smmu_bond *bond = NULL, *t;
-	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
-
-	mutex_lock(&sva_lock);
-
-	arm_smmu_clear_cd(master, id);
-
-	list_for_each_entry(t, &master->bonds, list) {
-		if (t->mm == mm) {
-			bond = t;
-			break;
-		}
-	}
-
-	if (!WARN_ON(!bond)) {
-		list_del(&bond->list);
-		arm_smmu_mmu_notifier_put(bond->smmu_mn);
-		kfree(bond);
-	}
-	mutex_unlock(&sva_lock);
-}
 static int arm_smmu_sva_set_dev_pasid(struct iommu_domain *domain,
 				      struct device *dev, ioasid_t id)
 {
-	int ret = 0;
-	struct mm_struct *mm = domain->mm;
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+	struct arm_smmu_cd target;
+	int ret;
 
-	if (mm_get_enqcmd_pasid(mm) != id)
+	/* Prevent arm_smmu_mm_release from being called while we are attaching */
+	if (!mmget_not_zero(domain->mm))
 		return -EINVAL;
 
-	mutex_lock(&sva_lock);
-	ret = __arm_smmu_sva_bind(dev, id, mm);
-	mutex_unlock(&sva_lock);
+	/*
+	 * This does not need the arm_smmu_asid_lock because SVA domains never
+	 * get reassigned
+	 */
+	arm_smmu_make_sva_cd(&target, master, domain->mm, smmu_domain->cd.asid);
+	ret = arm_smmu_set_pasid(master, smmu_domain, id, &target);
 
+	mmput(domain->mm);
 	return ret;
 }
 static void arm_smmu_sva_domain_free(struct iommu_domain *domain)
 {
-	kfree(domain);
+	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+
+	/*
+	 * Ensure the ASID is empty in the iommu cache before allowing reuse.
+	 */
+	arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_domain->cd.asid);
+
+	/*
+	 * Notice that the arm_smmu_mm_arch_invalidate_secondary_tlbs op can
+	 * still be called/running at this point. We allow the ASID to be
+	 * reused, and if there is a race then it just suffers harmless
+	 * unnecessary invalidation.
+	 */
+	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
+
+	/*
+	 * Actual free is deferred to the SRCU callback
+	 * arm_smmu_mmu_notifier_free()
+	 */
+	mmu_notifier_put(&smmu_domain->mmu_notifier);
 }
 static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
@@ -656,14 +383,38 @@ static const struct iommu_domain_ops arm_smmu_sva_domain_ops = {
 	.free			= arm_smmu_sva_domain_free
 };
 
-struct iommu_domain *arm_smmu_sva_domain_alloc(void)
+struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
+					       struct mm_struct *mm)
 {
-	struct iommu_domain *domain;
+	struct arm_smmu_master *master = dev_iommu_priv_get(dev);
+	struct arm_smmu_device *smmu = master->smmu;
+	struct arm_smmu_domain *smmu_domain;
+	u32 asid;
+	int ret;
+
+	smmu_domain = arm_smmu_domain_alloc();
+	if (IS_ERR(smmu_domain))
+		return ERR_CAST(smmu_domain);
+	smmu_domain->domain.type = IOMMU_DOMAIN_SVA;
+	smmu_domain->domain.ops = &arm_smmu_sva_domain_ops;
+	smmu_domain->smmu = smmu;
+
+	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
+		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
+	if (ret)
+		goto err_free;
+
+	smmu_domain->cd.asid = asid;
+	smmu_domain->mmu_notifier.ops = &arm_smmu_mmu_notifier_ops;
+	ret = mmu_notifier_register(&smmu_domain->mmu_notifier, mm);
+	if (ret)
+		goto err_asid;
 
-	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
-	if (!domain)
-		return NULL;
-	domain->ops = &arm_smmu_sva_domain_ops;
+	return &smmu_domain->domain;
 
-	return domain;
+err_asid:
+	xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
+err_free:
+	kfree(smmu_domain);
+	return ERR_PTR(ret);
 }
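[Editor's note: for context, a driver-agnostic consumer reaches this allocation path through the IOMMU core's SVA API in linux/iommu.h. A hedged usage sketch, error handling abbreviated:]

/* Sketch of the core-facing SVA bind/unbind flow. */
struct iommu_sva *handle;
u32 pasid;

handle = iommu_sva_bind_device(dev, current->mm);
if (IS_ERR(handle))
	return PTR_ERR(handle);
pasid = iommu_sva_get_pasid(handle);	/* program this PASID into the device */
/* ... issue DMA tagged with pasid ... */
iommu_sva_unbind_device(handle);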
...
@@ -144,6 +144,14 @@ static void arm_smmu_v3_test_ste_expect_transition(
 	KUNIT_EXPECT_MEMEQ(test, target->data, cur_copy.data, sizeof(cur_copy));
 }
 
+static void arm_smmu_v3_test_ste_expect_non_hitless_transition(
+	struct kunit *test, const struct arm_smmu_ste *cur,
+	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
+{
+	arm_smmu_v3_test_ste_expect_transition(test, cur, target,
+					       num_syncs_expected, false);
+}
+
 static void arm_smmu_v3_test_ste_expect_hitless_transition(
 	struct kunit *test, const struct arm_smmu_ste *cur,
 	const struct arm_smmu_ste *target, unsigned int num_syncs_expected)
@@ -155,6 +163,7 @@ static void arm_smmu_v3_test_ste_expect_hitless_transition(
 static const dma_addr_t fake_cdtab_dma_addr = 0xF0F0F0F0F0F0;
 
 static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
+					   unsigned int s1dss,
 					   const dma_addr_t dma_addr)
 {
 	struct arm_smmu_master master = {
@@ -164,7 +173,7 @@ static void arm_smmu_test_make_cdtable_ste(struct arm_smmu_ste *ste,
 		.smmu = &smmu,
 	};
 
-	arm_smmu_make_cdtable_ste(ste, &master);
+	arm_smmu_make_cdtable_ste(ste, &master, true, s1dss);
 }
 
 static void arm_smmu_v3_write_ste_test_bypass_to_abort(struct kunit *test)
@@ -194,7 +203,8 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_abort(struct kunit *test)
 {
 	struct arm_smmu_ste ste;
 
-	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &abort_ste,
 						       NUM_EXPECTED_SYNCS(2));
 }
@@ -203,7 +213,8 @@ static void arm_smmu_v3_write_ste_test_abort_to_cdtable(struct kunit *test)
 {
 	struct arm_smmu_ste ste;
 
-	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &abort_ste, &ste,
 						       NUM_EXPECTED_SYNCS(2));
 }
@@ -212,7 +223,8 @@ static void arm_smmu_v3_write_ste_test_cdtable_to_bypass(struct kunit *test)
 {
 	struct arm_smmu_ste ste;
 
-	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &ste, &bypass_ste,
 						       NUM_EXPECTED_SYNCS(3));
 }
@@ -221,17 +233,59 @@ static void arm_smmu_v3_write_ste_test_bypass_to_cdtable(struct kunit *test)
 {
 	struct arm_smmu_ste ste;
 
-	arm_smmu_test_make_cdtable_ste(&ste, fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
 	arm_smmu_v3_test_ste_expect_hitless_transition(test, &bypass_ste, &ste,
 						       NUM_EXPECTED_SYNCS(3));
 }
+static void arm_smmu_v3_write_ste_test_cdtable_s1dss_change(struct kunit *test)
+{
+	struct arm_smmu_ste ste;
+	struct arm_smmu_ste s1dss_bypass;
+
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
+				       fake_cdtab_dma_addr);
+
+	/*
+	 * Flipping s1dss on a CD table STE only involves changes to the second
+	 * qword of an STE and can be done in a single write.
+	 */
+	arm_smmu_v3_test_ste_expect_hitless_transition(
+		test, &ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(1));
+	arm_smmu_v3_test_ste_expect_hitless_transition(
+		test, &s1dss_bypass, &ste, NUM_EXPECTED_SYNCS(1));
+}
+
+static void
+arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass(struct kunit *test)
+{
+	struct arm_smmu_ste s1dss_bypass;
+
+	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
+				       fake_cdtab_dma_addr);
+	arm_smmu_v3_test_ste_expect_hitless_transition(
+		test, &s1dss_bypass, &bypass_ste, NUM_EXPECTED_SYNCS(2));
+}
+
+static void
+arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass(struct kunit *test)
+{
+	struct arm_smmu_ste s1dss_bypass;
+
+	arm_smmu_test_make_cdtable_ste(&s1dss_bypass, STRTAB_STE_1_S1DSS_BYPASS,
+				       fake_cdtab_dma_addr);
+	arm_smmu_v3_test_ste_expect_hitless_transition(
+		test, &bypass_ste, &s1dss_bypass, NUM_EXPECTED_SYNCS(2));
+}
 static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
 				      bool ats_enabled)
 {
 	struct arm_smmu_master master = {
 		.smmu = &smmu,
-		.ats_enabled = ats_enabled,
 	};
 	struct io_pgtable io_pgtable = {};
 	struct arm_smmu_domain smmu_domain = {
@@ -247,7 +301,7 @@ static void arm_smmu_test_make_s2_ste(struct arm_smmu_ste *ste,
 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.sl = 3;
 	io_pgtable.cfg.arm_lpae_s2_cfg.vtcr.tsz = 4;
 
-	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain);
+	arm_smmu_make_s2_domain_ste(ste, &master, &smmu_domain, ats_enabled);
 }
 
 static void arm_smmu_v3_write_ste_test_s2_to_abort(struct kunit *test)
@@ -286,6 +340,48 @@ static void arm_smmu_v3_write_ste_test_bypass_to_s2(struct kunit *test)
 						       NUM_EXPECTED_SYNCS(2));
 }
+static void arm_smmu_v3_write_ste_test_s1_to_s2(struct kunit *test)
+{
+	struct arm_smmu_ste s1_ste;
+	struct arm_smmu_ste s2_ste;
+
+	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
+	arm_smmu_test_make_s2_ste(&s2_ste, true);
+	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s1_ste, &s2_ste,
+						       NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_v3_write_ste_test_s2_to_s1(struct kunit *test)
+{
+	struct arm_smmu_ste s1_ste;
+	struct arm_smmu_ste s2_ste;
+
+	arm_smmu_test_make_cdtable_ste(&s1_ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
+	arm_smmu_test_make_s2_ste(&s2_ste, true);
+	arm_smmu_v3_test_ste_expect_hitless_transition(test, &s2_ste, &s1_ste,
+						       NUM_EXPECTED_SYNCS(3));
+}
+
+static void arm_smmu_v3_write_ste_test_non_hitless(struct kunit *test)
+{
+	struct arm_smmu_ste ste;
+	struct arm_smmu_ste ste_2;
+
+	/*
+	 * Although no flow resembles this in practice, one way to force an STE
+	 * update to be non-hitless is to change its CD table pointer as well as
+	 * s1 dss field in the same update.
+	 */
+	arm_smmu_test_make_cdtable_ste(&ste, STRTAB_STE_1_S1DSS_SSID0,
+				       fake_cdtab_dma_addr);
+	arm_smmu_test_make_cdtable_ste(&ste_2, STRTAB_STE_1_S1DSS_BYPASS,
+				       0x4B4B4b4B4B);
+	arm_smmu_v3_test_ste_expect_non_hitless_transition(
+		test, &ste, &ste_2, NUM_EXPECTED_SYNCS(3));
+}
 static void arm_smmu_v3_test_cd_expect_transition(
 	struct kunit *test, const struct arm_smmu_cd *cur,
 	const struct arm_smmu_cd *target, unsigned int num_syncs_expected,
@@ -439,10 +535,16 @@ static struct kunit_case arm_smmu_v3_test_cases[] = {
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_cdtable),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_to_bypass),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_cdtable),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_cdtable_s1dss_change),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1dssbypass_to_stebypass),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_stebypass_to_s1dssbypass),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_abort),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_abort_to_s2),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_bypass),
 	KUNIT_CASE(arm_smmu_v3_write_ste_test_bypass_to_s2),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_s1_to_s2),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_s2_to_s1),
+	KUNIT_CASE(arm_smmu_v3_write_ste_test_non_hitless),
 	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_clear),
 	KUNIT_CASE(arm_smmu_v3_write_cd_test_s1_change_asid),
 	KUNIT_CASE(arm_smmu_v3_write_cd_test_sva_clear),
@@ -465,4 +567,5 @@ static struct kunit_suite arm_smmu_v3_test_module = {
 kunit_test_suites(&arm_smmu_v3_test_module);
 
 MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
+MODULE_DESCRIPTION("KUnit tests for arm-smmu-v3 driver");
 MODULE_LICENSE("GPL v2");
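[Editor's note: the suite above follows the standard KUnit pattern; for orientation, a minimal standalone case of the same shape. Names here are illustrative, not part of this driver:]

/* Minimal KUnit sketch mirroring the suite's structure. */
static void example_test(struct kunit *test)
{
	KUNIT_EXPECT_EQ(test, 1 + 1, 2);
}

static struct kunit_case example_cases[] = {
	KUNIT_CASE(example_test),
	{}
};

static struct kunit_suite example_suite = {
	.name = "example",
	.test_cases = example_cases,
};
kunit_test_suites(&example_suite);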
...
@@ -27,6 +27,7 @@
 #include <linux/pci-ats.h>
 #include <linux/platform_device.h>
 #include <kunit/visibility.h>
+#include <uapi/linux/iommufd.h>
 
 #include "arm-smmu-v3.h"
 #include "../../dma-iommu.h"
@@ -36,6 +37,9 @@ module_param(disable_msipolling, bool, 0444);
 MODULE_PARM_DESC(disable_msipolling,
 	"Disable MSI-based polling for CMD_SYNC completion.");
 
+static struct iommu_ops arm_smmu_ops;
+static struct iommu_dirty_ops arm_smmu_dirty_ops;
+
 enum arm_smmu_msi_index {
 	EVTQ_MSI_INDEX,
 	GERROR_MSI_INDEX,
@@ -80,7 +84,7 @@ static struct arm_smmu_option_prop arm_smmu_options[] = {
 };
 
 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
-				    struct arm_smmu_device *smmu);
+				    struct arm_smmu_device *smmu, u32 flags);
 static int arm_smmu_alloc_cd_tables(struct arm_smmu_master *master);
 
 static void parse_driver_options(struct arm_smmu_device *smmu)
@@ -991,6 +995,14 @@ void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits)
 			STRTAB_STE_1_S1STALLD | STRTAB_STE_1_STRW |
 			STRTAB_STE_1_EATS);
 		used_bits[2] |= cpu_to_le64(STRTAB_STE_2_S2VMID);
+
+		/*
+		 * See 13.5 Summary of attribute/permission configuration fields
+		 * for the SHCFG behavior.
+		 */
+		if (FIELD_GET(STRTAB_STE_1_S1DSS, le64_to_cpu(ent[1])) ==
+		    STRTAB_STE_1_S1DSS_BYPASS)
+			used_bits[1] |= cpu_to_le64(STRTAB_STE_1_SHCFG);
 	}
 
 	/* S2 translates */
@@ -1211,8 +1223,8 @@ struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
 	return &l1_desc->l2ptr[ssid % CTXDESC_L2_ENTRIES];
 }
 
-struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
-					  u32 ssid)
+static struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
+						 u32 ssid)
 {
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 	struct arm_smmu_device *smmu = master->smmu;
@@ -1289,6 +1301,8 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
 			     struct arm_smmu_cd *cdptr,
 			     const struct arm_smmu_cd *target)
 {
+	bool target_valid = target->data[0] & cpu_to_le64(CTXDESC_CD_0_V);
+	bool cur_valid = cdptr->data[0] & cpu_to_le64(CTXDESC_CD_0_V);
 	struct arm_smmu_cd_writer cd_writer = {
 		.writer = {
 			.ops = &arm_smmu_cd_writer_ops,
@@ -1297,6 +1311,13 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
 		.ssid = ssid,
 	};
 
+	if (ssid != IOMMU_NO_PASID && cur_valid != target_valid) {
+		if (cur_valid)
+			master->cd_table.used_ssids--;
+		else
+			master->cd_table.used_ssids++;
+	}
+
 	arm_smmu_write_entry(&cd_writer.writer, cdptr->data, target->data);
 }
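[Editor's note: the used_ssids adjustment above counts only valid-to-invalid (and back) transitions of non-RID entries; rewriting an already-valid CD leaves the count unchanged. A compact restatement, with a hypothetical helper name:]

/* Hypothetical restatement: only V-bit transitions on ssid != IOMMU_NO_PASID
 * change the count.
 *   invalid -> valid : used_ssids++
 *   valid -> invalid : used_ssids--
 *   valid -> valid   : no change (hitless update of a live entry)
 */
static inline int example_used_ssids_delta(bool cur_valid, bool target_valid)
{
	return (int)target_valid - (int)cur_valid;	/* +1, 0 or -1 */
}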
@@ -1331,6 +1352,12 @@ void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
 		CTXDESC_CD_0_ASET |
 		FIELD_PREP(CTXDESC_CD_0_ASID, cd->asid)
 		);
+
+	/* To enable dirty flag update, set both Access flag and dirty state update */
+	if (pgtbl_cfg->quirks & IO_PGTABLE_QUIRK_ARM_HD)
+		target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA |
+					       CTXDESC_CD_0_TCR_HD);
+
 	target->data[1] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
 				      CTXDESC_CD_1_TTB0_MASK);
 	target->data[3] = cpu_to_le64(pgtbl_cfg->arm_lpae_s1_cfg.mair);
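[Editor's note: with HA/HD set, hardware maintains the dirty state that the read_and_clear_dirty() io-pgtable op added earlier in this series reports back. A hedged sketch of how a dirty-tracking caller might drive it; the bitmap/gather plumbing is elided and the variable names are illustrative, not from this patch:]

/* Sketch; see the io-pgtable-arm patch in this series for the real op. */
struct iommu_dirty_bitmap dirty;

iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
/* Pass IOMMU_DIRTY_NO_CLEAR to read without resetting the dirty bits. */
ret = ops->read_and_clear_dirty(ops, iova, size, 0, &dirty);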
@@ -1430,30 +1457,13 @@ static void arm_smmu_free_cd_tables(struct arm_smmu_master *master)
 	cd_table->cdtab = NULL;
 }
 
-bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd)
-{
-	bool free;
-	struct arm_smmu_ctx_desc *old_cd;
-
-	if (!cd->asid)
-		return false;
-
-	free = refcount_dec_and_test(&cd->refs);
-	if (free) {
-		old_cd = xa_erase(&arm_smmu_asid_xa, cd->asid);
-		WARN_ON(old_cd != cd);
-	}
-	return free;
-}
-
 /* Stream table manipulation functions */
-static void
-arm_smmu_write_strtab_l1_desc(__le64 *dst, struct arm_smmu_strtab_l1_desc *desc)
+static void arm_smmu_write_strtab_l1_desc(__le64 *dst, dma_addr_t l2ptr_dma)
 {
 	u64 val = 0;
 
-	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, desc->span);
-	val |= desc->l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
+	val |= FIELD_PREP(STRTAB_L1_DESC_SPAN, STRTAB_SPLIT + 1);
+	val |= l2ptr_dma & STRTAB_L1_DESC_L2PTR_MASK;
 
 	/* The HW has 64 bit atomicity with stores to the L2 STE table */
 	WRITE_ONCE(*dst, cpu_to_le64(val));
@@ -1538,7 +1548,8 @@ EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_bypass_ste);
 
 VISIBLE_IF_KUNIT
 void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
-			       struct arm_smmu_master *master)
+			       struct arm_smmu_master *master, bool ats_enabled,
+			       unsigned int s1dss)
 {
 	struct arm_smmu_ctx_desc_cfg *cd_table = &master->cd_table;
 	struct arm_smmu_device *smmu = master->smmu;
@@ -1552,7 +1563,7 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
 		FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
 
 	target->data[1] = cpu_to_le64(
-		FIELD_PREP(STRTAB_STE_1_S1DSS, STRTAB_STE_1_S1DSS_SSID0) |
+		FIELD_PREP(STRTAB_STE_1_S1DSS, s1dss) |
 		FIELD_PREP(STRTAB_STE_1_S1CIR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 		FIELD_PREP(STRTAB_STE_1_S1COR, STRTAB_STE_1_S1C_CACHE_WBRA) |
 		FIELD_PREP(STRTAB_STE_1_S1CSH, ARM_SMMU_SH_ISH) |
@@ -1561,7 +1572,12 @@ void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
 			 STRTAB_STE_1_S1STALLD :
 			 0) |
 		FIELD_PREP(STRTAB_STE_1_EATS,
-			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+			   ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+
+	if ((smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR) &&
+	    s1dss == STRTAB_STE_1_S1DSS_BYPASS)
+		target->data[1] |= cpu_to_le64(FIELD_PREP(
+			STRTAB_STE_1_SHCFG, STRTAB_STE_1_SHCFG_INCOMING));
 
 	if (smmu->features & ARM_SMMU_FEAT_E2H) {
 		/*
@@ -1591,7 +1607,8 @@ EXPORT_SYMBOL_IF_KUNIT(arm_smmu_make_cdtable_ste);
 VISIBLE_IF_KUNIT
 void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
 				 struct arm_smmu_master *master,
-				 struct arm_smmu_domain *smmu_domain)
+				 struct arm_smmu_domain *smmu_domain,
+				 bool ats_enabled)
 {
 	struct arm_smmu_s2_cfg *s2_cfg = &smmu_domain->s2_cfg;
 	const struct io_pgtable_cfg *pgtbl_cfg =
@@ -1608,7 +1625,7 @@ void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
 
 	target->data[1] = cpu_to_le64(
 		FIELD_PREP(STRTAB_STE_1_EATS,
-			   master->ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
+			   ats_enabled ? STRTAB_STE_1_EATS_TRANS : 0));
 
 	if (smmu->features & ARM_SMMU_FEAT_ATTR_TYPES_OVR)
 		target->data[1] |= cpu_to_le64(FIELD_PREP(STRTAB_STE_1_SHCFG,
@@ -1655,6 +1672,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 {
 	size_t size;
 	void *strtab;
+	dma_addr_t l2ptr_dma;
 	struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
 	struct arm_smmu_strtab_l1_desc *desc = &cfg->l1_desc[sid >> STRTAB_SPLIT];
 
@@ -1664,8 +1682,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 	size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
 	strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
 
-	desc->span = STRTAB_SPLIT + 1;
-	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
+	desc->l2ptr = dmam_alloc_coherent(smmu->dev, size, &l2ptr_dma,
 					  GFP_KERNEL);
 	if (!desc->l2ptr) {
 		dev_err(smmu->dev,
@@ -1675,7 +1692,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
 	}
 
 	arm_smmu_init_initial_stes(desc->l2ptr, 1 << STRTAB_SPLIT);
-	arm_smmu_write_strtab_l1_desc(strtab, desc);
+	arm_smmu_write_strtab_l1_desc(strtab, l2ptr_dma);
 	return 0;
 }
@@ -1995,13 +2012,14 @@ arm_smmu_atc_inv_to_cmd(int ssid, unsigned long iova, size_t size,
 	cmd->atc.size	= log2_span;
 }
 
-static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
+static int arm_smmu_atc_inv_master(struct arm_smmu_master *master,
+				   ioasid_t ssid)
 {
 	int i;
 	struct arm_smmu_cmdq_ent cmd;
 	struct arm_smmu_cmdq_batch cmds;
 
-	arm_smmu_atc_inv_to_cmd(IOMMU_NO_PASID, 0, 0, &cmd);
+	arm_smmu_atc_inv_to_cmd(ssid, 0, 0, &cmd);
 
 	cmds.num = 0;
 	for (i = 0; i < master->num_streams; i++) {
@@ -2012,13 +2030,13 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 	return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
 }
 
-int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
-			    unsigned long iova, size_t size)
+int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+			    unsigned long iova, size_t size)
 {
+	struct arm_smmu_master_domain *master_domain;
 	int i;
 	unsigned long flags;
 	struct arm_smmu_cmdq_ent cmd;
-	struct arm_smmu_master *master;
 	struct arm_smmu_cmdq_batch cmds;
 
 	if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2041,15 +2059,18 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
 	if (!atomic_read(&smmu_domain->nr_ats_masters))
 		return 0;
 
-	arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
-
 	cmds.num = 0;
 
 	spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-	list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+	list_for_each_entry(master_domain, &smmu_domain->devices,
+			    devices_elm) {
+		struct arm_smmu_master *master = master_domain->master;
+
 		if (!master->ats_enabled)
 			continue;
 
+		arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size, &cmd);
+
 		for (i = 0; i < master->num_streams; i++) {
 			cmd.atc.sid = master->streams[i].id;
 			arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
@@ -2081,7 +2102,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
 		cmd.tlbi.vmid	= smmu_domain->s2_cfg.vmid;
 		arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
 	}
-	arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+	arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
 
 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
@@ -2179,7 +2200,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
 	 * Unfortunately, this can't be leaf-only since we may have
 	 * zapped an entire table.
 	 */
-	arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
+	arm_smmu_atc_inv_domain(smmu_domain, iova, size);
 }
 
 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
@@ -2220,6 +2241,13 @@ static const struct iommu_flush_ops arm_smmu_flush_ops = {
 	.tlb_add_page	= arm_smmu_tlb_inv_page_nosync,
 };
 
+static bool arm_smmu_dbm_capable(struct arm_smmu_device *smmu)
+{
+	u32 features = (ARM_SMMU_FEAT_HD | ARM_SMMU_FEAT_COHERENCY);
+
+	return (smmu->features & features) == features;
+}
+
 /* IOMMU API */
 static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 {
@@ -2232,17 +2260,26 @@ static bool arm_smmu_capable(struct device *dev, enum iommu_cap cap)
 	case IOMMU_CAP_NOEXEC:
 	case IOMMU_CAP_DEFERRED_FLUSH:
 		return true;
+	case IOMMU_CAP_DIRTY_TRACKING:
+		return arm_smmu_dbm_capable(master->smmu);
 	default:
 		return false;
 	}
 }
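[Editor's note: dirty tracking is reported only when both hardware dirty-bit update (ARM_SMMU_FEAT_HD) and coherent table walks are present. Callers can gate on this through the core capability query; a small sketch:]

/* Sketch: caller-side gate before requesting dirty tracking. */
if (!device_iommu_capable(dev, IOMMU_CAP_DIRTY_TRACKING))
	return -EOPNOTSUPP;	/* fall back to full-range resync */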
-static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
+struct arm_smmu_domain *arm_smmu_domain_alloc(void)
 {
+	struct arm_smmu_domain *smmu_domain;
 
-	if (type == IOMMU_DOMAIN_SVA)
-		return arm_smmu_sva_domain_alloc();
-	return ERR_PTR(-EOPNOTSUPP);
+	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
+	if (!smmu_domain)
+		return ERR_PTR(-ENOMEM);
+
+	mutex_init(&smmu_domain->init_mutex);
+	INIT_LIST_HEAD(&smmu_domain->devices);
+	spin_lock_init(&smmu_domain->devices_lock);
+
+	return smmu_domain;
 }
 
 static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
@@ -2254,20 +2291,15 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
 	 * We can't really do anything meaningful until we've added a
 	 * master.
 	 */
-	smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL);
-	if (!smmu_domain)
-		return ERR_PTR(-ENOMEM);
-
-	mutex_init(&smmu_domain->init_mutex);
-	INIT_LIST_HEAD(&smmu_domain->devices);
-	spin_lock_init(&smmu_domain->devices_lock);
-	INIT_LIST_HEAD(&smmu_domain->mmu_notifiers);
+	smmu_domain = arm_smmu_domain_alloc();
+	if (IS_ERR(smmu_domain))
+		return ERR_CAST(smmu_domain);
 
 	if (dev) {
 		struct arm_smmu_master *master = dev_iommu_priv_get(dev);
 		int ret;
 
-		ret = arm_smmu_domain_finalise(smmu_domain, master->smmu);
+		ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, 0);
 		if (ret) {
 			kfree(smmu_domain);
 			return ERR_PTR(ret);
@@ -2276,7 +2308,7 @@ static struct iommu_domain *arm_smmu_domain_alloc_paging(struct device *dev)
 	return &smmu_domain->domain;
 }
-static void arm_smmu_domain_free(struct iommu_domain *domain)
+static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
 {
 	struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -2287,7 +2319,7 @@ static void arm_smmu_domain_free_paging(struct iommu_domain *domain)
 	if (smmu_domain->stage == ARM_SMMU_DOMAIN_S1) {
 		/* Prevent SVA from touching the CD while we're freeing it */
 		mutex_lock(&arm_smmu_asid_lock);
-		arm_smmu_free_asid(&smmu_domain->cd);
+		xa_erase(&arm_smmu_asid_xa, smmu_domain->cd.asid);
 		mutex_unlock(&arm_smmu_asid_lock);
 	} else {
 		struct arm_smmu_s2_cfg *cfg = &smmu_domain->s2_cfg;
@@ -2302,14 +2334,12 @@ static int arm_smmu_domain_finalise_s1(struct arm_smmu_device *smmu,
 				       struct arm_smmu_domain *smmu_domain)
 {
 	int ret;
-	u32 asid;
+	u32 asid = 0;
 	struct arm_smmu_ctx_desc *cd = &smmu_domain->cd;
 
-	refcount_set(&cd->refs, 1);
-
 	/* Prevent SVA from modifying the ASID until it is written to the CD */
 	mutex_lock(&arm_smmu_asid_lock);
-	ret = xa_alloc(&arm_smmu_asid_xa, &asid, cd,
+	ret = xa_alloc(&arm_smmu_asid_xa, &asid, smmu_domain,
 		       XA_LIMIT(1, (1 << smmu->asid_bits) - 1), GFP_KERNEL);
 	cd->asid	= (u16)asid;
 	mutex_unlock(&arm_smmu_asid_lock);
@@ -2333,15 +2363,15 @@ static int arm_smmu_domain_finalise_s2(struct arm_smmu_device *smmu,
 }
 
 static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
-				    struct arm_smmu_device *smmu)
+				    struct arm_smmu_device *smmu, u32 flags)
 {
 	int ret;
-	unsigned long ias, oas;
 	enum io_pgtable_fmt fmt;
 	struct io_pgtable_cfg pgtbl_cfg;
 	struct io_pgtable_ops *pgtbl_ops;
 	int (*finalise_stage_fn)(struct arm_smmu_device *smmu,
 				 struct arm_smmu_domain *smmu_domain);
+	bool enable_dirty = flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
 
 	/* Restrict the stage to what we can actually support */
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
@@ -2349,17 +2379,31 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
 		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
 
+	pgtbl_cfg = (struct io_pgtable_cfg) {
+		.pgsize_bitmap	= smmu->pgsize_bitmap,
+		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
+		.tlb		= &arm_smmu_flush_ops,
+		.iommu_dev	= smmu->dev,
+	};
+
 	switch (smmu_domain->stage) {
-	case ARM_SMMU_DOMAIN_S1:
-		ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48;
-		ias = min_t(unsigned long, ias, VA_BITS);
-		oas = smmu->ias;
+	case ARM_SMMU_DOMAIN_S1: {
+		unsigned long ias = (smmu->features &
+				     ARM_SMMU_FEAT_VAX) ? 52 : 48;
+
+		pgtbl_cfg.ias = min_t(unsigned long, ias, VA_BITS);
+		pgtbl_cfg.oas = smmu->ias;
+		if (enable_dirty)
+			pgtbl_cfg.quirks |= IO_PGTABLE_QUIRK_ARM_HD;
 		fmt = ARM_64_LPAE_S1;
 		finalise_stage_fn = arm_smmu_domain_finalise_s1;
 		break;
+	}
 	case ARM_SMMU_DOMAIN_S2:
-		ias = smmu->ias;
-		oas = smmu->oas;
+		if (enable_dirty)
+			return -EOPNOTSUPP;
+		pgtbl_cfg.ias = smmu->ias;
+		pgtbl_cfg.oas = smmu->oas;
 		fmt = ARM_64_LPAE_S2;
 		finalise_stage_fn = arm_smmu_domain_finalise_s2;
 		break;
@@ -2367,15 +2411,6 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 		return -EINVAL;
 	}
 
-	pgtbl_cfg = (struct io_pgtable_cfg) {
-		.pgsize_bitmap	= smmu->pgsize_bitmap,
-		.ias		= ias,
-		.oas		= oas,
-		.coherent_walk	= smmu->features & ARM_SMMU_FEAT_COHERENCY,
-		.tlb		= &arm_smmu_flush_ops,
-		.iommu_dev	= smmu->dev,
-	};
-
 	pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
 	if (!pgtbl_ops)
 		return -ENOMEM;
@@ -2383,6 +2418,8 @@ static int arm_smmu_domain_finalise(struct arm_smmu_domain *smmu_domain,
 	smmu_domain->domain.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap;
 	smmu_domain->domain.geometry.aperture_end = (1UL << pgtbl_cfg.ias) - 1;
 	smmu_domain->domain.geometry.force_aperture = true;
+	if (enable_dirty && smmu_domain->stage == ARM_SMMU_DOMAIN_S1)
+		smmu_domain->domain.dirty_ops = &arm_smmu_dirty_ops;
 
 	ret = finalise_stage_fn(smmu, smmu_domain);
 	if (ret < 0) {
@@ -2420,6 +2457,13 @@ static void arm_smmu_install_ste_for_dev(struct arm_smmu_master *master,
 	int i, j;
 	struct arm_smmu_device *smmu = master->smmu;
 
+	master->cd_table.in_ste =
+		FIELD_GET(STRTAB_STE_0_CFG, le64_to_cpu(target->data[0])) ==
+		STRTAB_STE_0_CFG_S1_TRANS;
+	master->ste_ats_enabled =
+		FIELD_GET(STRTAB_STE_1_EATS, le64_to_cpu(target->data[1])) ==
+		STRTAB_STE_1_EATS_TRANS;
+
 	for (i = 0; i < master->num_streams; ++i) {
 		u32 sid = master->streams[i].id;
 		struct arm_smmu_ste *step =
@@ -2451,46 +2495,24 @@ static bool arm_smmu_ats_supported(struct arm_smmu_master *master)
 	return dev_is_pci(dev) && pci_ats_supported(to_pci_dev(dev));
 }
 
-static void arm_smmu_enable_ats(struct arm_smmu_master *master,
-				struct arm_smmu_domain *smmu_domain)
+static void arm_smmu_enable_ats(struct arm_smmu_master *master)
 {
 	size_t stu;
 	struct pci_dev *pdev;
 	struct arm_smmu_device *smmu = master->smmu;
 
-	/* Don't enable ATS at the endpoint if it's not enabled in the STE */
-	if (!master->ats_enabled)
-		return;
-
 	/* Smallest Translation Unit: log2 of the smallest supported granule */
 	stu = __ffs(smmu->pgsize_bitmap);
 	pdev = to_pci_dev(master->dev);
 
-	atomic_inc(&smmu_domain->nr_ats_masters);
 	/*
 	 * ATC invalidation of PASID 0 causes the entire ATC to be flushed.
 	 */
-	arm_smmu_atc_inv_master(master);
+	arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
 	if (pci_enable_ats(pdev, stu))
 		dev_err(master->dev, "Failed to enable ATS (STU %zu)\n", stu);
 }
 
-static void arm_smmu_disable_ats(struct arm_smmu_master *master,
-				 struct arm_smmu_domain *smmu_domain)
-{
-	if (!master->ats_enabled)
-		return;
-
-	pci_disable_ats(to_pci_dev(master->dev));
-	/*
-	 * Ensure ATS is disabled at the endpoint before we issue the
-	 * ATC invalidation via the SMMU.
-	 */
-	wmb();
-	arm_smmu_atc_inv_master(master);
-	atomic_dec(&smmu_domain->nr_ats_masters);
-}
-
 static int arm_smmu_enable_pasid(struct arm_smmu_master *master)
 {
 	int ret;
...@@ -2538,56 +2560,216 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master) ...@@ -2538,56 +2560,216 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
pci_disable_pasid(pdev); pci_disable_pasid(pdev);
} }
static void arm_smmu_detach_dev(struct arm_smmu_master *master) static struct arm_smmu_master_domain *
arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
struct arm_smmu_master *master,
ioasid_t ssid)
{ {
struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev); struct arm_smmu_master_domain *master_domain;
struct arm_smmu_domain *smmu_domain;
lockdep_assert_held(&smmu_domain->devices_lock);
list_for_each_entry(master_domain, &smmu_domain->devices,
devices_elm) {
if (master_domain->master == master &&
master_domain->ssid == ssid)
return master_domain;
}
return NULL;
}
/*
* If the domain uses the smmu_domain->devices list, return the arm_smmu_domain
* structure, otherwise NULL. These domains track attached devices so they can
* issue invalidations.
*/
static struct arm_smmu_domain *
to_smmu_domain_devices(struct iommu_domain *domain)
{
/* The domain can be NULL only when processing the first attach */
if (!domain)
return NULL;
if ((domain->type & __IOMMU_DOMAIN_PAGING) ||
domain->type == IOMMU_DOMAIN_SVA)
return to_smmu_domain(domain);
return NULL;
}
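The devices list maintained by these domains is what ATC invalidation walks. A condensed sketch of that consumer, loosely modelled on arm_smmu_atc_inv_domain() after this series (the nr_ats_masters fast path and error handling are elided; example_atc_inv_domain is a hypothetical name, not the exact implementation):

	/*
	 * Sketch only: walk every (master, ssid) attachment of a paging
	 * domain and queue one ATC invalidation per stream ID.
	 */
	static void example_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
					   unsigned long iova, size_t size)
	{
		struct arm_smmu_master_domain *master_domain;
		struct arm_smmu_cmdq_batch cmds;
		struct arm_smmu_cmdq_ent cmd;
		unsigned long flags;
		int i;

		cmds.num = 0;
		spin_lock_irqsave(&smmu_domain->devices_lock, flags);
		list_for_each_entry(master_domain, &smmu_domain->devices,
				    devices_elm) {
			struct arm_smmu_master *master = master_domain->master;

			if (!master->ats_enabled)
				continue;
			/* Each entry carries the SSID it was attached with */
			arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size,
						&cmd);
			for (i = 0; i < master->num_streams; i++) {
				cmd.atc.sid = master->streams[i].id;
				arm_smmu_cmdq_batch_add(smmu_domain->smmu,
							&cmds, &cmd);
			}
		}
		spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
		arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
	}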
static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
struct iommu_domain *domain,
ioasid_t ssid)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain_devices(domain);
struct arm_smmu_master_domain *master_domain;
unsigned long flags; unsigned long flags;
if (!domain || !(domain->type & __IOMMU_DOMAIN_PAGING)) if (!smmu_domain)
return; return;
smmu_domain = to_smmu_domain(domain);
arm_smmu_disable_ats(master, smmu_domain);
spin_lock_irqsave(&smmu_domain->devices_lock, flags); spin_lock_irqsave(&smmu_domain->devices_lock, flags);
list_del_init(&master->domain_head); master_domain = arm_smmu_find_master_domain(smmu_domain, master, ssid);
if (master_domain) {
list_del(&master_domain->devices_elm);
kfree(master_domain);
if (master->ats_enabled)
atomic_dec(&smmu_domain->nr_ats_masters);
}
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags); spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}
struct arm_smmu_attach_state {
/* Inputs */
struct iommu_domain *old_domain;
struct arm_smmu_master *master;
bool cd_needs_ats;
ioasid_t ssid;
/* Resulting state */
bool ats_enabled;
};
/*
* Start the sequence to attach a domain to a master. The sequence contains three
* steps:
* arm_smmu_attach_prepare()
* arm_smmu_install_ste_for_dev()
* arm_smmu_attach_commit()
*
* If prepare succeeds then the sequence must be completed. The STE installed
* must set the STE.EATS field according to state.ats_enabled.
*
* If the device supports ATS then this determines if EATS should be enabled
* in the STE, and starts sequencing EATS disable if required.
*
 * This sequence orders the update of STE.EATS and the PCI ATS config space
 * correctly, so that whenever PCI ATS is enabled at the endpoint,
 * STE.EATS is enabled as well.
*
* new_domain can be a non-paging domain. In this case ATS will not be enabled,
* and invalidations won't be tracked.
*/
static int arm_smmu_attach_prepare(struct arm_smmu_attach_state *state,
struct iommu_domain *new_domain)
{
struct arm_smmu_master *master = state->master;
struct arm_smmu_master_domain *master_domain;
struct arm_smmu_domain *smmu_domain =
to_smmu_domain_devices(new_domain);
unsigned long flags;
/*
* arm_smmu_share_asid() must not see two domains pointing to the same
* arm_smmu_master_domain contents; otherwise it could randomly write one
* or the other to the CD.
*/
lockdep_assert_held(&arm_smmu_asid_lock);
if (smmu_domain || state->cd_needs_ats) {
/*
* The SMMU does not support enabling ATS with bypass/abort.
* When the STE is in bypass (STE.Config[2:0] == 0b100), ATS
* Translation Requests and Translated transactions are denied
* as though ATS is disabled for the stream (STE.EATS == 0b00),
* causing F_BAD_ATS_TREQ and F_TRANSL_FORBIDDEN events
* (IHI0070Ea 5.2 Stream Table Entry). Thus ATS can only be
* enabled if we have arm_smmu_domain, those always have page
* tables.
*/
state->ats_enabled = arm_smmu_ats_supported(master);
}
if (smmu_domain) {
master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
if (!master_domain)
return -ENOMEM;
master_domain->master = master;
master_domain->ssid = state->ssid;
master->ats_enabled = false; /*
* During prepare we want the current smmu_domain and new
* smmu_domain to be in the devices list before we change any
* HW. This ensures that both domains will send ATS
* invalidations to the master until we are done.
*
* It is tempting to make this list only track masters that are
* using ATS, but arm_smmu_share_asid() also uses this to change
* the ASID of a domain, unrelated to ATS.
*
* Notice if we are re-attaching the same domain then the list
* will have two identical entries and commit will remove only
* one of them.
*/
spin_lock_irqsave(&smmu_domain->devices_lock, flags);
if (state->ats_enabled)
atomic_inc(&smmu_domain->nr_ats_masters);
list_add(&master_domain->devices_elm, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
}
if (!state->ats_enabled && master->ats_enabled) {
pci_disable_ats(to_pci_dev(master->dev));
/*
* This is probably overkill, but the config write for disabling
* ATS should complete before the STE is configured to generate
* UR to avoid AER noise.
*/
wmb();
}
return 0;
}
/*
* Commit is done after the STE/CD are configured with the EATS setting. It
* completes synchronizing the PCI device's ATC and finishes manipulating the
* smmu_domain->devices list.
*/
static void arm_smmu_attach_commit(struct arm_smmu_attach_state *state)
{
struct arm_smmu_master *master = state->master;
lockdep_assert_held(&arm_smmu_asid_lock);
if (state->ats_enabled && !master->ats_enabled) {
arm_smmu_enable_ats(master);
} else if (state->ats_enabled && master->ats_enabled) {
/*
* The translation has changed, flush the ATC. At this point the
* SMMU is translating for the new domain and both the old&new
* domain will issue invalidations.
*/
arm_smmu_atc_inv_master(master, state->ssid);
} else if (!state->ats_enabled && master->ats_enabled) {
/* ATS is being switched off, invalidate the entire ATC */
arm_smmu_atc_inv_master(master, IOMMU_NO_PASID);
}
master->ats_enabled = state->ats_enabled;
arm_smmu_remove_master_domain(master, state->old_domain, state->ssid);
} }
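Taken together, prepare/install/commit are always used in the same shape. A minimal caller sketch, assuming an already-finalised stage-2 paging domain (example_attach is a hypothetical name; arm_smmu_attach_dev below is the real RID-attach path):

	static int example_attach(struct arm_smmu_master *master,
				  struct iommu_domain *new_domain)
	{
		struct arm_smmu_attach_state state = {
			.master = master,
			.old_domain = iommu_get_domain_for_dev(master->dev),
			.ssid = IOMMU_NO_PASID,
		};
		struct arm_smmu_ste ste;
		int ret;

		mutex_lock(&arm_smmu_asid_lock);
		ret = arm_smmu_attach_prepare(&state, new_domain);	/* step 1 */
		if (ret)
			goto out_unlock;
		/* The installed STE must honour state.ats_enabled in STE.EATS */
		arm_smmu_make_s2_domain_ste(&ste, master,
					    to_smmu_domain(new_domain),
					    state.ats_enabled);
		arm_smmu_install_ste_for_dev(master, &ste);		/* step 2 */
		arm_smmu_attach_commit(&state);				/* step 3 */
	out_unlock:
		mutex_unlock(&arm_smmu_asid_lock);
		return ret;
	}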
static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
{ {
int ret = 0; int ret = 0;
unsigned long flags;
struct arm_smmu_ste target; struct arm_smmu_ste target;
struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev); struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_attach_state state = {
.old_domain = iommu_get_domain_for_dev(dev),
.ssid = IOMMU_NO_PASID,
};
struct arm_smmu_master *master; struct arm_smmu_master *master;
struct arm_smmu_cd *cdptr; struct arm_smmu_cd *cdptr;
if (!fwspec) if (!fwspec)
return -ENOENT; return -ENOENT;
master = dev_iommu_priv_get(dev); state.master = master = dev_iommu_priv_get(dev);
smmu = master->smmu; smmu = master->smmu;
/*
* Checking that SVA is disabled ensures that this device isn't bound to
* any mm, and can be safely detached from its old domain. Bonds cannot
* be removed concurrently since we're holding the group mutex.
*/
if (arm_smmu_master_sva_enabled(master)) {
dev_err(dev, "cannot attach - SVA enabled\n");
return -EBUSY;
}
mutex_lock(&smmu_domain->init_mutex); mutex_lock(&smmu_domain->init_mutex);
if (!smmu_domain->smmu) { if (!smmu_domain->smmu) {
ret = arm_smmu_domain_finalise(smmu_domain, smmu); ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
} else if (smmu_domain->smmu != smmu) } else if (smmu_domain->smmu != smmu)
ret = -EINVAL; ret = -EINVAL;
...@@ -2599,7 +2781,8 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -2599,7 +2781,8 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID); cdptr = arm_smmu_alloc_cd_ptr(master, IOMMU_NO_PASID);
if (!cdptr) if (!cdptr)
return -ENOMEM; return -ENOMEM;
} } else if (arm_smmu_ssids_in_use(&master->cd_table))
return -EBUSY;
/* /*
* Prevent arm_smmu_share_asid() from trying to change the ASID * Prevent arm_smmu_share_asid() from trying to change the ASID
...@@ -2609,13 +2792,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -2609,13 +2792,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
*/ */
mutex_lock(&arm_smmu_asid_lock); mutex_lock(&arm_smmu_asid_lock);
arm_smmu_detach_dev(master); ret = arm_smmu_attach_prepare(&state, domain);
if (ret) {
master->ats_enabled = arm_smmu_ats_supported(master); mutex_unlock(&arm_smmu_asid_lock);
return ret;
spin_lock_irqsave(&smmu_domain->devices_lock, flags); }
list_add(&master->domain_head, &smmu_domain->devices);
spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
switch (smmu_domain->stage) { switch (smmu_domain->stage) {
case ARM_SMMU_DOMAIN_S1: { case ARM_SMMU_DOMAIN_S1: {
...@@ -2624,29 +2805,172 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) ...@@ -2624,29 +2805,172 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
arm_smmu_make_s1_cd(&target_cd, master, smmu_domain); arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr, arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
&target_cd); &target_cd);
arm_smmu_make_cdtable_ste(&target, master); arm_smmu_make_cdtable_ste(&target, master, state.ats_enabled,
STRTAB_STE_1_S1DSS_SSID0);
arm_smmu_install_ste_for_dev(master, &target); arm_smmu_install_ste_for_dev(master, &target);
break; break;
} }
case ARM_SMMU_DOMAIN_S2: case ARM_SMMU_DOMAIN_S2:
arm_smmu_make_s2_domain_ste(&target, master, smmu_domain); arm_smmu_make_s2_domain_ste(&target, master, smmu_domain,
state.ats_enabled);
arm_smmu_install_ste_for_dev(master, &target); arm_smmu_install_ste_for_dev(master, &target);
arm_smmu_clear_cd(master, IOMMU_NO_PASID); arm_smmu_clear_cd(master, IOMMU_NO_PASID);
break; break;
} }
arm_smmu_enable_ats(master, smmu_domain); arm_smmu_attach_commit(&state);
mutex_unlock(&arm_smmu_asid_lock); mutex_unlock(&arm_smmu_asid_lock);
return 0; return 0;
} }
static int arm_smmu_attach_dev_ste(struct device *dev, static int arm_smmu_s1_set_dev_pasid(struct iommu_domain *domain,
struct arm_smmu_ste *ste) struct device *dev, ioasid_t id)
{ {
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_master *master = dev_iommu_priv_get(dev); struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_device *smmu = master->smmu;
struct arm_smmu_cd target_cd;
int ret = 0;
if (arm_smmu_master_sva_enabled(master)) mutex_lock(&smmu_domain->init_mutex);
return -EBUSY; if (!smmu_domain->smmu)
ret = arm_smmu_domain_finalise(smmu_domain, smmu, 0);
else if (smmu_domain->smmu != smmu)
ret = -EINVAL;
mutex_unlock(&smmu_domain->init_mutex);
if (ret)
return ret;
if (smmu_domain->stage != ARM_SMMU_DOMAIN_S1)
return -EINVAL;
/*
* We can read cd.asid outside the lock because arm_smmu_set_pasid()
* will fix it up.
*/
arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
return arm_smmu_set_pasid(master, to_smmu_domain(domain), id,
&target_cd);
}
static void arm_smmu_update_ste(struct arm_smmu_master *master,
struct iommu_domain *sid_domain,
bool ats_enabled)
{
unsigned int s1dss = STRTAB_STE_1_S1DSS_TERMINATE;
struct arm_smmu_ste ste;
if (master->cd_table.in_ste && master->ste_ats_enabled == ats_enabled)
return;
if (sid_domain->type == IOMMU_DOMAIN_IDENTITY)
s1dss = STRTAB_STE_1_S1DSS_BYPASS;
else
WARN_ON(sid_domain->type != IOMMU_DOMAIN_BLOCKED);
/*
* Change the STE into a cdtable one with SID IDENTITY/BLOCKED behavior
* using s1dss if necessary. If the cd_table is already installed then
* the S1DSS is correct and this will just update the EATS. Otherwise it
* installs the entire thing. This will be hitless.
*/
arm_smmu_make_cdtable_ste(&ste, master, ats_enabled, s1dss);
arm_smmu_install_ste_for_dev(master, &ste);
}
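S1DSS controls what happens to transactions that carry no substream ID while the STE still points at a CD table; that is what lets the RID present IDENTITY or BLOCKED behaviour while PASIDs remain attached. An illustrative decode of the three values used here (per the SMMUv3 STE description, IHI0070 5.2; the helper is hypothetical):

	/* Illustrative only: meaning of each S1DSS value for SSID-less
	 * (RID) traffic when the STE is an S1 CD-table configuration. */
	static const char *example_s1dss_meaning(unsigned int s1dss)
	{
		switch (s1dss) {
		case STRTAB_STE_1_S1DSS_TERMINATE:	/* 0b00 */
			return "aborted (BLOCKED-like RID behaviour)";
		case STRTAB_STE_1_S1DSS_BYPASS:		/* 0b01 */
			return "bypasses stage 1 (IDENTITY-like RID behaviour)";
		case STRTAB_STE_1_S1DSS_SSID0:		/* 0b10 */
			return "translated through the substream 0 CD";
		default:
			return "reserved";
		}
	}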
int arm_smmu_set_pasid(struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
struct arm_smmu_cd *cd)
{
struct iommu_domain *sid_domain = iommu_get_domain_for_dev(master->dev);
struct arm_smmu_attach_state state = {
.master = master,
/*
* For now the core code prevents calling this when a domain is
* already attached, so there is no need to set old_domain.
*/
.ssid = pasid,
};
struct arm_smmu_cd *cdptr;
int ret;
/* The core code validates pasid */
if (smmu_domain->smmu != master->smmu)
return -EINVAL;
if (!master->cd_table.in_ste &&
sid_domain->type != IOMMU_DOMAIN_IDENTITY &&
sid_domain->type != IOMMU_DOMAIN_BLOCKED)
return -EINVAL;
cdptr = arm_smmu_alloc_cd_ptr(master, pasid);
if (!cdptr)
return -ENOMEM;
mutex_lock(&arm_smmu_asid_lock);
ret = arm_smmu_attach_prepare(&state, &smmu_domain->domain);
if (ret)
goto out_unlock;
/*
* We don't want to take the asid_lock too early, so fix up the
* caller-set ASID under the lock in case it changed.
*/
cd->data[0] &= ~cpu_to_le64(CTXDESC_CD_0_ASID);
cd->data[0] |= cpu_to_le64(
FIELD_PREP(CTXDESC_CD_0_ASID, smmu_domain->cd.asid));
arm_smmu_write_cd_entry(master, pasid, cdptr, cd);
arm_smmu_update_ste(master, sid_domain, state.ats_enabled);
arm_smmu_attach_commit(&state);
out_unlock:
mutex_unlock(&arm_smmu_asid_lock);
return ret;
}
static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
struct iommu_domain *domain)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_domain *smmu_domain;
smmu_domain = to_smmu_domain(domain);
mutex_lock(&arm_smmu_asid_lock);
arm_smmu_clear_cd(master, pasid);
if (master->ats_enabled)
arm_smmu_atc_inv_master(master, pasid);
arm_smmu_remove_master_domain(master, &smmu_domain->domain, pasid);
mutex_unlock(&arm_smmu_asid_lock);
/*
* When the last user of the CD table goes away, downgrade the STE back
* to a non-cd_table one.
*/
if (!arm_smmu_ssids_in_use(&master->cd_table)) {
struct iommu_domain *sid_domain =
iommu_get_domain_for_dev(master->dev);
if (sid_domain->type == IOMMU_DOMAIN_IDENTITY ||
sid_domain->type == IOMMU_DOMAIN_BLOCKED)
sid_domain->ops->attach_dev(sid_domain, dev);
}
}
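From the driver-facing side, these two hooks are reached through the core PASID API. A hedged usage sketch (device/domain setup elided; the three-argument core entry points are assumed from the iommu API of this kernel generation, and example_pasid_bind is a hypothetical name):

	/* Attach an S1 paging domain to PASID 5, then tear it down.
	 * iommu_attach_device_pasid() lands in arm_smmu_s1_set_dev_pasid()
	 * and iommu_detach_device_pasid() in arm_smmu_remove_dev_pasid(). */
	static int example_pasid_bind(struct iommu_domain *domain,
				      struct device *dev)
	{
		int ret;

		ret = iommu_attach_device_pasid(domain, dev, 5);
		if (ret)
			return ret;
		/* ... DMA tagged with PASID 5 now translates via 'domain' ... */
		iommu_detach_device_pasid(domain, dev, 5);
		return 0;
	}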
static void arm_smmu_attach_dev_ste(struct iommu_domain *domain,
struct device *dev,
struct arm_smmu_ste *ste,
unsigned int s1dss)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
struct arm_smmu_attach_state state = {
.master = master,
.old_domain = iommu_get_domain_for_dev(dev),
.ssid = IOMMU_NO_PASID,
};
/* /*
* Do not allow any ASID to be changed while we are working on the STE, * Do not allow any ASID to be changed while we are working on the STE,
...@@ -2655,15 +2979,23 @@ static int arm_smmu_attach_dev_ste(struct device *dev, ...@@ -2655,15 +2979,23 @@ static int arm_smmu_attach_dev_ste(struct device *dev,
mutex_lock(&arm_smmu_asid_lock); mutex_lock(&arm_smmu_asid_lock);
/* /*
* The SMMU does not support enabling ATS with bypass/abort. When the * If the CD table is not in use we can use the provided STE, otherwise
* STE is in bypass (STE.Config[2:0] == 0b100), ATS Translation Requests * we use a cdtable STE with the provided S1DSS.
* and Translated transactions are denied as though ATS is disabled for
* the stream (STE.EATS == 0b00), causing F_BAD_ATS_TREQ and
* F_TRANSL_FORBIDDEN events (IHI0070Ea 5.2 Stream Table Entry).
*/ */
arm_smmu_detach_dev(master); if (arm_smmu_ssids_in_use(&master->cd_table)) {
/*
* If a CD table has to be present then we need to run with ATS
* on even though the RID will fail ATS queries with UR. This is
* because we have no idea what the PASIDs need.
*/
state.cd_needs_ats = true;
arm_smmu_attach_prepare(&state, domain);
arm_smmu_make_cdtable_ste(ste, master, state.ats_enabled, s1dss);
} else {
arm_smmu_attach_prepare(&state, domain);
}
arm_smmu_install_ste_for_dev(master, ste); arm_smmu_install_ste_for_dev(master, ste);
arm_smmu_attach_commit(&state);
mutex_unlock(&arm_smmu_asid_lock); mutex_unlock(&arm_smmu_asid_lock);
/* /*
...@@ -2672,7 +3004,6 @@ static int arm_smmu_attach_dev_ste(struct device *dev, ...@@ -2672,7 +3004,6 @@ static int arm_smmu_attach_dev_ste(struct device *dev,
* descriptor from arm_smmu_share_asid(). * descriptor from arm_smmu_share_asid().
*/ */
arm_smmu_clear_cd(master, IOMMU_NO_PASID); arm_smmu_clear_cd(master, IOMMU_NO_PASID);
return 0;
} }
static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
...@@ -2682,7 +3013,8 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain, ...@@ -2682,7 +3013,8 @@ static int arm_smmu_attach_dev_identity(struct iommu_domain *domain,
struct arm_smmu_master *master = dev_iommu_priv_get(dev); struct arm_smmu_master *master = dev_iommu_priv_get(dev);
arm_smmu_make_bypass_ste(master->smmu, &ste); arm_smmu_make_bypass_ste(master->smmu, &ste);
return arm_smmu_attach_dev_ste(dev, &ste); arm_smmu_attach_dev_ste(domain, dev, &ste, STRTAB_STE_1_S1DSS_BYPASS);
return 0;
} }
static const struct iommu_domain_ops arm_smmu_identity_ops = { static const struct iommu_domain_ops arm_smmu_identity_ops = {
...@@ -2700,7 +3032,9 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain, ...@@ -2700,7 +3032,9 @@ static int arm_smmu_attach_dev_blocked(struct iommu_domain *domain,
struct arm_smmu_ste ste; struct arm_smmu_ste ste;
arm_smmu_make_abort_ste(&ste); arm_smmu_make_abort_ste(&ste);
return arm_smmu_attach_dev_ste(dev, &ste); arm_smmu_attach_dev_ste(domain, dev, &ste,
STRTAB_STE_1_S1DSS_TERMINATE);
return 0;
} }
static const struct iommu_domain_ops arm_smmu_blocked_ops = { static const struct iommu_domain_ops arm_smmu_blocked_ops = {
...@@ -2712,6 +3046,37 @@ static struct iommu_domain arm_smmu_blocked_domain = { ...@@ -2712,6 +3046,37 @@ static struct iommu_domain arm_smmu_blocked_domain = {
.ops = &arm_smmu_blocked_ops, .ops = &arm_smmu_blocked_ops,
}; };
static struct iommu_domain *
arm_smmu_domain_alloc_user(struct device *dev, u32 flags,
struct iommu_domain *parent,
const struct iommu_user_data *user_data)
{
struct arm_smmu_master *master = dev_iommu_priv_get(dev);
const u32 PAGING_FLAGS = IOMMU_HWPT_ALLOC_DIRTY_TRACKING;
struct arm_smmu_domain *smmu_domain;
int ret;
if (flags & ~PAGING_FLAGS)
return ERR_PTR(-EOPNOTSUPP);
if (parent || user_data)
return ERR_PTR(-EOPNOTSUPP);
smmu_domain = arm_smmu_domain_alloc();
if (!smmu_domain)
return ERR_PTR(-ENOMEM);
smmu_domain->domain.type = IOMMU_DOMAIN_UNMANAGED;
smmu_domain->domain.ops = arm_smmu_ops.default_domain_ops;
ret = arm_smmu_domain_finalise(smmu_domain, master->smmu, flags);
if (ret)
goto err_free;
return &smmu_domain->domain;
err_free:
kfree(smmu_domain);
return ERR_PTR(ret);
}
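domain_alloc_user is reached from iommufd. A rough userspace sketch of requesting a dirty-tracking-capable HWPT (the ioctl name, struct layout, and IOMMU_HWPT_ALLOC_DIRTY_TRACKING flag are assumed from the iommufd uAPI of this era; iommufd_fd, dev_id and ioas_id are placeholders):

	#include <sys/ioctl.h>
	#include <linux/iommufd.h>

	/* The kernel forwards IOMMU_HWPT_ALLOC_DIRTY_TRACKING down to
	 * arm_smmu_domain_alloc_user() via flags. */
	static int alloc_dirty_hwpt(int iommufd_fd, __u32 dev_id, __u32 ioas_id,
				    __u32 *out_hwpt_id)
	{
		struct iommu_hwpt_alloc cmd = {
			.size = sizeof(cmd),
			.flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
			.dev_id = dev_id,
			.pt_id = ioas_id,
		};
		int ret = ioctl(iommufd_fd, IOMMU_HWPT_ALLOC, &cmd);

		if (!ret)
			*out_hwpt_id = cmd.out_hwpt_id;
		return ret;
	}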
static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova, static int arm_smmu_map_pages(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t pgsize, size_t pgcount, phys_addr_t paddr, size_t pgsize, size_t pgcount,
int prot, gfp_t gfp, size_t *mapped) int prot, gfp_t gfp, size_t *mapped)
...@@ -2882,8 +3247,6 @@ static void arm_smmu_remove_master(struct arm_smmu_master *master) ...@@ -2882,8 +3247,6 @@ static void arm_smmu_remove_master(struct arm_smmu_master *master)
kfree(master->streams); kfree(master->streams);
} }
static struct iommu_ops arm_smmu_ops;
static struct iommu_device *arm_smmu_probe_device(struct device *dev) static struct iommu_device *arm_smmu_probe_device(struct device *dev)
{ {
int ret; int ret;
...@@ -2904,8 +3267,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev) ...@@ -2904,8 +3267,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
master->dev = dev; master->dev = dev;
master->smmu = smmu; master->smmu = smmu;
INIT_LIST_HEAD(&master->bonds);
INIT_LIST_HEAD(&master->domain_head);
dev_iommu_priv_set(dev, master); dev_iommu_priv_set(dev, master);
ret = arm_smmu_insert_master(smmu, master); ret = arm_smmu_insert_master(smmu, master);
...@@ -2961,6 +3322,27 @@ static void arm_smmu_release_device(struct device *dev) ...@@ -2961,6 +3322,27 @@ static void arm_smmu_release_device(struct device *dev)
kfree(master); kfree(master);
} }
static int arm_smmu_read_and_clear_dirty(struct iommu_domain *domain,
unsigned long iova, size_t size,
unsigned long flags,
struct iommu_dirty_bitmap *dirty)
{
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
return ops->read_and_clear_dirty(ops, iova, size, flags, dirty);
}
static int arm_smmu_set_dirty_tracking(struct iommu_domain *domain,
bool enabled)
{
/*
* Dirty tracking is always enabled in the hardware and the dirty bitmap
* is cleared prior to set_dirty_tracking(), so there is nothing to do.
*/
return 0;
}
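A sketch of how a caller such as iommufd would drive read_and_clear_dirty through a domain (iova_bitmap setup elided; assumes the iommu_dirty_bitmap helpers of this kernel, and example_read_dirty is a hypothetical name):

	static int example_read_dirty(struct iommu_domain *domain,
				      struct iova_bitmap *bitmap,
				      unsigned long iova, size_t size)
	{
		struct iommu_iotlb_gather gather;
		struct iommu_dirty_bitmap dirty;
		int ret;

		iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
		/* Collects dirty PTEs and clears their writeable-dirty state */
		ret = domain->dirty_ops->read_and_clear_dirty(domain, iova,
							      size, 0, &dirty);
		iommu_iotlb_sync(domain, &gather);
		return ret;
	}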
static struct iommu_group *arm_smmu_device_group(struct device *dev) static struct iommu_group *arm_smmu_device_group(struct device *dev)
{ {
struct iommu_group *group; struct iommu_group *group;
...@@ -3087,18 +3469,13 @@ static int arm_smmu_def_domain_type(struct device *dev) ...@@ -3087,18 +3469,13 @@ static int arm_smmu_def_domain_type(struct device *dev)
return 0; return 0;
} }
static void arm_smmu_remove_dev_pasid(struct device *dev, ioasid_t pasid,
struct iommu_domain *domain)
{
arm_smmu_sva_remove_dev_pasid(domain, dev, pasid);
}
static struct iommu_ops arm_smmu_ops = { static struct iommu_ops arm_smmu_ops = {
.identity_domain = &arm_smmu_identity_domain, .identity_domain = &arm_smmu_identity_domain,
.blocked_domain = &arm_smmu_blocked_domain, .blocked_domain = &arm_smmu_blocked_domain,
.capable = arm_smmu_capable, .capable = arm_smmu_capable,
.domain_alloc = arm_smmu_domain_alloc,
.domain_alloc_paging = arm_smmu_domain_alloc_paging, .domain_alloc_paging = arm_smmu_domain_alloc_paging,
.domain_alloc_sva = arm_smmu_sva_domain_alloc,
.domain_alloc_user = arm_smmu_domain_alloc_user,
.probe_device = arm_smmu_probe_device, .probe_device = arm_smmu_probe_device,
.release_device = arm_smmu_release_device, .release_device = arm_smmu_release_device,
.device_group = arm_smmu_device_group, .device_group = arm_smmu_device_group,
...@@ -3113,16 +3490,22 @@ static struct iommu_ops arm_smmu_ops = { ...@@ -3113,16 +3490,22 @@ static struct iommu_ops arm_smmu_ops = {
.owner = THIS_MODULE, .owner = THIS_MODULE,
.default_domain_ops = &(const struct iommu_domain_ops) { .default_domain_ops = &(const struct iommu_domain_ops) {
.attach_dev = arm_smmu_attach_dev, .attach_dev = arm_smmu_attach_dev,
.set_dev_pasid = arm_smmu_s1_set_dev_pasid,
.map_pages = arm_smmu_map_pages, .map_pages = arm_smmu_map_pages,
.unmap_pages = arm_smmu_unmap_pages, .unmap_pages = arm_smmu_unmap_pages,
.flush_iotlb_all = arm_smmu_flush_iotlb_all, .flush_iotlb_all = arm_smmu_flush_iotlb_all,
.iotlb_sync = arm_smmu_iotlb_sync, .iotlb_sync = arm_smmu_iotlb_sync,
.iova_to_phys = arm_smmu_iova_to_phys, .iova_to_phys = arm_smmu_iova_to_phys,
.enable_nesting = arm_smmu_enable_nesting, .enable_nesting = arm_smmu_enable_nesting,
.free = arm_smmu_domain_free, .free = arm_smmu_domain_free_paging,
} }
}; };
static struct iommu_dirty_ops arm_smmu_dirty_ops = {
.read_and_clear_dirty = arm_smmu_read_and_clear_dirty,
.set_dirty_tracking = arm_smmu_set_dirty_tracking,
};
/* Probing and initialisation functions */ /* Probing and initialisation functions */
static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu, static int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
struct arm_smmu_queue *q, struct arm_smmu_queue *q,
...@@ -3221,25 +3604,6 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu) ...@@ -3221,25 +3604,6 @@ static int arm_smmu_init_queues(struct arm_smmu_device *smmu)
PRIQ_ENT_DWORDS, "priq"); PRIQ_ENT_DWORDS, "priq");
} }
static int arm_smmu_init_l1_strtab(struct arm_smmu_device *smmu)
{
unsigned int i;
struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
void *strtab = smmu->strtab_cfg.strtab;
cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
sizeof(*cfg->l1_desc), GFP_KERNEL);
if (!cfg->l1_desc)
return -ENOMEM;
for (i = 0; i < cfg->num_l1_ents; ++i) {
arm_smmu_write_strtab_l1_desc(strtab, &cfg->l1_desc[i]);
strtab += STRTAB_L1_DESC_DWORDS << 3;
}
return 0;
}
static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
{ {
void *strtab; void *strtab;
...@@ -3275,7 +3639,12 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu) ...@@ -3275,7 +3639,12 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT); reg |= FIELD_PREP(STRTAB_BASE_CFG_SPLIT, STRTAB_SPLIT);
cfg->strtab_base_cfg = reg; cfg->strtab_base_cfg = reg;
return arm_smmu_init_l1_strtab(smmu); cfg->l1_desc = devm_kcalloc(smmu->dev, cfg->num_l1_ents,
sizeof(*cfg->l1_desc), GFP_KERNEL);
if (!cfg->l1_desc)
return -ENOMEM;
return 0;
} }
static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu) static int arm_smmu_init_strtab_linear(struct arm_smmu_device *smmu)
...@@ -3698,6 +4067,28 @@ static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu) ...@@ -3698,6 +4067,28 @@ static void arm_smmu_device_iidr_probe(struct arm_smmu_device *smmu)
} }
} }
static void arm_smmu_get_httu(struct arm_smmu_device *smmu, u32 reg)
{
u32 fw_features = smmu->features & (ARM_SMMU_FEAT_HA | ARM_SMMU_FEAT_HD);
u32 hw_features = 0;
switch (FIELD_GET(IDR0_HTTU, reg)) {
case IDR0_HTTU_ACCESS_DIRTY:
hw_features |= ARM_SMMU_FEAT_HD;
fallthrough;
case IDR0_HTTU_ACCESS:
hw_features |= ARM_SMMU_FEAT_HA;
}
if (smmu->dev->of_node)
smmu->features |= hw_features;
else if (hw_features != fw_features)
/* ACPI IORT sets the HTTU bits */
dev_warn(smmu->dev,
"IDR0.HTTU features(0x%x) overridden by FW configuration (0x%x)\n",
hw_features, fw_features);
}
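Once detected, the HA/HD feature bits gate the hardware-update fields written into each stage-1 CD. A sketch of that gating against the CTXDESC_CD_0_TCR_HA/HD definitions added to the header below (example_cd_set_httu is illustrative; in the series the equivalent logic lives in the CD-construction path):

	/* Enable hardware access/dirty updates in a CD only when the SMMU
	 * advertised them; HD additionally depends on HA architecturally. */
	static void example_cd_set_httu(struct arm_smmu_cd *target,
					struct arm_smmu_device *smmu)
	{
		if (smmu->features & ARM_SMMU_FEAT_HA)
			target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HA);
		if (smmu->features & ARM_SMMU_FEAT_HD)
			target->data[0] |= cpu_to_le64(CTXDESC_CD_0_TCR_HD);
	}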
static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
{ {
u32 reg; u32 reg;
...@@ -3758,6 +4149,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu) ...@@ -3758,6 +4149,8 @@ static int arm_smmu_device_hw_probe(struct arm_smmu_device *smmu)
smmu->features |= ARM_SMMU_FEAT_E2H; smmu->features |= ARM_SMMU_FEAT_E2H;
} }
arm_smmu_get_httu(smmu, reg);
/* /*
* The coherency feature as set by FW is used in preference to the ID * The coherency feature as set by FW is used in preference to the ID
* register, but warn on mismatch. * register, but warn on mismatch.
...@@ -3953,6 +4346,14 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev, ...@@ -3953,6 +4346,14 @@ static int arm_smmu_device_acpi_probe(struct platform_device *pdev,
if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE) if (iort_smmu->flags & ACPI_IORT_SMMU_V3_COHACC_OVERRIDE)
smmu->features |= ARM_SMMU_FEAT_COHERENCY; smmu->features |= ARM_SMMU_FEAT_COHERENCY;
switch (FIELD_GET(ACPI_IORT_SMMU_V3_HTTU_OVERRIDE, iort_smmu->flags)) {
case IDR0_HTTU_ACCESS_DIRTY:
smmu->features |= ARM_SMMU_FEAT_HD;
fallthrough;
case IDR0_HTTU_ACCESS:
smmu->features |= ARM_SMMU_FEAT_HA;
}
return 0; return 0;
} }
#else #else
......
...@@ -33,6 +33,9 @@ ...@@ -33,6 +33,9 @@
#define IDR0_ASID16 (1 << 12) #define IDR0_ASID16 (1 << 12)
#define IDR0_ATS (1 << 10) #define IDR0_ATS (1 << 10)
#define IDR0_HYP (1 << 9) #define IDR0_HYP (1 << 9)
#define IDR0_HTTU GENMASK(7, 6)
#define IDR0_HTTU_ACCESS 1
#define IDR0_HTTU_ACCESS_DIRTY 2
#define IDR0_COHACC (1 << 4) #define IDR0_COHACC (1 << 4)
#define IDR0_TTF GENMASK(3, 2) #define IDR0_TTF GENMASK(3, 2)
#define IDR0_TTF_AARCH64 2 #define IDR0_TTF_AARCH64 2
...@@ -301,6 +304,9 @@ struct arm_smmu_cd { ...@@ -301,6 +304,9 @@ struct arm_smmu_cd {
#define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32) #define CTXDESC_CD_0_TCR_IPS GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38) #define CTXDESC_CD_0_TCR_TBI0 (1ULL << 38)
#define CTXDESC_CD_0_TCR_HA (1UL << 43)
#define CTXDESC_CD_0_TCR_HD (1UL << 42)
#define CTXDESC_CD_0_AA64 (1UL << 41) #define CTXDESC_CD_0_AA64 (1UL << 41)
#define CTXDESC_CD_0_S (1UL << 44) #define CTXDESC_CD_0_S (1UL << 44)
#define CTXDESC_CD_0_R (1UL << 45) #define CTXDESC_CD_0_R (1UL << 45)
...@@ -579,17 +585,11 @@ struct arm_smmu_priq { ...@@ -579,17 +585,11 @@ struct arm_smmu_priq {
/* High-level stream table and context descriptor structures */ /* High-level stream table and context descriptor structures */
struct arm_smmu_strtab_l1_desc { struct arm_smmu_strtab_l1_desc {
u8 span;
struct arm_smmu_ste *l2ptr; struct arm_smmu_ste *l2ptr;
dma_addr_t l2ptr_dma;
}; };
struct arm_smmu_ctx_desc { struct arm_smmu_ctx_desc {
u16 asid; u16 asid;
refcount_t refs;
struct mm_struct *mm;
}; };
struct arm_smmu_l1_ctx_desc { struct arm_smmu_l1_ctx_desc {
...@@ -602,11 +602,19 @@ struct arm_smmu_ctx_desc_cfg { ...@@ -602,11 +602,19 @@ struct arm_smmu_ctx_desc_cfg {
dma_addr_t cdtab_dma; dma_addr_t cdtab_dma;
struct arm_smmu_l1_ctx_desc *l1_desc; struct arm_smmu_l1_ctx_desc *l1_desc;
unsigned int num_l1_ents; unsigned int num_l1_ents;
unsigned int used_ssids;
u8 in_ste;
u8 s1fmt; u8 s1fmt;
/* log2 of the maximum number of CDs supported by this table */ /* log2 of the maximum number of CDs supported by this table */
u8 s1cdmax; u8 s1cdmax;
}; };
/* True if the cd table has SSIDs > 0 in use. */
static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
{
return cd_table->used_ssids;
}
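used_ssids is maintained wherever a CD with SSID != 0 transitions between valid and invalid. A sketch of the accounting, assuming it is folded into the CD write path as this series does (example_account_ssid is a hypothetical name):

	/* Count SSID != 0 CDs as they transition between valid and invalid */
	static void example_account_ssid(struct arm_smmu_master *master,
					 int ssid, bool cur_valid,
					 bool target_valid)
	{
		if (ssid != IOMMU_NO_PASID && cur_valid != target_valid) {
			if (cur_valid)
				master->cd_table.used_ssids--;
			else
				master->cd_table.used_ssids++;
		}
	}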
struct arm_smmu_s2_cfg { struct arm_smmu_s2_cfg {
u16 vmid; u16 vmid;
}; };
...@@ -648,6 +656,8 @@ struct arm_smmu_device { ...@@ -648,6 +656,8 @@ struct arm_smmu_device {
#define ARM_SMMU_FEAT_E2H (1 << 18) #define ARM_SMMU_FEAT_E2H (1 << 18)
#define ARM_SMMU_FEAT_NESTING (1 << 19) #define ARM_SMMU_FEAT_NESTING (1 << 19)
#define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20) #define ARM_SMMU_FEAT_ATTR_TYPES_OVR (1 << 20)
#define ARM_SMMU_FEAT_HA (1 << 21)
#define ARM_SMMU_FEAT_HD (1 << 22)
u32 features; u32 features;
#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0) #define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
...@@ -696,16 +706,15 @@ struct arm_smmu_stream { ...@@ -696,16 +706,15 @@ struct arm_smmu_stream {
struct arm_smmu_master { struct arm_smmu_master {
struct arm_smmu_device *smmu; struct arm_smmu_device *smmu;
struct device *dev; struct device *dev;
struct list_head domain_head;
struct arm_smmu_stream *streams; struct arm_smmu_stream *streams;
/* Locked by the iommu core using the group mutex */ /* Locked by the iommu core using the group mutex */
struct arm_smmu_ctx_desc_cfg cd_table; struct arm_smmu_ctx_desc_cfg cd_table;
unsigned int num_streams; unsigned int num_streams;
bool ats_enabled; bool ats_enabled : 1;
bool ste_ats_enabled : 1;
bool stall_enabled; bool stall_enabled;
bool sva_enabled; bool sva_enabled;
bool iopf_enabled; bool iopf_enabled;
struct list_head bonds;
unsigned int ssid_bits; unsigned int ssid_bits;
}; };
...@@ -730,10 +739,11 @@ struct arm_smmu_domain { ...@@ -730,10 +739,11 @@ struct arm_smmu_domain {
struct iommu_domain domain; struct iommu_domain domain;
/* List of struct arm_smmu_master_domain */
struct list_head devices; struct list_head devices;
spinlock_t devices_lock; spinlock_t devices_lock;
struct list_head mmu_notifiers; struct mmu_notifier mmu_notifier;
}; };
/* The following are exposed for testing purposes. */ /* The following are exposed for testing purposes. */
...@@ -757,15 +767,23 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target); ...@@ -757,15 +767,23 @@ void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu, void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
struct arm_smmu_ste *target); struct arm_smmu_ste *target);
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target, void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master); struct arm_smmu_master *master, bool ats_enabled,
unsigned int s1dss);
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target, void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
struct arm_smmu_master *master, struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain); struct arm_smmu_domain *smmu_domain,
bool ats_enabled);
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target, void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
struct arm_smmu_master *master, struct mm_struct *mm, struct arm_smmu_master *master, struct mm_struct *mm,
u16 asid); u16 asid);
#endif #endif
struct arm_smmu_master_domain {
struct list_head devices_elm;
struct arm_smmu_master *master;
ioasid_t ssid;
};
static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{ {
return container_of(dom, struct arm_smmu_domain, domain); return container_of(dom, struct arm_smmu_domain, domain);
...@@ -774,11 +792,11 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) ...@@ -774,11 +792,11 @@ static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
extern struct xarray arm_smmu_asid_xa; extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock; extern struct mutex arm_smmu_asid_lock;
struct arm_smmu_domain *arm_smmu_domain_alloc(void);
void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid); void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master, struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
u32 ssid); u32 ssid);
struct arm_smmu_cd *arm_smmu_alloc_cd_ptr(struct arm_smmu_master *master,
u32 ssid);
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target, void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
struct arm_smmu_master *master, struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain); struct arm_smmu_domain *smmu_domain);
...@@ -786,12 +804,15 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid, ...@@ -786,12 +804,15 @@ void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
struct arm_smmu_cd *cdptr, struct arm_smmu_cd *cdptr,
const struct arm_smmu_cd *target); const struct arm_smmu_cd *target);
int arm_smmu_set_pasid(struct arm_smmu_master *master,
struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
struct arm_smmu_cd *cd);
void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid); void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid, void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
size_t granule, bool leaf, size_t granule, bool leaf,
struct arm_smmu_domain *smmu_domain); struct arm_smmu_domain *smmu_domain);
bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd); int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
unsigned long iova, size_t size); unsigned long iova, size_t size);
#ifdef CONFIG_ARM_SMMU_V3_SVA #ifdef CONFIG_ARM_SMMU_V3_SVA
...@@ -802,9 +823,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master); ...@@ -802,9 +823,8 @@ int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master); int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master); bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void); void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(void); struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, struct mm_struct *mm);
struct device *dev, ioasid_t id);
#else /* CONFIG_ARM_SMMU_V3_SVA */ #else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu) static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{ {
...@@ -838,10 +858,7 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master ...@@ -838,10 +858,7 @@ static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master
static inline void arm_smmu_sva_notifier_synchronize(void) {} static inline void arm_smmu_sva_notifier_synchronize(void) {}
static inline struct iommu_domain *arm_smmu_sva_domain_alloc(void) #define arm_smmu_sva_domain_alloc NULL
{
return NULL;
}
static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain, static inline void arm_smmu_sva_remove_dev_pasid(struct iommu_domain *domain,
struct device *dev, struct device *dev,
......
...@@ -200,7 +200,7 @@ static irqreturn_t nvidia_smmu_context_fault_bank(int irq, ...@@ -200,7 +200,7 @@ static irqreturn_t nvidia_smmu_context_fault_bank(int irq,
void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx); void __iomem *cb_base = nvidia_smmu_page(smmu, inst, smmu->numpage + idx);
fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR); fsr = readl_relaxed(cb_base + ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT)) if (!(fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE; return IRQ_NONE;
fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0); fsynr = readl_relaxed(cb_base + ARM_SMMU_CB_FSYNR0);
......
...@@ -141,7 +141,7 @@ static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_doma ...@@ -141,7 +141,7 @@ static int qcom_tbu_halt(struct qcom_tbu *tbu, struct arm_smmu_domain *smmu_doma
writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG); writel_relaxed(val, tbu->base + DEBUG_SID_HALT_REG);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if ((fsr & ARM_SMMU_FSR_FAULT) && (fsr & ARM_SMMU_FSR_SS)) { if ((fsr & ARM_SMMU_CB_FSR_FAULT) && (fsr & ARM_SMMU_CB_FSR_SS)) {
u32 sctlr_orig, sctlr; u32 sctlr_orig, sctlr;
/* /*
...@@ -298,7 +298,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, ...@@ -298,7 +298,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_SCTLR, sctlr);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (fsr & ARM_SMMU_FSR_FAULT) { if (fsr & ARM_SMMU_CB_FSR_FAULT) {
/* Clear pending interrupts */ /* Clear pending interrupts */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
...@@ -306,7 +306,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, ...@@ -306,7 +306,7 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
* TBU halt takes care of resuming any stalled transaction. * TBU halt takes care of resuming any stalled transaction.
* Kept it here for completeness' sake. * Kept it here for completeness' sake.
*/ */
if (fsr & ARM_SMMU_FSR_SS) if (fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
ARM_SMMU_RESUME_TERMINATE); ARM_SMMU_RESUME_TERMINATE);
} }
...@@ -320,11 +320,11 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain, ...@@ -320,11 +320,11 @@ static phys_addr_t qcom_iova_to_phys(struct arm_smmu_domain *smmu_domain,
phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid); phys = qcom_tbu_trigger_atos(smmu_domain, tbu, iova, sid);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
if (fsr & ARM_SMMU_FSR_FAULT) { if (fsr & ARM_SMMU_CB_FSR_FAULT) {
/* Clear pending interrupts */ /* Clear pending interrupts */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr);
if (fsr & ARM_SMMU_FSR_SS) if (fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME,
ARM_SMMU_RESUME_TERMINATE); ARM_SMMU_RESUME_TERMINATE);
} }
...@@ -383,68 +383,44 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) ...@@ -383,68 +383,44 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
struct arm_smmu_domain *smmu_domain = dev; struct arm_smmu_domain *smmu_domain = dev;
struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops; struct io_pgtable_ops *ops = smmu_domain->pgtbl_ops;
struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_device *smmu = smmu_domain->smmu;
u32 fsr, fsynr, cbfrsynra, resume = 0; struct arm_smmu_context_fault_info cfi;
u32 resume = 0;
int idx = smmu_domain->cfg.cbndx; int idx = smmu_domain->cfg.cbndx;
phys_addr_t phys_soft; phys_addr_t phys_soft;
unsigned long iova;
int ret, tmp; int ret, tmp;
static DEFINE_RATELIMIT_STATE(_rs, static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST); DEFAULT_RATELIMIT_BURST);
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); arm_smmu_read_context_fault_info(smmu, idx, &cfi);
if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); return IRQ_NONE;
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
if (list_empty(&tbu_list)) { if (list_empty(&tbu_list)) {
ret = report_iommu_fault(&smmu_domain->domain, NULL, iova, ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS) if (ret == -ENOSYS)
dev_err_ratelimited(smmu->dev, arm_smmu_print_context_fault_info(smmu, idx, &cfi);
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
phys_soft = ops->iova_to_phys(ops, iova); phys_soft = ops->iova_to_phys(ops, cfi.iova);
tmp = report_iommu_fault(&smmu_domain->domain, NULL, iova, tmp = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (!tmp || tmp == -EBUSY) { if (!tmp || tmp == -EBUSY) {
dev_dbg(smmu->dev,
"Context fault handled by client: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
iova, fsr, fsynr, idx);
dev_dbg(smmu->dev, "soft iova-to-phys=%pa\n", &phys_soft);
ret = IRQ_HANDLED; ret = IRQ_HANDLED;
resume = ARM_SMMU_RESUME_TERMINATE; resume = ARM_SMMU_RESUME_TERMINATE;
} else { } else {
phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, iova, fsr); phys_addr_t phys_atos = qcom_smmu_verify_fault(smmu_domain, cfi.iova, cfi.fsr);
if (__ratelimit(&_rs)) { if (__ratelimit(&_rs)) {
dev_err(smmu->dev, arm_smmu_print_context_fault_info(smmu, idx, &cfi);
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
dev_err(smmu->dev,
"FSR = %08x [%s%s%s%s%s%s%s%s%s], SID=0x%x\n",
fsr,
(fsr & 0x02) ? "TF " : "",
(fsr & 0x04) ? "AFF " : "",
(fsr & 0x08) ? "PF " : "",
(fsr & 0x10) ? "EF " : "",
(fsr & 0x20) ? "TLBMCF " : "",
(fsr & 0x40) ? "TLBLKF " : "",
(fsr & 0x80) ? "MHF " : "",
(fsr & 0x40000000) ? "SS " : "",
(fsr & 0x80000000) ? "MULTI " : "",
cbfrsynra);
dev_err(smmu->dev, dev_err(smmu->dev,
"soft iova-to-phys=%pa\n", &phys_soft); "soft iova-to-phys=%pa\n", &phys_soft);
...@@ -478,17 +454,17 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev) ...@@ -478,17 +454,17 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev)
*/ */
if (tmp != -EBUSY) { if (tmp != -EBUSY) {
/* Clear the faulting FSR */ /* Clear the faulting FSR */
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
/* Retry or terminate any stalled transactions */ /* Retry or terminate any stalled transactions */
if (fsr & ARM_SMMU_FSR_SS) if (cfi.fsr & ARM_SMMU_CB_FSR_SS)
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_RESUME, resume);
} }
return ret; return ret;
} }
static int qcom_tbu_probe(struct platform_device *pdev) int qcom_tbu_probe(struct platform_device *pdev)
{ {
struct of_phandle_args args = { .args_count = 2 }; struct of_phandle_args args = { .args_count = 2 };
struct device_node *np = pdev->dev.of_node; struct device_node *np = pdev->dev.of_node;
...@@ -530,18 +506,3 @@ static int qcom_tbu_probe(struct platform_device *pdev) ...@@ -530,18 +506,3 @@ static int qcom_tbu_probe(struct platform_device *pdev)
return 0; return 0;
} }
static const struct of_device_id qcom_tbu_of_match[] = {
{ .compatible = "qcom,sc7280-tbu" },
{ .compatible = "qcom,sdm845-tbu" },
{ }
};
static struct platform_driver qcom_tbu_driver = {
.driver = {
.name = "qcom_tbu",
.of_match_table = qcom_tbu_of_match,
},
.probe = qcom_tbu_probe,
};
builtin_platform_driver(qcom_tbu_driver);
...@@ -8,6 +8,8 @@ ...@@ -8,6 +8,8 @@
#include <linux/delay.h> #include <linux/delay.h>
#include <linux/of_device.h> #include <linux/of_device.h>
#include <linux/firmware/qcom/qcom_scm.h> #include <linux/firmware/qcom/qcom_scm.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include "arm-smmu.h" #include "arm-smmu.h"
#include "arm-smmu-qcom.h" #include "arm-smmu-qcom.h"
...@@ -469,7 +471,8 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu, ...@@ -469,7 +471,8 @@ static struct arm_smmu_device *qcom_smmu_create(struct arm_smmu_device *smmu,
/* Check to make sure qcom_scm has finished probing */ /* Check to make sure qcom_scm has finished probing */
if (!qcom_scm_is_available()) if (!qcom_scm_is_available())
return ERR_PTR(-EPROBE_DEFER); return ERR_PTR(dev_err_probe(smmu->dev, -EPROBE_DEFER,
"qcom_scm not ready\n"));
qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL); qsmmu = devm_krealloc(smmu->dev, smmu, sizeof(*qsmmu), GFP_KERNEL);
if (!qsmmu) if (!qsmmu)
...@@ -561,10 +564,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = { ...@@ -561,10 +564,47 @@ static struct acpi_platform_list qcom_acpi_platlist[] = {
}; };
#endif #endif
static int qcom_smmu_tbu_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
int ret;
if (IS_ENABLED(CONFIG_ARM_SMMU_QCOM_DEBUG)) {
ret = qcom_tbu_probe(pdev);
if (ret)
return ret;
}
if (dev->pm_domain) {
pm_runtime_set_active(dev);
pm_runtime_enable(dev);
}
return 0;
}
static const struct of_device_id qcom_smmu_tbu_of_match[] = {
{ .compatible = "qcom,sc7280-tbu" },
{ .compatible = "qcom,sdm845-tbu" },
{ }
};
static struct platform_driver qcom_smmu_tbu_driver = {
.driver = {
.name = "qcom_tbu",
.of_match_table = qcom_smmu_tbu_of_match,
},
.probe = qcom_smmu_tbu_probe,
};
struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu) struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu)
{ {
const struct device_node *np = smmu->dev->of_node; const struct device_node *np = smmu->dev->of_node;
const struct of_device_id *match; const struct of_device_id *match;
static u8 tbu_registered;
if (!tbu_registered++)
platform_driver_register(&qcom_smmu_tbu_driver);
#ifdef CONFIG_ACPI #ifdef CONFIG_ACPI
if (np == NULL) { if (np == NULL) {
......
...@@ -34,8 +34,10 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev); ...@@ -34,8 +34,10 @@ irqreturn_t qcom_smmu_context_fault(int irq, void *dev);
#ifdef CONFIG_ARM_SMMU_QCOM_DEBUG #ifdef CONFIG_ARM_SMMU_QCOM_DEBUG
void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu); void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu);
int qcom_tbu_probe(struct platform_device *pdev);
#else #else
static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { } static inline void qcom_smmu_tlb_sync_debug(struct arm_smmu_device *smmu) { }
static inline int qcom_tbu_probe(struct platform_device *pdev) { return -EINVAL; }
#endif #endif
#endif /* _ARM_SMMU_QCOM_H */ #endif /* _ARM_SMMU_QCOM_H */
...@@ -405,32 +405,72 @@ static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = { ...@@ -405,32 +405,72 @@ static const struct iommu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
.tlb_add_page = arm_smmu_tlb_add_page_s2_v1, .tlb_add_page = arm_smmu_tlb_add_page_s2_v1,
}; };
void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
struct arm_smmu_context_fault_info *cfi)
{
cfi->iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR);
cfi->fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR);
cfi->fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0);
cfi->cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
}
void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
const struct arm_smmu_context_fault_info *cfi)
{
dev_dbg(smmu->dev,
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
cfi->fsr, cfi->iova, cfi->fsynr, cfi->cbfrsynra, idx);
dev_err(smmu->dev, "FSR = %08x [%s%sFormat=%u%s%s%s%s%s%s%s%s], SID=0x%x\n",
cfi->fsr,
(cfi->fsr & ARM_SMMU_CB_FSR_MULTI) ? "MULTI " : "",
(cfi->fsr & ARM_SMMU_CB_FSR_SS) ? "SS " : "",
(u32)FIELD_GET(ARM_SMMU_CB_FSR_FORMAT, cfi->fsr),
(cfi->fsr & ARM_SMMU_CB_FSR_UUT) ? " UUT" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_ASF) ? " ASF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TLBLKF) ? " TLBLKF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TLBMCF) ? " TLBMCF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_EF) ? " EF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_PF) ? " PF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_AFF) ? " AFF" : "",
(cfi->fsr & ARM_SMMU_CB_FSR_TF) ? " TF" : "",
cfi->cbfrsynra);
dev_err(smmu->dev, "FSYNR0 = %08x [S1CBNDX=%u%s%s%s%s%s%s PLVL=%u]\n",
cfi->fsynr,
(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_S1CBNDX, cfi->fsynr),
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_AFR) ? " AFR" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PTWF) ? " PTWF" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_NSATTR) ? " NSATTR" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_IND) ? " IND" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_PNU) ? " PNU" : "",
(cfi->fsynr & ARM_SMMU_CB_FSYNR0_WNR) ? " WNR" : "",
(u32)FIELD_GET(ARM_SMMU_CB_FSYNR0_PLVL, cfi->fsynr));
}
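For illustration only, with made-up register values: a stalled write translation fault with stream ID 0x10 (FSR = 0x40000002, FSYNR0 = 0x12, CBFRSYNRA = 0x10) would decode through the format strings above as:

	FSR = 40000002 [SS Format=0 TF], SID=0x10
	FSYNR0 = 00000012 [S1CBNDX=0 WNR PLVL=2]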
static irqreturn_t arm_smmu_context_fault(int irq, void *dev) static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
{ {
u32 fsr, fsynr, cbfrsynra; struct arm_smmu_context_fault_info cfi;
unsigned long iova;
struct arm_smmu_domain *smmu_domain = dev; struct arm_smmu_domain *smmu_domain = dev;
struct arm_smmu_device *smmu = smmu_domain->smmu; struct arm_smmu_device *smmu = smmu_domain->smmu;
static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
DEFAULT_RATELIMIT_BURST);
int idx = smmu_domain->cfg.cbndx; int idx = smmu_domain->cfg.cbndx;
int ret; int ret;
fsr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSR); arm_smmu_read_context_fault_info(smmu, idx, &cfi);
if (!(fsr & ARM_SMMU_FSR_FAULT))
return IRQ_NONE;
fsynr = arm_smmu_cb_read(smmu, idx, ARM_SMMU_CB_FSYNR0); if (!(cfi.fsr & ARM_SMMU_CB_FSR_FAULT))
iova = arm_smmu_cb_readq(smmu, idx, ARM_SMMU_CB_FAR); return IRQ_NONE;
cbfrsynra = arm_smmu_gr1_read(smmu, ARM_SMMU_GR1_CBFRSYNRA(idx));
ret = report_iommu_fault(&smmu_domain->domain, NULL, iova, ret = report_iommu_fault(&smmu_domain->domain, NULL, cfi.iova,
fsynr & ARM_SMMU_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ); cfi.fsynr & ARM_SMMU_CB_FSYNR0_WNR ? IOMMU_FAULT_WRITE : IOMMU_FAULT_READ);
if (ret == -ENOSYS) if (ret == -ENOSYS && __ratelimit(&rs))
dev_err_ratelimited(smmu->dev, arm_smmu_print_context_fault_info(smmu, idx, &cfi);
"Unhandled context fault: fsr=0x%x, iova=0x%08lx, fsynr=0x%x, cbfrsynra=0x%x, cb=%d\n",
fsr, iova, fsynr, cbfrsynra, idx);
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, fsr); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_FSR, cfi.fsr);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -1306,7 +1346,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, ...@@ -1306,7 +1346,7 @@ static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain,
arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va); arm_smmu_cb_write(smmu, idx, ARM_SMMU_CB_ATS1PR, va);
reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR; reg = arm_smmu_page(smmu, ARM_SMMU_CB(smmu, idx)) + ARM_SMMU_CB_ATSR;
if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_ATSR_ACTIVE), if (readl_poll_timeout_atomic(reg, tmp, !(tmp & ARM_SMMU_CB_ATSR_ACTIVE),
5, 50)) { 5, 50)) {
spin_unlock_irqrestore(&smmu_domain->cb_lock, flags); spin_unlock_irqrestore(&smmu_domain->cb_lock, flags);
dev_err(dev, dev_err(dev,
...@@ -1642,7 +1682,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) ...@@ -1642,7 +1682,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
/* Make sure all context banks are disabled and clear CB_FSR */ /* Make sure all context banks are disabled and clear CB_FSR */
for (i = 0; i < smmu->num_context_banks; ++i) { for (i = 0; i < smmu->num_context_banks; ++i) {
arm_smmu_write_context_bank(smmu, i); arm_smmu_write_context_bank(smmu, i);
arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); arm_smmu_cb_write(smmu, i, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
} }
/* Invalidate the TLB, just in case */ /* Invalidate the TLB, just in case */
......
...@@ -196,34 +196,42 @@ enum arm_smmu_cbar_type { ...@@ -196,34 +196,42 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_PAR_F BIT(0) #define ARM_SMMU_CB_PAR_F BIT(0)
#define ARM_SMMU_CB_FSR 0x58 #define ARM_SMMU_CB_FSR 0x58
#define ARM_SMMU_FSR_MULTI BIT(31) #define ARM_SMMU_CB_FSR_MULTI BIT(31)
#define ARM_SMMU_FSR_SS BIT(30) #define ARM_SMMU_CB_FSR_SS BIT(30)
#define ARM_SMMU_FSR_UUT BIT(8) #define ARM_SMMU_CB_FSR_FORMAT GENMASK(10, 9)
#define ARM_SMMU_FSR_ASF BIT(7) #define ARM_SMMU_CB_FSR_UUT BIT(8)
#define ARM_SMMU_FSR_TLBLKF BIT(6) #define ARM_SMMU_CB_FSR_ASF BIT(7)
#define ARM_SMMU_FSR_TLBMCF BIT(5) #define ARM_SMMU_CB_FSR_TLBLKF BIT(6)
#define ARM_SMMU_FSR_EF BIT(4) #define ARM_SMMU_CB_FSR_TLBMCF BIT(5)
#define ARM_SMMU_FSR_PF BIT(3) #define ARM_SMMU_CB_FSR_EF BIT(4)
#define ARM_SMMU_FSR_AFF BIT(2) #define ARM_SMMU_CB_FSR_PF BIT(3)
#define ARM_SMMU_FSR_TF BIT(1) #define ARM_SMMU_CB_FSR_AFF BIT(2)
#define ARM_SMMU_CB_FSR_TF BIT(1)
#define ARM_SMMU_FSR_IGN (ARM_SMMU_FSR_AFF | \
ARM_SMMU_FSR_ASF | \ #define ARM_SMMU_CB_FSR_IGN (ARM_SMMU_CB_FSR_AFF | \
ARM_SMMU_FSR_TLBMCF | \ ARM_SMMU_CB_FSR_ASF | \
ARM_SMMU_FSR_TLBLKF) ARM_SMMU_CB_FSR_TLBMCF | \
ARM_SMMU_CB_FSR_TLBLKF)
#define ARM_SMMU_FSR_FAULT (ARM_SMMU_FSR_MULTI | \
ARM_SMMU_FSR_SS | \ #define ARM_SMMU_CB_FSR_FAULT (ARM_SMMU_CB_FSR_MULTI | \
ARM_SMMU_FSR_UUT | \ ARM_SMMU_CB_FSR_SS | \
ARM_SMMU_FSR_EF | \ ARM_SMMU_CB_FSR_UUT | \
ARM_SMMU_FSR_PF | \ ARM_SMMU_CB_FSR_EF | \
ARM_SMMU_FSR_TF | \ ARM_SMMU_CB_FSR_PF | \
ARM_SMMU_FSR_IGN) ARM_SMMU_CB_FSR_TF | \
ARM_SMMU_CB_FSR_IGN)
#define ARM_SMMU_CB_FAR 0x60 #define ARM_SMMU_CB_FAR 0x60
#define ARM_SMMU_CB_FSYNR0 0x68 #define ARM_SMMU_CB_FSYNR0 0x68
#define ARM_SMMU_FSYNR0_WNR BIT(4) #define ARM_SMMU_CB_FSYNR0_PLVL GENMASK(1, 0)
#define ARM_SMMU_CB_FSYNR0_WNR BIT(4)
#define ARM_SMMU_CB_FSYNR0_PNU BIT(5)
#define ARM_SMMU_CB_FSYNR0_IND BIT(6)
#define ARM_SMMU_CB_FSYNR0_NSATTR BIT(8)
#define ARM_SMMU_CB_FSYNR0_PTWF BIT(10)
#define ARM_SMMU_CB_FSYNR0_AFR BIT(11)
#define ARM_SMMU_CB_FSYNR0_S1CBNDX GENMASK(23, 16)
#define ARM_SMMU_CB_FSYNR1 0x6c #define ARM_SMMU_CB_FSYNR1 0x6c
...@@ -237,7 +245,7 @@ enum arm_smmu_cbar_type { ...@@ -237,7 +245,7 @@ enum arm_smmu_cbar_type {
#define ARM_SMMU_CB_ATS1PR 0x800 #define ARM_SMMU_CB_ATS1PR 0x800
#define ARM_SMMU_CB_ATSR 0x8f0 #define ARM_SMMU_CB_ATSR 0x8f0
#define ARM_SMMU_ATSR_ACTIVE BIT(0) #define ARM_SMMU_CB_ATSR_ACTIVE BIT(0)
#define ARM_SMMU_RESUME_TERMINATE BIT(0) #define ARM_SMMU_RESUME_TERMINATE BIT(0)
...@@ -533,4 +541,17 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu); ...@@ -533,4 +541,17 @@ struct arm_smmu_device *qcom_smmu_impl_init(struct arm_smmu_device *smmu);
void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx); void arm_smmu_write_context_bank(struct arm_smmu_device *smmu, int idx);
int arm_mmu500_reset(struct arm_smmu_device *smmu); int arm_mmu500_reset(struct arm_smmu_device *smmu);
struct arm_smmu_context_fault_info {
unsigned long iova;
u32 fsr;
u32 fsynr;
u32 cbfrsynra;
};
void arm_smmu_read_context_fault_info(struct arm_smmu_device *smmu, int idx,
struct arm_smmu_context_fault_info *cfi);
void arm_smmu_print_context_fault_info(struct arm_smmu_device *smmu, int idx,
const struct arm_smmu_context_fault_info *cfi);
#endif /* _ARM_SMMU_H */ #endif /* _ARM_SMMU_H */
...@@ -194,7 +194,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev) ...@@ -194,7 +194,7 @@ static irqreturn_t qcom_iommu_fault(int irq, void *dev)
fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR); fsr = iommu_readl(ctx, ARM_SMMU_CB_FSR);
if (!(fsr & ARM_SMMU_FSR_FAULT)) if (!(fsr & ARM_SMMU_CB_FSR_FAULT))
return IRQ_NONE; return IRQ_NONE;
fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0); fsynr = iommu_readl(ctx, ARM_SMMU_CB_FSYNR0);
...@@ -274,7 +274,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain, ...@@ -274,7 +274,7 @@ static int qcom_iommu_init_domain(struct iommu_domain *domain,
/* Clear context bank fault address fault status registers */ /* Clear context bank fault address fault status registers */
iommu_writel(ctx, ARM_SMMU_CB_FAR, 0); iommu_writel(ctx, ARM_SMMU_CB_FAR, 0);
iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_FSR_FAULT); iommu_writel(ctx, ARM_SMMU_CB_FSR, ARM_SMMU_CB_FSR_FAULT);
/* TTBRs */ /* TTBRs */
iommu_writeq(ctx, ARM_SMMU_CB_TTBR0, iommu_writeq(ctx, ARM_SMMU_CB_TTBR0,
...
...@@ -76,6 +76,7 @@ ...@@ -76,6 +76,7 @@
#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) #define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) #define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM (((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) #define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) #define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) #define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8)
...@@ -85,7 +86,7 @@ ...@@ -85,7 +86,7 @@
#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) #define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */ /* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) #define ARM_LPAE_PTE_ATTR_HI_MASK (ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ #define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \
ARM_LPAE_PTE_ATTR_HI_MASK) ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */ /* Software bit for solving coherency races */
...@@ -93,7 +94,11 @@ ...@@ -93,7 +94,11 @@
/* Stage-1 PTE */ /* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) #define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT 7
#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) #define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)1) << \
ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK (ARM_LPAE_PTE_AP_RDONLY | \
ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 #define ARM_LPAE_PTE_ATTRINDX_SHIFT 2
#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) #define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11)
...@@ -139,6 +144,12 @@ ...@@ -139,6 +144,12 @@
#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) #define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK)
#define iopte_writeable_dirty(pte) \
(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)
#define iopte_set_writeable_clean(ptep) \
set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
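These two helpers encode the HTTU convention: once DBM is set, AP[2] (the RDONLY bit) stops acting as a permission bit and becomes inverted dirty state that hardware clears on the first write. Note that arm_lpae_prot_to_pte() further down installs writeable mappings with DBM set and AP[2] clear, so they initially read back as dirty. A small sketch of the transitions, assuming the helper names from this hunk:

/*
 * DBM=1, RDONLY=0 -> writeable and dirty
 * DBM=1, RDONLY=1 -> writeable but clean (HW clears RDONLY on write)
 * DBM=0, RDONLY=1 -> genuinely read-only
 */
static void example_dirty_encoding(void)
{
	arm_lpae_iopte pte = ARM_LPAE_PTE_DBM;	/* as a DMA write leaves it */

	WARN_ON(!iopte_writeable_dirty(pte));
	iopte_set_writeable_clean(&pte);	/* atomic set_bit on RDONLY */
	WARN_ON(iopte_writeable_dirty(pte));	/* clean until the next write */
}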
struct arm_lpae_io_pgtable { struct arm_lpae_io_pgtable {
struct io_pgtable iop; struct io_pgtable iop;
...@@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl, ...@@ -160,6 +171,13 @@ static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK; return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
} }
static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
return false;
return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}
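The level guard exists because the LPAE descriptor encodings overlap: a last-level page descriptor carries the same 0b11 type bits as a table descriptor, so without the check the dirty walk below would dereference page frames as page tables. An illustration, using the type values defined in io-pgtable-arm.c:

/* Illustration: a level-3 page and a table share type bits 0b11. */
static void example_type_overlap(void)
{
	arm_lpae_iopte pte = ARM_LPAE_PTE_TYPE_PAGE;

	WARN_ON(iopte_table(pte, ARM_LPAE_MAX_LEVELS - 1));	/* guarded off */
	WARN_ON(!iopte_leaf(pte, ARM_LPAE_MAX_LEVELS - 1, ARM_64_LPAE_S1));
}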
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr, static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
struct arm_lpae_io_pgtable *data) struct arm_lpae_io_pgtable *data)
{ {
...@@ -422,6 +440,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, ...@@ -422,6 +440,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte = ARM_LPAE_PTE_nG; pte = ARM_LPAE_PTE_nG;
if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
pte |= ARM_LPAE_PTE_AP_RDONLY; pte |= ARM_LPAE_PTE_AP_RDONLY;
else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
pte |= ARM_LPAE_PTE_DBM;
if (!(prot & IOMMU_PRIV)) if (!(prot & IOMMU_PRIV))
pte |= ARM_LPAE_PTE_AP_UNPRIV; pte |= ARM_LPAE_PTE_AP_UNPRIV;
} else { } else {
...@@ -726,6 +746,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, ...@@ -726,6 +746,97 @@ static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
return iopte_to_paddr(pte, data) | iova; return iopte_to_paddr(pte, data) | iova;
} }
struct io_pgtable_walk_data {
struct iommu_dirty_bitmap *dirty;
unsigned long flags;
u64 addr;
const u64 end;
};
static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep,
int lvl);
static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep, int lvl)
{
struct io_pgtable *iop = &data->iop;
arm_lpae_iopte pte = READ_ONCE(*ptep);
if (iopte_leaf(pte, lvl, iop->fmt)) {
size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
if (iopte_writeable_dirty(pte)) {
iommu_dirty_bitmap_record(walk_data->dirty,
walk_data->addr, size);
if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
iopte_set_writeable_clean(ptep);
}
walk_data->addr += size;
return 0;
}
if (WARN_ON(!iopte_table(pte, lvl)))
return -EINVAL;
ptep = iopte_deref(pte, data);
return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
}
static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
struct io_pgtable_walk_data *walk_data,
arm_lpae_iopte *ptep,
int lvl)
{
u32 idx;
int max_entries, ret;
if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
return -EINVAL;
if (lvl == data->start_level)
max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
else
max_entries = ARM_LPAE_PTES_PER_TABLE(data);
for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
(idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
if (ret)
return ret;
}
return 0;
}
static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
unsigned long iova, size_t size,
unsigned long flags,
struct iommu_dirty_bitmap *dirty)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
struct io_pgtable_walk_data walk_data = {
.dirty = dirty,
.flags = flags,
.addr = iova,
.end = iova + size,
};
arm_lpae_iopte *ptep = data->pgd;
int lvl = data->start_level;
if (WARN_ON(!size))
return -EINVAL;
if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
return -EINVAL;
if (data->iop.fmt != ARM_64_LPAE_S1)
return -EINVAL;
return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
}
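arm_lpae_read_and_clear_dirty() only accepts stage-1 LPAE tables and is reached through the io_pgtable_ops vtable wired up in the next hunk. A hedged sketch of a driver-side caller, assuming the iommu_dirty_bitmap was already prepared by the caller:

/* Sketch: report dirty IOVAs in [iova, iova + size). */
static int example_collect_dirty(struct io_pgtable_ops *ops,
				 unsigned long iova, size_t size,
				 struct iommu_dirty_bitmap *dirty)
{
	if (!ops->read_and_clear_dirty)
		return -EOPNOTSUPP;

	/* Pass IOMMU_DIRTY_NO_CLEAR in flags to snapshot without clearing. */
	return ops->read_and_clear_dirty(ops, iova, size, 0, dirty);
}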
static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{ {
unsigned long granule, page_sizes; unsigned long granule, page_sizes;
...@@ -804,6 +915,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) ...@@ -804,6 +915,7 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
.map_pages = arm_lpae_map_pages, .map_pages = arm_lpae_map_pages,
.unmap_pages = arm_lpae_unmap_pages, .unmap_pages = arm_lpae_unmap_pages,
.iova_to_phys = arm_lpae_iova_to_phys, .iova_to_phys = arm_lpae_iova_to_phys,
.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
}; };
return data; return data;
...@@ -819,7 +931,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) ...@@ -819,7 +931,8 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
IO_PGTABLE_QUIRK_ARM_TTBR1 | IO_PGTABLE_QUIRK_ARM_TTBR1 |
IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)) IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
IO_PGTABLE_QUIRK_ARM_HD))
return NULL; return NULL;
data = arm_lpae_alloc_pgtable(cfg); data = arm_lpae_alloc_pgtable(cfg);
...
...@@ -114,6 +114,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas, ...@@ -114,6 +114,9 @@ iommufd_hwpt_paging_alloc(struct iommufd_ctx *ictx, struct iommufd_ioas *ioas,
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
if (flags & ~valid_flags) if (flags & ~valid_flags)
return ERR_PTR(-EOPNOTSUPP); return ERR_PTR(-EOPNOTSUPP);
if ((flags & IOMMU_HWPT_ALLOC_DIRTY_TRACKING) &&
!device_iommu_capable(idev->dev, IOMMU_CAP_DIRTY_TRACKING))
return ERR_PTR(-EOPNOTSUPP);
hwpt_paging = __iommufd_object_alloc( hwpt_paging = __iommufd_object_alloc(
ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj); ictx, hwpt_paging, IOMMUFD_OBJ_HWPT_PAGING, common.obj);
...
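The capability check above is what a userspace VMM hits if it requests dirty tracking on an IOMMU without it. From the uAPI side the flag is passed at hwpt allocation time; a hedged userspace sketch, where dev_id and ioas_id are assumed to come from earlier iommufd binding steps:

#include <sys/ioctl.h>
#include <linux/iommufd.h>

/* Sketch: allocate a paging domain with dirty tracking enabled. */
static int example_alloc_dirty_hwpt(int iommufd, __u32 dev_id, __u32 ioas_id)
{
	struct iommu_hwpt_alloc cmd = {
		.size = sizeof(cmd),
		.flags = IOMMU_HWPT_ALLOC_DIRTY_TRACKING,
		.dev_id = dev_id,
		.pt_id = ioas_id,
	};

	if (ioctl(iommufd, IOMMU_HWPT_ALLOC, &cmd))
		return -1;	/* errno is EOPNOTSUPP without dirty tracking */
	return cmd.out_hwpt_id;
}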
...@@ -85,6 +85,8 @@ struct io_pgtable_cfg { ...@@ -85,6 +85,8 @@ struct io_pgtable_cfg {
* *
* IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability * IO_PGTABLE_QUIRK_ARM_OUTER_WBWA: Override the outer-cacheability
* attributes set in the TCR for a non-coherent page-table walker. * attributes set in the TCR for a non-coherent page-table walker.
*
* IO_PGTABLE_QUIRK_ARM_HD: Enables dirty tracking in stage 1 pagetable.
*/ */
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0) #define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1) #define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
...@@ -92,6 +94,7 @@ struct io_pgtable_cfg { ...@@ -92,6 +94,7 @@ struct io_pgtable_cfg {
#define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4) #define IO_PGTABLE_QUIRK_ARM_MTK_TTBR_EXT BIT(4)
#define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5) #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
#define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6) #define IO_PGTABLE_QUIRK_ARM_OUTER_WBWA BIT(6)
#define IO_PGTABLE_QUIRK_ARM_HD BIT(7)
unsigned long quirks; unsigned long quirks;
unsigned long pgsize_bitmap; unsigned long pgsize_bitmap;
unsigned int ias; unsigned int ias;
...
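With the quirk bit defined, a driver that has detected HTTU support (the SMMUv3 feature-detection patch in this series) opts in by setting it before allocating the table; arm_64_lpae_alloc_pgtable_s1() rejects the quirk otherwise. A hedged configuration sketch, where smmu_supports_hd stands in for the driver's feature probe:

/* Sketch: opt a stage-1 io-pgtable into hardware dirty tracking. */
static struct io_pgtable_ops *
example_alloc_s1(struct io_pgtable_cfg *cfg, void *cookie,
		 bool smmu_supports_hd)
{
	if (smmu_supports_hd)
		cfg->quirks |= IO_PGTABLE_QUIRK_ARM_HD;

	return alloc_io_pgtable_ops(ARM_64_LPAE_S1, cfg, cookie);
}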