Commit 71e8a8cd authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync()

Since we now use separate iommu_gather_ops for stage 1 and stage 2
contexts, we may as well divide up the monolithic callback into its
respective stage 1 and stage 2 parts.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 490325e0
@@ -490,46 +490,54 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	arm_smmu_tlb_sync_global(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+	void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx);
 
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		wmb();
 
-	if (stage1) {
-		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
-
-		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
-			iova = (iova >> 12) << 12;
-			iova |= cfg->asid;
-			do {
-				writel_relaxed(iova, reg);
-				iova += granule;
-			} while (size -= granule);
-		} else {
-			iova >>= 12;
-			iova |= (u64)cfg->asid << 48;
-			do {
-				writeq_relaxed(iova, reg);
-				iova += granule >> 12;
-			} while (size -= granule);
-		}
+	reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+
+	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+		iova = (iova >> 12) << 12;
+		iova |= cfg->asid;
+		do {
+			writel_relaxed(iova, reg);
+			iova += granule;
+		} while (size -= granule);
 	} else {
-		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
-			      ARM_SMMU_CB_S2_TLBIIPAS2;
 		iova >>= 12;
+		iova |= (u64)cfg->asid << 48;
 		do {
-			smmu_write_atomic_lq(iova, reg);
+			writeq_relaxed(iova, reg);
 			iova += granule >> 12;
 		} while (size -= granule);
 	}
 }
 
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
+	reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
+	iova >>= 12;
+	do {
+		smmu_write_atomic_lq(iova, reg);
+		iova += granule >> 12;
+	} while (size -= granule);
+}
+
 /*
  * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
  * almost negligible, but the benefit of getting the first one in as far ahead
@@ -550,13 +558,13 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };
 
 static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };
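With the callback split, each stage owns exactly the invalidation path it needs: the stage 1 variant encodes the ASID into per-granule TLBIVA(L) writes, while the stage 2 variant issues TLBIIPAS2(L) writes by IPA. For context, the following is a minimal sketch of how these ops tables end up attached to a domain, based on arm_smmu_init_domain_context() in this era of the driver; the surrounding setup is elided and details may differ, and arm_smmu_s2_tlb_ops_v1 (which falls back to the TLBIVMID path discussed in the comment above) is assumed for SMMUv1:

	/* sketch only: select the per-stage TLB ops for this domain */
	switch (smmu_domain->stage) {
	case ARM_SMMU_DOMAIN_S1:
		smmu_domain->tlb_ops = &arm_smmu_s1_tlb_ops;
		break;
	case ARM_SMMU_DOMAIN_NESTED:
	case ARM_SMMU_DOMAIN_S2:
		/* SMMUv2 can invalidate by IPA; v1 falls back to TLBIVMID */
		if (smmu->version == ARM_SMMU_V2)
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v2;
		else
			smmu_domain->tlb_ops = &arm_smmu_s2_tlb_ops_v1;
		break;
	}
	...
	pgtbl_cfg.tlb = smmu_domain->tlb_ops;	/* handed to io-pgtable */

Because io-pgtable only ever sees the ops table for the domain's own stage, the stage1 flag and the CBAR_TYPE_S2_TRANS check become dead weight, which is what lets the monolithic callback fall apart so cleanly.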