Commit 71e8a8cd authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Split arm_smmu_tlb_inv_range_nosync()

Since we now use separate iommu_gather_ops for stage 1 and stage 2
contexts, we may as well divide up the monolithic callback into its
respective stage 1 and stage 2 parts.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 490325e0
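For context, the commit message refers to the per-stage iommu_gather_ops tables (arm_smmu_s1_tlb_ops / arm_smmu_s2_tlb_ops_v2) that the diff below updates. The sketch that follows is a hypothetical, self-contained C model of that pattern, not kernel code: the stage decision is made once, when a domain is bound to an ops table, so each range-invalidation helper only has to handle its own stage. All type and function names here (gather_ops, demo_domain, demo_inv_range_s1/_s2, demo_sync) are illustrative stand-ins.

/*
 * Hypothetical standalone model of per-stage TLB-maintenance callback
 * tables. Illustrative only; names are not the kernel's definitions.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct gather_ops {
	void (*tlb_add_flush)(unsigned long iova, size_t size,
			      size_t granule, bool leaf, void *cookie);
	void (*tlb_sync)(void *cookie);
};

struct demo_domain {
	const char *name;
	const struct gather_ops *tlb_ops;	/* chosen once, at domain init */
};

static void demo_inv_range_s1(unsigned long iova, size_t size,
			      size_t granule, bool leaf, void *cookie)
{
	struct demo_domain *d = cookie;

	/* A real stage-1 helper would write the TLBIVA(L) registers. */
	printf("%s: S1 invalidate [%#lx, +%zu), granule %zu, leaf=%d\n",
	       d->name, iova, size, granule, leaf);
}

static void demo_inv_range_s2(unsigned long iova, size_t size,
			      size_t granule, bool leaf, void *cookie)
{
	struct demo_domain *d = cookie;

	/* A real stage-2 helper would write the TLBIIPAS2(L) registers. */
	printf("%s: S2 invalidate [%#lx, +%zu), granule %zu, leaf=%d\n",
	       d->name, iova, size, granule, leaf);
}

static void demo_sync(void *cookie)
{
	printf("%s: sync\n", ((struct demo_domain *)cookie)->name);
}

static const struct gather_ops demo_s1_ops = {
	.tlb_add_flush	= demo_inv_range_s1,
	.tlb_sync	= demo_sync,
};

static const struct gather_ops demo_s2_ops = {
	.tlb_add_flush	= demo_inv_range_s2,
	.tlb_sync	= demo_sync,
};

int main(void)
{
	struct demo_domain dom1 = { "dom1", &demo_s1_ops };	/* stage 1 domain */
	struct demo_domain dom2 = { "dom2", &demo_s2_ops };	/* stage 2 domain */

	/* Callers never test the stage per invalidation; the ops table already knows. */
	dom1.tlb_ops->tlb_add_flush(0x1000, 0x2000, 0x1000, true, &dom1);
	dom1.tlb_ops->tlb_sync(&dom1);
	dom2.tlb_ops->tlb_add_flush(0x1000, 0x2000, 0x1000, true, &dom2);
	dom2.tlb_ops->tlb_sync(&dom2);
	return 0;
}

This is the same shape the driver ends up with after this patch: the monolithic callback's "if (stage1)" test disappears because the choice is already encoded in which ops table the domain carries.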
drivers/iommu/arm-smmu.c

@@ -490,18 +490,17 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 	arm_smmu_tlb_sync_global(smmu);
 }
 
-static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
-					  size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
 	struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
-	bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS;
-	void __iomem *reg = ARM_SMMU_CB(smmu_domain->smmu, cfg->cbndx);
+	void __iomem *reg = ARM_SMMU_CB(smmu, cfg->cbndx);
 
-	if (smmu_domain->smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
 		wmb();
 
-	if (stage1) {
-		reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
+	reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA;
 
-		if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
+	if (cfg->fmt != ARM_SMMU_CTX_FMT_AARCH64) {
@@ -519,15 +518,24 @@ static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
 			iova += granule >> 12;
 		} while (size -= granule);
 	}
-	} else {
-		reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L :
-			      ARM_SMMU_CB_S2_TLBIIPAS2;
+}
+
+static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
+				      size_t granule, bool leaf, void *cookie)
+{
+	struct arm_smmu_domain *smmu_domain = cookie;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+	void __iomem *reg = ARM_SMMU_CB(smmu, smmu_domain->cfg.cbndx);
+
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
+
+	reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : ARM_SMMU_CB_S2_TLBIIPAS2;
 	iova >>= 12;
 	do {
 		smmu_write_atomic_lq(iova, reg);
 		iova += granule >> 12;
 	} while (size -= granule);
-	}
 }
 
 /*
@@ -550,13 +558,13 @@ static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
 static const struct iommu_gather_ops arm_smmu_s1_tlb_ops = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s1,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };
 
 static const struct iommu_gather_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-	.tlb_add_flush	= arm_smmu_tlb_inv_range_nosync,
+	.tlb_add_flush	= arm_smmu_tlb_inv_range_s2,
 	.tlb_sync	= arm_smmu_tlb_sync_context,
 };