Commit 3f3b8d0c authored by Robin Murphy, committed by Will Deacon

iommu/arm-smmu: Remove .tlb_inv_range indirection

Fill in 'native' iommu_flush_ops callbacks for all the
arm_smmu_flush_ops variants, and clear up the remains of the previous
.tlb_inv_range abstraction.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 54ecb8f7
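
For readers skimming the diff below: previously the shared tlb_flush_walk/tlb_flush_leaf/tlb_add_page callbacks looked up the stage-specific invalidation routine through the extra .tlb_inv_range function pointer in arm_smmu_flush_ops; with this change, each stage (plus the SMMUv1 stage-2 special case) gets its own 'native' callback that calls the helper directly. What follows is a compilable but heavily simplified sketch of that pattern only, using made-up names (dummy_domain, s1_inv_range, sync_context, and so on), not the driver's actual code:

/*
 * Illustrative sketch only -- all names here (dummy_domain, s1_inv_range,
 * sync_context, ...) are made up for the example and are not the driver's.
 */
#include <stdio.h>
#include <stddef.h>

struct dummy_domain {
	const char *name;
};

/* Stand-in for a stage-specific range invalidation helper. */
static void s1_inv_range(unsigned long iova, size_t size, void *cookie,
			 int leaf)
{
	struct dummy_domain *d = cookie;

	printf("%s: invalidate 0x%lx + %zu (leaf=%d)\n", d->name, iova, size,
	       leaf);
}

/* Stand-in for the context-bank TLB sync. */
static void sync_context(void *cookie)
{
	struct dummy_domain *d = cookie;

	printf("%s: sync\n", d->name);
}

/* Old shape: a generic callback dispatches through stored function pointers. */
struct old_flush_ops {
	void (*inv_range)(unsigned long iova, size_t size, void *cookie,
			  int leaf);
	void (*sync)(void *cookie);
};

static const struct old_flush_ops old_s1_ops = {
	.inv_range	= s1_inv_range,
	.sync		= sync_context,
};

static void old_flush_walk(unsigned long iova, size_t size, void *cookie,
			   const struct old_flush_ops *ops)
{
	ops->inv_range(iova, size, cookie, 0);	/* extra indirect call */
	ops->sync(cookie);
}

/* New shape: one 'native' callback per variant, calling helpers directly. */
static void new_flush_walk_s1(unsigned long iova, size_t size, void *cookie)
{
	s1_inv_range(iova, size, cookie, 0);	/* direct call, no lookup */
	sync_context(cookie);
}

int main(void)
{
	struct dummy_domain d = { .name = "stage-1 domain" };

	old_flush_walk(0x1000, 0x2000, &d, &old_s1_ops);
	new_flush_walk_s1(0x1000, 0x2000, &d);
	return 0;
}

In the driver itself the 'old shape' corresponds to arm_smmu_tlb_inv_walk()/_leaf()/_add_page() plus the .tlb_inv_range pointer, and the 'new shape' to the arm_smmu_tlb_inv_*_s1/_s2 and *_s2_v1 callbacks added in the diff below.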
@@ -312,7 +312,7 @@ static void arm_smmu_tlb_inv_context_s2(void *cookie)
 }
 
 static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
-				      size_t granule, bool leaf, void *cookie)
+				      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -342,7 +342,7 @@ static void arm_smmu_tlb_inv_range_s1(unsigned long iova, size_t size,
 }
 
 static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
-				      size_t granule, bool leaf, void *cookie)
+				      size_t granule, void *cookie, bool leaf)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
 	struct arm_smmu_device *smmu = smmu_domain->smmu;
@@ -362,84 +362,100 @@ static void arm_smmu_tlb_inv_range_s2(unsigned long iova, size_t size,
 	} while (size -= granule);
 }
 
-/*
- * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
- * almost negligible, but the benefit of getting the first one in as far ahead
- * of the sync as possible is significant, hence we don't just make this a
- * no-op and set .tlb_sync to arm_smmu_tlb_inv_context_s2() as you might think.
- */
-static void arm_smmu_tlb_inv_vmid_nosync(unsigned long iova, size_t size,
-					 size_t granule, bool leaf, void *cookie)
+static void arm_smmu_tlb_inv_walk_s1(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	struct arm_smmu_device *smmu = smmu_domain->smmu;
-
-	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
-		wmb();
-
-	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_walk(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
+static void arm_smmu_tlb_inv_leaf_s1(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
-
-	ops->tlb_inv_range(iova, size, granule, false, cookie);
-	ops->tlb_sync(cookie);
+	arm_smmu_tlb_inv_range_s1(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
 }
 
-static void arm_smmu_tlb_inv_leaf(unsigned long iova, size_t size,
-				  size_t granule, void *cookie)
+static void arm_smmu_tlb_add_page_s1(struct iommu_iotlb_gather *gather,
+				     unsigned long iova, size_t granule,
+				     void *cookie)
 {
-	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
-
-	ops->tlb_inv_range(iova, size, granule, true, cookie);
-	ops->tlb_sync(cookie);
+	arm_smmu_tlb_inv_range_s1(iova, granule, granule, cookie, true);
+}
+
+static void arm_smmu_tlb_inv_walk_s2(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, false);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_inv_leaf_s2(unsigned long iova, size_t size,
+				     size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, size, granule, cookie, true);
+	arm_smmu_tlb_sync_context(cookie);
+}
+
+static void arm_smmu_tlb_add_page_s2(struct iommu_iotlb_gather *gather,
+				     unsigned long iova, size_t granule,
+				     void *cookie)
+{
+	arm_smmu_tlb_inv_range_s2(iova, granule, granule, cookie, true);
+}
+
+static void arm_smmu_tlb_inv_any_s2_v1(unsigned long iova, size_t size,
+				       size_t granule, void *cookie)
+{
+	arm_smmu_tlb_inv_context_s2(cookie);
 }
 
-static void arm_smmu_tlb_add_page(struct iommu_iotlb_gather *gather,
-				  unsigned long iova, size_t granule,
-				  void *cookie)
+/*
+ * On MMU-401 at least, the cost of firing off multiple TLBIVMIDs appears
+ * almost negligible, but the benefit of getting the first one in as far ahead
+ * of the sync as possible is significant, hence we don't just make this a
+ * no-op and call arm_smmu_tlb_inv_context_s2() from .iotlb_sync as you might
+ * think.
+ */
+static void arm_smmu_tlb_add_page_s2_v1(struct iommu_iotlb_gather *gather,
+					unsigned long iova, size_t granule,
+					void *cookie)
 {
 	struct arm_smmu_domain *smmu_domain = cookie;
-	const struct arm_smmu_flush_ops *ops = smmu_domain->flush_ops;
+	struct arm_smmu_device *smmu = smmu_domain->smmu;
+
+	if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
+		wmb();
 
-	ops->tlb_inv_range(iova, granule, granule, true, cookie);
+	arm_smmu_gr0_write(smmu, ARM_SMMU_GR0_TLBIVMID, smmu_domain->cfg.vmid);
 }
 
 static const struct arm_smmu_flush_ops arm_smmu_s1_tlb_ops = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s1,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s1,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s1,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s1,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_range_s1,
 	.tlb_sync		= arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v2 = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_walk_s2,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf_s2,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s2,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_range_s2,
 	.tlb_sync		= arm_smmu_tlb_sync_context,
 };
 
 static const struct arm_smmu_flush_ops arm_smmu_s2_tlb_ops_v1 = {
 	.tlb = {
 		.tlb_flush_all	= arm_smmu_tlb_inv_context_s2,
-		.tlb_flush_walk	= arm_smmu_tlb_inv_walk,
-		.tlb_flush_leaf	= arm_smmu_tlb_inv_leaf,
-		.tlb_add_page	= arm_smmu_tlb_add_page,
+		.tlb_flush_walk	= arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_flush_leaf	= arm_smmu_tlb_inv_any_s2_v1,
+		.tlb_add_page	= arm_smmu_tlb_add_page_s2_v1,
 	},
-	.tlb_inv_range		= arm_smmu_tlb_inv_vmid_nosync,
 	.tlb_sync		= arm_smmu_tlb_sync_vmid,
 };
@@ -306,8 +306,6 @@ enum arm_smmu_domain_stage {
 
 struct arm_smmu_flush_ops {
 	struct iommu_flush_ops		tlb;
-	void (*tlb_inv_range)(unsigned long iova, size_t size, size_t granule,
-			      bool leaf, void *cookie);
 	void (*tlb_sync)(void *cookie);
 };