Commit 64efb3de authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Add ssid to struct arm_smmu_master_domain

Prepare to allow an S1 domain to be attached to a PASID as well. Keep track
of the SSID the domain is using on each master in the
arm_smmu_master_domain.
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/5-v9-5cd718286059+79186-smmuv3_newapi_p2b_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 7497f421
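
The gist of the change, as a small standalone C sketch (a simplified illustration only, not the kernel code: the trimmed-down struct, the helper pick_inv_ssid(), and treating IOMMU_NO_PASID as 0 are assumptions made for this example). Each arm_smmu_master_domain now records the SSID it was attached with, and ATC invalidation uses either that recorded SSID or, for SVA, the PASID supplied by the caller:

/*
 * Simplified, standalone illustration of the SSID bookkeeping added by this
 * commit. Types and values here are assumptions for the example; the real
 * definitions live in drivers/iommu/arm-smmu-v3/.
 */
#include <stdio.h>
#include <stdint.h>

typedef uint32_t ioasid_t;
#define IOMMU_NO_PASID 0u        /* assumed: marker for the RID (no-PASID) case */

struct master_domain_example {
        ioasid_t ssid;           /* SSID this domain was attached with (new field) */
};

/*
 * Mirrors the dispatch added to __arm_smmu_atc_inv_domain(): a non-zero ssid
 * means SVA is co-opting the S1 domain, so invalidate that PASID; otherwise
 * use the SSID recorded in the master_domain at attach time.
 */
static ioasid_t pick_inv_ssid(const struct master_domain_example *md, ioasid_t ssid)
{
        return ssid != IOMMU_NO_PASID ? ssid : md->ssid;
}

int main(void)
{
        struct master_domain_example md = { .ssid = IOMMU_NO_PASID };

        /* Regular domain invalidation: use the attach-time SSID. */
        printf("domain inval        -> SSID %u\n",
               (unsigned)pick_inv_ssid(&md, IOMMU_NO_PASID));
        /* SVA invalidation (arm_smmu_atc_inv_domain_sva): use the caller's PASID. */
        printf("SVA inval, PASID 9  -> SSID %u\n",
               (unsigned)pick_inv_ssid(&md, 9));
        return 0;
}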
@@ -47,13 +47,12 @@ arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
                 struct arm_smmu_master *master = master_domain->master;
                 struct arm_smmu_cd *cdptr;
 
-                /* S1 domains only support RID attachment right now */
-                cdptr = arm_smmu_get_cd_ptr(master, IOMMU_NO_PASID);
+                cdptr = arm_smmu_get_cd_ptr(master, master_domain->ssid);
                 if (WARN_ON(!cdptr))
                         continue;
 
                 arm_smmu_make_s1_cd(&target_cd, master, smmu_domain);
-                arm_smmu_write_cd_entry(master, IOMMU_NO_PASID, cdptr,
+                arm_smmu_write_cd_entry(master, master_domain->ssid, cdptr,
                                         &target_cd);
         }
         spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
@@ -294,8 +293,8 @@ static void arm_smmu_mm_arch_invalidate_secondary_tlbs(struct mmu_notifier *mn,
                                                     smmu_domain);
         }
 
-        arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), start,
-                                size);
+        arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), start,
+                                    size);
 }
static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -332,7 +331,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
         spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
         arm_smmu_tlb_inv_asid(smmu_domain->smmu, smmu_mn->cd->asid);
-        arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
+        arm_smmu_atc_inv_domain_sva(smmu_domain, mm_get_enqcmd_pasid(mm), 0, 0);
 
         smmu_mn->cleared = true;
         mutex_unlock(&sva_lock);
@@ -411,8 +410,8 @@ static void arm_smmu_mmu_notifier_put(struct arm_smmu_mmu_notifier *smmu_mn)
          */
         if (!smmu_mn->cleared) {
                 arm_smmu_tlb_inv_asid(smmu_domain->smmu, cd->asid);
-                arm_smmu_atc_inv_domain(smmu_domain, mm_get_enqcmd_pasid(mm), 0,
-                                        0);
+                arm_smmu_atc_inv_domain_sva(smmu_domain,
+                                            mm_get_enqcmd_pasid(mm), 0, 0);
         }
 
         /* Frees smmu_mn */
@@ -2013,8 +2013,8 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
         return arm_smmu_cmdq_batch_submit(master->smmu, &cmds);
 }
 
-int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
-                            unsigned long iova, size_t size)
+static int __arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+                                     ioasid_t ssid, unsigned long iova, size_t size)
 {
         struct arm_smmu_master_domain *master_domain;
         int i;
@@ -2042,8 +2042,6 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
         if (!atomic_read(&smmu_domain->nr_ats_masters))
                 return 0;
 
-        arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
-
         cmds.num = 0;
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
@@ -2054,6 +2052,16 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
                 if (!master->ats_enabled)
                         continue;
 
+                /*
+                 * Non-zero ssid means SVA is co-opting the S1 domain to issue
+                 * invalidations for SVA PASIDs.
+                 */
+                if (ssid != IOMMU_NO_PASID)
+                        arm_smmu_atc_inv_to_cmd(ssid, iova, size, &cmd);
+                else
+                        arm_smmu_atc_inv_to_cmd(master_domain->ssid, iova, size,
+                                                &cmd);
+
                 for (i = 0; i < master->num_streams; i++) {
                         cmd.atc.sid = master->streams[i].id;
                         arm_smmu_cmdq_batch_add(smmu_domain->smmu, &cmds, &cmd);
@@ -2064,6 +2072,19 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
         return arm_smmu_cmdq_batch_submit(smmu_domain->smmu, &cmds);
 }
 
+static int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
+                                   unsigned long iova, size_t size)
+{
+        return __arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova,
+                                         size);
+}
+
+int arm_smmu_atc_inv_domain_sva(struct arm_smmu_domain *smmu_domain,
+                                ioasid_t ssid, unsigned long iova, size_t size)
+{
+        return __arm_smmu_atc_inv_domain(smmu_domain, ssid, iova, size);
+}
+
 /* IO_PGTABLE API */
 
 static void arm_smmu_tlb_inv_context(void *cookie)
 {
@@ -2085,7 +2106,7 @@ static void arm_smmu_tlb_inv_context(void *cookie)
                 cmd.tlbi.vmid = smmu_domain->s2_cfg.vmid;
                 arm_smmu_cmdq_issue_cmd_with_sync(smmu, &cmd);
         }
-        arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, 0, 0);
+        arm_smmu_atc_inv_domain(smmu_domain, 0, 0);
 }
 
 static void __arm_smmu_tlb_inv_range(struct arm_smmu_cmdq_ent *cmd,
@@ -2183,7 +2204,7 @@ static void arm_smmu_tlb_inv_range_domain(unsigned long iova, size_t size,
          * Unfortunately, this can't be leaf-only since we may have
          * zapped an entire table.
          */
-        arm_smmu_atc_inv_domain(smmu_domain, IOMMU_NO_PASID, iova, size);
+        arm_smmu_atc_inv_domain(smmu_domain, iova, size);
 }
 
 void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
@@ -2518,7 +2539,8 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
 static struct arm_smmu_master_domain *
 arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
-                            struct arm_smmu_master *master)
+                            struct arm_smmu_master *master,
+                            ioasid_t ssid)
 {
         struct arm_smmu_master_domain *master_domain;
@@ -2526,7 +2548,8 @@ arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
         list_for_each_entry(master_domain, &smmu_domain->devices,
                             devices_elm) {
-                if (master_domain->master == master)
+                if (master_domain->master == master &&
+                    master_domain->ssid == ssid)
                         return master_domain;
         }
         return NULL;
@@ -2559,7 +2582,8 @@ static void arm_smmu_remove_master_domain(struct arm_smmu_master *master,
                 return;
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        master_domain = arm_smmu_find_master_domain(smmu_domain, master);
+        master_domain = arm_smmu_find_master_domain(smmu_domain, master,
+                                                    IOMMU_NO_PASID);
         if (master_domain) {
                 list_del(&master_domain->devices_elm);
                 kfree(master_domain);
@@ -772,6 +772,7 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
 struct arm_smmu_master_domain {
         struct list_head devices_elm;
         struct arm_smmu_master *master;
+        ioasid_t ssid;
 };
 
 static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
@@ -803,8 +804,8 @@ void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
                          size_t granule, bool leaf,
                          struct arm_smmu_domain *smmu_domain);
 bool arm_smmu_free_asid(struct arm_smmu_ctx_desc *cd);
-int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
-                            unsigned long iova, size_t size);
+int arm_smmu_atc_inv_domain_sva(struct arm_smmu_domain *smmu_domain,
+                                ioasid_t ssid, unsigned long iova, size_t size);
 
 #ifdef CONFIG_ARM_SMMU_V3_SVA
 bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);