Commit ad10dce6 authored by Jason Gunthorpe, committed by Will Deacon

iommu/arm-smmu-v3: Make smmu_domain->devices into an allocated list

The next patch will need to store the same master twice (with different
SSIDs), so allocate memory for each list element.
Tested-by: Nicolin Chen <nicolinc@nvidia.com>
Tested-by: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Reviewed-by: Michael Shavit <mshavit@google.com>
Reviewed-by: Nicolin Chen <nicolinc@nvidia.com>
Reviewed-by: Jerry Snitselaar <jsnitsel@redhat.com>
Signed-off-by: Jason Gunthorpe <jgg@nvidia.com>
Link: https://lore.kernel.org/r/3-v9-5cd718286059+79186-smmuv3_newapi_p2b_jgg@nvidia.com
Signed-off-by: Will Deacon <will@kernel.org>
parent 85f2fb6e
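
For orientation before the diff: a minimal, self-contained C sketch of the pattern this change introduces. This is not the kernel code; the userspace names (struct master, struct domain, attach()/detach()) and the ssid field are illustrative assumptions, the latter anticipating the follow-up patch that stores the same master once per SSID. The point is only that the domain's device list holds individually allocated elements instead of a list_head embedded in each master, so one master can be linked more than once.

#include <stdio.h>
#include <stdlib.h>

struct master { const char *name; };

/* Stands in for struct arm_smmu_master_domain: one allocated element per
 * attachment. The ssid field is illustrative only; per-SSID state arrives
 * in the follow-up patch. */
struct master_domain {
        struct master_domain *next;   /* plays the role of devices_elm */
        struct master *master;
        int ssid;
};

/* Stands in for struct arm_smmu_domain: owns the head of the list. */
struct domain {
        struct master_domain *devices;
};

/* Attach: allocate a fresh element per attachment, as the patch does
 * with kzalloc() in arm_smmu_attach_dev(). */
static int attach(struct domain *d, struct master *m, int ssid)
{
        struct master_domain *md = calloc(1, sizeof(*md));

        if (!md)
                return -1;
        md->master = m;
        md->ssid = ssid;
        md->next = d->devices;
        d->devices = md;
        return 0;
}

/* Detach: find the matching element, unlink and free it, mirroring
 * arm_smmu_find_master_domain() + list_del() + kfree(). */
static void detach(struct domain *d, struct master *m, int ssid)
{
        struct master_domain **p = &d->devices;

        while (*p) {
                struct master_domain *md = *p;

                if (md->master == m && md->ssid == ssid) {
                        *p = md->next;
                        free(md);
                        return;
                }
                p = &md->next;
        }
}

int main(void)
{
        struct master gpu = { "gpu" };
        struct domain dom = { NULL };

        attach(&dom, &gpu, 0);   /* RID attachment */
        attach(&dom, &gpu, 5);   /* same master again, different SSID */
        for (struct master_domain *md = dom.devices; md; md = md->next)
                printf("%s ssid=%d\n", md->master->name, md->ssid);
        detach(&dom, &gpu, 5);
        detach(&dom, &gpu, 0);
        return 0;
}

The trade-off shown here is the same one the patch makes: an allocation and free per attach/detach in exchange for being able to carry per-attachment state and to list the same master multiple times.
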
@@ -38,12 +38,13 @@ static DEFINE_MUTEX(sva_lock);
 static void
 arm_smmu_update_s1_domain_cd_entry(struct arm_smmu_domain *smmu_domain)
 {
-        struct arm_smmu_master *master;
+        struct arm_smmu_master_domain *master_domain;
         struct arm_smmu_cd target_cd;
         unsigned long flags;
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+        list_for_each_entry(master_domain, &smmu_domain->devices, devices_elm) {
+                struct arm_smmu_master *master = master_domain->master;
                 struct arm_smmu_cd *cdptr;
 
                 /* S1 domains only support RID attachment right now */
@@ -301,7 +302,7 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
 {
         struct arm_smmu_mmu_notifier *smmu_mn = mn_to_smmu(mn);
         struct arm_smmu_domain *smmu_domain = smmu_mn->domain;
-        struct arm_smmu_master *master;
+        struct arm_smmu_master_domain *master_domain;
         unsigned long flags;
 
         mutex_lock(&sva_lock);
@@ -315,7 +316,9 @@ static void arm_smmu_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
          * but disable translation.
          */
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+        list_for_each_entry(master_domain, &smmu_domain->devices,
+                            devices_elm) {
+                struct arm_smmu_master *master = master_domain->master;
                 struct arm_smmu_cd target;
                 struct arm_smmu_cd *cdptr;
@@ -2015,10 +2015,10 @@ static int arm_smmu_atc_inv_master(struct arm_smmu_master *master)
 int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
                             unsigned long iova, size_t size)
 {
+        struct arm_smmu_master_domain *master_domain;
         int i;
         unsigned long flags;
         struct arm_smmu_cmdq_ent cmd;
-        struct arm_smmu_master *master;
         struct arm_smmu_cmdq_batch cmds;
 
         if (!(smmu_domain->smmu->features & ARM_SMMU_FEAT_ATS))
@@ -2046,7 +2046,10 @@ int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain, int ssid,
         cmds.num = 0;
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        list_for_each_entry(master, &smmu_domain->devices, domain_head) {
+        list_for_each_entry(master_domain, &smmu_domain->devices,
+                            devices_elm) {
+                struct arm_smmu_master *master = master_domain->master;
+
                 if (!master->ats_enabled)
                         continue;
@@ -2534,9 +2537,26 @@ static void arm_smmu_disable_pasid(struct arm_smmu_master *master)
         pci_disable_pasid(pdev);
 }
 
+static struct arm_smmu_master_domain *
+arm_smmu_find_master_domain(struct arm_smmu_domain *smmu_domain,
+                            struct arm_smmu_master *master)
+{
+        struct arm_smmu_master_domain *master_domain;
+
+        lockdep_assert_held(&smmu_domain->devices_lock);
+
+        list_for_each_entry(master_domain, &smmu_domain->devices,
+                            devices_elm) {
+                if (master_domain->master == master)
+                        return master_domain;
+        }
+        return NULL;
+}
+
 static void arm_smmu_detach_dev(struct arm_smmu_master *master)
 {
         struct iommu_domain *domain = iommu_get_domain_for_dev(master->dev);
+        struct arm_smmu_master_domain *master_domain;
         struct arm_smmu_domain *smmu_domain;
         unsigned long flags;
@@ -2547,7 +2567,11 @@ static void arm_smmu_detach_dev(struct arm_smmu_master *master)
         arm_smmu_disable_ats(master, smmu_domain);
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        list_del_init(&master->domain_head);
+        master_domain = arm_smmu_find_master_domain(smmu_domain, master);
+        if (master_domain) {
+                list_del(&master_domain->devices_elm);
+                kfree(master_domain);
+        }
         spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
         master->ats_enabled = false;
@@ -2561,6 +2585,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         struct iommu_fwspec *fwspec = dev_iommu_fwspec_get(dev);
         struct arm_smmu_device *smmu;
         struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
+        struct arm_smmu_master_domain *master_domain;
         struct arm_smmu_master *master;
         struct arm_smmu_cd *cdptr;
@@ -2597,6 +2622,11 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
                         return -ENOMEM;
         }
 
+        master_domain = kzalloc(sizeof(*master_domain), GFP_KERNEL);
+        if (!master_domain)
+                return -ENOMEM;
+        master_domain->master = master;
+
         /*
          * Prevent arm_smmu_share_asid() from trying to change the ASID
          * of either the old or new domain while we are working on it.
@@ -2610,7 +2640,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
         master->ats_enabled = arm_smmu_ats_supported(master);
 
         spin_lock_irqsave(&smmu_domain->devices_lock, flags);
-        list_add(&master->domain_head, &smmu_domain->devices);
+        list_add(&master_domain->devices_elm, &smmu_domain->devices);
         spin_unlock_irqrestore(&smmu_domain->devices_lock, flags);
 
         switch (smmu_domain->stage) {
@@ -2925,7 +2955,6 @@ static struct iommu_device *arm_smmu_probe_device(struct device *dev)
         master->dev = dev;
         master->smmu = smmu;
         INIT_LIST_HEAD(&master->bonds);
-        INIT_LIST_HEAD(&master->domain_head);
         dev_iommu_priv_set(dev, master);
 
         ret = arm_smmu_insert_master(smmu, master);
@@ -697,7 +697,6 @@ struct arm_smmu_stream {
 struct arm_smmu_master {
         struct arm_smmu_device          *smmu;
         struct device                   *dev;
-        struct list_head                domain_head;
         struct arm_smmu_stream          *streams;
         /* Locked by the iommu core using the group mutex */
         struct arm_smmu_ctx_desc_cfg    cd_table;
@@ -731,6 +730,7 @@ struct arm_smmu_domain {
         struct iommu_domain             domain;
 
+        /* List of struct arm_smmu_master_domain */
         struct list_head                devices;
         spinlock_t                      devices_lock;
@@ -767,6 +767,11 @@ void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
                           u16 asid);
 #endif
 
+struct arm_smmu_master_domain {
+        struct list_head devices_elm;
+        struct arm_smmu_master *master;
+};
+
 static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
 {
         return container_of(dom, struct arm_smmu_domain, domain);