Commit 34f327a9 authored by Jason Gunthorpe's avatar Jason Gunthorpe

iommufd: Keep track of each device's reserved regions instead of groups

The driver facing API in the iommu core makes the reserved regions
per-device. An algorithm in the core code consolidates the regions of all
the devices in a group to return the group view.

To allow for devices to be hotplugged into the group iommufd would re-load
the entire group's reserved regions for each device, just in case they
changed.

Further iommufd already has to deal with duplicated/overlapping reserved
regions as it must union all the groups together.

Thus simplify all of this to just use the device reserved regions
interface directly from the iommu driver.

Link: https://lore.kernel.org/r/5-v8-6659224517ea+532-iommufd_alloc_jgg@nvidia.com
Suggested-by: default avatarKevin Tian <kevin.tian@intel.com>
Reviewed-by: default avatarLu Baolu <baolu.lu@linux.intel.com>
Reviewed-by: default avatarKevin Tian <kevin.tian@intel.com>
Tested-by: default avatarNicolin Chen <nicolinc@nvidia.com>
Signed-off-by: default avatarJason Gunthorpe <jgg@nvidia.com>
parent 8d0e2e9d
...@@ -356,9 +356,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt, ...@@ -356,9 +356,8 @@ int iommufd_hw_pagetable_attach(struct iommufd_hw_pagetable *hwpt,
} }
} }
rc = iopt_table_enforce_group_resv_regions(&hwpt->ioas->iopt, idev->dev, rc = iopt_table_enforce_dev_resv_regions(
idev->igroup->group, &hwpt->ioas->iopt, idev->dev, &sw_msi_start);
&sw_msi_start);
if (rc) if (rc)
goto err_unlock; goto err_unlock;
......
...@@ -1169,25 +1169,22 @@ void iopt_remove_access(struct io_pagetable *iopt, ...@@ -1169,25 +1169,22 @@ void iopt_remove_access(struct io_pagetable *iopt,
up_write(&iopt->domains_rwsem); up_write(&iopt->domains_rwsem);
} }
/* Narrow the valid_iova_itree to include reserved ranges from a group. */ /* Narrow the valid_iova_itree to include reserved ranges from a device. */
int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
struct device *device, struct device *dev,
struct iommu_group *group, phys_addr_t *sw_msi_start)
phys_addr_t *sw_msi_start)
{ {
struct iommu_resv_region *resv; struct iommu_resv_region *resv;
struct iommu_resv_region *tmp; LIST_HEAD(resv_regions);
LIST_HEAD(group_resv_regions);
unsigned int num_hw_msi = 0; unsigned int num_hw_msi = 0;
unsigned int num_sw_msi = 0; unsigned int num_sw_msi = 0;
int rc; int rc;
down_write(&iopt->iova_rwsem); down_write(&iopt->iova_rwsem);
rc = iommu_get_group_resv_regions(group, &group_resv_regions); /* FIXME: drivers allocate memory but there is no failure propagated */
if (rc) iommu_get_resv_regions(dev, &resv_regions);
goto out_unlock;
list_for_each_entry(resv, &group_resv_regions, list) { list_for_each_entry(resv, &resv_regions, list) {
if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE) if (resv->type == IOMMU_RESV_DIRECT_RELAXABLE)
continue; continue;
...@@ -1199,7 +1196,7 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, ...@@ -1199,7 +1196,7 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
} }
rc = iopt_reserve_iova(iopt, resv->start, rc = iopt_reserve_iova(iopt, resv->start,
resv->length - 1 + resv->start, device); resv->length - 1 + resv->start, dev);
if (rc) if (rc)
goto out_reserved; goto out_reserved;
} }
...@@ -1214,11 +1211,9 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, ...@@ -1214,11 +1211,9 @@ int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt,
goto out_free_resv; goto out_free_resv;
out_reserved: out_reserved:
__iopt_remove_reserved_iova(iopt, device); __iopt_remove_reserved_iova(iopt, dev);
out_free_resv: out_free_resv:
list_for_each_entry_safe(resv, tmp, &group_resv_regions, list) iommu_put_resv_regions(dev, &resv_regions);
kfree(resv);
out_unlock:
up_write(&iopt->iova_rwsem); up_write(&iopt->iova_rwsem);
return rc; return rc;
} }
...@@ -76,10 +76,9 @@ int iopt_table_add_domain(struct io_pagetable *iopt, ...@@ -76,10 +76,9 @@ int iopt_table_add_domain(struct io_pagetable *iopt,
struct iommu_domain *domain); struct iommu_domain *domain);
void iopt_table_remove_domain(struct io_pagetable *iopt, void iopt_table_remove_domain(struct io_pagetable *iopt,
struct iommu_domain *domain); struct iommu_domain *domain);
int iopt_table_enforce_group_resv_regions(struct io_pagetable *iopt, int iopt_table_enforce_dev_resv_regions(struct io_pagetable *iopt,
struct device *device, struct device *dev,
struct iommu_group *group, phys_addr_t *sw_msi_start);
phys_addr_t *sw_msi_start);
int iopt_set_allow_iova(struct io_pagetable *iopt, int iopt_set_allow_iova(struct io_pagetable *iopt,
struct rb_root_cached *allowed_iova); struct rb_root_cached *allowed_iova);
int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start, int iopt_reserve_iova(struct io_pagetable *iopt, unsigned long start,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment