Commit b03cbca4, authored by Jean-Philippe Brucker and committed by Joerg Roedel

iommu/virtio: Support identity-mapped domains

Support identity domains for devices that do not offer the
VIRTIO_IOMMU_F_BYPASS_CONFIG feature, by creating 1:1 mappings between
the virtual and physical address space. Identity domains created this
way still perform noticeably better than DMA domains, because they don't
have the overhead of setting up and tearing down mappings at runtime.
The performance difference between this and bypass is minimal in
comparison.

It does not matter that the physical addresses in the identity mappings
do not all correspond to memory. By enabling passthrough we are trusting
the device driver and the device itself to only perform DMA to suitable
locations. In some cases it may even be desirable to perform DMA to MMIO
regions.
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Link: https://lore.kernel.org/r/20211201173323.1045819-6-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent c0c76359
......@@ -375,6 +375,55 @@ static size_t viommu_del_mappings(struct viommu_domain *vdomain,
return unmapped;
}
/*
* Fill the domain with identity mappings, skipping the device's reserved
* regions.
*/
static int viommu_domain_map_identity(struct viommu_endpoint *vdev,
struct viommu_domain *vdomain)
{
int ret;
struct iommu_resv_region *resv;
u64 iova = vdomain->domain.geometry.aperture_start;
u64 limit = vdomain->domain.geometry.aperture_end;
u32 flags = VIRTIO_IOMMU_MAP_F_READ | VIRTIO_IOMMU_MAP_F_WRITE;
unsigned long granule = 1UL << __ffs(vdomain->domain.pgsize_bitmap);
iova = ALIGN(iova, granule);
limit = ALIGN_DOWN(limit + 1, granule) - 1;
list_for_each_entry(resv, &vdev->resv_regions, list) {
u64 resv_start = ALIGN_DOWN(resv->start, granule);
u64 resv_end = ALIGN(resv->start + resv->length, granule) - 1;
if (resv_end < iova || resv_start > limit)
/* No overlap */
continue;
if (resv_start > iova) {
ret = viommu_add_mapping(vdomain, iova, resv_start - 1,
(phys_addr_t)iova, flags);
if (ret)
goto err_unmap;
}
if (resv_end >= limit)
return 0;
iova = resv_end + 1;
}
ret = viommu_add_mapping(vdomain, iova, limit, (phys_addr_t)iova,
flags);
if (ret)
goto err_unmap;
return 0;
err_unmap:
viommu_del_mappings(vdomain, 0, iova);
return ret;
}
/*
* viommu_replay_mappings - re-send MAP requests
*
......@@ -637,14 +686,18 @@ static int viommu_domain_finalise(struct viommu_endpoint *vdev,
vdomain->viommu = viommu;
if (domain->type == IOMMU_DOMAIN_IDENTITY) {
if (!virtio_has_feature(viommu->vdev,
if (virtio_has_feature(viommu->vdev,
VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
vdomain->bypass = true;
return 0;
}
ret = viommu_domain_map_identity(vdev, vdomain);
if (ret) {
ida_free(&viommu->domain_ids, vdomain->id);
vdomain->viommu = NULL;
return -EOPNOTSUPP;
}
vdomain->bypass = true;
}
return 0;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment