Commit 39b3b3c9 authored by Jean-Philippe Brucker, committed by Joerg Roedel

iommu/virtio: Reject IOMMU page granule larger than PAGE_SIZE

We don't currently support IOMMUs with a page granule larger than the
system page size. The IOVA allocator has a BUG_ON() in this case, and
VFIO has a WARN_ON().

Removing these obstacles doesn't seem possible without major
changes to the DMA API and VFIO. Some callers of iommu_map(), for
example, want to map multiple page-aligned regions adjacent to each
other for scatter-gather purposes. Even in simple DMA API uses, a call
to dma_map_page() would let the endpoint access neighbouring memory. And
VFIO users cannot ensure that their virtual address buffer is physically
contiguous at the IOMMU granule.
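
For illustration, a minimal user-space C sketch (not part of the patch) of
the arithmetic behind that concern; the 64kB granule and 4kB page size are
assumed example values:

#include <stdio.h>

int main(void)
{
	unsigned long page_size = 4096;		/* system PAGE_SIZE */
	unsigned long iommu_granule = 65536;	/* smallest IOMMU page */

	/*
	 * The IOMMU can only map whole granules, so mapping one system
	 * page also maps its neighbours within the same IOMMU page.
	 */
	unsigned long exposed = iommu_granule - page_size;

	printf("mapping one %lukB page exposes %lukB of neighbouring memory\n",
	       page_size / 1024, exposed / 1024);
	return 0;
}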

Rather than triggering the IOVA BUG_ON() on mismatched page sizes, abort
the vdomain finalise() with an error message. We could simply abort the
viommu probe(), but an upcoming extension to virtio-iommu will allow
setting different page masks for each endpoint.
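
As a self-contained sketch of the new check (an illustration, not the patch
itself): __builtin_ctzl() stands in for the kernel's __ffs(), both returning
the index of the lowest set bit, so the shift yields the smallest page size
the vIOMMU supports. The bitmap value below is an assumed example:

#include <stdio.h>

int main(void)
{
	const unsigned long page_size = 4096;	/* assume a 4kB host */
	/* a vIOMMU advertising only 64kB, 2MB and 1GB pages */
	unsigned long pgsize_bitmap = (1UL << 16) | (1UL << 21) | (1UL << 30);
	unsigned long viommu_page_size = 1UL << __builtin_ctzl(pgsize_bitmap);

	if (viommu_page_size > page_size) {
		fprintf(stderr,
			"granule 0x%lx larger than system page size 0x%lx\n",
			viommu_page_size, page_size);
		return 1;	/* the driver returns -EINVAL here */
	}
	return 0;
}
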
Reported-by: Bharat Bhushan <bbhushan2@marvell.com>
Signed-off-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
Reviewed-by: Bharat Bhushan <bbhushan2@marvell.com>
Reviewed-by: Eric Auger <eric.auger@redhat.com>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/20200326093558.2641019-4-jean-philippe@linaro.org
Signed-off-by: Joerg Roedel <jroedel@suse.de>
parent 7062af3e
@@ -607,12 +607,22 @@ static struct iommu_domain *viommu_domain_alloc(unsigned type)
 	return &vdomain->domain;
 }
 
-static int viommu_domain_finalise(struct viommu_dev *viommu,
+static int viommu_domain_finalise(struct viommu_endpoint *vdev,
 				  struct iommu_domain *domain)
 {
 	int ret;
+	unsigned long viommu_page_size;
+	struct viommu_dev *viommu = vdev->viommu;
 	struct viommu_domain *vdomain = to_viommu_domain(domain);
 
+	viommu_page_size = 1UL << __ffs(viommu->pgsize_bitmap);
+	if (viommu_page_size > PAGE_SIZE) {
+		dev_err(vdev->dev,
+			"granule 0x%lx larger than system page size 0x%lx\n",
+			viommu_page_size, PAGE_SIZE);
+		return -EINVAL;
+	}
+
 	ret = ida_alloc_range(&viommu->domain_ids, viommu->first_domain,
 			      viommu->last_domain, GFP_KERNEL);
 	if (ret < 0)
@@ -659,7 +669,7 @@ static int viommu_attach_dev(struct iommu_domain *domain, struct device *dev)
 		 * Properly initialize the domain now that we know which viommu
 		 * owns it.
 		 */
-		ret = viommu_domain_finalise(vdev->viommu, domain);
+		ret = viommu_domain_finalise(vdev, domain);
 	} else if (vdomain->viommu != vdev->viommu) {
 		dev_err(dev, "cannot attach to foreign vIOMMU\n");
 		ret = -EXDEV;