Commit 8f6429c7 authored by Robin Murphy, committed by David Woodhouse

iommu/iova: Avoid over-allocating when size-aligned

Currently, allocating a size-aligned IOVA region quietly adjusts the
actual allocation size in the process, returning a rounded-up
power-of-two-sized allocation. This results in mismatched behaviour in
the IOMMU driver if the original size was not a power of two, where the
original size is mapped, but the rounded-up IOVA size is unmapped.

Whilst some IOMMUs will happily unmap already-unmapped pages, others
consider this an error, so fix it by computing the necessary alignment
padding without altering the actual allocation size. Also clean up by
making pad_size unsigned, since its callers always pass unsigned values
and negative padding makes little sense here anyway.
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 52721d9d
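
The heart of the change is the new padding calculation in iova_get_pad_size() (see the iova.c hunk below). What follows is a minimal userspace sketch of that formula, not kernel code: roundup_pow_of_two() here stands in for the kernel's __roundup_pow_of_two(), and the size/limit_pfn values are hypothetical. Since IOVAs are allocated top-down from limit_pfn, the start pfn is limit_pfn + 1 - size - pad; masking off the low bits of (limit_pfn + 1 - size) rounds that start down to the power-of-two order of the size without growing the allocation itself.

#include <stdio.h>

/* Userspace stand-in for the kernel's __roundup_pow_of_two();
 * assumes v > 0. */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

/* The patch's padding computation: extra PFNs left between the
 * allocation and limit_pfn so that the start pfn comes out aligned
 * to the power-of-two order of size. */
static unsigned int iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
{
	return (limit_pfn + 1 - size) & (roundup_pow_of_two(size) - 1);
}

int main(void)
{
	/* Hypothetical values: a 5-page region allocated top-down
	 * below limit_pfn 0xfff. */
	unsigned int size = 5, limit_pfn = 0xfff;
	unsigned int pad = iova_get_pad_size(size, limit_pfn);
	unsigned int start = limit_pfn + 1 - size - pad;

	printf("pad = %u, start = 0x%x, aligned to %u: %s\n",
	       pad, start, roundup_pow_of_two(size),
	       start % roundup_pow_of_two(size) == 0 ? "yes" : "no");
	return 0;
}

With these hypothetical values, the 5-page allocation gets pad = 3 and starts at pfn 0xff8, a multiple of 8, while only 5 pages are reserved. Under the old code the allocation itself would have been rounded up to 8 pages, so the unmap path would later try to unmap 8 pages when only 5 had ever been mapped.
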
drivers/iommu/intel-iommu.c
@@ -3233,6 +3233,8 @@ static struct iova *intel_alloc_iova(struct device *dev,
 	/* Restrict dma_mask to the width that the iommu can handle */
 	dma_mask = min_t(uint64_t, DOMAIN_MAX_ADDR(domain->gaw), dma_mask);
+	/* Ensure we reserve the whole size-aligned region */
+	nrpages = __roundup_pow_of_two(nrpages);
 
 	if (!dmar_forcedac && dma_mask > DMA_BIT_MASK(32)) {
 		/*
...
drivers/iommu/iova.c
@@ -120,19 +120,14 @@ __cached_rbnode_delete_update(struct iova_domain *iovad, struct iova *free)
 	}
 }
 
-/* Computes the padding size required, to make the
- * the start address naturally aligned on its size
+/*
+ * Computes the padding size required, to make the start address
+ * naturally aligned on the power-of-two order of its size
  */
-static int
-iova_get_pad_size(int size, unsigned int limit_pfn)
+static unsigned int
+iova_get_pad_size(unsigned int size, unsigned int limit_pfn)
 {
-	unsigned int pad_size = 0;
-	unsigned int order = ilog2(size);
-
-	if (order)
-		pad_size = (limit_pfn + 1) % (1 << order);
-
-	return pad_size;
+	return (limit_pfn + 1 - size) & (__roundup_pow_of_two(size) - 1);
 }
 
 static int __alloc_and_insert_iova_range(struct iova_domain *iovad,
@@ -265,12 +260,6 @@ alloc_iova(struct iova_domain *iovad, unsigned long size,
 	if (!new_iova)
 		return NULL;
 
-	/* If size aligned is set then round the size to
-	 * to next power of two.
-	 */
-	if (size_aligned)
-		size = __roundup_pow_of_two(size);
-
 	ret = __alloc_and_insert_iova_range(iovad, size, limit_pfn,
 			new_iova, size_aligned);
...
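
As a sanity check on the bitmask form, here is a small test harness (again plain userspace C, not part of the patch): it verifies exhaustively for small sizes that the computed pad always yields a start pfn rounded down to the alignment boundary, and that the pad is minimal, i.e. strictly less than the alignment.

#include <assert.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's __roundup_pow_of_two(). */
static unsigned int roundup_pow_of_two(unsigned int v)
{
	unsigned int r = 1;

	while (r < v)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int size, limit_pfn;

	/* For every small (size, limit_pfn) pair, check that the patch's
	 * bitmask form matches the plain definition: the candidate start
	 * pfn (limit_pfn + 1 - size) rounded down to the alignment. */
	for (size = 1; size <= 64; size++) {
		unsigned int align = roundup_pow_of_two(size);

		for (limit_pfn = size - 1; limit_pfn < 4096; limit_pfn++) {
			unsigned int pad = (limit_pfn + 1 - size) & (align - 1);
			unsigned int start = limit_pfn + 1 - size - pad;

			assert(start % align == 0); /* start is aligned  */
			assert(pad < align);        /* padding is minimal */
		}
	}
	printf("all checks passed\n");
	return 0;
}
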