Commit 5a5e02a6 authored by David Woodhouse

intel-iommu: Fix dma vs. mm page confusion with aligned_nrpages()

The aligned_nrpages() function rounds up to the next VM page, but
returns its result as a number of DMA pages.

Purely theoretical except on IA64, which doesn't boot with VT-d right
now anyway.
Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
parent 405d7ca5
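
For context, the unit mismatch is easiest to see on a configuration where the MM page is larger than the fixed 4K VT-d page. The sketch below is a standalone illustration, not the driver code: PAGE_SHIFT is set to 14 (IA64-style 16K pages) and the test address and length are made up, but the two helpers mirror aligned_nrpages() and dma_to_mm_pfn() as they behave after this patch.

/*
 * Standalone sketch of the page-unit mismatch, assuming IA64-style 16K
 * MM pages and the 4K VT-d page size.  Helper names mirror the driver's,
 * but this is an illustration, not the kernel code.
 */
#include <stdio.h>
#include <stddef.h>

#define VTD_PAGE_SHIFT	12			/* VT-d always uses 4K pages */
#define PAGE_SHIFT	14			/* assumed: IA64 with 16K MM pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* Returns a number of VTD pages, but aligned to MM page size */
static unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
{
	host_addr &= ~PAGE_MASK;
	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
}

/* Convert a VTD-page count into the MM-page count the IOVA allocator expects */
static unsigned long dma_to_mm_pfn(unsigned long dma_pfn)
{
	return dma_pfn >> (PAGE_SHIFT - VTD_PAGE_SHIFT);
}

int main(void)
{
	unsigned long paddr = 0x1000;	/* offset within one MM page (made up) */
	size_t len = 8192;		/* two VT-d pages worth of data (made up) */
	unsigned long vtd_pages = aligned_nrpages(paddr, len);

	/* One 16K MM page covers four 4K VT-d pages, so the two counts differ */
	printf("VTD pages: %lu, MM pages: %lu\n",
	       vtd_pages, dma_to_mm_pfn(vtd_pages));
	return 0;
}

With 16K MM pages every MM page spans four VT-d pages, so handing the raw aligned_nrpages() result to intel_alloc_iova() (which counts MM pages) would reserve roughly four times the IOVA space actually needed; on 4K-page x86 the two units coincide, which is why the bug was purely theoretical there.
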
@@ -2368,15 +2368,15 @@ int __init init_dmars(void)
 	return ret;
 }
 
+/* Returns a number of VTD pages, but aligned to MM page size */
 static inline unsigned long aligned_nrpages(unsigned long host_addr,
 					    size_t size)
 {
 	host_addr &= ~PAGE_MASK;
-	host_addr += size + PAGE_SIZE - 1;
-	return host_addr >> VTD_PAGE_SHIFT;
+	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
 }
 
+/* This takes a number of _MM_ pages, not VTD pages */
 static struct iova *intel_alloc_iova(struct device *dev,
 				     struct dmar_domain *domain,
 				     unsigned long nrpages, uint64_t dma_mask)
@@ -2506,7 +2506,8 @@ static dma_addr_t __intel_map_single(struct device *hwdev, phys_addr_t paddr,
 	iommu = domain_get_iommu(domain);
 	size = aligned_nrpages(paddr, size);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova)
 		goto error;
@@ -2797,7 +2798,8 @@ static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int ne
 	for_each_sg(sglist, sg, nelems, i)
 		size += aligned_nrpages(sg->offset, sg->length);
 
-	iova = intel_alloc_iova(hwdev, domain, size, pdev->dma_mask);
+	iova = intel_alloc_iova(hwdev, domain, dma_to_mm_pfn(size),
+				pdev->dma_mask);
 	if (!iova) {
 		sglist->dma_length = 0;
 		return 0;