Commit 832a90c3 authored by Joerg Roedel's avatar Joerg Roedel Committed by Ingo Molnar

AMD IOMMU: use coherent_dma_mask in alloc_coherent

The alloc_coherent implementation for AMD IOMMU currently uses
*dev->dma_mask by default. This patch changes it to prefer
dev->coherent_dma_mask if it is set.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent bbd001c7
...@@ -483,9 +483,10 @@ static unsigned long dma_mask_to_pages(unsigned long mask) ...@@ -483,9 +483,10 @@ static unsigned long dma_mask_to_pages(unsigned long mask)
static unsigned long dma_ops_alloc_addresses(struct device *dev, static unsigned long dma_ops_alloc_addresses(struct device *dev,
struct dma_ops_domain *dom, struct dma_ops_domain *dom,
unsigned int pages, unsigned int pages,
unsigned long align_mask) unsigned long align_mask,
u64 dma_mask)
{ {
unsigned long limit = dma_mask_to_pages(*dev->dma_mask); unsigned long limit = dma_mask_to_pages(dma_mask);
unsigned long address; unsigned long address;
unsigned long size = dom->aperture_size >> PAGE_SHIFT; unsigned long size = dom->aperture_size >> PAGE_SHIFT;
unsigned long boundary_size; unsigned long boundary_size;
...@@ -919,7 +920,8 @@ static dma_addr_t __map_single(struct device *dev, ...@@ -919,7 +920,8 @@ static dma_addr_t __map_single(struct device *dev,
phys_addr_t paddr, phys_addr_t paddr,
size_t size, size_t size,
int dir, int dir,
bool align) bool align,
u64 dma_mask)
{ {
dma_addr_t offset = paddr & ~PAGE_MASK; dma_addr_t offset = paddr & ~PAGE_MASK;
dma_addr_t address, start; dma_addr_t address, start;
...@@ -933,7 +935,8 @@ static dma_addr_t __map_single(struct device *dev, ...@@ -933,7 +935,8 @@ static dma_addr_t __map_single(struct device *dev,
if (align) if (align)
align_mask = (1UL << get_order(size)) - 1; align_mask = (1UL << get_order(size)) - 1;
address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask); address = dma_ops_alloc_addresses(dev, dma_dom, pages, align_mask,
dma_mask);
if (unlikely(address == bad_dma_address)) if (unlikely(address == bad_dma_address))
goto out; goto out;
...@@ -997,10 +1000,13 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, ...@@ -997,10 +1000,13 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
struct protection_domain *domain; struct protection_domain *domain;
u16 devid; u16 devid;
dma_addr_t addr; dma_addr_t addr;
u64 dma_mask;
if (!check_device(dev)) if (!check_device(dev))
return bad_dma_address; return bad_dma_address;
dma_mask = *dev->dma_mask;
get_device_resources(dev, &iommu, &domain, &devid); get_device_resources(dev, &iommu, &domain, &devid);
if (iommu == NULL || domain == NULL) if (iommu == NULL || domain == NULL)
...@@ -1008,7 +1014,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr, ...@@ -1008,7 +1014,8 @@ static dma_addr_t map_single(struct device *dev, phys_addr_t paddr,
return (dma_addr_t)paddr; return (dma_addr_t)paddr;
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false); addr = __map_single(dev, iommu, domain->priv, paddr, size, dir, false,
dma_mask);
if (addr == bad_dma_address) if (addr == bad_dma_address)
goto out; goto out;
...@@ -1080,10 +1087,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -1080,10 +1087,13 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
struct scatterlist *s; struct scatterlist *s;
phys_addr_t paddr; phys_addr_t paddr;
int mapped_elems = 0; int mapped_elems = 0;
u64 dma_mask;
if (!check_device(dev)) if (!check_device(dev))
return 0; return 0;
dma_mask = *dev->dma_mask;
get_device_resources(dev, &iommu, &domain, &devid); get_device_resources(dev, &iommu, &domain, &devid);
if (!iommu || !domain) if (!iommu || !domain)
...@@ -1095,7 +1105,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist, ...@@ -1095,7 +1105,8 @@ static int map_sg(struct device *dev, struct scatterlist *sglist,
paddr = sg_phys(s); paddr = sg_phys(s);
s->dma_address = __map_single(dev, iommu, domain->priv, s->dma_address = __map_single(dev, iommu, domain->priv,
paddr, s->length, dir, false); paddr, s->length, dir, false,
dma_mask);
if (s->dma_address) { if (s->dma_address) {
s->dma_length = s->length; s->dma_length = s->length;
...@@ -1168,6 +1179,7 @@ static void *alloc_coherent(struct device *dev, size_t size, ...@@ -1168,6 +1179,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
struct protection_domain *domain; struct protection_domain *domain;
u16 devid; u16 devid;
phys_addr_t paddr; phys_addr_t paddr;
u64 dma_mask = dev->coherent_dma_mask;
if (!check_device(dev)) if (!check_device(dev))
return NULL; return NULL;
...@@ -1187,10 +1199,13 @@ static void *alloc_coherent(struct device *dev, size_t size, ...@@ -1187,10 +1199,13 @@ static void *alloc_coherent(struct device *dev, size_t size,
return virt_addr; return virt_addr;
} }
if (!dma_mask)
dma_mask = *dev->dma_mask;
spin_lock_irqsave(&domain->lock, flags); spin_lock_irqsave(&domain->lock, flags);
*dma_addr = __map_single(dev, iommu, domain->priv, paddr, *dma_addr = __map_single(dev, iommu, domain->priv, paddr,
size, DMA_BIDIRECTIONAL, true); size, DMA_BIDIRECTIONAL, true, dma_mask);
if (*dma_addr == bad_dma_address) { if (*dma_addr == bad_dma_address) {
free_pages((unsigned long)virt_addr, get_order(size)); free_pages((unsigned long)virt_addr, get_order(size));
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment