Commit b4ebe606 authored by Christoph Hellwig

dma-direct: implement complete bus_dma_mask handling

Instead of rejecting devices with a too-small bus_dma_mask, we can handle them
by taking the bus_dma_mask into account for allocation and bounce buffering
decisions.
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 7d21ee4c
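
Before the diff, a minimal user-space sketch (not part of the patch) of the check this commit moves to: the effective DMA limit becomes the smaller non-zero value of the device's dma_mask and the bus_dma_mask, so a 64-bit-capable device behind a 32-bit-limited bus is treated as only 32-bit addressable and its buffers must be bounced or allocated low. The toy_device struct and the sample addresses are hypothetical stand-ins; the real struct device keeps dma_mask behind a pointer.

/*
 * Toy model (not kernel code) of the combined mask check: the effective
 * limit is the smaller of the device's dma_mask and the bus_dma_mask,
 * with a zero bus_dma_mask meaning "no bus-imposed limit".
 */
#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

struct toy_device {                     /* hypothetical stand-in for struct device */
        uint64_t dma_mask;              /* what the device itself can address */
        uint64_t bus_dma_mask;          /* bus/interconnect limit, 0 = none */
};

/* same idea as the kernel's min_not_zero(): a zero operand is ignored */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
        if (a == 0)
                return b;
        if (b == 0)
                return a;
        return a < b ? a : b;
}

/* mirrors the reworked dma_capable(): test against the combined mask */
static bool toy_dma_capable(const struct toy_device *dev, uint64_t addr, size_t size)
{
        return addr + size - 1 <= min_not_zero(dev->dma_mask, dev->bus_dma_mask);
}

int main(void)
{
        /* 64-bit capable device sitting behind a 32-bit limited bus */
        struct toy_device dev = {
                .dma_mask     = ~0ULL,          /* like DMA_BIT_MASK(64) */
                .bus_dma_mask = 0xffffffffULL,  /* like DMA_BIT_MASK(32) */
        };

        /* a 4 KiB buffer below 4G passes; one above 4G does not */
        printf("1G buffer ok: %d\n", toy_dma_capable(&dev, 1ULL << 30, 4096));
        printf("5G buffer ok: %d\n", toy_dma_capable(&dev, 5ULL << 30, 4096));
        return 0;
}

With bus_dma_mask left at zero the combined mask degenerates to the plain device mask, preserving the old behaviour for buses that impose no limit.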
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -27,7 +27,8 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return false;
 
-	return addr + size - 1 <= *dev->dma_mask;
+	return addr + size - 1 <=
+		min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
 }
 
 #endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -44,10 +44,11 @@ check_addr(struct device *dev, dma_addr_t dma_addr, size_t size,
 			return false;
 		}
 
-		if (*dev->dma_mask >= DMA_BIT_MASK(32)) {
+		if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_mask) {
 			dev_err(dev,
-				"%s: overflow %pad+%zu of device mask %llx\n",
-				caller, &dma_addr, size, *dev->dma_mask);
+				"%s: overflow %pad+%zu of device mask %llx bus mask %llx\n",
+				caller, &dma_addr, size,
+				*dev->dma_mask, dev->bus_dma_mask);
 		}
 		return false;
 	}
@@ -66,12 +67,18 @@ u64 dma_direct_get_required_mask(struct device *dev)
 {
 	u64 max_dma = phys_to_dma_direct(dev, (max_pfn - 1) << PAGE_SHIFT);
 
+	if (dev->bus_dma_mask && dev->bus_dma_mask < max_dma)
+		max_dma = dev->bus_dma_mask;
+
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
 static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 		u64 *phys_mask)
 {
+	if (dev->bus_dma_mask && dev->bus_dma_mask < dma_mask)
+		dma_mask = dev->bus_dma_mask;
+
 	if (force_dma_unencrypted())
 		*phys_mask = __dma_to_phys(dev, dma_mask);
 	else
@@ -88,7 +95,7 @@ static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
-			dev->coherent_dma_mask;
+		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_mask);
 }
 
 void *dma_direct_alloc_pages(struct device *dev, size_t size,
@@ -292,12 +299,6 @@ int dma_direct_supported(struct device *dev, u64 mask)
 	if (mask < phys_to_dma(dev, DMA_BIT_MASK(32)))
 		return 0;
 #endif
-	/*
-	 * Upstream PCI/PCIe bridges or SoC interconnects may not carry
-	 * as many DMA address bits as the device itself supports.
-	 */
-	if (dev->bus_dma_mask && mask > dev->bus_dma_mask)
-		return 0;
 	return 1;
 }