Commit 7d21ee4c authored by Christoph Hellwig

dma-direct: refine dma_direct_alloc zone selection

We need to take the DMA offset and encryption bit into account when
selecting a zone.  Use the opportunity to factor out the zone
selection into a helper for reuse.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Robin Murphy <robin.murphy@arm.com>
parent a20bb058
@@ -69,6 +69,22 @@ u64 dma_direct_get_required_mask(struct device *dev)
 	return (1ULL << (fls64(max_dma) - 1)) * 2 - 1;
 }
 
+static gfp_t __dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask,
+		u64 *phys_mask)
+{
+	if (force_dma_unencrypted())
+		*phys_mask = __dma_to_phys(dev, dma_mask);
+	else
+		*phys_mask = dma_to_phys(dev, dma_mask);
+
+	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
+	if (*phys_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
+		return GFP_DMA;
+	if (*phys_mask <= DMA_BIT_MASK(32))
+		return GFP_DMA32;
+	return 0;
+}
+
 static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 {
 	return phys_to_dma_direct(dev, phys) + size - 1 <=
@@ -81,17 +97,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	int page_order = get_order(size);
 	struct page *page = NULL;
+	u64 phys_mask;
 	void *ret;
 
 	/* we always manually zero the memory once we are done: */
 	gfp &= ~__GFP_ZERO;
-
-	/* GFP_DMA32 and GFP_DMA are no ops without the corresponding zones: */
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(ARCH_ZONE_DMA_BITS))
-		gfp |= GFP_DMA;
-	if (dev->coherent_dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
-		gfp |= GFP_DMA32;
-
+	gfp |= __dma_direct_optimal_gfp_mask(dev, dev->coherent_dma_mask,
+			&phys_mask);
 again:
 	/* CMA can be used only in the context which permits sleeping */
 	if (gfpflags_allow_blocking(gfp)) {
@@ -110,15 +122,14 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 		page = NULL;
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA32) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(64) &&
+		    phys_mask < DMA_BIT_MASK(64) &&
 		    !(gfp & (GFP_DMA32 | GFP_DMA))) {
 			gfp |= GFP_DMA32;
 			goto again;
 		}
 
 		if (IS_ENABLED(CONFIG_ZONE_DMA) &&
-		    dev->coherent_dma_mask < DMA_BIT_MASK(32) &&
-		    !(gfp & GFP_DMA)) {
+		    phys_mask < DMA_BIT_MASK(32) && !(gfp & GFP_DMA)) {
 			gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
 			goto again;
 		}
 ...
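
For context, here is a small standalone sketch (not kernel code) of why the zone choice has to be based on the device's limit translated to a CPU physical address rather than on the raw coherent_dma_mask. The offset value, the EXAMPLE_ZONE_DMA_BITS value, and the helpers example_dma_to_phys()/optimal_zone() are made up for illustration; the actual patch performs the translation with dma_to_phys()/__dma_to_phys() as shown in the diff above.

/*
 * Standalone illustration (not kernel code).  Compile with a plain C
 * compiler; all values below are hypothetical.
 */
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define DMA_BIT_MASK(n)		(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
#define EXAMPLE_ZONE_DMA_BITS	24	/* stand-in for ARCH_ZONE_DMA_BITS */

/* Hypothetical bus-to-CPU offset: bus address 0 maps to physical 2G. */
static const uint64_t example_dma_offset = 0x80000000ULL;

static uint64_t example_dma_to_phys(uint64_t dma_addr)
{
	return dma_addr + example_dma_offset;
}

/* Mirrors the shape of __dma_direct_optimal_gfp_mask() in the patch. */
static const char *optimal_zone(uint64_t dma_mask, uint64_t *phys_mask)
{
	/* Translate the device-visible limit into a CPU physical limit. */
	*phys_mask = example_dma_to_phys(dma_mask);

	if (*phys_mask <= DMA_BIT_MASK(EXAMPLE_ZONE_DMA_BITS))
		return "GFP_DMA";
	if (*phys_mask <= DMA_BIT_MASK(32))
		return "GFP_DMA32";
	return "no zone flag";
}

int main(void)
{
	uint64_t phys_mask;

	/*
	 * A device advertising a 32-bit mask behind the 2G offset can reach
	 * physical memory up to roughly 6G, so deciding on the raw 32-bit
	 * mask alone (the old code) would force GFP_DMA32 unnecessarily.
	 */
	printf("32-bit mask -> %s (phys limit 0x%" PRIx64 ")\n",
	       optimal_zone(DMA_BIT_MASK(32), &phys_mask), phys_mask);
	return 0;
}

The same reasoning applies in the other direction: with memory encryption or a negative offset the effective physical limit can be lower than the raw mask suggests, which is why the helper also returns the translated phys_mask for the retry checks later in dma_direct_alloc_pages().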