Commit 76a19940 authored by David Rientjes, committed by Christoph Hellwig

dma-direct: atomic allocations must come from atomic coherent pools

When a device requires unencrypted memory and the context does not allow
blocking, memory must be returned from the atomic coherent pools.
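
As an illustration only (not part of this patch; the function name is invented), this is the kind of caller that ends up on the atomic-pool path: a device whose DMA memory must be unencrypted, allocating coherent memory from a context that cannot sleep:

```c
#include <linux/dma-mapping.h>
#include <linux/gfp.h>

/*
 * Hypothetical driver helper, for illustration only: called from an
 * interrupt handler (or under a spinlock) for a device whose DMA memory
 * must be unencrypted, e.g. in an SEV guest.  GFP_ATOMIC forbids
 * blocking, so dma-direct has to satisfy this from an atomic pool.
 */
static void *example_alloc_ring_entry(struct device *dev, size_t size,
				      dma_addr_t *dma_handle)
{
	return dma_alloc_coherent(dev, size, dma_handle, GFP_ATOMIC);
}
```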

This avoids the remap when CONFIG_DMA_DIRECT_REMAP is not enabled and the
config only requires CONFIG_DMA_COHERENT_POOL.  This will be used for
CONFIG_AMD_MEM_ENCRYPT in a subsequent patch.

Keep all memory in these pools unencrypted.  If set_memory_decrypted()
fails, the memory is not added to the pool.  If adding the memory to the
genpool fails and the subsequent set_memory_encrypted() also fails, there
is no alternative other than leaking the memory.
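
Condensed into a hypothetical helper (the function name is invented; the real change is the atomic_pool_expand() hunk in the diff below), the ordering described above looks roughly like this:

```c
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/set_memory.h>

/*
 * Illustrative sketch of the decrypt -> add-to-genpool -> re-encrypt-or-leak
 * ordering described above; not the patched function itself.
 */
static int example_pool_add_unencrypted(struct gen_pool *pool,
					struct page *page, void *addr,
					unsigned int order, size_t pool_size)
{
	int ret;

	/* Pool memory stays unencrypted for its whole lifetime. */
	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
				   1 << order);
	if (ret)
		goto free_page;	/* nothing was added, just free the pages */

	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
				pool_size, NUMA_NO_NODE);
	if (!ret)
		return 0;

	/* Adding failed: restore encryption before handing the pages back. */
	if (WARN_ON_ONCE(set_memory_encrypted((unsigned long)page_to_virt(page),
					      1 << order)))
		return ret;	/* cannot re-encrypt, so leak on purpose */

free_page:
	__free_pages(page, order);
	return ret;
}
```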
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
parent 54adadf9
kernel/dma/direct.c

```diff
@@ -76,6 +76,39 @@ static bool dma_coherent_ok(struct device *dev, phys_addr_t phys, size_t size)
 		min_not_zero(dev->coherent_dma_mask, dev->bus_dma_limit);
 }
 
+/*
+ * Decrypting memory is allowed to block, so if this device requires
+ * unencrypted memory it must come from atomic pools.
+ */
+static inline bool dma_should_alloc_from_pool(struct device *dev, gfp_t gfp,
+					      unsigned long attrs)
+{
+	if (!IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return false;
+	if (gfpflags_allow_blocking(gfp))
+		return false;
+	if (force_dma_unencrypted(dev))
+		return true;
+	if (!IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return false;
+	if (dma_alloc_need_uncached(dev, attrs))
+		return true;
+	return false;
+}
+
+static inline bool dma_should_free_from_pool(struct device *dev,
+					     unsigned long attrs)
+{
+	if (IS_ENABLED(CONFIG_DMA_COHERENT_POOL))
+		return true;
+	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
+	    !force_dma_unencrypted(dev))
+		return false;
+	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP))
+		return true;
+	return false;
+}
+
 struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
 		gfp_t gfp, unsigned long attrs)
 {
@@ -125,9 +158,7 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
 	struct page *page;
 	void *ret;
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_alloc_need_uncached(dev, attrs) &&
-	    !gfpflags_allow_blocking(gfp)) {
+	if (dma_should_alloc_from_pool(dev, gfp, attrs)) {
 		ret = dma_alloc_from_pool(dev, PAGE_ALIGN(size), &page, gfp);
 		if (!ret)
 			return NULL;
@@ -204,6 +235,11 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 {
 	unsigned int page_order = get_order(size);
 
+	/* If cpu_addr is not from an atomic pool, dma_free_from_pool() fails */
+	if (dma_should_free_from_pool(dev, attrs) &&
+	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
+		return;
+
 	if ((attrs & DMA_ATTR_NO_KERNEL_MAPPING) &&
 	    !force_dma_unencrypted(dev)) {
 		/* cpu_addr is a struct page cookie, not a kernel address */
@@ -211,10 +247,6 @@ void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
 		return;
 	}
 
-	if (IS_ENABLED(CONFIG_DMA_DIRECT_REMAP) &&
-	    dma_free_from_pool(dev, cpu_addr, PAGE_ALIGN(size)))
-		return;
-
 	if (force_dma_unencrypted(dev))
 		set_memory_encrypted((unsigned long)cpu_addr, 1 << page_order);
```
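
The dma_free_from_pool() call added above is safe to attempt unconditionally whenever CONFIG_DMA_COHERENT_POOL is enabled because the pool lookup itself rejects addresses that were not handed out by an atomic pool. Roughly, and only as a sketch built on the genalloc API (not necessarily the exact code in this tree):

```c
#include <linux/genalloc.h>

/*
 * Sketch of the "fails for non-pool memory" behaviour relied on above:
 * only free through the genpool if the address actually belongs to it,
 * otherwise report false so the caller takes the normal free path.
 */
static bool example_free_from_pool(struct gen_pool *pool, void *start,
				   size_t size)
{
	if (!pool || !gen_pool_has_addr(pool, (unsigned long)start, size))
		return false;

	gen_pool_free(pool, (unsigned long)start, size);
	return true;
}
```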
kernel/dma/pool.c

```diff
@@ -8,6 +8,7 @@
 #include <linux/dma-contiguous.h>
 #include <linux/init.h>
 #include <linux/genalloc.h>
+#include <linux/set_memory.h>
 #include <linux/slab.h>
 #include <linux/workqueue.h>
 
@@ -53,22 +54,42 @@ static int atomic_pool_expand(struct gen_pool *pool, size_t pool_size,
 
 	arch_dma_prep_coherent(page, pool_size);
 
+#ifdef CONFIG_DMA_DIRECT_REMAP
 	addr = dma_common_contiguous_remap(page, pool_size,
 					   pgprot_dmacoherent(PAGE_KERNEL),
 					   __builtin_return_address(0));
 	if (!addr)
 		goto free_page;
-
+#else
+	addr = page_to_virt(page);
+#endif
+	/*
+	 * Memory in the atomic DMA pools must be unencrypted, the pools do not
+	 * shrink so no re-encryption occurs in dma_direct_free_pages().
+	 */
+	ret = set_memory_decrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (ret)
+		goto remove_mapping;
 	ret = gen_pool_add_virt(pool, (unsigned long)addr, page_to_phys(page),
 				pool_size, NUMA_NO_NODE);
 	if (ret)
-		goto remove_mapping;
+		goto encrypt_mapping;
 
 	return 0;
 
+encrypt_mapping:
+	ret = set_memory_encrypted((unsigned long)page_to_virt(page),
+				   1 << order);
+	if (WARN_ON_ONCE(ret)) {
+		/* Decrypt succeeded but encrypt failed, purposely leak */
+		goto out;
+	}
 remove_mapping:
+#ifdef CONFIG_DMA_DIRECT_REMAP
 	dma_common_free_remap(addr, pool_size);
-free_page:
+#endif
+free_page: __maybe_unused
 	if (!dma_release_from_contiguous(NULL, page, 1 << order))
 		__free_pages(page, order);
 out:
```
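
For context, the allocation side gated by dma_should_alloc_from_pool() carves memory out of this genpool. A rough sketch, again built only from the generic genalloc API and assumed to approximate (not reproduce) dma_alloc_from_pool():

```c
#include <linux/genalloc.h>
#include <linux/mm.h>
#include <linux/string.h>

/*
 * Illustrative counterpart to the pool expansion above: allocate from the
 * atomic pool, report the backing page, and hand back a zeroed mapping.
 */
static void *example_alloc_from_pool(struct gen_pool *pool, size_t size,
				     struct page **ret_page)
{
	unsigned long vaddr;
	phys_addr_t phys;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	phys = gen_pool_virt_to_phys(pool, vaddr);
	*ret_page = pfn_to_page(__phys_to_pfn(phys));
	memset((void *)vaddr, 0, size);
	return (void *)vaddr;
}
```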