Commit 1b504402 authored by Linus Torvalds

Merge tag 'dma-mapping-5.8-3' of git://git.infradead.org/users/hch/dma-mapping

Pull dma-mapping fixes from Christoph Hellwig:
 "Fixes for the SEV atomic pool (Geert Uytterhoeven and David Rientjes)"

* tag 'dma-mapping-5.8-3' of git://git.infradead.org/users/hch/dma-mapping:
  dma-pool: decouple DMA_REMAP from DMA_COHERENT_POOL
  dma-pool: fix too large DMA pools on medium memory size systems
parents 69119673 dbed452a
--- a/kernel/dma/Kconfig
+++ b/kernel/dma/Kconfig
@@ -73,18 +73,18 @@ config SWIOTLB
 config DMA_NONCOHERENT_MMAP
 	bool
 
+config DMA_COHERENT_POOL
+	bool
+
 config DMA_REMAP
+	bool
 	depends on MMU
 	select GENERIC_ALLOCATOR
 	select DMA_NONCOHERENT_MMAP
-	bool
-
-config DMA_COHERENT_POOL
-	bool
-	select DMA_REMAP
 
 config DMA_DIRECT_REMAP
 	bool
+	select DMA_REMAP
 	select DMA_COHERENT_POOL
 
 config DMA_CMA
......
--- a/kernel/dma/pool.c
+++ b/kernel/dma/pool.c
@@ -175,10 +175,9 @@ static int __init dma_atomic_pool_init(void)
 	 * sizes to 128KB per 1GB of memory, min 128KB, max MAX_ORDER-1.
 	 */
 	if (!atomic_pool_size) {
-		atomic_pool_size = max(totalram_pages() >> PAGE_SHIFT, 1UL) *
-					SZ_128K;
-		atomic_pool_size = min_t(size_t, atomic_pool_size,
-					 1 << (PAGE_SHIFT + MAX_ORDER-1));
+		unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);
+		pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);
+		atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);
 	}
 	INIT_WORK(&atomic_pool_work, atomic_pool_work_fn);
......
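
For illustration, the sizing change in dma_atomic_pool_init() can be checked outside the kernel. The sketch below is a standalone userspace program, not kernel code: it hard-codes typical x86-64 values as assumptions (PAGE_SHIFT = 12, MAX_ORDER = 11, so MAX_ORDER_NR_PAGES = 1024 pages = 4 MiB) and reimplements both formulas. The old default shifted a page count right by PAGE_SHIFT a second time, which works out to 128 KiB of pool per 16 MiB of RAM instead of the intended 128 KiB per 1 GiB.

/*
 * Standalone sketch, not kernel code: compare the old and new default
 * atomic pool sizing under assumed values PAGE_SHIFT = 12, MAX_ORDER = 11
 * (typical x86-64), i.e. MAX_ORDER_NR_PAGES = 1024 pages = 4 MiB.
 */
#include <stdio.h>
#include <stddef.h>

#define PAGE_SHIFT		12
#define PAGE_SIZE		(1UL << PAGE_SHIFT)
#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))
#define SZ_128K			(128UL * 1024)
#define SZ_1G			(1024UL * 1024 * 1024)

/* Old default: shifts a page count by PAGE_SHIFT again, so the pool grows
 * by 128 KiB per 16 MiB of RAM until it hits the MAX_ORDER-1 clamp. */
static size_t old_pool_size(unsigned long totalram_pages)
{
	unsigned long scale = totalram_pages >> PAGE_SHIFT;
	size_t size = (scale ? scale : 1UL) * SZ_128K;
	size_t clamp = 1UL << (PAGE_SHIFT + MAX_ORDER - 1);

	return size < clamp ? size : clamp;
}

/* New default: one pool page per (SZ_1G / SZ_128K) = 8192 RAM pages, i.e.
 * 128 KiB of pool per 1 GiB of RAM, clamped to [128 KiB, 4 MiB]. */
static size_t new_pool_size(unsigned long totalram_pages)
{
	unsigned long pages = totalram_pages / (SZ_1G / SZ_128K);
	size_t size;

	if (pages > MAX_ORDER_NR_PAGES)
		pages = MAX_ORDER_NR_PAGES;
	size = (size_t)pages << PAGE_SHIFT;
	return size > SZ_128K ? size : SZ_128K;
}

int main(void)
{
	/* "Medium memory size" systems sit in the 128 MiB to 1 GiB range. */
	static const unsigned long ram_mib[] = { 128, 256, 1024, 4096 };

	for (size_t i = 0; i < sizeof(ram_mib) / sizeof(ram_mib[0]); i++) {
		unsigned long pages = ram_mib[i] * ((1024UL * 1024) / PAGE_SIZE);

		printf("%5lu MiB RAM: old %5zu KiB, new %4zu KiB\n", ram_mib[i],
		       old_pool_size(pages) / 1024, new_pool_size(pages) / 1024);
	}
	return 0;
}

Under these assumed constants, a 256 MiB system gets a 2 MiB pool from the old formula but the intended 128 KiB minimum from the new one, and a 1 GiB system drops from the 4 MiB MAX_ORDER-1 clamp to 128 KiB; only at several GiB of RAM does the new default grow past the minimum (512 KiB at 4 GiB).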