Commit 5ea3b1b2 authored by Akinobu Mita, committed by Linus Torvalds

cma: add placement specifier for "cma=" kernel parameter

Currently, the "cma=" kernel parameter only specifies the size of the CMA
area; there is no way to say where it should be placed.  We want to locate
CMA below 4GB for devices that support only 32-bit addressing on 64-bit
systems without an IOMMU.

This change makes the placement specifiable by extending the "cma=" kernel
parameter.

Examples (the parsing is sketched below):
 1. Reserve 64MB of CMA anywhere below 4GB: "cma=64M@0-4G"
 2. Reserve 64MB of CMA at exactly 512MB: "cma=64M@512M"
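The accepted syntax can be mocked up in userspace (an illustrative sketch
only, not kernel code: memparse() is re-implemented here in a simplified
form handling only the K/M/G suffixes; the real parser is early_cma() in
the diff below):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

/* simplified stand-in for the kernel's memparse() */
static uint64_t memparse(const char *ptr, char **endp)
{
	uint64_t ret = strtoull(ptr, endp, 0);

	switch (**endp) {
	case 'G': ret <<= 10;	/* fall through */
	case 'M': ret <<= 10;	/* fall through */
	case 'K': ret <<= 10;
		(*endp)++;
	}
	return ret;
}

int main(void)
{
	const char *args[] = { "64M@0-4G", "64M@512M", "64M" };

	for (int i = 0; i < 3; i++) {
		char *p = (char *)args[i];
		uint64_t size, base = 0, limit = 0;

		size = memparse(p, &p);
		if (*p == '@') {
			base = memparse(p + 1, &p);
			if (*p == '-')
				limit = memparse(p + 1, &p);
			else	/* no "-end": end is base + size */
				limit = base + size;
		}
		printf("cma=%s -> size=%#llx base=%#llx limit=%#llx\n",
		       args[i], (unsigned long long)size,
		       (unsigned long long)base, (unsigned long long)limit);
	}
	return 0;
}

Note that "64M@512M" yields limit == base + size, which is exactly the
condition the patch later uses to treat the placement as fixed.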

Note that the DMA contiguous memory allocator on x86 assumes that
page_address() works for the pages it allocates.  This change therefore
limits the end address of the contiguous memory area to max_pfn_mapped,
via the argument of dma_contiguous_reserve(), to keep the area out of
highmem.
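The way the command-line placement combines with the architecture limit
can also be sketched in userspace (illustration only, not kernel code:
min_not_zero() is re-implemented below, and a hard-coded 4GB stands in
for max_pfn_mapped << PAGE_SHIFT):

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* simplified stand-in for the kernel's min_not_zero() */
static uint64_t min_not_zero(uint64_t a, uint64_t b)
{
	if (!a)
		return b;
	if (!b)
		return a;
	return a < b ? a : b;
}

int main(void)
{
	uint64_t arch_limit = 1ULL << 32;      /* max_pfn_mapped << PAGE_SHIFT */
	uint64_t size = 64ULL << 20;           /* "cma=64M@512M" */
	uint64_t base = 512ULL << 20;
	uint64_t limit_cmdline = base + size;  /* no "-end" given */

	/* the effective limit is the smaller non-zero of the two */
	uint64_t selected_limit = min_not_zero(limit_cmdline, arch_limit);
	/* base + size == limit requests placement at exactly base */
	bool fixed = (base + size == limit_cmdline);

	printf("limit=%#llx fixed=%s\n",
	       (unsigned long long)selected_limit,
	       fixed ? "true" : "false");
	return 0;
}

This mirrors the logic added to dma_contiguous_reserve() in the diff
below.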
Signed-off-by: Akinobu Mita <akinobu.mita@gmail.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: David Woodhouse <dwmw2@infradead.org>
Cc: Don Dutile <ddutile@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 2bfc2862
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -630,8 +630,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 			Also note the kernel might malfunction if you disable
 			some critical bits.
 
-	cma=nn[MG]	[ARM,KNL]
-			Sets the size of kernel global memory area for contiguous
-			memory allocations. For more information, see
+	cma=nn[MG]@[start[MG][-end[MG]]]
+			[ARM,X86,KNL]
+			Sets the size of kernel global memory area for
+			contiguous memory allocations and optionally the
+			placement constraint by the physical address range of
+			memory allocations. For more information, see
 			include/linux/dma-contiguous.h
 
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1119,7 +1119,7 @@ void __init setup_arch(char **cmdline_p)
 	setup_real_mode();
 
 	memblock_set_current_limit(get_max_mapped());
-	dma_contiguous_reserve(0);
+	dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
 
 	/*
 	 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -60,11 +60,22 @@ struct cma *dma_contiguous_default_area;
  */
 static const phys_addr_t size_bytes = CMA_SIZE_MBYTES * SZ_1M;
 static phys_addr_t size_cmdline = -1;
+static phys_addr_t base_cmdline;
+static phys_addr_t limit_cmdline;
 
 static int __init early_cma(char *p)
 {
 	pr_debug("%s(%s)\n", __func__, p);
 	size_cmdline = memparse(p, &p);
+	if (*p != '@')
+		return 0;
+	base_cmdline = memparse(p + 1, &p);
+	if (*p != '-') {
+		limit_cmdline = base_cmdline + size_cmdline;
+		return 0;
+	}
+	limit_cmdline = memparse(p + 1, &p);
+
 	return 0;
 }
 early_param("cma", early_cma);
@@ -108,11 +119,18 @@ static inline __maybe_unused phys_addr_t cma_early_percent_memory(void)
 void __init dma_contiguous_reserve(phys_addr_t limit)
 {
 	phys_addr_t selected_size = 0;
+	phys_addr_t selected_base = 0;
+	phys_addr_t selected_limit = limit;
+	bool fixed = false;
 
 	pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
 
 	if (size_cmdline != -1) {
 		selected_size = size_cmdline;
+		selected_base = base_cmdline;
+		selected_limit = min_not_zero(limit_cmdline, limit);
+		if (base_cmdline + size_cmdline == limit_cmdline)
+			fixed = true;
 	} else {
 #ifdef CONFIG_CMA_SIZE_SEL_MBYTES
 		selected_size = size_bytes;
@@ -129,10 +147,12 @@ void __init dma_contiguous_reserve(phys_addr_t limit)
 	if (selected_size && !dma_contiguous_default_area) {
 		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
 			 (unsigned long)selected_size / SZ_1M);
 
-		dma_contiguous_reserve_area(selected_size, 0, limit,
-					    &dma_contiguous_default_area);
+		dma_contiguous_reserve_area(selected_size, selected_base,
+					    selected_limit,
+					    &dma_contiguous_default_area,
+					    fixed);
 	}
-};
+}
 
 static DEFINE_MUTEX(cma_mutex);
@@ -189,15 +209,20 @@ core_initcall(cma_init_reserved_areas);
  * @base: Base address of the reserved area optional, use 0 for any
  * @limit: End address of the reserved memory (optional, 0 for any).
  * @res_cma: Pointer to store the created cma region.
+ * @fixed: hint about where to place the reserved area
  *
  * This function reserves memory from early allocator. It should be
  * called by arch specific code once the early allocator (memblock or bootmem)
  * has been activated and all other subsystems have already allocated/reserved
  * memory. This function allows to create custom reserved areas for specific
  * devices.
+ *
+ * If @fixed is true, reserve contiguous area at exactly @base.  If false,
+ * reserve in range from @base to @limit.
  */
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma)
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
 {
 	struct cma *cma = &cma_areas[cma_area_count];
 	phys_addr_t alignment;
@@ -223,18 +248,15 @@ int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
 	limit &= ~(alignment - 1);
 
 	/* Reserve memory */
-	if (base) {
+	if (base && fixed) {
 		if (memblock_is_region_reserved(base, size) ||
 		    memblock_reserve(base, size) < 0) {
 			ret = -EBUSY;
 			goto err;
 		}
 	} else {
-		/*
-		 * Use __memblock_alloc_base() since
-		 * memblock_alloc_base() panic()s.
-		 */
-		phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
+		phys_addr_t addr = memblock_alloc_range(size, alignment, base,
+							limit);
 		if (!addr) {
 			ret = -ENOMEM;
 			goto err;
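For illustration, a hypothetical arch-side caller of the extended
interface could look like the fragment below (a sketch only, not part of
this patch; SZ_1M and SZ_1G come from include/linux/sizes.h, and error
handling is elided):

	struct cma *cma;
	int ret;

	/* 64MB anywhere below 4GB: memblock picks the exact address */
	ret = dma_contiguous_reserve_area(64 * SZ_1M, 0, 4ULL * SZ_1G,
					  &cma, false);

	/* 64MB at exactly 512MB: -EBUSY if the range is already reserved */
	ret = dma_contiguous_reserve_area(64 * SZ_1M, 512 * SZ_1M,
					  512 * SZ_1M + 64 * SZ_1M,
					  &cma, true);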
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -88,7 +88,8 @@ static inline void dma_contiguous_set_default(struct cma *cma)
 void dma_contiguous_reserve(phys_addr_t addr_limit);
 
 int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma);
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed);
 
 /**
  * dma_declare_contiguous() - reserve area for contiguous memory handling
@@ -108,7 +109,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
 {
 	struct cma *cma;
 	int ret;
-	ret = dma_contiguous_reserve_area(size, base, limit, &cma);
+	ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
 	if (ret == 0)
 		dev_set_cma_area(dev, cma);
 
@@ -136,7 +137,9 @@ static inline void dma_contiguous_set_default(struct cma *cma) { }
 static inline void dma_contiguous_reserve(phys_addr_t limit) { }
 
 static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
-				       phys_addr_t limit, struct cma **res_cma) {
+				       phys_addr_t limit, struct cma **res_cma,
+				       bool fixed)
+{
 	return -ENOSYS;
 }
 