Commit 999c17e3 authored by Santosh Shilimkar's avatar Santosh Shilimkar Committed by Linus Torvalds

mm/percpu.c: use memblock apis for early memory allocations

Switch to memblock interfaces for the early memory allocator instead of
the bootmem allocator.  No functional change in behavior from the
bootmem users' point of view.

Archs already converted to NO_BOOTMEM now directly use memblock
interfaces instead of bootmem wrappers built on top of memblock.  For
the archs which still use bootmem, these new APIs simply fall back to
the existing bootmem APIs.
Signed-off-by: default avatarSantosh Shilimkar <santosh.shilimkar@ti.com>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: Grygorii Strashko <grygorii.strashko@ti.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Paul Walmsley <paul@pwsan.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: Russell King <linux@arm.linux.org.uk>
Cc: Tejun Heo <tj@kernel.org>
Cc: Tony Lindgren <tony@atomide.com>
Cc: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: default avatarAndrew Morton <akpm@linux-foundation.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@linux-foundation.org>
parent 0d036e9e
...@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, ...@@ -1063,7 +1063,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
__alignof__(ai->groups[0].cpu_map[0])); __alignof__(ai->groups[0].cpu_map[0]));
ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]); ai_size = base_size + nr_units * sizeof(ai->groups[0].cpu_map[0]);
ptr = alloc_bootmem_nopanic(PFN_ALIGN(ai_size)); ptr = memblock_virt_alloc_nopanic(PFN_ALIGN(ai_size), 0);
if (!ptr) if (!ptr)
return NULL; return NULL;
ai = ptr; ai = ptr;
...@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups, ...@@ -1088,7 +1088,7 @@ struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
*/ */
void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai)
{ {
free_bootmem(__pa(ai), ai->__ai_size); memblock_free_early(__pa(ai), ai->__ai_size);
} }
/** /**
...@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, ...@@ -1246,10 +1246,12 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0); PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
/* process group information and build config tables accordingly */ /* process group information and build config tables accordingly */
group_offsets = alloc_bootmem(ai->nr_groups * sizeof(group_offsets[0])); group_offsets = memblock_virt_alloc(ai->nr_groups *
group_sizes = alloc_bootmem(ai->nr_groups * sizeof(group_sizes[0])); sizeof(group_offsets[0]), 0);
unit_map = alloc_bootmem(nr_cpu_ids * sizeof(unit_map[0])); group_sizes = memblock_virt_alloc(ai->nr_groups *
unit_off = alloc_bootmem(nr_cpu_ids * sizeof(unit_off[0])); sizeof(group_sizes[0]), 0);
unit_map = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_map[0]), 0);
unit_off = memblock_virt_alloc(nr_cpu_ids * sizeof(unit_off[0]), 0);
for (cpu = 0; cpu < nr_cpu_ids; cpu++) for (cpu = 0; cpu < nr_cpu_ids; cpu++)
unit_map[cpu] = UINT_MAX; unit_map[cpu] = UINT_MAX;
...@@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, ...@@ -1311,7 +1313,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* empty chunks. * empty chunks.
*/ */
pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2; pcpu_nr_slots = __pcpu_size_to_slot(pcpu_unit_size) + 2;
pcpu_slot = alloc_bootmem(pcpu_nr_slots * sizeof(pcpu_slot[0])); pcpu_slot = memblock_virt_alloc(
pcpu_nr_slots * sizeof(pcpu_slot[0]), 0);
for (i = 0; i < pcpu_nr_slots; i++) for (i = 0; i < pcpu_nr_slots; i++)
INIT_LIST_HEAD(&pcpu_slot[i]); INIT_LIST_HEAD(&pcpu_slot[i]);
...@@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, ...@@ -1322,7 +1325,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
* covers static area + reserved area (mostly used for module * covers static area + reserved area (mostly used for module
* static percpu allocation). * static percpu allocation).
*/ */
schunk = alloc_bootmem(pcpu_chunk_struct_size); schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&schunk->list); INIT_LIST_HEAD(&schunk->list);
schunk->base_addr = base_addr; schunk->base_addr = base_addr;
schunk->map = smap; schunk->map = smap;
...@@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, ...@@ -1346,7 +1349,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
/* init dynamic chunk if necessary */ /* init dynamic chunk if necessary */
if (dyn_size) { if (dyn_size) {
dchunk = alloc_bootmem(pcpu_chunk_struct_size); dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
INIT_LIST_HEAD(&dchunk->list); INIT_LIST_HEAD(&dchunk->list);
dchunk->base_addr = base_addr; dchunk->base_addr = base_addr;
dchunk->map = dmap; dchunk->map = dmap;
...@@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, ...@@ -1626,7 +1629,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
size_sum = ai->static_size + ai->reserved_size + ai->dyn_size; size_sum = ai->static_size + ai->reserved_size + ai->dyn_size;
areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *)); areas_size = PFN_ALIGN(ai->nr_groups * sizeof(void *));
areas = alloc_bootmem_nopanic(areas_size); areas = memblock_virt_alloc_nopanic(areas_size, 0);
if (!areas) { if (!areas) {
rc = -ENOMEM; rc = -ENOMEM;
goto out_free; goto out_free;
...@@ -1712,7 +1715,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size, ...@@ -1712,7 +1715,7 @@ int __init pcpu_embed_first_chunk(size_t reserved_size, size_t dyn_size,
out_free: out_free:
pcpu_free_alloc_info(ai); pcpu_free_alloc_info(ai);
if (areas) if (areas)
free_bootmem(__pa(areas), areas_size); memblock_free_early(__pa(areas), areas_size);
return rc; return rc;
} }
#endif /* BUILD_EMBED_FIRST_CHUNK */ #endif /* BUILD_EMBED_FIRST_CHUNK */
...@@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1760,7 +1763,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
/* unaligned allocations can't be freed, round up to page size */ /* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() * pages_size = PFN_ALIGN(unit_pages * num_possible_cpus() *
sizeof(pages[0])); sizeof(pages[0]));
pages = alloc_bootmem(pages_size); pages = memblock_virt_alloc(pages_size, 0);
/* allocate pages */ /* allocate pages */
j = 0; j = 0;
...@@ -1823,7 +1826,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1823,7 +1826,7 @@ int __init pcpu_page_first_chunk(size_t reserved_size,
free_fn(page_address(pages[j]), PAGE_SIZE); free_fn(page_address(pages[j]), PAGE_SIZE);
rc = -ENOMEM; rc = -ENOMEM;
out_free_ar: out_free_ar:
free_bootmem(__pa(pages), pages_size); memblock_free_early(__pa(pages), pages_size);
pcpu_free_alloc_info(ai); pcpu_free_alloc_info(ai);
return rc; return rc;
} }
...@@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset); ...@@ -1848,12 +1851,13 @@ EXPORT_SYMBOL(__per_cpu_offset);
static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size, static void * __init pcpu_dfl_fc_alloc(unsigned int cpu, size_t size,
size_t align) size_t align)
{ {
return __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS)); return memblock_virt_alloc_from_nopanic(
size, align, __pa(MAX_DMA_ADDRESS));
} }
static void __init pcpu_dfl_fc_free(void *ptr, size_t size) static void __init pcpu_dfl_fc_free(void *ptr, size_t size)
{ {
free_bootmem(__pa(ptr), size); memblock_free_early(__pa(ptr), size);
} }
void __init setup_per_cpu_areas(void) void __init setup_per_cpu_areas(void)
...@@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void) ...@@ -1896,7 +1900,9 @@ void __init setup_per_cpu_areas(void)
void *fc; void *fc;
ai = pcpu_alloc_alloc_info(1, 1); ai = pcpu_alloc_alloc_info(1, 1);
fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); fc = memblock_virt_alloc_from_nopanic(unit_size,
PAGE_SIZE,
__pa(MAX_DMA_ADDRESS));
if (!ai || !fc) if (!ai || !fc)
panic("Failed to allocate memory for percpu areas."); panic("Failed to allocate memory for percpu areas.");
/* kmemleak tracks the percpu allocations separately */ /* kmemleak tracks the percpu allocations separately */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment