Commit 099a19d9 authored by Tejun Heo

percpu: allow limited allocation before slab is online

This patch updates the percpu allocator so that it can serve a limited
amount of allocation before slab comes online.  This is primarily to
allow slab to depend on a working percpu allocator.

Two parameters, PERCPU_DYNAMIC_EARLY_SIZE and PERCPU_DYNAMIC_EARLY_SLOTS,
determine how much memory space and how many allocation map slots are
reserved.  If this reserved area is exhausted, WARN_ON_ONCE() will
trigger and allocation will fail until slab comes online.

The following changes are made to implement early alloc.

* pcpu_mem_alloc() now checks slab_is_available()

* Chunks are allocated using pcpu_mem_alloc()

* Init paths make sure ai->dyn_size is at least as large as
  PERCPU_DYNAMIC_EARLY_SIZE.

* Initial alloc maps are allocated in __initdata and copied to
  kmalloc'd areas once slab is online.
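
To make the mechanism concrete before the diff, here is a minimal
userspace model of the scheme (an illustration, not kernel code;
early_map, mem_alloc() and init_late() are hypothetical stand-ins for
the __initdata smap/dmap arrays, pcpu_mem_alloc() and
percpu_init_late()):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define EARLY_SLOTS 128			/* models PERCPU_DYNAMIC_EARLY_SLOTS */

static int early_map[EARLY_SLOTS];	/* models the __initdata smap/dmap */
static int *map = early_map;
static int slab_online;			/* models slab_is_available() */

/* models pcpu_mem_alloc(): warn and fail while "slab" is offline */
static void *mem_alloc(size_t size)
{
	if (!slab_online) {
		fprintf(stderr, "WARN: allocation before slab is online\n");
		return NULL;
	}
	return calloc(1, size);
}

/* models percpu_init_late(): swap the static map for a heap-allocated copy */
static void init_late(void)
{
	int *new_map = mem_alloc(EARLY_SLOTS * sizeof(map[0]));

	if (!new_map)
		abort();
	memcpy(new_map, map, EARLY_SLOTS * sizeof(map[0]));
	map = new_map;		/* the static array is simply abandoned */
}

int main(void)
{
	map[0] = 42;		/* early use, backed by static storage */
	slab_online = 1;	/* kmem_cache_init() has run */
	init_late();		/* now backed by a real allocation */
	printf("map[0] = %d\n", map[0]);
	return 0;
}

The kernel version below performs the same swap per chunk, under
pcpu_lock; see percpu_init_late() at the end of the diff.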
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Christoph Lameter <cl@linux-foundation.org>
parent 4ba6ce25
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -44,6 +44,16 @@
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE		PFN_ALIGN(64 << 10)
 
+/*
+ * Percpu allocator can serve percpu allocations before slab is
+ * initialized which allows slab to depend on the percpu allocator.
+ * The following two parameters decide how much resource to
+ * preallocate for this.  Keep PERCPU_DYNAMIC_RESERVE equal to or
+ * larger than PERCPU_DYNAMIC_EARLY_SIZE.
+ */
+#define PERCPU_DYNAMIC_EARLY_SLOTS	128
+#define PERCPU_DYNAMIC_EARLY_SIZE	(12 << 10)
+
 /*
  * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
  * back on the first chunk for dynamic percpu allocation if arch is
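
For scale: the early reserve amounts to (12 << 10) = 12 KiB of dynamic
space per unit and at most 128 allocation-map slots; once either runs
out, dynamic percpu allocations fail until slab is online.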
@@ -135,6 +145,7 @@ extern bool is_kernel_percpu_address(unsigned long addr);
 #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
 extern void __init setup_per_cpu_areas(void);
 #endif
+extern void __init percpu_init_late(void);
 
 #else	/* CONFIG_SMP */
@@ -148,6 +159,8 @@ static inline bool is_kernel_percpu_address(unsigned long addr)
 static inline void __init setup_per_cpu_areas(void) { }
 
+static inline void __init percpu_init_late(void) { }
+
 static inline void *pcpu_lpage_remapped(void *kaddr)
 {
 	return NULL;
--- a/init/main.c
+++ b/init/main.c
@@ -522,6 +522,7 @@ static void __init mm_init(void)
 	page_cgroup_init_flatmem();
 	mem_init();
 	kmem_cache_init();
+	percpu_init_late();
 	pgtable_cache_init();
 	vmalloc_init();
 }
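
The ordering here matters: percpu_init_late() replaces the temporary
maps through pcpu_mem_alloc(), which after this patch requires
slab_is_available(), so the call sits immediately after
kmem_cache_init().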
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -282,6 +282,9 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
  */
 static void *pcpu_mem_alloc(size_t size)
 {
+	if (WARN_ON_ONCE(!slab_is_available()))
+		return NULL;
+
 	if (size <= PAGE_SIZE)
 		return kzalloc(size, GFP_KERNEL);
 	else {
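
Note that early allocations which fit in the preallocated maps and
first-chunk space never reach pcpu_mem_alloc(); the WARN_ON_ONCE() only
fires when the early reserve is exhausted (or a new chunk is needed)
before slab is up, and the allocation then fails with NULL.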
@@ -392,13 +395,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 	memcpy(new, chunk->map, old_size);
 
-	/*
-	 * map_alloc < PCPU_DFL_MAP_ALLOC indicates that the chunk is
-	 * one of the first chunks and still using static map.
-	 */
-	if (chunk->map_alloc >= PCPU_DFL_MAP_ALLOC)
-		old = chunk->map;
-
 	chunk->map_alloc = new_alloc;
 	chunk->map = new;
 	new = NULL;
@@ -604,7 +600,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 {
 	struct pcpu_chunk *chunk;
 
-	chunk = kzalloc(pcpu_chunk_struct_size, GFP_KERNEL);
+	chunk = pcpu_mem_alloc(pcpu_chunk_struct_size);
 	if (!chunk)
 		return NULL;
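
Allocating the chunk struct through pcpu_mem_alloc() instead of
kzalloc() puts it behind the same slab_is_available() guard and reuses
the existing fallback path for sizes above PAGE_SIZE.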
@@ -1109,7 +1105,9 @@ static struct pcpu_alloc_info * __init pcpu_build_alloc_info(
 	memset(group_map, 0, sizeof(group_map));
 	memset(group_cnt, 0, sizeof(group_cnt));
 
-	size_sum = PFN_ALIGN(static_size + reserved_size + dyn_size);
+	/* calculate size_sum and ensure dyn_size is enough for early alloc */
+	size_sum = PFN_ALIGN(static_size + reserved_size +
+			     max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE));
 	dyn_size = size_sum - static_size - reserved_size;
 
 	/*
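
Worked example of the max_t() clamp: a caller passing a dyn_size of
8 KiB now has size_sum computed with 12 KiB (PERCPU_DYNAMIC_EARLY_SIZE)
instead, and the recomputed dyn_size can only grow further through
PFN_ALIGN() rounding, so the first chunk always has room for the early
reserve.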
@@ -1338,7 +1336,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 			  void *base_addr)
 {
 	static char cpus_buf[4096] __initdata;
-	static int smap[2], dmap[2];
+	static int smap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
+	static int dmap[PERCPU_DYNAMIC_EARLY_SLOTS] __initdata;
 	size_t dyn_size = ai->dyn_size;
 	size_t size_sum = ai->static_size + ai->reserved_size + dyn_size;
 	struct pcpu_chunk *schunk, *dchunk = NULL;
@@ -1361,14 +1360,13 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 } while (0)
 
 	/* sanity checks */
-	BUILD_BUG_ON(ARRAY_SIZE(smap) >= PCPU_DFL_MAP_ALLOC ||
-		     ARRAY_SIZE(dmap) >= PCPU_DFL_MAP_ALLOC);
 	PCPU_SETUP_BUG_ON(ai->nr_groups <= 0);
 	PCPU_SETUP_BUG_ON(!ai->static_size);
 	PCPU_SETUP_BUG_ON(!base_addr);
 	PCPU_SETUP_BUG_ON(ai->unit_size < size_sum);
 	PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK);
 	PCPU_SETUP_BUG_ON(ai->unit_size < PCPU_MIN_UNIT_SIZE);
+	PCPU_SETUP_BUG_ON(ai->dyn_size < PERCPU_DYNAMIC_EARLY_SIZE);
 	PCPU_SETUP_BUG_ON(pcpu_verify_alloc_info(ai) < 0);
 
 	/* process group information and build config tables accordingly */
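
The dropped BUILD_BUG_ON() belonged to the old convention where
map_alloc < PCPU_DFL_MAP_ALLOC marked a static map (removed from
pcpu_extend_area_map() above); with the maps now ordinary __initdata
arrays swapped out by percpu_init_late(), the only check still needed is
that ai->dyn_size covers the early reserve.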
@@ -1806,3 +1804,33 @@ void __init setup_per_cpu_areas(void)
 		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
 }
 #endif	/* CONFIG_HAVE_SETUP_PER_CPU_AREA */
+
+/*
+ * First and reserved chunks are initialized with temporary allocation
+ * map in initdata so that they can be used before slab is online.
+ * This function is called after slab is brought up and replaces those
+ * with properly allocated maps.
+ */
+void __init percpu_init_late(void)
+{
+	struct pcpu_chunk *target_chunks[] =
+		{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
+	struct pcpu_chunk *chunk;
+	unsigned long flags;
+	int i;
+
+	for (i = 0; (chunk = target_chunks[i]); i++) {
+		int *map;
+		const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);
+
+		BUILD_BUG_ON(size > PAGE_SIZE);
+
+		map = pcpu_mem_alloc(size);
+		BUG_ON(!map);
+
+		spin_lock_irqsave(&pcpu_lock, flags);
+		memcpy(map, chunk->map, size);
+		chunk->map = map;
+		spin_unlock_irqrestore(&pcpu_lock, flags);
+	}
+}
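
Two details worth noting: BUILD_BUG_ON(size > PAGE_SIZE) keeps the
replacement maps on pcpu_mem_alloc()'s kzalloc() path, and the copy plus
pointer swap run under pcpu_lock so concurrent allocators never see a
half-updated map. The abandoned __initdata arrays are reclaimed along
with the rest of init memory.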