Commit 1d9d3257 authored by Tejun Heo's avatar Tejun Heo

percpu: make @dyn_size mandatory for pcpu_setup_first_chunk()

Now that all actual first chunk allocation and copying happen in the
first chunk allocators and helpers, there's no reason for
pcpu_setup_first_chunk() to try to determine @dyn_size automatically.
The only remaining user is the page first chunk allocator.  Make it
determine dyn_size like the other allocators and make @dyn_size
mandatory for pcpu_setup_first_chunk().
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 9a773769
...@@ -79,7 +79,7 @@ typedef void (*pcpu_fc_map_fn_t)(void *ptr, size_t size, void *addr); ...@@ -79,7 +79,7 @@ typedef void (*pcpu_fc_map_fn_t)(void *ptr, size_t size, void *addr);
extern size_t __init pcpu_setup_first_chunk( extern size_t __init pcpu_setup_first_chunk(
size_t static_size, size_t reserved_size, size_t static_size, size_t reserved_size,
ssize_t dyn_size, size_t unit_size, size_t dyn_size, size_t unit_size,
void *base_addr, const int *unit_map); void *base_addr, const int *unit_map);
#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK #ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
......
...@@ -1235,7 +1235,7 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -1235,7 +1235,7 @@ EXPORT_SYMBOL_GPL(free_percpu);
* pcpu_setup_first_chunk - initialize the first percpu chunk * pcpu_setup_first_chunk - initialize the first percpu chunk
* @static_size: the size of static percpu area in bytes * @static_size: the size of static percpu area in bytes
* @reserved_size: the size of reserved percpu area in bytes, 0 for none * @reserved_size: the size of reserved percpu area in bytes, 0 for none
* @dyn_size: free size for dynamic allocation in bytes, -1 for auto * @dyn_size: free size for dynamic allocation in bytes
* @unit_size: unit size in bytes, must be multiple of PAGE_SIZE * @unit_size: unit size in bytes, must be multiple of PAGE_SIZE
* @base_addr: mapped address * @base_addr: mapped address
* @unit_map: cpu -> unit map, NULL for sequential mapping * @unit_map: cpu -> unit map, NULL for sequential mapping
...@@ -1252,10 +1252,9 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -1252,10 +1252,9 @@ EXPORT_SYMBOL_GPL(free_percpu);
* limited offset range for symbol relocations to guarantee module * limited offset range for symbol relocations to guarantee module
* percpu symbols fall inside the relocatable range. * percpu symbols fall inside the relocatable range.
* *
* @dyn_size, if non-negative, determines the number of bytes * @dyn_size determines the number of bytes available for dynamic
* available for dynamic allocation in the first chunk. Specifying * allocation in the first chunk. The area between @static_size +
* non-negative value makes percpu leave alone the area beyond * @reserved_size + @dyn_size and @unit_size is unused.
* @static_size + @reserved_size + @dyn_size.
* *
* @unit_size specifies unit size and must be aligned to PAGE_SIZE and * @unit_size specifies unit size and must be aligned to PAGE_SIZE and
* equal to or larger than @static_size + @reserved_size + if * equal to or larger than @static_size + @reserved_size + if
...@@ -1276,13 +1275,12 @@ EXPORT_SYMBOL_GPL(free_percpu); ...@@ -1276,13 +1275,12 @@ EXPORT_SYMBOL_GPL(free_percpu);
* percpu access. * percpu access.
*/ */
size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
ssize_t dyn_size, size_t unit_size, size_t dyn_size, size_t unit_size,
void *base_addr, const int *unit_map) void *base_addr, const int *unit_map)
{ {
static struct vm_struct first_vm; static struct vm_struct first_vm;
static int smap[2], dmap[2]; static int smap[2], dmap[2];
size_t size_sum = static_size + reserved_size + size_t size_sum = static_size + reserved_size + dyn_size;
(dyn_size >= 0 ? dyn_size : 0);
struct pcpu_chunk *schunk, *dchunk = NULL; struct pcpu_chunk *schunk, *dchunk = NULL;
unsigned int cpu, tcpu; unsigned int cpu, tcpu;
int i; int i;
...@@ -1345,9 +1343,6 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size, ...@@ -1345,9 +1343,6 @@ size_t __init pcpu_setup_first_chunk(size_t static_size, size_t reserved_size,
pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) + pcpu_chunk_struct_size = sizeof(struct pcpu_chunk) +
BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long); BITS_TO_LONGS(pcpu_unit_pages) * sizeof(unsigned long);
if (dyn_size < 0)
dyn_size = pcpu_unit_size - static_size - reserved_size;
first_vm.flags = VM_ALLOC; first_vm.flags = VM_ALLOC;
first_vm.size = pcpu_chunk_size; first_vm.size = pcpu_chunk_size;
first_vm.addr = base_addr; first_vm.addr = base_addr;
...@@ -1557,6 +1552,8 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1557,6 +1552,8 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
{ {
static struct vm_struct vm; static struct vm_struct vm;
const size_t static_size = __per_cpu_end - __per_cpu_start; const size_t static_size = __per_cpu_end - __per_cpu_start;
ssize_t dyn_size = -1;
size_t size_sum, unit_size;
char psize_str[16]; char psize_str[16];
int unit_pages; int unit_pages;
size_t pages_size; size_t pages_size;
...@@ -1567,8 +1564,9 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1567,8 +1564,9 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10); snprintf(psize_str, sizeof(psize_str), "%luK", PAGE_SIZE >> 10);
unit_pages = PFN_UP(max_t(size_t, static_size + reserved_size, size_sum = pcpu_calc_fc_sizes(static_size, reserved_size, &dyn_size);
PCPU_MIN_UNIT_SIZE)); unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE);
unit_pages = unit_size >> PAGE_SHIFT;
/* unaligned allocations can't be freed, round up to page size */ /* unaligned allocations can't be freed, round up to page size */
pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0])); pages_size = PFN_ALIGN(unit_pages * nr_cpu_ids * sizeof(pages[0]));
...@@ -1591,12 +1589,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1591,12 +1589,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
/* allocate vm area, map the pages and copy static data */ /* allocate vm area, map the pages and copy static data */
vm.flags = VM_ALLOC; vm.flags = VM_ALLOC;
vm.size = nr_cpu_ids * unit_pages << PAGE_SHIFT; vm.size = nr_cpu_ids * unit_size;
vm_area_register_early(&vm, PAGE_SIZE); vm_area_register_early(&vm, PAGE_SIZE);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
unsigned long unit_addr = (unsigned long)vm.addr + unsigned long unit_addr =
(cpu * unit_pages << PAGE_SHIFT); (unsigned long)vm.addr + cpu * unit_size;
for (i = 0; i < unit_pages; i++) for (i = 0; i < unit_pages; i++)
populate_pte_fn(unit_addr + (i << PAGE_SHIFT)); populate_pte_fn(unit_addr + (i << PAGE_SHIFT));
...@@ -1620,11 +1618,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size, ...@@ -1620,11 +1618,12 @@ ssize_t __init pcpu_page_first_chunk(size_t reserved_size,
} }
/* we're ready, commit */ /* we're ready, commit */
pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu\n", pr_info("PERCPU: %d %s pages/cpu @%p s%zu r%zu d%zu\n",
unit_pages, psize_str, vm.addr, static_size, reserved_size); unit_pages, psize_str, vm.addr, static_size, reserved_size,
dyn_size);
ret = pcpu_setup_first_chunk(static_size, reserved_size, -1, ret = pcpu_setup_first_chunk(static_size, reserved_size, dyn_size,
unit_pages << PAGE_SHIFT, vm.addr, NULL); unit_size, vm.addr, NULL);
goto out_free_ar; goto out_free_ar;
enomem: enomem:
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment