Commit dca49645 authored by Tejun Heo

percpu: move common parts out of pcpu_[de]populate_chunk()

percpu-vm and percpu-km implement separate versions of
pcpu_[de]populate_chunk(), and some parts which are or should be
common currently live in the specific implementations.  Make the
following changes.

* Allocated area clearing is moved from the pcpu_populate_chunk()
  implementations to pcpu_alloc().  This makes percpu-km's version a
  noop.

* Quick exit tests in pcpu_[de]populate_chunk() of percpu-vm are moved
  to their respective callers so that they are applied to percpu-km
  too.  This doesn't make any meaningful difference as both functions
  are noops for percpu-km; however, this is more consistent and will
  help implement atomic allocation support (see the sketch below).
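To make the resulting split concrete, here is a small user-space sketch
of the caller-side flow described above, compressed to a single "cpu"
copy.  All names (toy_chunk, toy_next_pop(), toy_populate(),
toy_alloc()) are illustrative stand-ins for chunk->populated,
pcpu_next_pop(), pcpu_populate_chunk() and pcpu_alloc(); they are not
kernel APIs and gloss over locking, per-cpu units and the bitmap
helpers.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define TOY_NR_PAGES  8
#define TOY_PAGE_SIZE 4096

struct toy_chunk {
	bool populated[TOY_NR_PAGES];            /* models chunk->populated */
	char data[TOY_NR_PAGES * TOY_PAGE_SIZE]; /* models the unit memory  */
};

/* Find the next populated run at or after *rs, loosely like pcpu_next_pop(). */
static void toy_next_pop(struct toy_chunk *c, int *rs, int *re, int end)
{
	while (*rs < end && !c->populated[*rs])
		(*rs)++;
	*re = *rs;
	while (*re < end && c->populated[*re])
		(*re)++;
}

/* Stand-in for pcpu_populate_chunk(): mark pages [start, end) as backed. */
static int toy_populate(struct toy_chunk *c, int start, int end)
{
	for (int i = start; i < end; i++)
		c->populated[i] = true;
	return 0;
}

/* Caller-side flow after this commit: quick exit test, populate, then clear. */
static void *toy_alloc(struct toy_chunk *c, int off, int size)
{
	int page_start = off / TOY_PAGE_SIZE;
	int page_end = (off + size + TOY_PAGE_SIZE - 1) / TOY_PAGE_SIZE;
	int rs = page_start, re;

	/* quick exit test, now done by the caller for both backends */
	toy_next_pop(c, &rs, &re, page_end);
	if (rs != page_start || re != page_end) {
		if (toy_populate(c, page_start, page_end))
			return NULL;
	}

	/* area clearing, now done by the caller instead of each backend */
	memset(c->data + off, 0, size);
	return c->data + off;
}

int main(void)
{
	static struct toy_chunk chunk;
	char *p = toy_alloc(&chunk, 100, 300);

	printf("allocated at offset %ld\n", (long)(p - chunk.data));
	return 0;
}

The point of this shape is that the populated-range test and the
clearing memset live in the caller, so a backend whose pages are always
present (percpu-km) can keep its populate hook as a noop.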
Signed-off-by: Tejun Heo <tj@kernel.org>
parent cdb4cba5
mm/percpu-km.c
@@ -35,11 +35,6 @@
 
 static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 {
-	unsigned int cpu;
-
-	for_each_possible_cpu(cpu)
-		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
-
 	return 0;
 }
mm/percpu-vm.c
@@ -265,7 +265,7 @@ static void pcpu_post_map_flush(struct pcpu_chunk *chunk,
  * @size: size of the area to populate in bytes
  *
  * For each cpu, populate and map pages [@page_start,@page_end) into
- * @chunk.  The area is cleared on return.
+ * @chunk.
  *
  * CONTEXT:
  * pcpu_alloc_mutex, does GFP_KERNEL allocation.
@@ -276,18 +276,8 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	int page_end = PFN_UP(off + size);
 	int free_end = page_start, unmap_end = page_start;
 	struct page **pages;
-	unsigned int cpu;
 	int rs, re, rc;
 
-	/* quick path, check whether all pages are already there */
-	rs = page_start;
-	pcpu_next_pop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		goto clear;
-
-	/* need to allocate and map pages, this chunk can't be immutable */
-	WARN_ON(chunk->immutable);
-
 	pages = pcpu_get_pages(chunk);
 	if (!pages)
 		return -ENOMEM;
@@ -308,10 +298,6 @@ static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	}
 	pcpu_post_map_flush(chunk, page_start, page_end);
 
-	bitmap_set(chunk->populated, page_start, page_end - page_start);
-clear:
-	for_each_possible_cpu(cpu)
-		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
 	return 0;
 
 err_unmap:
@@ -345,15 +331,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	struct page **pages;
 	int rs, re;
 
-	/* quick path, check whether it's empty already */
-	rs = page_start;
-	pcpu_next_unpop(chunk, &rs, &re, page_end);
-	if (rs == page_start && re == page_end)
-		return;
-
-	/* immutable chunks can't be depopulated */
-	WARN_ON(chunk->immutable);
-
 	/*
 	 * If control reaches here, there must have been at least one
 	 * successful population attempt so the temp pages array must
@@ -372,8 +349,6 @@ static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size)
 	pcpu_for_each_pop_region(chunk, rs, re, page_start, page_end)
 		pcpu_free_pages(chunk, pages, rs, re);
 
-	bitmap_clear(chunk->populated, page_start, page_end - page_start);
 }
 
 static struct pcpu_chunk *pcpu_create_chunk(void)
mm/percpu.c
@@ -709,7 +709,8 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
 	const char *err;
-	int slot, off, new_alloc;
+	int slot, off, new_alloc, cpu;
+	int page_start, page_end, rs, re;
 	unsigned long flags;
 	void __percpu *ptr;
@@ -802,7 +803,16 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 area_found:
 	spin_unlock_irqrestore(&pcpu_lock, flags);
 
-	/* populate, map and clear the area */
-	if (pcpu_populate_chunk(chunk, off, size)) {
-		spin_lock_irqsave(&pcpu_lock, flags);
-		pcpu_free_area(chunk, off);
+	/* populate if not all pages are already there */
+	page_start = PFN_DOWN(off);
+	page_end = PFN_UP(off + size);
+	rs = page_start;
+	pcpu_next_pop(chunk, &rs, &re, page_end);
+
+	if (rs != page_start || re != page_end) {
+		WARN_ON(chunk->immutable);
+
+		if (pcpu_populate_chunk(chunk, off, size)) {
+			spin_lock_irqsave(&pcpu_lock, flags);
+			pcpu_free_area(chunk, off);
@@ -810,9 +820,15 @@ static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
-		goto fail_unlock;
-	}
+			goto fail_unlock;
+		}
+
+		bitmap_set(chunk->populated, page_start, page_end - page_start);
+	}
 
 	mutex_unlock(&pcpu_alloc_mutex);
 
-	/* return address relative to base address */
+	/* clear the areas and return address relative to base address */
+	for_each_possible_cpu(cpu)
+		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
+
 	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
 	kmemleak_alloc_percpu(ptr, size);
 	return ptr;
@@ -903,7 +919,12 @@ static void pcpu_reclaim(struct work_struct *work)
 	spin_unlock_irq(&pcpu_lock);
 
 	list_for_each_entry_safe(chunk, next, &todo, list) {
-		pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
+		int rs = 0, re;
+
+		pcpu_next_unpop(chunk, &rs, &re, PFN_UP(pcpu_unit_size));
+		if (rs || re != PFN_UP(pcpu_unit_size))
+			pcpu_depopulate_chunk(chunk, 0, pcpu_unit_size);
 		pcpu_destroy_chunk(chunk);
 	}