Commit d273823d authored by Tejun Heo, committed by Sasha Levin

percpu: fix synchronization between chunk->map_extend_work and chunk destruction

[ Upstream commit 4f996e23 ]

Atomic allocations can trigger async map extensions, which are serviced
by chunk->map_extend_work.  pcpu_balance_work, which is responsible for
destroying idle chunks, wasn't synchronizing properly against
chunk->map_extend_work and could end up freeing a chunk while the work
item was still in flight.

This patch fixes the bug by rolling async map extension operations
into pcpu_balance_work.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-and-tested-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
Reported-by: Vlastimil Babka <vbabka@suse.cz>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
Cc: stable@vger.kernel.org # v3.18+
Fixes: 9c824b6a ("percpu: make sure chunk->map array has available space")
Signed-off-by: Sasha Levin <sasha.levin@oracle.com>
parent 272d474f
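
The shape of the fix can be seen in a minimal userspace sketch (illustrative only, not the kernel code: the names request_extension, balance_work and extend_head, and the pthread locking, are stand-ins invented for this example; the patch itself uses a list_head in struct pcpu_chunk, pcpu_lock, and the existing pcpu_balance_work item). Extension requests are queued on one global list, and the single balance worker drains that list; the same worker unlinks a dying chunk from the list before freeing it, so an extension can never run against a destroyed chunk.

/*
 * Minimal userspace sketch of the fix (illustrative only, not kernel
 * code; every name below is made up for the example).
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct chunk {
        struct chunk *ext_next; /* on extend_head while a request is pending */
        bool on_ext_list;
        int map_alloc;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays pcpu_lock */
static struct chunk *extend_head;       /* plays pcpu_map_extend_chunks */

/* Caller holds 'lock', mirroring the lockdep_assert_held() the patch adds. */
static void request_extension(struct chunk *c)
{
        if (!c->on_ext_list) {          /* mirrors the list_empty() check */
                c->ext_next = extend_head;
                extend_head = c;
                c->on_ext_list = true;
                /* a real version would kick the balance worker here */
        }
}

/* The worker's tail loop, like the do/while added to pcpu_balance_workfn(). */
static void balance_work(void)
{
        for (;;) {
                struct chunk *c;

                pthread_mutex_lock(&lock);
                c = extend_head;        /* pop one queued request, if any */
                if (c) {
                        extend_head = c->ext_next;
                        c->on_ext_list = false;
                }
                pthread_mutex_unlock(&lock);
                if (!c)
                        break;
                c->map_alloc *= 2;      /* stands in for pcpu_extend_area_map() */
        }
}

int main(void)
{
        struct chunk c = { .map_alloc = 4 };

        pthread_mutex_lock(&lock);
        request_extension(&c);          /* the atomic-alloc path queues the chunk */
        pthread_mutex_unlock(&lock);

        balance_work();                 /* the single worker services the request */
        printf("map_alloc is now %d\n", c.map_alloc);
        return 0;
}

The key property is that there is no longer a per-chunk work item that can outlive its chunk: everything that touches a chunk after the atomic-allocation fast path funnels through the one balance worker, which also unlinks dying chunks from the queue (the patch's list_del_init()) before destroying them.
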
diff --git a/mm/percpu.c b/mm/percpu.c
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -110,7 +110,7 @@ struct pcpu_chunk {
 	int			map_used;	/* # of map entries used before the sentry */
 	int			map_alloc;	/* # of map entries allocated */
 	int			*map;		/* allocation map */
-	struct work_struct	map_extend_work;/* async ->map[] extension */
+	struct list_head	map_extend_list;/* on pcpu_map_extend_chunks */
 
 	void			*data;		/* chunk data */
 	int			first_free;	/* no free below this */
@@ -164,6 +164,9 @@ static DEFINE_MUTEX(pcpu_alloc_mutex);	/* chunk create/destroy, [de]pop */
 
 static struct list_head *pcpu_slot __read_mostly; /* chunk list slots */
 
+/* chunks which need their map areas extended, protected by pcpu_lock */
+static LIST_HEAD(pcpu_map_extend_chunks);
+
 /*
  * The number of empty populated pages, protected by pcpu_lock.  The
  * reserved chunk doesn't contribute to the count.
@@ -397,13 +400,19 @@ static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 {
 	int margin, new_alloc;
 
+	lockdep_assert_held(&pcpu_lock);
+
 	if (is_atomic) {
 		margin = 3;
 
 		if (chunk->map_alloc <
-		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
-		    pcpu_async_enabled)
-			schedule_work(&chunk->map_extend_work);
+		    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW) {
+			if (list_empty(&chunk->map_extend_list)) {
+				list_add_tail(&chunk->map_extend_list,
+					      &pcpu_map_extend_chunks);
+				pcpu_schedule_balance_work();
+			}
+		}
 	} else {
 		margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
 	}
@@ -469,20 +478,6 @@ static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 	return 0;
 }
 
-static void pcpu_map_extend_workfn(struct work_struct *work)
-{
-	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
-						map_extend_work);
-	int new_alloc;
-
-	spin_lock_irq(&pcpu_lock);
-	new_alloc = pcpu_need_to_extend(chunk, false);
-	spin_unlock_irq(&pcpu_lock);
-
-	if (new_alloc)
-		pcpu_extend_area_map(chunk, new_alloc);
-}
-
 /**
  * pcpu_fit_in_area - try to fit the requested allocation in a candidate area
  * @chunk: chunk the candidate area belongs to
@@ -742,7 +737,7 @@ static struct pcpu_chunk *pcpu_alloc_chunk(void)
 	chunk->map_used = 1;
 
 	INIT_LIST_HEAD(&chunk->list);
-	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&chunk->map_extend_list);
 	chunk->free_size = pcpu_unit_size;
 	chunk->contig_hint = pcpu_unit_size;
 
@@ -1131,6 +1126,7 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
 			continue;
 
+		list_del_init(&chunk->map_extend_list);
 		list_move(&chunk->list, &to_free);
 	}
 
@@ -1148,6 +1144,25 @@ static void pcpu_balance_workfn(struct work_struct *work)
 		pcpu_destroy_chunk(chunk);
 	}
 
+	/* service chunks which requested async area map extension */
+	do {
+		int new_alloc = 0;
+
+		spin_lock_irq(&pcpu_lock);
+
+		chunk = list_first_entry_or_null(&pcpu_map_extend_chunks,
+					struct pcpu_chunk, map_extend_list);
+		if (chunk) {
+			list_del_init(&chunk->map_extend_list);
+			new_alloc = pcpu_need_to_extend(chunk, false);
+		}
+
+		spin_unlock_irq(&pcpu_lock);
+
+		if (new_alloc)
+			pcpu_extend_area_map(chunk, new_alloc);
+	} while (chunk);
+
 	/*
 	 * Ensure there are certain number of free populated pages for
 	 * atomic allocs.  Fill up from the most packed so that atomic
@@ -1646,7 +1661,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	 */
 	schunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 	INIT_LIST_HEAD(&schunk->list);
-	INIT_WORK(&schunk->map_extend_work, pcpu_map_extend_workfn);
+	INIT_LIST_HEAD(&schunk->map_extend_list);
 	schunk->base_addr = base_addr;
 	schunk->map = smap;
 	schunk->map_alloc = ARRAY_SIZE(smap);
@@ -1676,7 +1691,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
 	if (dyn_size) {
 		dchunk = memblock_virt_alloc(pcpu_chunk_struct_size, 0);
 		INIT_LIST_HEAD(&dchunk->list);
-		INIT_WORK(&dchunk->map_extend_work, pcpu_map_extend_workfn);
+		INIT_LIST_HEAD(&dchunk->map_extend_list);
 		dchunk->base_addr = base_addr;
 		dchunk->map = dmap;
 		dchunk->map_alloc = ARRAY_SIZE(dmap);
...