Commit fcccc41e authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: fix and clean-up atomic annotations

Drop smp_load_acquire() from the next_pool_required check in
depot_init_pool(), as both depot_init_pool() and all of the
smp_store_release()s to this variable are executed under the stack depot
lock.

Also simplify and clean up comments accompanying the use of atomic
accesses in the stack depot code.
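To illustrate the pattern relied on above, here is a minimal userspace sketch (not the stack depot code itself): when the reader and all writers of a flag run under the same lock, the lock already serializes them and a plain load of the flag suffices; the release store is only needed for a separate lockless reader. C11 atomics and a pthread mutex stand in for the kernel primitives and the stack depot lock, and the names (pool_lock, next_pool_required, init_pool, lockless_check) are illustrative, not taken from the patch.

/*
 * Sketch only: lock-serialized reader/writers use plain (relaxed) accesses;
 * the release store exists solely for the lockless fast-path reader.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_int next_pool_required = 1;

/* Runs only with pool_lock held: a plain (relaxed) load is enough. */
static void init_pool(void)
{
	if (!atomic_load_explicit(&next_pool_required, memory_order_relaxed))
		return;

	/* ... initialize the next pool here ... */

	/*
	 * Release store: needed only so the lockless reader below observes
	 * the pool initialization before it sees the flag cleared.
	 */
	atomic_store_explicit(&next_pool_required, 0, memory_order_release);
}

/* Lockless fast path: still needs an acquire load to pair with the release. */
static bool lockless_check(void)
{
	return atomic_load_explicit(&next_pool_required, memory_order_acquire);
}

int main(void)
{
	pthread_mutex_lock(&pool_lock);
	init_pool();
	pthread_mutex_unlock(&pool_lock);

	printf("next pool required: %d\n", lockless_check());
	return 0;
}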

Link: https://lkml.kernel.org/r/c118ef044d8db80248d9e1f14592c72e8429e9d9.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent fc60e0ca
@@ -231,10 +231,10 @@ static void depot_init_pool(void **prealloc)
 	/*
 	 * If the next pool is already initialized or the maximum number of
 	 * pools is reached, do not use the preallocated memory.
-	 * smp_load_acquire() here pairs with smp_store_release() below and
-	 * in depot_alloc_stack().
+	 * Access next_pool_required non-atomically, as there are no concurrent
+	 * write accesses to this variable.
 	 */
-	if (!smp_load_acquire(&next_pool_required))
+	if (!next_pool_required)
 		return;
 
 	/* Check if the current pool is not yet allocated. */
@@ -255,8 +255,8 @@ static void depot_init_pool(void **prealloc)
 		 * At this point, either the next pool is initialized or the
 		 * maximum number of pools is reached. In either case, take
 		 * note that initializing another pool is not required.
-		 * This smp_store_release pairs with smp_load_acquire() above
-		 * and in stack_depot_save().
+		 * smp_store_release() pairs with smp_load_acquire() in
+		 * stack_depot_save().
 		 */
 		smp_store_release(&next_pool_required, 0);
 	}
@@ -279,7 +279,7 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 		/*
 		 * Move on to the next pool.
-		 * WRITE_ONCE pairs with potential concurrent read in
+		 * WRITE_ONCE() pairs with potential concurrent read in
 		 * stack_depot_fetch().
 		 */
 		WRITE_ONCE(pool_index, pool_index + 1);
@@ -287,8 +287,8 @@ depot_alloc_stack(unsigned long *entries, int size, u32 hash, void **prealloc)
 		/*
 		 * If the maximum number of pools is not reached, take note
 		 * that the next pool needs to initialized.
-		 * smp_store_release() here pairs with smp_load_acquire() in
-		 * stack_depot_save() and depot_init_pool().
+		 * smp_store_release() pairs with smp_load_acquire() in
+		 * stack_depot_save().
 		 */
 		if (pool_index + 1 < DEPOT_MAX_POOLS)
 			smp_store_release(&next_pool_required, 1);
@@ -329,7 +329,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 {
 	union handle_parts parts = { .handle = handle };
 	/*
-	 * READ_ONCE pairs with potential concurrent write in
+	 * READ_ONCE() pairs with potential concurrent write in
 	 * depot_alloc_stack().
 	 */
 	int pool_index_cached = READ_ONCE(pool_index);
@@ -419,8 +419,7 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	/*
 	 * Fast path: look the stack trace up without locking.
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |bucket| below.
+	 * smp_load_acquire() pairs with smp_store_release() to |bucket| below.
 	 */
 	found = find_stack(smp_load_acquire(bucket), entries, nr_entries, hash);
 	if (found)
@@ -430,8 +429,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	 * Check if another stack pool needs to be initialized. If so, allocate
 	 * the memory now - we won't be able to do that under the lock.
 	 *
-	 * The smp_load_acquire() here pairs with smp_store_release() to
-	 * |next_pool_inited| in depot_alloc_stack() and depot_init_pool().
+	 * smp_load_acquire() pairs with smp_store_release() in
+	 * depot_alloc_stack() and depot_init_pool().
 	 */
 	if (unlikely(can_alloc && smp_load_acquire(&next_pool_required))) {
 		/*
@@ -457,8 +456,8 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	if (new) {
 		new->next = *bucket;
 		/*
-		 * This smp_store_release() pairs with
-		 * smp_load_acquire() from |bucket| above.
+		 * smp_store_release() pairs with smp_load_acquire()
+		 * from |bucket| above.
 		 */
 		smp_store_release(bucket, new);
 		found = new;
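For the WRITE_ONCE()/READ_ONCE() pairing on pool_index that the diff keeps, a similar userspace sketch may help. C11 relaxed atomics stand in for the kernel macros, the names (add_pool, fetch, MAX_POOLS) are illustrative, and only the marked-access-plus-single-snapshot pattern is shown, not the stack depot's full ordering guarantees.

/*
 * Sketch only: the lockless reader takes one marked snapshot of the index
 * and bounds-checks against it, so a concurrently bumped index can never
 * send it past the pools covered by that snapshot.
 */
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

#define MAX_POOLS 8

static int pools[MAX_POOLS];	/* stand-in for the stack pools */
static atomic_int pool_index;	/* index of the last valid pool */

/* Writer side (serialized by a lock in the real code). */
static void add_pool(int contents)
{
	int idx = atomic_load_explicit(&pool_index, memory_order_relaxed);

	pools[idx + 1] = contents;
	/* Marked store: concurrent readers see either the old or new index. */
	atomic_store_explicit(&pool_index, idx + 1, memory_order_relaxed);
}

/* Lockless reader side, mirroring the cached snapshot in depot_fetch_stack(). */
static int *fetch(int wanted_pool)
{
	/* One snapshot; re-reading the index per check could race past it. */
	int cached = atomic_load_explicit(&pool_index, memory_order_relaxed);

	if (wanted_pool > cached)
		return NULL;
	return &pools[wanted_pool];
}

int main(void)
{
	add_pool(42);
	printf("pool 1 %s\n", fetch(1) ? "found" : "not visible yet");
	return 0;
}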