Commit c60324fb, authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: lower the indentation in stack_depot_init

stack_depot_init does most things inside an if check. Move them out and
use a goto statement instead.

No functional changes.

Link: https://lkml.kernel.org/r/8e382f1f0c352e4b2ad47326fec7782af961fe8e.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent df225c87
...@@ -165,46 +165,50 @@ int __init stack_depot_early_init(void) ...@@ -165,46 +165,50 @@ int __init stack_depot_early_init(void)
int stack_depot_init(void) int stack_depot_init(void)
{ {
static DEFINE_MUTEX(stack_depot_init_mutex); static DEFINE_MUTEX(stack_depot_init_mutex);
unsigned long entries;
int ret = 0; int ret = 0;
mutex_lock(&stack_depot_init_mutex); mutex_lock(&stack_depot_init_mutex);
if (!stack_depot_disabled && !stack_table) {
unsigned long entries;
/* if (stack_depot_disabled || stack_table)
* Similarly to stack_depot_early_init, use stack_hash_order goto out_unlock;
* if assigned, and rely on automatic scaling otherwise.
*/
if (stack_hash_order) {
entries = 1UL << stack_hash_order;
} else {
int scale = STACK_HASH_SCALE;
entries = nr_free_buffer_pages();
entries = roundup_pow_of_two(entries);
if (scale > PAGE_SHIFT)
entries >>= (scale - PAGE_SHIFT);
else
entries <<= (PAGE_SHIFT - scale);
}
if (entries < 1UL << STACK_HASH_ORDER_MIN) /*
entries = 1UL << STACK_HASH_ORDER_MIN; * Similarly to stack_depot_early_init, use stack_hash_order
if (entries > 1UL << STACK_HASH_ORDER_MAX) * if assigned, and rely on automatic scaling otherwise.
entries = 1UL << STACK_HASH_ORDER_MAX; */
if (stack_hash_order) {
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries = 1UL << stack_hash_order;
entries); } else {
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL); int scale = STACK_HASH_SCALE;
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n"); entries = nr_free_buffer_pages();
stack_depot_disabled = true; entries = roundup_pow_of_two(entries);
ret = -ENOMEM;
} if (scale > PAGE_SHIFT)
stack_hash_mask = entries - 1; entries >>= (scale - PAGE_SHIFT);
else
entries <<= (PAGE_SHIFT - scale);
} }
if (entries < 1UL << STACK_HASH_ORDER_MIN)
entries = 1UL << STACK_HASH_ORDER_MIN;
if (entries > 1UL << STACK_HASH_ORDER_MAX)
entries = 1UL << STACK_HASH_ORDER_MAX;
pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
if (!stack_table) {
pr_err("hash table allocation failed, disabling\n");
stack_depot_disabled = true;
ret = -ENOMEM;
goto out_unlock;
}
stack_hash_mask = entries - 1;
out_unlock:
mutex_unlock(&stack_depot_init_mutex); mutex_unlock(&stack_depot_init_mutex);
return ret; return ret;
} }
EXPORT_SYMBOL_GPL(stack_depot_init); EXPORT_SYMBOL_GPL(stack_depot_init);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment