Commit 4c2e9a67 authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: rename hash table constants and variables

Give more meaningful names to hash table-related constants and variables:

1. Rename STACK_HASH_SCALE to STACK_HASH_TABLE_SCALE to point out that it
   is related to scaling the hash table.

2. Rename STACK_HASH_ORDER_MIN/MAX to STACK_BUCKET_NUMBER_ORDER_MIN/MAX
   to point out that they are related to the number of hash table buckets.

3. Rename stack_hash_order to stack_bucket_number_order for the same
   reason as #2.

No functional changes.

Link: https://lkml.kernel.org/r/f166dd6f3cb2378aea78600714393dd568c33ee9.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 0d249ac0
lib/stackdepot.c
@@ -76,17 +76,17 @@ static bool __stack_depot_early_init_requested __initdata = IS_ENABLED(CONFIG_ST
 static bool __stack_depot_early_init_passed __initdata;
 /* Use one hash table bucket per 16 KB of memory. */
-#define STACK_HASH_SCALE 14
+#define STACK_HASH_TABLE_SCALE 14
 /* Limit the number of buckets between 4K and 1M. */
-#define STACK_HASH_ORDER_MIN 12
-#define STACK_HASH_ORDER_MAX 20
+#define STACK_BUCKET_NUMBER_ORDER_MIN 12
+#define STACK_BUCKET_NUMBER_ORDER_MAX 20
 /* Initial seed for jhash2. */
 #define STACK_HASH_SEED 0x9747b28c
 /* Hash table of pointers to stored stack traces. */
 static struct stack_record **stack_table;
 /* Fixed order of the number of table buckets. Used when KASAN is enabled. */
-static unsigned int stack_hash_order;
+static unsigned int stack_bucket_number_order;
 /* Hash mask for indexing the table. */
 static unsigned int stack_hash_mask;
@@ -137,28 +137,28 @@ int __init stack_depot_early_init(void)
          * in fuzzing scenarios, which leads to a large number of different
          * stack traces being stored in stack depot.
          */
-        if (kasan_enabled() && !stack_hash_order)
-                stack_hash_order = STACK_HASH_ORDER_MAX;
+        if (kasan_enabled() && !stack_bucket_number_order)
+                stack_bucket_number_order = STACK_BUCKET_NUMBER_ORDER_MAX;
         if (!__stack_depot_early_init_requested || stack_depot_disabled)
                 return 0;
         /*
-         * If stack_hash_order is not set, leave entries as 0 to rely on the
-         * automatic calculations performed by alloc_large_system_hash.
+         * If stack_bucket_number_order is not set, leave entries as 0 to rely
+         * on the automatic calculations performed by alloc_large_system_hash.
         */
-        if (stack_hash_order)
-                entries = 1UL << stack_hash_order;
+        if (stack_bucket_number_order)
+                entries = 1UL << stack_bucket_number_order;
         pr_info("allocating hash table via alloc_large_system_hash\n");
         stack_table = alloc_large_system_hash("stackdepot",
                                                 sizeof(struct stack_record *),
                                                 entries,
-                                                STACK_HASH_SCALE,
+                                                STACK_HASH_TABLE_SCALE,
                                                 HASH_EARLY | HASH_ZERO,
                                                 NULL,
                                                 &stack_hash_mask,
-                                                1UL << STACK_HASH_ORDER_MIN,
-                                                1UL << STACK_HASH_ORDER_MAX);
+                                                1UL << STACK_BUCKET_NUMBER_ORDER_MIN,
+                                                1UL << STACK_BUCKET_NUMBER_ORDER_MAX);
         if (!stack_table) {
                 pr_err("hash table allocation failed, disabling\n");
                 stack_depot_disabled = true;
@@ -181,13 +181,13 @@ int stack_depot_init(void)
                 goto out_unlock;
         /*
-         * Similarly to stack_depot_early_init, use stack_hash_order
+         * Similarly to stack_depot_early_init, use stack_bucket_number_order
          * if assigned, and rely on automatic scaling otherwise.
          */
-        if (stack_hash_order) {
-                entries = 1UL << stack_hash_order;
+        if (stack_bucket_number_order) {
+                entries = 1UL << stack_bucket_number_order;
         } else {
-                int scale = STACK_HASH_SCALE;
+                int scale = STACK_HASH_TABLE_SCALE;
                 entries = nr_free_buffer_pages();
                 entries = roundup_pow_of_two(entries);
@@ -198,10 +198,10 @@ int stack_depot_init(void)
                 entries <<= (PAGE_SHIFT - scale);
         }
-        if (entries < 1UL << STACK_HASH_ORDER_MIN)
-                entries = 1UL << STACK_HASH_ORDER_MIN;
-        if (entries > 1UL << STACK_HASH_ORDER_MAX)
-                entries = 1UL << STACK_HASH_ORDER_MAX;
+        if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
+                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
+        if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
+                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;
         pr_info("allocating hash table of %lu entries via kvcalloc\n", entries);
         stack_table = kvcalloc(entries, sizeof(struct stack_record *), GFP_KERNEL);
...
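The comments in the diff spell out the sizing policy behind these constants: one hash table bucket per 16 KB of memory (scale 14), with the bucket count rounded to a power of two and clamped between 2^12 (4K) and 2^20 (1M) buckets. Below is a minimal standalone userspace sketch of that arithmetic, not the kernel code itself: it assumes 4 KB pages and a hypothetical 4 GB of free buffer pages, reimplements roundup_pow_of_two() locally, and only redefines the renamed constants for illustration, whereas the kernel gets its page count from nr_free_buffer_pages() and allocates the table via alloc_large_system_hash() or kvcalloc().

/*
 * Standalone sketch of the stack depot bucket-count calculation.
 * Assumptions: 4 KB pages and a made-up 4 GB of free buffer pages.
 */
#include <stdio.h>

#define PAGE_SHIFT 12                           /* assumes 4 KB pages */
#define STACK_HASH_TABLE_SCALE 14               /* one bucket per 16 KB */
#define STACK_BUCKET_NUMBER_ORDER_MIN 12        /* at least 4K buckets */
#define STACK_BUCKET_NUMBER_ORDER_MAX 20        /* at most 1M buckets */

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long roundup_pow_of_two(unsigned long x)
{
        unsigned long r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        /* Hypothetical machine: 4 GB of free buffer pages (4 KB each). */
        unsigned long pages = 1UL << 20;
        unsigned long entries = roundup_pow_of_two(pages);

        /* 4 KB pages, one bucket per 16 KB: divide the page count by 4. */
        entries >>= (STACK_HASH_TABLE_SCALE - PAGE_SHIFT);

        /* Clamp to the [4K, 1M] bucket range, as in the diff above. */
        if (entries < 1UL << STACK_BUCKET_NUMBER_ORDER_MIN)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MIN;
        if (entries > 1UL << STACK_BUCKET_NUMBER_ORDER_MAX)
                entries = 1UL << STACK_BUCKET_NUMBER_ORDER_MAX;

        printf("%lu hash table buckets\n", entries);    /* prints 262144 */
        return 0;
}

For the assumed 4 GB of memory this yields 4 GB / 16 KB = 262144 buckets (order 18), which falls inside the clamping range, so neither bound applies.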