Commit 108be8de authored by Andrey Konovalov, committed by Andrew Morton

lib/stackdepot: allow users to evict stack traces

Add stack_depot_put, a function that decrements the reference counter on a
stack record and removes it from the stack depot once the counter reaches
0.

Internally, when removing a stack record, the function unlinks it from the
hash table bucket and returns it to the freelist.

With this change, users of stack depot can call stack_depot_put when a stack
trace no longer needs to be kept in the stack depot.  This avoids polluting
the stack depot with irrelevant stack traces and thus leaves more space to
store the relevant ones before the stack depot reaches its capacity.
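
For illustration, a user might pair the two operations roughly as in the
sketch below.  The helper names are hypothetical, the entry count, GFP flags
and skipnr value are arbitrary, and stack_depot_save_flags() with
STACK_DEPOT_FLAG_CAN_ALLOC comes from the earlier patches in this series (a
sleepable context is assumed):

    #include <linux/gfp.h>
    #include <linux/kernel.h>
    #include <linux/stackdepot.h>
    #include <linux/stacktrace.h>

    static depot_stack_handle_t object_record_stack(void)
    {
            unsigned long entries[64];
            unsigned int nr_entries;

            nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
            /* STACK_DEPOT_FLAG_GET takes a reference on the saved record. */
            return stack_depot_save_flags(entries, nr_entries, GFP_KERNEL,
                                          STACK_DEPOT_FLAG_CAN_ALLOC |
                                          STACK_DEPOT_FLAG_GET);
    }

    static void object_release_stack(depot_stack_handle_t handle)
    {
            /* Drop the reference; at refcount 0 the record is evicted. */
            stack_depot_put(handle);
    }

Each handle obtained with STACK_DEPOT_FLAG_GET must eventually be passed to
stack_depot_put exactly once.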

Link: https://lkml.kernel.org/r/1d1ad5692ee43d4fc2b3fd9d221331d30b36123f.1700502145.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Evgenii Stepanov <eugenis@google.com>
Cc: Marco Elver <elver@google.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent 410b764f
@@ -97,6 +97,8 @@ static inline int stack_depot_early_init(void) { return 0; }
  *
  * If STACK_DEPOT_FLAG_GET is set in @depot_flags, stack depot will increment
  * the refcount on the saved stack trace if it already exists in stack depot.
+ * Users of this flag must also call stack_depot_put() when keeping the stack
+ * trace is no longer required to avoid overflowing the refcount.
  *
  * If the provided stack trace comes from the interrupt context, only the part
  * up to the interrupt entry is saved.
@@ -162,6 +164,18 @@ void stack_depot_print(depot_stack_handle_t stack);
 int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
 			int spaces);

+/**
+ * stack_depot_put - Drop a reference to a stack trace from stack depot
+ *
+ * @handle: Stack depot handle returned from stack_depot_save()
+ *
+ * The stack trace is evicted from stack depot once all references to it have
+ * been dropped (once the number of stack_depot_put() calls matches the
+ * number of stack_depot_save_flags() calls with STACK_DEPOT_FLAG_GET set for
+ * this stack trace).
+ */
+void stack_depot_put(depot_stack_handle_t handle);
+
 /**
  * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
  *
@@ -394,7 +394,7 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	size_t offset = parts.offset << DEPOT_STACK_ALIGN;
 	struct stack_record *stack;

-	lockdep_assert_held_read(&pool_rwlock);
+	lockdep_assert_held(&pool_rwlock);

 	if (parts.pool_index > pools_num) {
 		WARN(1, "pool index %d out of bounds (%d) for stack id %08x\n",
@@ -410,6 +410,14 @@ static struct stack_record *depot_fetch_stack(depot_stack_handle_t handle)
 	return stack;
 }

+/* Links stack into the freelist. */
+static void depot_free_stack(struct stack_record *stack)
+{
+	lockdep_assert_held_write(&pool_rwlock);
+
+	list_add(&stack->list, &free_stacks);
+}
+
 /* Calculates the hash for a stack. */
 static inline u32 hash_stack(unsigned long *entries, unsigned int size)
 {
@@ -592,6 +600,33 @@ unsigned int stack_depot_fetch(depot_stack_handle_t handle,
 }
 EXPORT_SYMBOL_GPL(stack_depot_fetch);

+void stack_depot_put(depot_stack_handle_t handle)
+{
+	struct stack_record *stack;
+	unsigned long flags;
+
+	if (!handle || stack_depot_disabled)
+		return;
+
+	write_lock_irqsave(&pool_rwlock, flags);
+
+	stack = depot_fetch_stack(handle);
+	if (WARN_ON(!stack))
+		goto out;
+
+	if (refcount_dec_and_test(&stack->count)) {
+		/* Unlink stack from the hash table. */
+		list_del(&stack->list);
+
+		/* Free stack. */
+		depot_free_stack(stack);
+	}
+
+out:
+	write_unlock_irqrestore(&pool_rwlock, flags);
+}
+EXPORT_SYMBOL_GPL(stack_depot_put);
+
 void stack_depot_print(depot_stack_handle_t stack)
 {
 	unsigned long *entries;
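
To illustrate the refcount semantics above with a hypothetical sequence (not
part of the patch, and assuming the depot still has free space so
STACK_DEPOT_FLAG_CAN_ALLOC is not needed): saving the same trace twice with
STACK_DEPOT_FLAG_GET leaves the record with a reference count of 2, so it is
only unlinked and returned to the freelist after the second stack_depot_put():

    unsigned long entries[32];
    unsigned int nr = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
    depot_stack_handle_t h1, h2;

    /* Both saves resolve to the same record, so h2 == h1 and its refcount is 2. */
    h1 = stack_depot_save_flags(entries, nr, GFP_KERNEL, STACK_DEPOT_FLAG_GET);
    h2 = stack_depot_save_flags(entries, nr, GFP_KERNEL, STACK_DEPOT_FLAG_GET);

    stack_depot_put(h1);    /* refcount 2 -> 1, the record stays in its bucket */
    stack_depot_put(h2);    /* refcount 1 -> 0, the record is unlinked and freed */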