Commit 36aa1e67 authored by Andrey Konovalov, committed by Andrew Morton

lib/stacktrace, kasan, kmsan: rework extra_bits interface

The current implementation of the extra_bits interface is confusing:
passing extra_bits to __stack_depot_save makes it seem that the extra
bits are somehow stored in stack depot. In reality, they are only
embedded into a stack depot handle and are not used within stack depot.

Drop the extra_bits argument from __stack_depot_save and instead provide
a new stack_depot_set_extra_bits function (similar to the existing
stack_depot_get_extra_bits) that saves extra bits into a stack depot
handle.

Update the callers of __stack_depot_save to use the new interface.
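
For illustration, a minimal sketch of the new calling pattern follows. The
wrapper function and variable names below are hypothetical and not part of
this patch; only the two stack depot calls reflect the reworked interface.

    #include <linux/stackdepot.h>

    /* Hypothetical caller: save a stack trace, then embed opaque extra bits. */
    static depot_stack_handle_t save_stack_with_extra(unsigned long *entries,
                                                      unsigned int nr_entries,
                                                      unsigned int extra,
                                                      gfp_t flags)
    {
            depot_stack_handle_t handle;

            /* Old interface: __stack_depot_save(entries, nr_entries, extra, flags, true); */
            handle = __stack_depot_save(entries, nr_entries, flags, true);

            /* New interface: extra bits are embedded into the handle as a separate step. */
            return stack_depot_set_extra_bits(handle, extra);
    }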

This change also fixes a minor issue in the old code: when saving the stack
trace fails and extra_bits is used, __stack_depot_save does not return NULL,
because the non-zero extra bits make the returned handle non-zero.
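
To make that failure mode concrete, here is a simplified, userspace-only
illustration; the union layout and field widths are assumptions for the sake
of the demonstration, not the real stack depot layout.

    #include <stdio.h>

    /*
     * Simplified stand-in for the handle/extra-bits packing: the handle and
     * the extra bits share one 32-bit value. Field widths are assumed.
     */
    union demo_handle_parts {
            unsigned int whole;             /* value returned to the caller */
            struct {
                    unsigned int handle : 27;
                    unsigned int extra  : 5;
            };
    };

    int main(void)
    {
            union demo_handle_parts retval = { .whole = 0 }; /* saving failed */

            retval.extra = 0x3; /* old code wrote the extra bits unconditionally */
            printf("%u\n", retval.whole); /* non-zero, so the failure is masked */
            return 0;
    }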

Link: https://lkml.kernel.org/r/317123b5c05e2f82854fc55d8b285e0869d3cb77.1676063693.git.andreyknvl@google.com
Signed-off-by: Andrey Konovalov <andreyknvl@google.com>
Reviewed-by: Alexander Potapenko <glider@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
parent d11a5621
--- a/include/linux/stackdepot.h
+++ b/include/linux/stackdepot.h
@@ -57,7 +57,6 @@ static inline int stack_depot_early_init(void) { return 0; }
 
 depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					unsigned int nr_entries,
-					unsigned int extra_bits,
 					gfp_t gfp_flags, bool can_alloc);
 
 depot_stack_handle_t stack_depot_save(unsigned long *entries,
@@ -71,6 +70,9 @@ void stack_depot_print(depot_stack_handle_t stack);
 int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
 		       int spaces);
 
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+			depot_stack_handle_t handle, unsigned int extra_bits);
+
 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle);
 
 #endif
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -357,7 +357,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
  *
  * @entries:		Pointer to storage array
  * @nr_entries:		Size of the storage array
- * @extra_bits:		Flags to store in unused bits of depot_stack_handle_t
  * @alloc_flags:	Allocation gfp flags
  * @can_alloc:		Allocate stack pools (increased chance of failure if false)
  *
@@ -369,10 +368,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
  * If the stack trace in @entries is from an interrupt, only the portion up to
  * interrupt entry is saved.
  *
- * Additional opaque flags can be passed in @extra_bits, stored in the unused
- * bits of the stack handle, and retrieved using stack_depot_get_extra_bits()
- * without calling stack_depot_fetch().
- *
  * Context: Any context, but setting @can_alloc to %false is required if
  *          alloc_pages() cannot be used from the current context. Currently
  *          this is the case from contexts where neither %GFP_ATOMIC nor
@@ -382,7 +377,6 @@ static inline struct stack_record *find_stack(struct stack_record *bucket,
  */
 depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 					unsigned int nr_entries,
-					unsigned int extra_bits,
 					gfp_t alloc_flags, bool can_alloc)
 {
 	struct stack_record *found = NULL, **bucket;
@@ -471,8 +465,6 @@ depot_stack_handle_t __stack_depot_save(unsigned long *entries,
 	if (found)
 		retval.handle = found->handle.handle;
 fast_exit:
-	retval.extra = extra_bits;
-
 	return retval.handle;
 }
 EXPORT_SYMBOL_GPL(__stack_depot_save);
@@ -493,7 +485,7 @@ depot_stack_handle_t stack_depot_save(unsigned long *entries,
 				      unsigned int nr_entries,
 				      gfp_t alloc_flags)
 {
-	return __stack_depot_save(entries, nr_entries, 0, alloc_flags, true);
+	return __stack_depot_save(entries, nr_entries, alloc_flags, true);
 }
 EXPORT_SYMBOL_GPL(stack_depot_save);
 
@@ -576,6 +568,38 @@ int stack_depot_snprint(depot_stack_handle_t handle, char *buf, size_t size,
 }
 EXPORT_SYMBOL_GPL(stack_depot_snprint);
 
+/**
+ * stack_depot_set_extra_bits - Set extra bits in a stack depot handle
+ *
+ * @handle:	Stack depot handle returned from stack_depot_save()
+ * @extra_bits:	Value to set the extra bits
+ *
+ * Return: Stack depot handle with extra bits set
+ *
+ * Stack depot handles have a few unused bits, which can be used for storing
+ * user-specific information. These bits are transparent to the stack depot.
+ */
+depot_stack_handle_t __must_check stack_depot_set_extra_bits(
+			depot_stack_handle_t handle, unsigned int extra_bits)
+{
+	union handle_parts parts = { .handle = handle };
+
+	/* Don't set extra bits on empty handles. */
+	if (!handle)
+		return 0;
+
+	parts.extra = extra_bits;
+	return parts.handle;
+}
+EXPORT_SYMBOL(stack_depot_set_extra_bits);
+
+/**
+ * stack_depot_get_extra_bits - Retrieve extra bits from a stack depot handle
+ *
+ * @handle:	Stack depot handle with extra bits saved
+ *
+ * Return: Extra bits retrieved from the stack depot handle
+ */
 unsigned int stack_depot_get_extra_bits(depot_stack_handle_t handle)
 {
 	union handle_parts parts = { .handle = handle };
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -43,7 +43,7 @@ depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc)
 	unsigned int nr_entries;
 
 	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
-	return __stack_depot_save(entries, nr_entries, 0, flags, can_alloc);
+	return __stack_depot_save(entries, nr_entries, flags, can_alloc);
 }
 
 void kasan_set_track(struct kasan_track *track, gfp_t flags)
--- a/mm/kmsan/core.c
+++ b/mm/kmsan/core.c
@@ -69,13 +69,15 @@ depot_stack_handle_t kmsan_save_stack_with_flags(gfp_t flags,
 {
 	unsigned long entries[KMSAN_STACK_DEPTH];
 	unsigned int nr_entries;
+	depot_stack_handle_t handle;
 
 	nr_entries = stack_trace_save(entries, KMSAN_STACK_DEPTH, 0);
 
 	/* Don't sleep (see might_sleep_if() in __alloc_pages_nodemask()). */
 	flags &= ~__GFP_DIRECT_RECLAIM;
 
-	return __stack_depot_save(entries, nr_entries, extra, flags, true);
+	handle = __stack_depot_save(entries, nr_entries, flags, true);
+	return stack_depot_set_extra_bits(handle, extra);
 }
 
 /* Copy the metadata following the memmove() behavior. */
@@ -215,6 +217,7 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	u32 extra_bits;
 	int depth;
 	bool uaf;
+	depot_stack_handle_t handle;
 
 	if (!id)
 		return id;
@@ -250,8 +253,9 @@ depot_stack_handle_t kmsan_internal_chain_origin(depot_stack_handle_t id)
 	 * positives when __stack_depot_save() passes it to instrumented code.
 	 */
 	kmsan_internal_unpoison_memory(entries, sizeof(entries), false);
-	return __stack_depot_save(entries, ARRAY_SIZE(entries), extra_bits,
-				  GFP_ATOMIC, true);
+	handle = __stack_depot_save(entries, ARRAY_SIZE(entries), GFP_ATOMIC,
+				    true);
+	return stack_depot_set_extra_bits(handle, extra_bits);
 }
 
 void kmsan_internal_set_shadow_origin(void *addr, size_t size, int b,