Commit d1f684e4 authored by Mark Rutland, committed by Catalin Marinas

arm64: stacktrace: rework stack boundary discovery

In subsequent patches we'll want to acquire the stack boundaries
ahead-of-time, and we'll need to be able to acquire the relevant
stack_info regardless of whether we have an object that happens to be on
the stack.

This patch replaces the on_XXX_stack() helpers with stackinfo_get_XXX()
helpers, with the caller being responsible for checking whether an
object is on a relevant stack. For the moment this is moved into the
on_accessible_stack() functions, making these slightly larger;
subsequent patches will remove the on_accessible_stack() functions and
simplify the logic.
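
The shape of the split is easy to see in miniature. Below is a condensed,
self-contained sketch (plain C): the stack_info type and the getter/check
split mirror the diff below, while IRQ_STACK_SIZE and the stack base are
stand-in values, and the body of stackinfo_on_stack() (elided in the diff)
is written out here following the helper's semantics.

#include <stdbool.h>

enum stack_type {
	STACK_TYPE_UNKNOWN,
	STACK_TYPE_IRQ,
};

struct stack_info {
	unsigned long low;
	unsigned long high;
	enum stack_type type;
};

#define IRQ_STACK_SIZE	16384		/* stand-in value */
static unsigned long irq_stack_base;	/* stand-in for the per-cpu base */

/* Boundary discovery: report the stack's extents, no membership check. */
static struct stack_info stackinfo_get_irq(void)
{
	unsigned long low = irq_stack_base;
	unsigned long high = low + IRQ_STACK_SIZE;

	return (struct stack_info) {
		.low = low,
		.high = high,
		.type = STACK_TYPE_IRQ,
	};
}

/*
 * Membership check, done by the caller: [sp, sp + size) must lie within
 * [low, high), with a zero low marking an absent stack and the middle
 * comparison rejecting sp + size wrap-around.
 */
static bool stackinfo_on_stack(const struct stack_info *info,
			       unsigned long sp, unsigned long size)
{
	if (!info->low)
		return false;
	if (sp < info->low || sp + size < sp || sp + size > info->high)
		return false;
	return true;
}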

The on_irq_stack() and on_task_stack() helpers are kept as these are
used by IRQ entry sequences and stackleak respectively. As they're only
used as predicates, the stack_info pointer parameter is removed in both
cases.
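
Continuing the sketch above, a retained predicate reduces to a getter plus
the common check, exactly as in the wrappers the diff introduces:

/* Predicate form, as kept for IRQ entry and stackleak: no out-parameter. */
static bool on_irq_stack(unsigned long sp, unsigned long size)
{
	struct stack_info info = stackinfo_get_irq();
	return stackinfo_on_stack(&info, sp, size);
}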

As the on_accessible_stack() functions are always passed a non-NULL info
pointer, these now update info unconditionally. When updating the type
to STACK_TYPE_UNKNOWN, the low/high bounds are also modified, but as
these will not be consumed this should have no adverse effect.
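
Continuing the same sketch, a condensed single-stack version of the
resulting control flow (the real on_accessible_stack() functions below
apply this pattern across several stacks; the function name here is
illustrative):

static struct stack_info stackinfo_get_unknown(void)
{
	return (struct stack_info) {
		.low = 0,
		.high = 0,
		.type = STACK_TYPE_UNKNOWN,
	};
}

static bool accessible_stack_example(unsigned long sp, unsigned long size,
				     struct stack_info *info)
{
	struct stack_info tmp = stackinfo_get_irq();

	if (stackinfo_on_stack(&tmp, sp, size)) {
		*info = tmp;
		return true;
	}

	/* Unconditional update: unknown type, zeroed (unconsumed) bounds. */
	*info = stackinfo_get_unknown();
	return false;
}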

There should be no functional change as a result of this patch.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Kalesh Singh <kaleshsingh@google.com>
Reviewed-by: Madhavan T. Venkataraman <madvenka@linux.microsoft.com>
Reviewed-by: Mark Brown <broonie@kernel.org>
Cc: Fuad Tabba <tabba@google.com>
Cc: Marc Zyngier <maz@kernel.org>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20220901130646.1316937-7-mark.rutland@arm.com
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent 36f9a879
arch/arm64/include/asm/processor.h
@@ -410,7 +410,7 @@ long get_tagged_addr_ctrl(struct task_struct *task);
  * The top of the current task's task stack
  */
 #define current_top_of_stack()	((unsigned long)current->stack + THREAD_SIZE)
-#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1, NULL))
+#define on_thread_stack()	(on_task_stack(current, current_stack_pointer, 1))
 
 #endif /* __ASSEMBLY__ */
 #endif /* __ASM_PROCESSOR_H */
arch/arm64/include/asm/stacktrace.h
@@ -22,77 +22,91 @@ extern void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk,
 
 DECLARE_PER_CPU(unsigned long *, irq_stack_ptr);
 
-static inline bool on_irq_stack(unsigned long sp, unsigned long size,
-				struct stack_info *info)
+static inline struct stack_info stackinfo_get_irq(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(irq_stack_ptr);
 	unsigned long high = low + IRQ_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_IRQ, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_IRQ,
+	};
+}
+
+static inline bool on_irq_stack(unsigned long sp, unsigned long size)
+{
+	struct stack_info info = stackinfo_get_irq();
+	return stackinfo_on_stack(&info, sp, size);
 }
 
-static inline bool on_task_stack(const struct task_struct *tsk,
-				 unsigned long sp, unsigned long size,
-				 struct stack_info *info)
+static inline struct stack_info stackinfo_get_task(const struct task_struct *tsk)
 {
 	unsigned long low = (unsigned long)task_stack_page(tsk);
 	unsigned long high = low + THREAD_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_TASK, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_TASK,
+	};
+}
+
+static inline bool on_task_stack(const struct task_struct *tsk,
+				 unsigned long sp, unsigned long size)
+{
+	struct stack_info info = stackinfo_get_task(tsk);
+	return stackinfo_on_stack(&info, sp, size);
 }
 
 #ifdef CONFIG_VMAP_STACK
 DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
 
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-				     struct stack_info *info)
+static inline struct stack_info stackinfo_get_overflow(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 #else
-static inline bool on_overflow_stack(unsigned long sp, unsigned long size,
-				     struct stack_info *info)
-{
-	return false;
-}
+#define stackinfo_get_overflow()	stackinfo_get_unknown()
 #endif
 
 #if defined(CONFIG_ARM_SDE_INTERFACE) && defined(CONFIG_VMAP_STACK)
 DECLARE_PER_CPU(unsigned long *, sdei_stack_normal_ptr);
 DECLARE_PER_CPU(unsigned long *, sdei_stack_critical_ptr);
 
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-					struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_normal(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_normal_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_NORMAL, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_SDEI_NORMAL,
+	};
 }
 
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-					  struct stack_info *info)
+static inline struct stack_info stackinfo_get_sdei_critical(void)
 {
 	unsigned long low = (unsigned long)raw_cpu_read(sdei_stack_critical_ptr);
 	unsigned long high = low + SDEI_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_SDEI_CRITICAL, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_SDEI_CRITICAL,
+	};
 }
 #else
-static inline bool on_sdei_normal_stack(unsigned long sp, unsigned long size,
-					struct stack_info *info)
-{
-	return false;
-}
-
-static inline bool on_sdei_critical_stack(unsigned long sp, unsigned long size,
-					  struct stack_info *info)
-{
-	return false;
-}
+#define stackinfo_get_sdei_normal()	stackinfo_get_unknown()
+#define stackinfo_get_sdei_critical()	stackinfo_get_unknown()
 #endif
 
 #endif	/* __ASM_STACKTRACE_H */
arch/arm64/include/asm/stacktrace/common.h
@@ -65,6 +65,15 @@ struct unwind_state {
 	struct task_struct *task;
 };
 
+static inline struct stack_info stackinfo_get_unknown(void)
+{
+	return (struct stack_info) {
+		.low = 0,
+		.high = 0,
+		.type = STACK_TYPE_UNKNOWN,
+	};
+}
+
 static inline bool stackinfo_on_stack(const struct stack_info *info,
 				      unsigned long sp, unsigned long size)
 {
@@ -77,25 +86,6 @@ static inline bool stackinfo_on_stack(const struct stack_info *info,
 	return true;
 }
 
-static inline bool on_stack(unsigned long sp, unsigned long size,
-			    unsigned long low, unsigned long high,
-			    enum stack_type type, struct stack_info *info)
-{
-	struct stack_info tmp = {
-		.low = low,
-		.high = high,
-		.type = type,
-	};
-
-	if (!stackinfo_on_stack(&tmp, sp, size))
-		return false;
-
-	if (info)
-		*info = tmp;
-
-	return true;
-}
-
 static inline void unwind_init_common(struct unwind_state *state,
 				      struct task_struct *task)
 {
...
arch/arm64/kernel/ptrace.c
@@ -121,7 +121,7 @@ static bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
 {
 	return ((addr & ~(THREAD_SIZE - 1)) ==
 		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1))) ||
-		on_irq_stack(addr, sizeof(unsigned long), NULL);
+		on_irq_stack(addr, sizeof(unsigned long));
 }
 
 /**
...
arch/arm64/kernel/stacktrace.c
@@ -67,36 +67,55 @@ static inline void unwind_init_from_task(struct unwind_state *state,
 	state->pc = thread_saved_pc(task);
 }
 
-/*
- * We can only safely access per-cpu stacks from current in a non-preemptible
- * context.
- */
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	if (on_task_stack(tsk, sp, size, info))
-		return true;
-	if (tsk != current || preemptible())
-		return false;
-	if (on_irq_stack(sp, size, info))
-		return true;
-	if (on_overflow_stack(sp, size, info))
-		return true;
+	tmp = stackinfo_get_task(tsk);
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
 
-	if (IS_ENABLED(CONFIG_VMAP_STACK) &&
-	    IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) &&
-	    in_nmi()) {
-		if (on_sdei_critical_stack(sp, size, info))
-			return true;
-		if (on_sdei_normal_stack(sp, size, info))
-			return true;
-	}
+	/*
+	 * We can only safely access per-cpu stacks when unwinding the current
+	 * task in a non-preemptible context.
+	 */
+	if (tsk != current || preemptible())
+		goto not_found;
+
+	tmp = stackinfo_get_irq();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	/*
+	 * We can only safely access SDEI stacks when unwinding the current
+	 * task in an NMI context.
+	 */
+	if (!IS_ENABLED(CONFIG_VMAP_STACK) ||
+	    !IS_ENABLED(CONFIG_ARM_SDE_INTERFACE) ||
+	    !in_nmi())
+		goto not_found;
+
+	tmp = stackinfo_get_sdei_normal();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_sdei_critical();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
 
+not_found:
+	*info = stackinfo_get_unknown();
 	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 /*
...
arch/arm64/kvm/hyp/nvhe/stacktrace.c
@@ -39,34 +39,51 @@ static void hyp_prepare_backtrace(unsigned long fp, unsigned long pc)
 
 DEFINE_PER_CPU(unsigned long [NVHE_STACKTRACE_SIZE/sizeof(long)], pkvm_stacktrace);
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-			      struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
 	unsigned long low = (unsigned long)this_cpu_ptr(overflow_stack);
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-			 struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_init_params *params = this_cpu_ptr(&kvm_init_params);
 	unsigned long high = params->stack_hyp_va;
 	unsigned long low = high - PAGE_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_HYP,
+	};
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	return (on_overflow_stack(sp, size, info) ||
-		on_hyp_stack(sp, size, info));
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_hyp();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	*info = stackinfo_get_unknown();
+	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 static int unwind_next(struct unwind_state *state)
...
arch/arm64/kvm/stacktrace.c
@@ -62,37 +62,54 @@ static bool kvm_nvhe_stack_kern_va(unsigned long *addr,
 	return true;
 }
 
-static bool on_overflow_stack(unsigned long sp, unsigned long size,
-			      struct stack_info *info)
+static struct stack_info stackinfo_get_overflow(void)
 {
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->overflow_stack_base;
 	unsigned long high = low + OVERFLOW_STACK_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_OVERFLOW, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_OVERFLOW,
+	};
 }
 
-static bool on_hyp_stack(unsigned long sp, unsigned long size,
-			 struct stack_info *info)
+static struct stack_info stackinfo_get_hyp(void)
 {
 	struct kvm_nvhe_stacktrace_info *stacktrace_info
 		= this_cpu_ptr_nvhe_sym(kvm_stacktrace_info);
 	unsigned long low = (unsigned long)stacktrace_info->stack_base;
 	unsigned long high = low + PAGE_SIZE;
 
-	return on_stack(sp, size, low, high, STACK_TYPE_HYP, info);
+	return (struct stack_info) {
+		.low = low,
+		.high = high,
+		.type = STACK_TYPE_HYP,
+	};
 }
 
 static bool on_accessible_stack(const struct task_struct *tsk,
 				unsigned long sp, unsigned long size,
 				struct stack_info *info)
 {
-	if (info)
-		info->type = STACK_TYPE_UNKNOWN;
+	struct stack_info tmp;
 
-	return (on_overflow_stack(sp, size, info) ||
-		on_hyp_stack(sp, size, info));
+	tmp = stackinfo_get_overflow();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	tmp = stackinfo_get_hyp();
+	if (stackinfo_on_stack(&tmp, sp, size))
+		goto found;
+
+	*info = stackinfo_get_unknown();
+	return false;
+
+found:
+	*info = tmp;
+	return true;
 }
 
 static int unwind_next(struct unwind_state *state)
...