Commit 62b672c4 authored by Heiko Carstens, committed by Alexander Gordeev

s390/stacktrace: Detect vdso stack frames

Clear the back chain of the extra stack frame added by the vdso user wrapper
code. This allows the user stack walker to detect and skip the non-standard
stack frame. Without this, an incorrect instruction pointer would be added to
stack traces, and stack frame walking would continue with a more or less
random back chain.
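
For orientation, a minimal sketch of the two frame layouts involved. The
field layout of struct stack_frame_user below is an assumption based on the
s390 ELF ABI register save area; only the type and field names referenced by
this diff are taken from the kernel sources:

	struct stack_frame_user {
		unsigned long back_chain;	/* zeroed by the vdso wrapper after this fix */
		unsigned long empty1[5];
		unsigned long gprs[10];		/* gprs[8] holds the saved r14 (return address) */
		unsigned long empty2[4];
	};

	struct stack_frame_vdso_wrapper {
		struct stack_frame_user sf;
		unsigned long return_address;	/* saved r14, see __SFVDSO_RETURN_ADDRESS */
	};

A frame whose back chain is zero while the instruction pointer still lies
within the vdso text can then only be the wrapper's non-standard frame; any
other zero back chain terminates the walk.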

Fixes: aa44433a ("s390: add USER_STACKTRACE support")
Reviewed-by: Jens Remus <jremus@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
parent be72ea09
@@ -98,6 +98,7 @@ void cpu_detect_mhz_feature(void);
 
 extern const struct seq_operations cpuinfo_op;
 extern void execve_tail(void);
+unsigned long vdso_text_size(void);
 unsigned long vdso_size(void);
 
 /*
......
@@ -66,6 +66,7 @@ int main(void)
 	OFFSET(__SF_SIE_CONTROL_PHYS, stack_frame, sie_control_block_phys);
 	DEFINE(STACK_FRAME_OVERHEAD, sizeof(struct stack_frame));
 	BLANK();
+	OFFSET(__SFUSER_BACKCHAIN, stack_frame_user, back_chain);
 	DEFINE(STACK_FRAME_USER_OVERHEAD, sizeof(struct stack_frame_user));
 	OFFSET(__SFVDSO_RETURN_ADDRESS, stack_frame_vdso_wrapper, return_address);
 	DEFINE(STACK_FRAME_VDSO_OVERHEAD, sizeof(struct stack_frame_vdso_wrapper));
......
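
The asm-offsets hunk above exports the offset of the back chain field so the
assembly wrapper further down can address it. Conceptually (reusing the
struct sketch from the commit message, so the concrete values are
assumptions) the generated constants reduce to:

	#include <stddef.h>

	/* What the build-time generated asm-offsets constants amount to: */
	#define __SFUSER_BACKCHAIN        offsetof(struct stack_frame_user, back_chain)  /* 0 */
	#define STACK_FRAME_VDSO_OVERHEAD sizeof(struct stack_frame_vdso_wrapper)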
@@ -92,10 +92,16 @@ static inline bool ip_invalid(unsigned long ip)
 	return false;
 }
 
+static inline bool ip_within_vdso(unsigned long ip)
+{
+	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
+}
+
 void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
 				 struct perf_callchain_entry_ctx *entry,
 				 const struct pt_regs *regs, bool perf)
 {
+	struct stack_frame_vdso_wrapper __user *sf_vdso;
 	struct stack_frame_user __user *sf;
 	unsigned long ip, sp;
 	bool first = true;
@@ -112,11 +118,25 @@ void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *coo
 	while (1) {
 		if (__get_user(sp, &sf->back_chain))
 			break;
+		/*
+		 * VDSO entry code has a non-standard stack frame layout.
+		 * See VDSO user wrapper code for details.
+		 */
+		if (!sp && ip_within_vdso(ip)) {
+			sf_vdso = (void __user *)sf;
+			if (__get_user(ip, &sf_vdso->return_address))
+				break;
+			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
+			sf = (void __user *)sp;
+			if (__get_user(sp, &sf->back_chain))
+				break;
+		} else {
+			sf = (void __user *)sp;
+			if (__get_user(ip, &sf->gprs[8]))
+				break;
+		}
 		/* Sanity check: ABI requires SP to be 8 byte aligned. */
-		if (!sp || sp & 0x7)
-			break;
-		sf = (void __user *)sp;
-		if (__get_user(ip, &sf->gprs[8]))
+		if (sp & 0x7)
 			break;
 		if (ip_invalid(ip)) {
 			/*
......
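
To see the new control flow without the __user accessors and perf plumbing,
here is a compressed user-space model of the updated loop; ip_in_vdso_text()
is a stand-in for the kernel's ip_within_vdso(), and the graceful fault
handling that __get_user() provides is omitted:

	static int ip_in_vdso_text(unsigned long ip);	/* stand-in for ip_within_vdso() */

	static void walk_user_stack(unsigned long sp, unsigned long ip,
				    void (*consume)(unsigned long ip))
	{
		struct stack_frame_user *sf = (struct stack_frame_user *)sp;

		consume(ip);	/* PSW address of the interrupted user code */
		for (;;) {
			sp = sf->back_chain;
			if (!sp && ip_in_vdso_text(ip)) {
				/* Wrapper frame: back chain is zero, the real
				 * return address sits behind the standard frame. */
				struct stack_frame_vdso_wrapper *sf_vdso = (void *)sf;

				ip = sf_vdso->return_address;
				sf = (struct stack_frame_user *)(sf_vdso + 1);
			} else if (sp && !(sp & 0x7)) {
				/* Standard frame: follow the back chain and
				 * pick up the caller's saved r14. */
				sf = (struct stack_frame_user *)sp;
				ip = sf->gprs[8];
			} else {
				break;	/* end of back chain or misaligned SP */
			}
			consume(ip);
		}
	}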
@@ -210,17 +210,22 @@ static unsigned long vdso_addr(unsigned long start, unsigned long len)
 	return addr;
 }
 
-unsigned long vdso_size(void)
+unsigned long vdso_text_size(void)
 {
-	unsigned long size = VVAR_NR_PAGES * PAGE_SIZE;
+	unsigned long size;
 
 	if (is_compat_task())
-		size += vdso32_end - vdso32_start;
+		size = vdso32_end - vdso32_start;
 	else
-		size += vdso64_end - vdso64_start;
+		size = vdso64_end - vdso64_start;
 	return PAGE_ALIGN(size);
 }
 
+unsigned long vdso_size(void)
+{
+	return vdso_text_size() + VVAR_NR_PAGES * PAGE_SIZE;
+}
+
 int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
 {
 	unsigned long addr = VDSO_BASE;
......
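
The split matters for ip_within_vdso(): context.vdso_base marks the start of
the vdso text, so the range check has to use the text size alone, while the
complete mapping additionally contains the VVAR_NR_PAGES data pages. A worked
example with hypothetical numbers (4 KiB pages, 2 vvar pages, 5 KiB of 64-bit
vdso text):

	vdso_text_size() = PAGE_ALIGN(5 KiB)  = 8 KiB
	vdso_size()      = 8 KiB + 2 * 4 KiB  = 16 KiB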
@@ -23,6 +23,7 @@ __kernel_\func:
 	CFI_VAL_OFFSET 15,-STACK_FRAME_USER_OVERHEAD
 	stg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
 	CFI_REL_OFFSET 14,__SFVDSO_RETURN_ADDRESS
+	xc	__SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15)
 	brasl	%r14,__s390_vdso_\func
 	lg	%r14,__SFVDSO_RETURN_ADDRESS(%r15)
 	CFI_RESTORE 14
......
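
The added xc (exclusive-or characters) instruction zeroes the 8-byte back
chain slot in place, which is exactly the marker the stack walker keys on.
In C terms the wrapper behaves roughly like the sketch below (reusing the
struct sketch from the commit message; the real code keeps everything in
registers and on the user stack):

	static void vdso_wrapper_model(struct stack_frame_vdso_wrapper *frame,
				       unsigned long r14, void (*vdso_func)(void))
	{
		frame->return_address = r14;	/* stg %r14,__SFVDSO_RETURN_ADDRESS(%r15) */
		frame->sf.back_chain = 0;	/* xc __SFUSER_BACKCHAIN(8,%r15),__SFUSER_BACKCHAIN(%r15) */
		vdso_func();			/* brasl %r14,__s390_vdso_\func */
		r14 = frame->return_address;	/* lg %r14,__SFVDSO_RETURN_ADDRESS(%r15) */
		(void)r14;			/* the caller's r14 is restored from the frame */
	}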