Commit 26b089a7 authored by Linus Torvalds

Merge tag 'csky-for-linus-5.7-rc6' of git://github.com/c-sky/csky-linux

Pull csky updates from Guo Ren:

 - fix for copy_from/to_user (a hard-to-find bug, thx Viro)

 - fix for calltrace panic without FRAME_POINTER

 - two fixes for perf

 - two build fixes

 - four fixes for non-fatal bugs (msa, rm dis_irq, cleanup psr,
   gdbmacros.txt)

* tag 'csky-for-linus-5.7-rc6' of git://github.com/c-sky/csky-linux:
  csky: Fixup raw_copy_from_user()
  csky: Fixup gdbmacros.txt with name sp in thread_struct
  csky: Fixup remove unnecessary save/restore PSR code
  csky: Fixup remove duplicate irq_disable
  csky: Fixup calltrace panic
  csky: Fixup perf callchain unwind
  csky: Fixup msa highest 3 bits mask
  csky: Fixup perf probe -x hungup
  csky: Fixup compile error for abiv1 entry.S
  csky/ftrace: Fixup error when disable CONFIG_DYNAMIC_FTRACE
parents 5c33696f 51bb38cb
@@ -8,6 +8,7 @@ config CSKY
        select ARCH_HAS_SYNC_DMA_FOR_DEVICE
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
+       select ARCH_WANT_FRAME_POINTERS if !CPU_CK610
        select COMMON_CLK
        select CLKSRC_MMIO
        select CSKY_MPINTC if CPU_CK860
@@ -38,6 +39,7 @@ config CSKY
        select HAVE_ARCH_TRACEHOOK
        select HAVE_ARCH_AUDITSYSCALL
        select HAVE_COPY_THREAD_TLS
+       select HAVE_DEBUG_BUGVERBOSE
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
        select HAVE_FUNCTION_TRACER
......
@@ -47,7 +47,7 @@ ifeq ($(CSKYABI),abiv2)
 KBUILD_CFLAGS += -mno-stack-size
 endif

-ifdef CONFIG_STACKTRACE
+ifdef CONFIG_FRAME_POINTER
 KBUILD_CFLAGS += -mbacktrace
 endif
......
@@ -167,8 +167,8 @@
         * BA     Reserved  C   D   V
         */
        cprcr   r6, cpcr30
-       lsri    r6, 28
-       lsli    r6, 28
+       lsri    r6, 29
+       lsli    r6, 29
        addi    r6, 0xe
        cpwcr   r6, cpcr30
......
@@ -285,8 +285,8 @@
         */
        mfcr    r6, cr<30, 15> /* Get MSA0 */
 2:
-       lsri    r6, 28
-       lsli    r6, 28
+       lsri    r6, 29
+       lsli    r6, 29
        addi    r6, 0x1ce
        mtcr    r6, cr<30, 15> /* Set MSA0 */
......
@@ -103,6 +103,8 @@ ENTRY(_mcount)
        mov     a0, lr
        subi    a0, 4
        ldw     a1, (sp, 24)
+       lrw     a2, function_trace_op
+       ldw     a2, (a2, 0)
        jsr     r26
......
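For orientation: the two loads added above hand the current ftrace_ops to the tracing callback, which _mcount reaches through r26 with ip in a0 and parent_ip in a1. A rough sketch of the callback prototype this relies on, paraphrased from the generic ftrace header of this era (treat the exact spelling as an assumption; it is not part of this diff):

    /*
     * Approximate shape of the generic ftrace callback:
     *   ip        - address of the traced function (a0 above)
     *   parent_ip - caller address (a1 above)
     *   op        - the ftrace_ops being invoked (a2, loaded from function_trace_op)
     *   regs      - saved registers when the _WITH_REGS variant is in use
     */
    struct ftrace_ops;
    struct pt_regs;

    typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs);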
@@ -41,8 +41,7 @@ extern struct cpuinfo_csky cpu_data[];
 #define TASK_UNMAPPED_BASE (TASK_SIZE / 3)

 struct thread_struct {
-       unsigned long  ksp;       /* kernel stack pointer */
-       unsigned long  sr;        /* saved status register */
+       unsigned long  sp;        /* kernel stack pointer */
        unsigned long  trap_no;   /* saved status register */

        /* FPU regs */
@@ -50,8 +49,7 @@ struct thread_struct {
 };

 #define INIT_THREAD { \
-       .ksp = sizeof(init_stack) + (unsigned long) &init_stack, \
-       .sr = DEFAULT_PSR_VALUE, \
+       .sp = sizeof(init_stack) + (unsigned long) &init_stack, \
 }

 /*
......
@@ -58,6 +58,16 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
        return regs->usp;
 }

+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+       return regs->regs[4];
+}
+
+static inline void frame_pointer_set(struct pt_regs *regs,
+                                    unsigned long val)
+{
+       regs->regs[4] = val;
+}
+
 extern int regs_query_register_offset(const char *name);
 extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
                                               unsigned int n);
......
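The two helpers above expose r8 (regs->regs[4]), which csky uses as the frame pointer; the rewritten unwinder later in this series seeds its state from them. A minimal sketch of that seeding, mirroring the walk_stackframe() logic shown below (the helper name is hypothetical, for illustration only):

    #include <linux/ptrace.h>

    /* Hypothetical helper: start an unwind from a trapped context using the
     * accessors added above; walk_stackframe() below does the same inline. */
    static void seed_unwind_from_regs(struct pt_regs *regs, unsigned long *fp,
                                      unsigned long *sp, unsigned long *pc)
    {
            *fp = frame_pointer(regs);        /* r8, the ABI frame pointer */
            *sp = user_stack_pointer(regs);   /* regs->usp */
            *pc = instruction_pointer(regs);  /* regs->pc */
    }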
@@ -38,7 +38,13 @@ struct thread_info {
 #define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)

 #define thread_saved_fp(tsk) \
-       ((unsigned long)(((struct switch_stack *)(tsk->thread.ksp))->r8))
+       ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r8))
+
+#define thread_saved_sp(tsk) \
+       ((unsigned long)(tsk->thread.sp))
+
+#define thread_saved_lr(tsk) \
+       ((unsigned long)(((struct switch_stack *)(tsk->thread.sp))->r15))

 static inline struct thread_info *current_thread_info(void)
 {
@@ -54,10 +60,10 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_SIGPENDING          0       /* signal pending */
 #define TIF_NOTIFY_RESUME       1       /* callback before returning to user */
 #define TIF_NEED_RESCHED        2       /* rescheduling necessary */
-#define TIF_SYSCALL_TRACE       3       /* syscall trace active */
-#define TIF_SYSCALL_TRACEPOINT  4       /* syscall tracepoint instrumentation */
-#define TIF_SYSCALL_AUDIT       5       /* syscall auditing */
-#define TIF_UPROBE              6       /* uprobe breakpoint or singlestep */
+#define TIF_UPROBE              3       /* uprobe breakpoint or singlestep */
+#define TIF_SYSCALL_TRACE       4       /* syscall trace active */
+#define TIF_SYSCALL_TRACEPOINT  5       /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT       6       /* syscall auditing */
 #define TIF_POLLING_NRFLAG      16      /* poll_idle() is TIF_NEED_RESCHED */
 #define TIF_MEMDIE              18      /* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK     20      /* restore signal mask in do_signal() */
......
@@ -253,7 +253,7 @@ do { \

 extern int __get_user_bad(void);

-#define __copy_user(to, from, n) \
+#define ___copy_to_user(to, from, n) \
 do { \
        int w0, w1, w2, w3; \
        asm volatile( \
@@ -288,31 +288,34 @@ do { \
        " subi %0, 4 \n" \
        " br 3b \n" \
        "5: cmpnei %0, 0 \n" /* 1B */ \
-       " bf 8f \n" \
+       " bf 13f \n" \
        " ldb %3, (%2, 0) \n" \
        "6: stb %3, (%1, 0) \n" \
        " addi %2, 1 \n" \
        " addi %1, 1 \n" \
        " subi %0, 1 \n" \
        " br 5b \n" \
-       "7: br 8f \n" \
+       "7: subi %0, 4 \n" \
+       "8: subi %0, 4 \n" \
+       "12: subi %0, 4 \n" \
+       " br 13f \n" \
        ".section __ex_table, \"a\" \n" \
        ".align 2 \n" \
-       ".long 2b, 7b \n" \
-       ".long 9b, 7b \n" \
-       ".long 10b, 7b \n" \
+       ".long 2b, 13f \n" \
+       ".long 4b, 13f \n" \
+       ".long 6b, 13f \n" \
+       ".long 9b, 12b \n" \
+       ".long 10b, 8b \n" \
        ".long 11b, 7b \n" \
-       ".long 4b, 7b \n" \
-       ".long 6b, 7b \n" \
        ".previous \n" \
-       "8: \n" \
+       "13: \n" \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(w0), \
          "=r"(w1), "=r"(w2), "=r"(w3) \
        : "0"(n), "1"(to), "2"(from) \
        : "memory"); \
 } while (0)

-#define __copy_user_zeroing(to, from, n) \
+#define ___copy_from_user(to, from, n) \
 do { \
        int tmp; \
        int nsave; \
@@ -355,22 +358,22 @@ do { \
        " addi %1, 1 \n" \
        " subi %0, 1 \n" \
        " br 5b \n" \
-       "8: mov %3, %0 \n" \
-       " movi %4, 0 \n" \
-       "9: stb %4, (%1, 0) \n" \
-       " addi %1, 1 \n" \
-       " subi %3, 1 \n" \
-       " cmpnei %3, 0 \n" \
-       " bt 9b \n" \
-       " br 7f \n" \
+       "8: stw %3, (%1, 0) \n" \
+       " subi %0, 4 \n" \
+       " bf 7f \n" \
+       "9: subi %0, 8 \n" \
+       " bf 7f \n" \
+       "13: stw %3, (%1, 8) \n" \
+       " subi %0, 12 \n" \
+       " bf 7f \n" \
        ".section __ex_table, \"a\" \n" \
        ".align 2 \n" \
-       ".long 2b, 8b \n" \
+       ".long 2b, 7f \n" \
+       ".long 4b, 7f \n" \
+       ".long 6b, 7f \n" \
        ".long 10b, 8b \n" \
-       ".long 11b, 8b \n" \
-       ".long 12b, 8b \n" \
-       ".long 4b, 8b \n" \
-       ".long 6b, 8b \n" \
+       ".long 11b, 9b \n" \
+       ".long 12b,13b \n" \
        ".previous \n" \
        "7: \n" \
        : "=r"(n), "=r"(to), "=r"(from), "=r"(nsave), \
......
@@ -3,7 +3,7 @@ extra-y := head.o vmlinux.lds

 obj-y += entry.o atomic.o signal.o traps.o irq.o time.o vdso.o
 obj-y += power.o syscall.o syscall_table.o setup.o
-obj-y += process.o cpu-probe.o ptrace.o dumpstack.o
+obj-y += process.o cpu-probe.o ptrace.o stacktrace.o
 obj-y += probes/

 obj-$(CONFIG_MODULES) += module.o
......
@@ -18,8 +18,7 @@ int main(void)
        DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));

        /* offsets into the thread struct */
-       DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
-       DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
+       DEFINE(THREAD_KSP, offsetof(struct thread_struct, sp));
        DEFINE(THREAD_FESR, offsetof(struct thread_struct, user_fp.fesr));
        DEFINE(THREAD_FCR, offsetof(struct thread_struct, user_fp.fcr));
        DEFINE(THREAD_FPREG, offsetof(struct thread_struct, user_fp.vr));
......
-// SPDX-License-Identifier: GPL-2.0
-// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-
-#include <linux/ptrace.h>
-
-int kstack_depth_to_print = 48;
-
-void show_trace(unsigned long *stack)
-{
-       unsigned long *stack_end;
-       unsigned long *stack_start;
-       unsigned long *fp;
-       unsigned long addr;
-
-       addr = (unsigned long) stack & THREAD_MASK;
-       stack_start = (unsigned long *) addr;
-       stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-       fp = stack;
-       pr_info("\nCall Trace:");
-
-       while (fp > stack_start && fp < stack_end) {
-#ifdef CONFIG_STACKTRACE
-               addr = fp[1];
-               fp = (unsigned long *) fp[0];
-#else
-               addr = *fp++;
-#endif
-               if (__kernel_text_address(addr))
-                       pr_cont("\n[<%08lx>] %pS", addr, (void *)addr);
-       }
-       pr_cont("\n");
-}
-
-void show_stack(struct task_struct *task, unsigned long *stack)
-{
-       if (!stack) {
-               if (task)
-                       stack = (unsigned long *)thread_saved_fp(task);
-               else
-#ifdef CONFIG_STACKTRACE
-                       asm volatile("mov %0, r8\n":"=r"(stack)::"memory");
-#else
-                       stack = (unsigned long *)&stack;
-#endif
-       }
-       show_trace(stack);
-}
@@ -330,11 +330,6 @@ ENTRY(__switch_to)
        lrw     a3, TASK_THREAD
        addu    a3, a0

-       mfcr    a2, psr                 /* Save PSR value */
-       stw     a2, (a3, THREAD_SR)     /* Save PSR in task struct */
-       bclri   a2, 6                   /* Disable interrupts */
-       mtcr    a2, psr
-
        SAVE_SWITCH_STACK

        stw     sp, (a3, THREAD_KSP)
@@ -345,12 +340,9 @@ ENTRY(__switch_to)

        ldw     sp, (a3, THREAD_KSP)    /* Set next kernel sp */

-       ldw     a2, (a3, THREAD_SR)     /* Set next PSR */
-       mtcr    a2, psr
-
 #if defined(__CSKYABIV2__)
-       addi    r7, a1, TASK_THREAD_INFO
-       ldw     tls, (r7, TINFO_TP_VALUE)
+       addi    a3, a1, TASK_THREAD_INFO
+       ldw     tls, (a3, TINFO_TP_VALUE)
 #endif

        RESTORE_SWITCH_STACK
......
@@ -202,6 +202,7 @@ int ftrace_disable_ftrace_graph_caller(void)
 #endif /* CONFIG_DYNAMIC_FTRACE */
 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */

+#ifdef CONFIG_DYNAMIC_FTRACE
 #ifndef CONFIG_CPU_HAS_ICACHE_INS
 struct ftrace_modify_param {
        int command;
@@ -231,6 +232,7 @@ void arch_ftrace_update_code(int command)
        stop_machine(__ftrace_modify_code, &param, cpu_online_mask);
 }
 #endif
+#endif /* CONFIG_DYNAMIC_FTRACE */

 /* _mcount is defined in abi's mcount.S */
 EXPORT_SYMBOL(_mcount);
@@ -12,12 +12,17 @@ struct stackframe {

 static int unwind_frame_kernel(struct stackframe *frame)
 {
-       if (kstack_end((void *)frame->fp))
+       unsigned long low = (unsigned long)task_stack_page(current);
+       unsigned long high = low + THREAD_SIZE;
+
+       if (unlikely(frame->fp < low || frame->fp > high))
                return -EPERM;
-       if (frame->fp & 0x3 || frame->fp < TASK_SIZE)
+
+       if (kstack_end((void *)frame->fp) || frame->fp & 0x3)
                return -EPERM;

        *frame = *(struct stackframe *)frame->fp;
+
        if (__kernel_text_address(frame->lr)) {
                int graph = 0;
......
@@ -11,6 +11,11 @@

 #define UPROBE_TRAP_NR UINT_MAX

+bool is_swbp_insn(uprobe_opcode_t *insn)
+{
+       return (*insn & 0xffff) == UPROBE_SWBP_INSN;
+}
+
 unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
 {
        return instruction_pointer(regs);
......
@@ -35,7 +35,7 @@ void flush_thread(void){}
  */
 unsigned long thread_saved_pc(struct task_struct *tsk)
 {
-       struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
+       struct switch_stack *sw = (struct switch_stack *)tsk->thread.sp;

        return sw->r15;
 }
@@ -56,8 +56,8 @@ int copy_thread_tls(unsigned long clone_flags,
        childstack = ((struct switch_stack *) childregs) - 1;
        memset(childstack, 0, sizeof(struct switch_stack));

-       /* setup ksp for switch_to !!! */
-       p->thread.ksp = (unsigned long)childstack;
+       /* setup thread.sp for switch_to !!! */
+       p->thread.sp = (unsigned long)childstack;

        if (unlikely(p->flags & PF_KTHREAD)) {
                memset(childregs, 0, sizeof(struct pt_regs));
@@ -98,37 +98,6 @@ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *pr_regs)
        return 1;
 }

-unsigned long get_wchan(struct task_struct *p)
-{
-       unsigned long lr;
-       unsigned long *fp, *stack_start, *stack_end;
-       int count = 0;
-
-       if (!p || p == current || p->state == TASK_RUNNING)
-               return 0;
-
-       stack_start = (unsigned long *)end_of_stack(p);
-       stack_end = (unsigned long *)(task_stack_page(p) + THREAD_SIZE);
-
-       fp = (unsigned long *) thread_saved_fp(p);
-       do {
-               if (fp < stack_start || fp > stack_end)
-                       return 0;
-#ifdef CONFIG_STACKTRACE
-               lr = fp[1];
-               fp = (unsigned long *)fp[0];
-#else
-               lr = *fp++;
-#endif
-               if (!in_sched_functions(lr) &&
-                   __kernel_text_address(lr))
-                       return lr;
-       } while (count++ < 16);
-
-       return 0;
-}
-EXPORT_SYMBOL(get_wchan);
-
 #ifndef CONFIG_CPU_PM_NONE
 void arch_cpu_idle(void)
 {
......
@@ -41,6 +41,9 @@ static void singlestep_disable(struct task_struct *tsk)

        regs = task_pt_regs(tsk);
        regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_RUN;
+
+       /* Enable irq */
+       regs->sr |= BIT(6);
 }

 static void singlestep_enable(struct task_struct *tsk)
@@ -49,6 +52,9 @@ static void singlestep_enable(struct task_struct *tsk)

        regs = task_pt_regs(tsk);
        regs->sr = (regs->sr & TRACE_MODE_MASK) | TRACE_MODE_SI;
+
+       /* Disable irq */
+       regs->sr &= ~BIT(6);
 }

 /*
......
 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd. */

 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 #include <linux/ftrace.h>
+#include <linux/ptrace.h>

-void save_stack_trace(struct stack_trace *trace)
-{
-       save_stack_trace_tsk(current, trace);
-}
-EXPORT_SYMBOL_GPL(save_stack_trace);
-
-void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
-{
-       unsigned long *fp, *stack_start, *stack_end;
-       unsigned long addr;
-       int skip = trace->skip;
-       int savesched;
-       int graph_idx = 0;
-
-       if (tsk == current) {
-               asm volatile("mov %0, r8\n":"=r"(fp));
-               savesched = 1;
-       } else {
-               fp = (unsigned long *)thread_saved_fp(tsk);
-               savesched = 0;
-       }
-
-       addr = (unsigned long) fp & THREAD_MASK;
-       stack_start = (unsigned long *) addr;
-       stack_end = (unsigned long *) (addr + THREAD_SIZE);
-
-       while (fp > stack_start && fp < stack_end) {
-               unsigned long lpp, fpp;
-
-               fpp = fp[0];
-               lpp = fp[1];
-               if (!__kernel_text_address(lpp))
-                       break;
-               else
-                       lpp = ftrace_graph_ret_addr(tsk, &graph_idx, lpp, NULL);
-
-               if (savesched || !in_sched_functions(lpp)) {
-                       if (skip) {
-                               skip--;
-                       } else {
-                               trace->entries[trace->nr_entries++] = lpp;
-                               if (trace->nr_entries >= trace->max_entries)
-                                       break;
-                       }
-               }
-               fp = (unsigned long *)fpp;
-       }
-}
-EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+       unsigned long fp;
+       unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+                            bool (*fn)(unsigned long, void *), void *arg)
+{
+       unsigned long fp, sp, pc;
+
+       if (regs) {
+               fp = frame_pointer(regs);
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
+       } else if (task == NULL || task == current) {
+               const register unsigned long current_sp __asm__ ("sp");
+               const register unsigned long current_fp __asm__ ("r8");
+
+               fp = current_fp;
+               sp = current_sp;
+               pc = (unsigned long)walk_stackframe;
+       } else {
+               /* task blocked in __switch_to */
+               fp = thread_saved_fp(task);
+               sp = thread_saved_sp(task);
+               pc = thread_saved_lr(task);
+       }
+
+       for (;;) {
+               unsigned long low, high;
+               struct stackframe *frame;
+
+               if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+                       break;
+
+               /* Validate frame pointer */
+               low = sp;
+               high = ALIGN(sp, THREAD_SIZE);
+               if (unlikely(fp < low || fp > high || fp & 0x3))
+                       break;
+
+               /* Unwind stack frame */
+               frame = (struct stackframe *)fp;
+               sp = fp;
+               fp = frame->fp;
+               pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+                                          (unsigned long *)(fp - 8));
+       }
+}
+
+#else /* !CONFIG_FRAME_POINTER */
+
+static void notrace walk_stackframe(struct task_struct *task,
+       struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+       unsigned long sp, pc;
+       unsigned long *ksp;
+
+       if (regs) {
+               sp = user_stack_pointer(regs);
+               pc = instruction_pointer(regs);
+       } else if (task == NULL || task == current) {
+               const register unsigned long current_sp __asm__ ("sp");
+
+               sp = current_sp;
+               pc = (unsigned long)walk_stackframe;
+       } else {
+               /* task blocked in __switch_to */
+               sp = thread_saved_sp(task);
+               pc = thread_saved_lr(task);
+       }
+
+       if (unlikely(sp & 0x3))
+               return;
+
+       ksp = (unsigned long *)sp;
+       while (!kstack_end(ksp)) {
+               if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+                       break;
+               pc = (*ksp++) - 0x4;
+       }
+}
+#endif /* CONFIG_FRAME_POINTER */
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+       print_ip_sym(pc);
+       return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp)
+{
+       pr_cont("Call Trace:\n");
+
+       walk_stackframe(task, NULL, print_trace_address, NULL);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+       if (!in_sched_functions(pc)) {
+               unsigned long *p = arg;
+
+               *p = pc;
+               return true;
+       }
+       return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+       unsigned long pc = 0;
+
+       if (likely(task && task != current && task->state != TASK_RUNNING))
+               walk_stackframe(task, NULL, save_wchan, &pc);
+       return pc;
+}
+
+#ifdef CONFIG_STACKTRACE
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+       struct stack_trace *trace = arg;
+
+       if (unlikely(nosched && in_sched_functions(pc)))
+               return false;
+       if (unlikely(trace->skip > 0)) {
+               trace->skip--;
+               return false;
+       }
+
+       trace->entries[trace->nr_entries++] = pc;
+
+       return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+       return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+       walk_stackframe(tsk, NULL, save_trace, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+       save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+#endif /* CONFIG_STACKTRACE */
@@ -7,10 +7,7 @@
 unsigned long raw_copy_from_user(void *to, const void *from,
                        unsigned long n)
 {
-       if (access_ok(from, n))
-               __copy_user_zeroing(to, from, n);
-       else
-               memset(to, 0, n);
+       ___copy_from_user(to, from, n);
        return n;
 }
 EXPORT_SYMBOL(raw_copy_from_user);
@@ -18,8 +15,7 @@ EXPORT_SYMBOL(raw_copy_from_user);
 unsigned long raw_copy_to_user(void *to, const void *from,
                        unsigned long n)
 {
-       if (access_ok(to, n))
-               __copy_user(to, from, n);
+       ___copy_to_user(to, from, n);
        return n;
 }
 EXPORT_SYMBOL(raw_copy_to_user);
......
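The access_ok()/memset() logic removed here is not lost: the generic copy_from_user()/copy_to_user() wrappers already check the user range and zero whatever tail the raw copy could not fill, so repeating that work at the raw_copy_*() level double-handled faults (the hard-to-find bug noted in the pull message). A rough sketch of that wrapper layering, paraphrased from the generic uaccess code of this era (the function name is invented for illustration and is not part of this commit):

    /* Sketch of the generic layering: access_ok() and tail zeroing live in the
     * wrapper, so the arch's raw_copy_from_user() only copies and returns the
     * number of bytes it could NOT copy. */
    static inline unsigned long
    copy_from_user_sketch(void *to, const void __user *from, unsigned long n)
    {
            unsigned long res = n;

            if (access_ok(from, n))
                    res = raw_copy_from_user(to, from, n);
            if (unlikely(res))
                    memset(to + (n - res), 0, res);
            return res;
    }

With that split, ___copy_from_user() above only has to update n to the number of uncopied bytes, which raw_copy_from_user() then returns.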