Commit 2923f5ea authored by Greentime Hu

nds32: Exception handling

This patch adds the exception/interrupt entry code, the pt_regs structure
and its related accessors.

/* Unaligned access handling */
Andes processors cannot load/store data that is not naturally aligned on
the bus; for example, a 4-byte load requires a start address divisible
by 4. When an unaligned access happens, a data unaligned exception is
triggered, and the user process receives SIGSEGV or the kernel oopses,
depending on where the unaligned address lies. To let user space
load/store data at unaligned addresses, software load/store emulation is
implemented in arch/nds32/mm/alignment.c to handle the data unaligned
exception.
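
The idea behind the emulation, reduced to its core: replace one unaligned
word access with byte accesses that the bus can always perform, then
reassemble the value in software. The helper below is a hypothetical
sketch of that idea, not the actual alignment.c code, and it assumes a
little-endian configuration:

static unsigned long emulate_unaligned_lw(const unsigned char *addr)
{
	/*
	 * Four naturally aligned byte loads replace the faulting
	 * 4-byte load; little-endian byte order is assumed here.
	 */
	return (unsigned long)addr[0] |
	       ((unsigned long)addr[1] << 8) |
	       ((unsigned long)addr[2] << 16) |
	       ((unsigned long)addr[3] << 24);
}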

Unaligned access handling is disabled by default because unaligned
accesses are not the normal case. The feature can be enabled with the
following steps (a small user-space usage sketch follows the list).

A. Compile time:
    1. Enable kernel config CONFIG_ALIGNMENT_TRAP
B. Run time:
    1. Enter the /proc/sys/nds32/unaligned_acess directory.
    2. Write 1 to the file enable_mode to enable unaligned access
       handling; write 0 to this file to disable it again.
    3. Write 1 to the file debug to log which unaligned address is
       being handled; write 0 to this file to disable the logging.
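
For illustration, the run-time toggles can be flipped from a small C
program; this sketch assumes CONFIG_ALIGNMENT_TRAP was enabled at
compile time and uses the proc paths exactly as listed above:

#include <stdio.h>

/* Write a flag value to one of the proc files listed above. */
static int write_flag(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fputs(val, f);
	return fclose(f);
}

int main(void)
{
	/* Enable the unaligned access handler and its debug output. */
	write_flag("/proc/sys/nds32/unaligned_acess/enable_mode", "1");
	write_flag("/proc/sys/nds32/unaligned_acess/debug", "1");
	return 0;
}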

However, the unaligned access handler cannot help when the unaligned
address itself is inaccessible, e.g. because of a protection violation.
In that case, the default behavior for the data unaligned exception
still applies.
Signed-off-by: Vincent Chen <vincentc@andestech.com>
Signed-off-by: Greentime Hu <greentime@andestech.com>
Acked-by: Arnd Bergmann <arnd@arndb.de>
parent 001d953e
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#ifndef __ASM_NDS32_PTRACE_H
#define __ASM_NDS32_PTRACE_H
#include <uapi/asm/ptrace.h>
/*
 * If pt_regs.syscallno == NO_SYSCALL, then the thread is not executing
 * a syscall -- i.e., its most recent entry into the kernel from
 * userspace was not via syscall, or otherwise a tracer cancelled the
 * syscall.
 *
 * This must have the value -1, for ABI compatibility with ptrace etc.
 */
#define NO_SYSCALL (-1)
#ifndef __ASSEMBLY__
#include <linux/types.h>
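/*
 * Exception stack frame.  The anonymous union lets the saved registers
 * be viewed either as the uapi struct user_pt_regs or through the
 * individually named fields below; the two layouts must stay in sync.
 */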
struct pt_regs {
	union {
		struct user_pt_regs user_regs;
		struct {
			long uregs[26];
			long fp;
			long gp;
			long lp;
			long sp;
			long ipc;
#if defined(CONFIG_HWZOL)
			long lb;
			long le;
			long lc;
#else
			long dummy[3];
#endif
			long syscallno;
		};
	};
	long orig_r0;
	long ir0;
	long ipsw;
	long pipsw;
	long pipc;
	long pp0;
	long pp1;
	long fucop_ctl;
	long osp;
};

static inline bool in_syscall(struct pt_regs const *regs)
{
	return regs->syscallno != NO_SYSCALL;
}

static inline void forget_syscall(struct pt_regs *regs)
{
	regs->syscallno = NO_SYSCALL;
}

static inline unsigned long regs_return_value(struct pt_regs *regs)
{
	return regs->uregs[0];
}
extern void show_regs(struct pt_regs *);
/* Avoid circular header include via sched.h */
struct task_struct;
#define arch_has_single_step() (1)
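/*
 * The POM (processor operation mode) bit of the saved $IPSW is clear
 * when the exception came from user mode and set for superuser mode;
 * GIE is the global interrupt enable bit.
 */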
#define user_mode(regs) (((regs)->ipsw & PSW_mskPOM) == 0)
#define interrupts_enabled(regs) (!!((regs)->ipsw & PSW_mskGIE))
#define user_stack_pointer(regs) ((regs)->sp)
#define instruction_pointer(regs) ((regs)->ipc)
#define profile_pc(regs) instruction_pointer(regs)
#endif /* __ASSEMBLY__ */
#endif
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/linkage.h>
#include <asm/memory.h>
#include <asm/nds32.h>
#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/page.h>
#ifdef CONFIG_HWZOL
	.macro push_zol
	mfusr	$r14, $LB
	mfusr	$r15, $LE
	mfusr	$r16, $LC
	.endm
#endif
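/*
 * save_user_regs builds the struct pt_regs frame on the kernel stack
 * on every exception/interrupt entry: $r0-$r25, then $fp/$gp/$lp/$sp,
 * the user stack pointer and $IPC, the optional zero-overhead-loop
 * registers, and the remaining system registers.
 */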
	.macro save_user_regs
	smw.adm	$sp, [$sp], $sp, #0x1
	/* move $SP to the bottom of pt_regs */
	addi	$sp, $sp, -OSP_OFFSET
	/* push $r0 ~ $r25 */
	smw.bim	$r0, [$sp], $r25
	/* push $fp, $gp, $lp */
	smw.bim	$sp, [$sp], $sp, #0xe
	mfsr	$r12, $SP_USR
	mfsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	push_zol
#endif
	movi	$r17, -1
	move	$r18, $r0
	mfsr	$r19, $PSW
	mfsr	$r20, $IPSW
	mfsr	$r21, $P_IPSW
	mfsr	$r22, $P_IPC
	mfsr	$r23, $P_P0
	mfsr	$r24, $P_P1
	smw.bim	$r12, [$sp], $r24, #0
	addi	$sp, $sp, -FUCOP_CTL_OFFSET
	/* Initialize kernel space $fp */
	andi	$p0, $r20, #PSW_mskPOM
	movi	$p1, #0x0
	cmovz	$fp, $p1, $p0
	andi	$r16, $r19, #PSW_mskINTL
	slti	$r17, $r16, #4
	bnez	$r17, 1f
	addi	$r17, $r19, #-2
	mtsr	$r17, $PSW
	isb
1:
	/* If it was superuser mode, we don't need to update $r25 */
	bnez	$p0, 2f
	la	$p0, __entry_task
	lw	$r25, [$p0]
2:
	.endm
	.text

/*
 * Exception Vector
 */
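/*
 * Jump table indexed by the vector number that
 * common_exception_handler extracts from $ITYPE below.
 */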
exception_handlers:
	.long	unhandled_exceptions	!Reset/NMI
	.long	unhandled_exceptions	!TLB fill
	.long	do_page_fault		!PTE not present
	.long	do_dispatch_tlb_misc	!TLB misc
	.long	unhandled_exceptions	!TLB VLPT
	.long	unhandled_exceptions	!Machine Error
	.long	do_debug_trap		!Debug related
	.long	do_dispatch_general	!General exception
	.long	eh_syscall		!Syscall
	.long	asm_do_IRQ		!IRQ
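
/*
 * All vectors funnel into this common handler: it saves the register
 * frame, extracts the vector number from $ITYPE and dispatches through
 * the table above, with $lp preset to the matching return path.
 */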
common_exception_handler:
	save_user_regs
	mfsr	$p0, $ITYPE
	andi	$p0, $p0, #ITYPE_mskVECTOR
	srli	$p0, $p0, #ITYPE_offVECTOR
	andi	$p1, $p0, #NDS32_VECTOR_mskNONEXCEPTION
	bnez	$p1, 1f
	sethi	$lp, hi20(ret_from_exception)
	ori	$lp, $lp, lo12(ret_from_exception)
	sethi	$p1, hi20(exception_handlers)
	ori	$p1, $p1, lo12(exception_handlers)
	lw	$p1, [$p1+$p0<<2]
	move	$r0, $p0
	mfsr	$r1, $EVA
	mfsr	$r2, $ITYPE
	move	$r3, $sp
	mfsr	$r4, $OIPC
	/* enable gie if it is enabled in IPSW. */
	mfsr	$r21, $PSW
	andi	$r20, $r20, #PSW_mskGIE	/* $r20 is $IPSW */
	or	$r21, $r21, $r20
	mtsr	$r21, $PSW
	dsb
	jr	$p1

	/* syscall */
1:
	addi	$p1, $p0, #-NDS32_VECTOR_offEXCEPTION
	bnez	$p1, 2f
	sethi	$lp, hi20(ret_from_exception)
	ori	$lp, $lp, lo12(ret_from_exception)
	sethi	$p1, hi20(exception_handlers)
	ori	$p1, $p1, lo12(exception_handlers)
	lwi	$p1, [$p1+#NDS32_VECTOR_offEXCEPTION<<2]
	jr	$p1

	/* interrupt */
2:
#ifdef CONFIG_TRACE_IRQFLAGS
	jal	arch_trace_hardirqs_off
#endif
	move	$r0, $sp
	sethi	$lp, hi20(ret_from_intr)
	ori	$lp, $lp, lo12(ret_from_intr)
	sethi	$p0, hi20(exception_handlers)
	ori	$p0, $p0, lo12(exception_handlers)
	lwi	$p0, [$p0+#NDS32_VECTOR_offINTERRUPT<<2]
	jr	$p0

	.macro	EXCEPTION_VECTOR_DEBUG
	.align	4
	mfsr	$p0, $EDM_CTL
	andi	$p0, $p0, EDM_CTL_mskV3_EDM_MODE
	tnez	$p0, SWID_RAISE_INTERRUPT_LEVEL
	.endm

	.macro	EXCEPTION_VECTOR
	.align	4
	sethi	$p0, hi20(common_exception_handler)
	ori	$p0, $p0, lo12(common_exception_handler)
	jral.ton	$p0, $p0
	.endm

	.section	".text.init", #alloc, #execinstr
	.global	exception_vector
exception_vector:
.rept 6
	EXCEPTION_VECTOR
.endr
	EXCEPTION_VECTOR_DEBUG
.rept 121
	EXCEPTION_VECTOR
.endr
	.align	4
	.global	exception_vector_end
exception_vector_end:
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>
#ifdef CONFIG_HWZOL
	.macro pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif
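/*
 * restore_user_regs_first undoes save_user_regs in two steps: it first
 * reloads the system registers ($SP_USR, $IPC, the optional
 * zero-overhead-loop registers, $PSW and the P_* registers), then
 * $fp/$gp/$lp; the caller restores the general-purpose registers.
 */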
	.macro restore_user_regs_first
	setgie.d
	isb
	addi	$sp, $sp, FUCOP_CTL_OFFSET
	lmw.adm	$r12, [$sp], $r24, #0x0
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr	$r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm	$sp, [$sp], $sp, #0xe
	.endm

	.macro restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0
	iret
	nop
	.endm

	.macro restore_user_regs
	restore_user_regs_first
	lmw.adm	$r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm
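/*
 * Fast-path variant: the reload starts at $r1 instead of $r0, so the
 * syscall return value already placed in $r0 survives; the stack
 * adjustment is 4 bytes smaller to account for the skipped word.
 */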
	.macro fast_restore_user_regs
	restore_user_regs_first
	lmw.adm	$r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm

#ifdef CONFIG_PREEMPT
	.macro	preempt_stop
	.endm
#else
	.macro	preempt_stop
	setgie.d
	isb
	.endm
#define	resume_kernel	no_work_pending
#endif
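/*
 * On !PREEMPT kernels, preempt_stop disables interrupts before the
 * work-pending checks and resume_kernel aliases no_work_pending; on
 * PREEMPT kernels it is empty and the resume_kernel path below handles
 * kernel preemption.
 */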
ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Decide whether we are returning to kernel or to user mode.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! done with iret
	j	resume_userspace

/*
 * This is the fast syscall return path.  We do as little as possible
 * here, and this includes saving $r0 back into the SVC stack.
 * fixed: tsk - $r25, syscall # - $r7, syscall table pointer - $r8
 */
ENTRY(ret_fast_syscall)
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret

/*
 * Ok, we need to do extra processing: enter the slow path returning
 * from a syscall when work is pending.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! what differs from ret_from_exception
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched
	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending
	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b	ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then recheck pending work

/*
 * "slow" syscall return path.
 */
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! done with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle work_resched, sig_pend
no_work_pending:
#ifdef CONFIG_TRACE_IRQFLAGS
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, trace_hardirqs_off
	la	$r9, trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return via iret

/*
 * preemptible kernel
 */
#ifdef CONFIG_PREEMPT
resume_kernel:
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending
need_resched:
	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending
	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending
	jal	preempt_schedule_irq
	b	need_resched
#endif

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bal	schedule_tail
	beqz	$r6, 1f				! $r6 holds fn for kernel threads
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]	! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move	$r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(current, trace);
}
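
/*
 * Walk the frame-pointer chain: for every frame, fpn[-1] holds the
 * return address and fpn[FP_OFFSET] the previous frame pointer.
 * Entries inside scheduler functions are skipped unless we are tracing
 * the current task.
 */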
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long *fpn;
	int skip = trace->skip;
	int savesched;

	if (tsk == current) {
		__asm__ __volatile__("\tori\t%0, $fp, #0\n":"=r"(fpn));
		savesched = 1;
	} else {
		fpn = (unsigned long *)thread_saved_fp(tsk);
		savesched = 0;
	}

	while (!kstack_end(fpn) && !((unsigned long)fpn & 0x3)
	       && (fpn >= (unsigned long *)TASK_SIZE)) {
		unsigned long lpp, fpp;

		lpp = fpn[-1];
		fpp = fpn[FP_OFFSET];
		if (!__kernel_text_address(lpp))
			break;

		if (savesched || !in_sched_functions(lpp)) {
			if (skip) {
				skip--;
			} else {
				trace->entries[trace->nr_entries++] = lpp;
				if (trace->nr_entries >= trace->max_entries)
					break;
			}
		}
		fpn = (unsigned long *)fpp;
	}
}