Commit 064dbfb4 authored by Mark Rutland, committed by Will Deacon

arm64: entry: convert IRQ+FIQ handlers to C

For various reasons we'd like to convert the bulk of arm64's exception
triage logic to C. As a step towards that, this patch converts the EL1
and EL0 IRQ+FIQ triage logic to C.

Separate C functions are added for the native and compat cases so that
in subsequent patches we can handle native/compat differences in C.

Since the triage functions can now call arm64_apply_bp_hardening()
directly, the do_el0_irq_bp_hardening() wrapper function is removed.

Since the user_exit_irqoff macro is now unused, it is removed. The
user_enter_irqoff macro is still used by the ret_to_user code, and
cannot be removed at this time.
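
To make the new shape concrete, the dispatch chain can be sketched in plain
C. The following is a minimal, user-space mock (not part of the commit):
pt_regs, the IRQ-stack switch, the NMI bracketing, DAIF masking, and the
bp-hardening call are stubbed, and demo_irq/main exist only for illustration.
Only the function names and call structure are taken from the patch below.

/*
 * Illustrative mock of the dispatch chain introduced by this patch:
 *   vector asm -> el1_irq_handler() -> el1_interrupt()
 *              -> do_interrupt_handler() -> handle_arch_irq
 * All kernel primitives are stubbed; only the call structure matches.
 */
#include <stdbool.h>
#include <stdio.h>

struct pt_regs { unsigned long pc; };   /* stand-in for the real pt_regs */

static void (*handle_arch_irq)(struct pt_regs *);   /* set by the irqchip */

static bool on_thread_stack(void) { return true; }  /* stub */

/* Stub: the real helper moves SP to the per-CPU IRQ stack first. */
static void call_on_irq_stack(struct pt_regs *regs,
                              void (*func)(struct pt_regs *))
{
        func(regs);
}

static void enter_el1_irq_or_nmi(struct pt_regs *regs) { (void)regs; } /* stub */
static void exit_el1_irq_or_nmi(struct pt_regs *regs)  { (void)regs; } /* stub */
static void arm64_apply_bp_hardening(void) { }                         /* stub */

/* Mirrors do_interrupt_handler(): run the handler on the IRQ stack. */
static void do_interrupt_handler(struct pt_regs *regs,
                                 void (*handler)(struct pt_regs *))
{
        if (on_thread_stack())
                call_on_irq_stack(regs, handler);
        else
                handler(regs);
}

/* Mirrors el1_interrupt(): bracket the handler with entry/exit logic. */
static void el1_interrupt(struct pt_regs *regs,
                          void (*handler)(struct pt_regs *))
{
        enter_el1_irq_or_nmi(regs);
        do_interrupt_handler(regs, handler);
        /* the real function may also call arm64_preempt_schedule_irq() */
        exit_el1_irq_or_nmi(regs);
}

/*
 * Mirrors el0_interrupt(): the bp-hardening call that used to go through
 * the do_el0_irq_bp_hardening() wrapper is now made directly from C.
 * (The real function also runs enter_from_user_mode() and masks DAIF.)
 */
static void el0_interrupt(struct pt_regs *regs,
                          void (*handler)(struct pt_regs *))
{
        /* a user PC with bit 55 set points at the TTBR1 half */
        if (regs->pc & (1UL << 55))
                arm64_apply_bp_hardening();
        do_interrupt_handler(regs, handler);
}

/* The asm vector now does only: mov x0, sp; bl el1_irq_handler. */
void el1_irq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_irq);
}

static void demo_irq(struct pt_regs *regs)      /* illustrative handler */
{
        printf("irq taken at pc=%#lx\n", regs->pc);
}

int main(void)
{
        struct pt_regs kregs = { .pc = 0xffff800010000000UL };
        struct pt_regs uregs = { .pc = 0x0000aaaade0000UL };

        handle_arch_irq = demo_irq;
        el1_irq_handler(&kregs);                /* EL1 path */
        el0_interrupt(&uregs, handle_arch_irq); /* EL0 path */
        return 0;
}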
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-8-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent f8049488
@@ -32,14 +32,18 @@ static inline u32 disr_to_esr(u64 disr)
 }
 
 asmlinkage void el1_sync_handler(struct pt_regs *regs);
+asmlinkage void el1_irq_handler(struct pt_regs *regs);
+asmlinkage void el1_fiq_handler(struct pt_regs *regs);
 asmlinkage void el1_error_handler(struct pt_regs *regs);
 asmlinkage void el0_sync_handler(struct pt_regs *regs);
+asmlinkage void el0_irq_handler(struct pt_regs *regs);
+asmlinkage void el0_fiq_handler(struct pt_regs *regs);
 asmlinkage void el0_error_handler(struct pt_regs *regs);
 asmlinkage void el0_sync_compat_handler(struct pt_regs *regs);
+asmlinkage void el0_irq_compat_handler(struct pt_regs *regs);
+asmlinkage void el0_fiq_compat_handler(struct pt_regs *regs);
 asmlinkage void el0_error_compat_handler(struct pt_regs *regs);
 
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs);
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs);
 asmlinkage void call_on_irq_stack(struct pt_regs *regs,
                                   void (*func)(struct pt_regs *));
 asmlinkage void enter_from_user_mode(void);
...
@@ -257,8 +257,6 @@ void set_task_sctlr_el1(u64 sctlr);
 extern struct task_struct *cpu_switch_to(struct task_struct *prev,
                                          struct task_struct *next);
 
-asmlinkage void arm64_preempt_schedule_irq(void);
-
 #define task_pt_regs(p) \
         ((struct pt_regs *)(THREAD_SIZE + task_stack_page(p)) - 1)
 
...
@@ -19,6 +19,8 @@
 #include <asm/exception.h>
 #include <asm/kprobes.h>
 #include <asm/mmu.h>
+#include <asm/processor.h>
+#include <asm/stacktrace.h>
 #include <asm/sysreg.h>
 
 /*
@@ -101,7 +103,7 @@ void noinstr arm64_exit_nmi(struct pt_regs *regs)
         __nmi_exit();
 }
 
-asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
         if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                 arm64_enter_nmi(regs);
@@ -109,7 +111,7 @@ asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
         enter_from_kernel_mode(regs);
 }
 
-asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
+static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
         if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                 arm64_exit_nmi(regs);
@@ -117,7 +119,7 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
         exit_to_kernel_mode(regs);
 }
 
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
+static void __sched arm64_preempt_schedule_irq(void)
 {
         lockdep_assert_irqs_disabled();
@@ -142,6 +144,18 @@ asmlinkage void __sched arm64_preempt_schedule_irq(void)
         preempt_schedule_irq();
 }
 
+static void do_interrupt_handler(struct pt_regs *regs,
+                                 void (*handler)(struct pt_regs *))
+{
+        if (on_thread_stack())
+                call_on_irq_stack(regs, handler);
+        else
+                handler(regs);
+}
+
+extern void (*handle_arch_irq)(struct pt_regs *);
+extern void (*handle_arch_fiq)(struct pt_regs *);
+
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
@@ -308,6 +322,36 @@ asmlinkage void noinstr el1_sync_handler(struct pt_regs *regs)
         }
 }
 
+static void noinstr el1_interrupt(struct pt_regs *regs,
+                                  void (*handler)(struct pt_regs *))
+{
+        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+        enter_el1_irq_or_nmi(regs);
+        do_interrupt_handler(regs, handler);
+
+        /*
+         * Note: thread_info::preempt_count includes both thread_info::count
+         * and thread_info::need_resched, and is not equivalent to
+         * preempt_count().
+         */
+        if (IS_ENABLED(CONFIG_PREEMPTION) &&
+            READ_ONCE(current_thread_info()->preempt_count) == 0)
+                arm64_preempt_schedule_irq();
+
+        exit_el1_irq_or_nmi(regs);
+}
+
+asmlinkage void noinstr el1_irq_handler(struct pt_regs *regs)
+{
+        el1_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el1_fiq_handler(struct pt_regs *regs)
+{
+        el1_interrupt(regs, handle_arch_fiq);
+}
+
 asmlinkage void noinstr el1_error_handler(struct pt_regs *regs)
 {
         unsigned long esr = read_sysreg(esr_el1);
@@ -507,6 +551,39 @@ asmlinkage void noinstr el0_sync_handler(struct pt_regs *regs)
         }
 }
 
+static void noinstr el0_interrupt(struct pt_regs *regs,
+                                  void (*handler)(struct pt_regs *))
+{
+        enter_from_user_mode();
+
+        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);
+
+        if (regs->pc & BIT(55))
+                arm64_apply_bp_hardening();
+
+        do_interrupt_handler(regs, handler);
+}
+
+static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
+{
+        el0_interrupt(regs, handle_arch_irq);
+}
+
+asmlinkage void noinstr el0_irq_handler(struct pt_regs *regs)
+{
+        __el0_irq_handler_common(regs);
+}
+
+static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
+{
+        el0_interrupt(regs, handle_arch_fiq);
+}
+
+asmlinkage void noinstr el0_fiq_handler(struct pt_regs *regs)
+{
+        __el0_fiq_handler_common(regs);
+}
+
 static void __el0_error_handler_common(struct pt_regs *regs)
 {
         unsigned long esr = read_sysreg(esr_el1);
@@ -583,6 +660,16 @@ asmlinkage void noinstr el0_sync_compat_handler(struct pt_regs *regs)
         }
 }
 
+asmlinkage void noinstr el0_irq_compat_handler(struct pt_regs *regs)
+{
+        __el0_irq_handler_common(regs);
+}
+
+asmlinkage void noinstr el0_fiq_compat_handler(struct pt_regs *regs)
+{
+        __el0_fiq_handler_common(regs);
+}
+
 asmlinkage void noinstr el0_error_compat_handler(struct pt_regs *regs)
 {
         __el0_error_handler_common(regs);
...
@@ -33,12 +33,6 @@
  * Context tracking and irqflag tracing need to instrument transitions between
  * user and kernel mode.
  */
-        .macro  user_exit_irqoff
-#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
-        bl      enter_from_user_mode
-#endif
-        .endm
-
         .macro  user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
         bl      exit_to_user_mode
@@ -486,63 +480,12 @@ SYM_CODE_START_LOCAL(__swpan_exit_el0)
 SYM_CODE_END(__swpan_exit_el0)
 #endif
 
-        .macro  irq_stack_entry
-        mov     x19, sp                 // preserve the original sp
-#ifdef CONFIG_SHADOW_CALL_STACK
-        mov     x24, scs_sp             // preserve the original shadow stack
-#endif
-
-        /*
-         * Compare sp with the base of the task stack.
-         * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
-         * and should switch to the irq stack.
-         */
-        ldr     x25, [tsk, TSK_STACK]
-        eor     x25, x25, x19
-        and     x25, x25, #~(THREAD_SIZE - 1)
-        cbnz    x25, 9998f
-
-        ldr_this_cpu x25, irq_stack_ptr, x26
-        mov     x26, #IRQ_STACK_SIZE
-        add     x26, x25, x26
-
-        /* switch to the irq stack */
-        mov     sp, x26
-
-#ifdef CONFIG_SHADOW_CALL_STACK
-        /* also switch to the irq shadow stack */
-        ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x26
-#endif
-
-9998:
-        .endm
-
-        /*
-         * The callee-saved regs (x19-x29) should be preserved between
-         * irq_stack_entry and irq_stack_exit, but note that kernel_entry
-         * uses x20-x23 to store data for later use.
-         */
-        .macro  irq_stack_exit
-        mov     sp, x19
-#ifdef CONFIG_SHADOW_CALL_STACK
-        mov     scs_sp, x24
-#endif
-        .endm
-
 /* GPRs used by entry code */
 tsk     .req    x28             // current thread_info
 
 /*
  * Interrupt handling.
  */
-        .macro  irq_handler, handler:req
-        ldr_l   x1, \handler
-        mov     x0, sp
-        irq_stack_entry
-        blr     x1
-        irq_stack_exit
-        .endm
-
         .macro  gic_prio_kentry_setup, tmp:req
 #ifdef CONFIG_ARM64_PSEUDO_NMI
         alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -552,35 +495,6 @@ tsk     .req    x28             // current thread_info
 #endif
         .endm
 
-        .macro el1_interrupt_handler, handler:req
-        enable_da
-
-        mov     x0, sp
-        bl      enter_el1_irq_or_nmi
-
-        irq_handler     \handler
-
-#ifdef CONFIG_PREEMPTION
-        ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
-        cbnz    x24, 1f                         // preempt count != 0
-        bl      arm64_preempt_schedule_irq      // irq en/disable is done inside
-1:
-#endif
-
-        mov     x0, sp
-        bl      exit_el1_irq_or_nmi
-        .endm
-
-        .macro el0_interrupt_handler, handler:req
-        user_exit_irqoff
-        enable_da
-
-        tbz     x22, #55, 1f
-        bl      do_el0_irq_bp_hardening
-1:
-        irq_handler     \handler
-        .endm
-
 .text
 
 /*
@@ -704,13 +618,15 @@ SYM_CODE_END(el1_sync)
 
         .align  6
 SYM_CODE_START_LOCAL_NOALIGN(el1_irq)
         kernel_entry 1
-        el1_interrupt_handler handle_arch_irq
+        mov     x0, sp
+        bl      el1_irq_handler
         kernel_exit 1
 SYM_CODE_END(el1_irq)
 
 SYM_CODE_START_LOCAL_NOALIGN(el1_fiq)
         kernel_entry 1
-        el1_interrupt_handler handle_arch_fiq
+        mov     x0, sp
+        bl      el1_fiq_handler
         kernel_exit 1
 SYM_CODE_END(el1_fiq)
@@ -737,12 +653,16 @@ SYM_CODE_END(el0_sync_compat)
         .align  6
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq_compat)
         kernel_entry 0, 32
-        b       el0_irq_naked
+        mov     x0, sp
+        bl      el0_irq_compat_handler
+        b       ret_to_user
 SYM_CODE_END(el0_irq_compat)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_fiq_compat)
         kernel_entry 0, 32
-        b       el0_fiq_naked
+        mov     x0, sp
+        bl      el0_fiq_compat_handler
+        b       ret_to_user
 SYM_CODE_END(el0_fiq_compat)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_error_compat)
@@ -756,15 +676,15 @@ SYM_CODE_END(el0_error_compat)
         .align  6
 SYM_CODE_START_LOCAL_NOALIGN(el0_irq)
         kernel_entry 0
-el0_irq_naked:
-        el0_interrupt_handler handle_arch_irq
+        mov     x0, sp
+        bl      el0_irq_handler
         b       ret_to_user
 SYM_CODE_END(el0_irq)
 
 SYM_CODE_START_LOCAL_NOALIGN(el0_fiq)
         kernel_entry 0
-el0_fiq_naked:
-        el0_interrupt_handler handle_arch_fiq
+        mov     x0, sp
+        bl      el0_fiq_handler
         b       ret_to_user
 SYM_CODE_END(el0_fiq)
...
@@ -836,13 +836,6 @@ void do_mem_abort(unsigned long far, unsigned int esr, struct pt_regs *regs)
 }
 NOKPROBE_SYMBOL(do_mem_abort);
 
-void do_el0_irq_bp_hardening(void)
-{
-        /* PC has already been checked in entry.S */
-        arm64_apply_bp_hardening();
-}
-NOKPROBE_SYMBOL(do_el0_irq_bp_hardening);
-
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
         arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
...