Commit 19c95f26 authored by Julien Thierry, committed by Will Deacon

arm64: entry.S: Do not preempt from IRQ before all cpufeatures are enabled

Preempting from IRQ-return means that the task has its PSTATE saved
on the stack, which will get restored when the task is resumed and does
the actual IRQ return.

However, enabling some CPU features requires modifying the PSTATE. This
means that, if a task was scheduled out during an IRQ-return before all
CPU features are enabled, the task might restore a PSTATE that does not
include the feature enablement changes once scheduled back in.

* Task 1:

PAN == 0 ---|                          |---------------
            |                          |<- return from IRQ, PSTATE.PAN = 0
            | <- IRQ                   |
            +--------+ <- preempt()  +--
                                     ^
                                     |
                                     reschedule Task 1, PSTATE.PAN == 1
* Init:
        --------------------+------------------------
                            ^
                            |
                            enable_cpu_features
                            set PSTATE.PAN on all CPUs
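
For reference, the PSTATE modification happens in per-feature enable
callbacks that cpufeature runs on each CPU. A minimal sketch, modelled on
arm64's cpu_enable_pan() in arch/arm64/kernel/cpufeature.c (details vary
across kernel versions):

#include <linux/bug.h>		/* WARN_ON_ONCE() */
#include <linux/preempt.h>	/* in_interrupt() */
#include <asm/cpufeature.h>
#include <asm/sysreg.h>

static void cpu_enable_pan(const struct arm64_cpu_capabilities *__unused)
{
	/*
	 * We modify PSTATE: a change made from IRQ context would not
	 * stick, as the PSTATE saved on exception entry is restored on
	 * return.
	 */
	WARN_ON_ONCE(in_interrupt());

	sysreg_clear_set(sctlr_el1, SCTLR_EL1_SPAN, 0);
	asm(SET_PSTATE_PAN(1));		/* sets PSTATE.PAN on this CPU only */
}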

Worse than this, since PSTATE is untouched when task switching is done,
a task missing the new bits in PSTATE might affect another task, if both
do direct calls to schedule() (outside of IRQ/exception contexts).
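
(The context-switch state bears this out: below is a paraphrase of arm64's
struct cpu_context from arch/arm64/include/asm/processor.h. Note there is
no PSTATE field, so cpu_switch_to() neither saves nor restores it.)

struct cpu_context {
	unsigned long x19;	/* x19-x28: callee-saved registers */
	unsigned long x20;
	unsigned long x21;
	unsigned long x22;
	unsigned long x23;
	unsigned long x24;
	unsigned long x25;
	unsigned long x26;
	unsigned long x27;
	unsigned long x28;
	unsigned long fp;
	unsigned long sp;
	unsigned long pc;
};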

Fix this by preventing preemption on IRQ-return until features are
enabled on all CPUs.

This way the only PSTATE values that are saved on the stack are from
synchronous exceptions. These are expected to be fatal this early; the
exception is BRK for WARN_ON(), but as this uses do_debug_exception(),
which keeps IRQs masked, it shouldn't call schedule().

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
[james: Replaced a really cool hack with an even simpler static key in C.
 Expanded commit message with Julien's cover-letter ASCII art]
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 8c551f91
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -680,7 +680,7 @@ alternative_if ARM64_HAS_IRQ_PRIO_MASKING
 	orr	x24, x24, x0
 alternative_else_nop_endif
 	cbnz	x24, 1f				// preempt count != 0 || NMI return path
-	bl	preempt_schedule_irq		// irq en/disable is done inside
+	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -17,6 +17,7 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
+#include <linux/lockdep.h>
 #include <linux/mm.h>
 #include <linux/stddef.h>
 #include <linux/sysctl.h>
@@ -44,6 +45,7 @@
 #include <asm/alternative.h>
 #include <asm/arch_gicv3.h>
 #include <asm/compat.h>
+#include <asm/cpufeature.h>
 #include <asm/cacheflush.h>
 #include <asm/exec.h>
 #include <asm/fpsimd.h>
@@ -631,3 +633,19 @@ static int __init tagged_addr_init(void)
 
 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
+
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * Preempting a task from an IRQ means we leave copies of PSTATE
+	 * on the stack. cpufeature's enable calls may modify PSTATE, but
+	 * resuming one of these preempted tasks would undo those changes.
+	 *
+	 * Only allow a task to be preempted once cpufeatures have been
+	 * enabled.
+	 */
+	if (static_branch_likely(&arm64_const_caps_ready))
+		preempt_schedule_irq();
+}
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -223,6 +223,7 @@ extern long schedule_timeout_uninterruptible(long timeout);
 extern long schedule_timeout_idle(long timeout);
 asmlinkage void schedule(void);
 extern void schedule_preempt_disabled(void);
+asmlinkage void preempt_schedule_irq(void);
 extern int __must_check io_schedule_prepare(void);
 extern void io_schedule_finish(int token);
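
For context, arm64_const_caps_ready is a pre-existing static key, defined
in arch/arm64/kernel/cpufeature.c and flipped once boot-time feature
enablement has finished. Roughly (paraphrased; the exact call site varies
by kernel version):

#include <linux/jump_label.h>

DEFINE_STATIC_KEY_FALSE(arm64_const_caps_ready);

static void __init mark_const_caps_ready(void)
{
	/* Run late in boot, after the cpufeature enable calls complete. */
	static_branch_enable(&arm64_const_caps_ready);
}

Until the key is enabled, static_branch_likely() in
arm64_preempt_schedule_irq() evaluates false at the cost of a single
patched branch/nop, so IRQ-return preemption is simply skipped; once
features are finalized, the normal preempt_schedule_irq() path applies.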