Commit 33a3581a authored by Mark Rutland, committed by Will Deacon

arm64: entry: move arm64_preempt_schedule_irq to entry-common.c

Subsequent patches will pull more of the IRQ entry handling into C. To
keep this in one place, let's move arm64_preempt_schedule_irq() into
entry-common.c along with the other entry management functions.

We no longer need to include <linux/lockdep.h> in process.c, so the
include directive is removed.

There should be no functional change as a result of this patch.
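
For illustration only (this sketch is not part of the patch, and the
caller shown is hypothetical): arm64_preempt_schedule_irq() is meant to
be invoked from the EL1 IRQ exit path once that path has decided a
reschedule is both wanted and safe, roughly along these lines; the real
entry code may differ:

	#include <linux/linkage.h>	/* asmlinkage		*/
	#include <linux/preempt.h>	/* preempt_count()	*/
	#include <linux/sched.h>	/* need_resched()	*/

	asmlinkage void arm64_preempt_schedule_irq(void);

	/*
	 * Hypothetical caller sketch: preempt on IRQ exit only when
	 * the kernel is preemptible, nothing holds a preempt count,
	 * and a reschedule is pending.
	 */
	static void el1_irq_exit_sketch(void)
	{
		if (IS_ENABLED(CONFIG_PREEMPTION) &&
		    preempt_count() == 0 &&
		    need_resched())
			arm64_preempt_schedule_irq();
	}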

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Acked-by: Marc Zyngier <maz@kernel.org>
Cc: James Morse <james.morse@arm.com>
Cc: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210607094624.34689-5-mark.rutland@arm.com
Signed-off-by: Will Deacon <will@kernel.org>
parent bb8e93a2
arch/arm64/kernel/entry-common.c
@@ -6,7 +6,11 @@
  */
 
 #include <linux/context_tracking.h>
+#include <linux/linkage.h>
+#include <linux/lockdep.h>
 #include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
 #include <linux/thread_info.h>
 
 #include <asm/cpufeature.h>
@@ -113,6 +117,22 @@ asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 	exit_to_kernel_mode(regs);
 }
 
+asmlinkage void __sched arm64_preempt_schedule_irq(void)
+{
+	lockdep_assert_irqs_disabled();
+
+	/*
+	 * Preempting a task from an IRQ means we leave copies of PSTATE
+	 * on the stack. cpufeature's enable calls may modify PSTATE, but
+	 * resuming one of these preempted tasks would undo those changes.
+	 *
+	 * Only allow a task to be preempted once cpufeatures have been
+	 * enabled.
+	 */
+	if (system_capabilities_finalized())
+		preempt_schedule_irq();
+}
+
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
arch/arm64/kernel/process.c
@@ -18,7 +18,6 @@
 #include <linux/sched/task.h>
 #include <linux/sched/task_stack.h>
 #include <linux/kernel.h>
-#include <linux/lockdep.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/nospec.h>
@@ -724,22 +723,6 @@ static int __init tagged_addr_init(void)
 core_initcall(tagged_addr_init);
 #endif /* CONFIG_ARM64_TAGGED_ADDR_ABI */
 
-asmlinkage void __sched arm64_preempt_schedule_irq(void)
-{
-	lockdep_assert_irqs_disabled();
-
-	/*
-	 * Preempting a task from an IRQ means we leave copies of PSTATE
-	 * on the stack. cpufeature's enable calls may modify PSTATE, but
-	 * resuming one of these preempted tasks would undo those changes.
-	 *
-	 * Only allow a task to be preempted once cpufeatures have been
-	 * enabled.
-	 */
-	if (system_capabilities_finalized())
-		preempt_schedule_irq();
-}
-
 #ifdef CONFIG_BINFMT_ELF
 int arch_elf_adjust_prot(int prot, const struct arch_elf_state *state,
 			 bool has_interp, bool is_interp)