Commit 6cbd7cc2 authored by Alexander Gordeev, committed by Vasily Gorbik

s390/smp: call smp_reinit_ipl_cpu() before scheduler is available

Currently smp_reinit_ipl_cpu() is a pre-SMP early initcall.
That ensures no other CPU is running in parallel, but it is
still not enough to assume the code runs exclusively, since
scheduling is already available.
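
For context, a condensed sketch of the generic boot flow,
paraphrased and heavily simplified from init/main.c (not part of
this patch), shows why a pre-SMP early initcall already runs with
the scheduler up: such initcalls execute from the kernel_init
kthread, after rest_init() has started scheduling.

/*
 * Simplified sketch after init/main.c; details elided, not
 * part of this patch.
 */
asmlinkage __visible void __init start_kernel(void)
{
	/* early setup: single thread of control, nothing else runnable */
	arch_call_rest_init();	/* architecture hook, overridden by s390 */
}

noinline void __ref rest_init(void)
{
	/* kernel_init (PID 1) and kthreadd are spawned here; from
	 * this point on the scheduler is available */
	schedule_preempt_disabled();
	cpu_startup_entry(CPUHP_ONLINE);
}

static int __ref kernel_init(void *unused)
{
	kernel_init_freeable();	/* runs do_pre_smp_initcalls(), i.e.
				 * early initcalls, in preemptible
				 * kthread context */
	return 0;
}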

Move the function call to the arch_call_rest_init() callback
to ensure no thread can be preempted and to allow lockless
allocation of the kernel page tables. That is needed for a
follow-up rework of the absolute lowcore access mechanism.
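
To illustrate why running before rest_init() permits lockless page
table allocation, here is a hypothetical sketch (the helper
early_pte_populate() is invented for this example and does not
appear in the patch): with exactly one task in the system, a
kernel page table can be populated without taking
init_mm.page_table_lock.

/*
 * Hypothetical illustration only. Before rest_init() the boot
 * CPU runs the only task in the system, so no other thread can
 * observe or modify init_mm concurrently.
 */
static void __init early_pte_populate(pmd_t *pmd)
{
	pte_t *pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	if (!pte)
		panic("early_pte_populate: out of memory");
	/* lockless: no concurrent updaters exist at this point */
	pmd_populate_kernel(&init_mm, pmd, pte);
}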
Suggested-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
parent d61bb30e
--- a/arch/s390/include/asm/smp.h
+++ b/arch/s390/include/asm/smp.h
@@ -58,6 +58,7 @@ static inline void smp_cpus_done(unsigned int max_cpus)
 {
 }
+extern int smp_reinit_ipl_cpu(void);
 extern int smp_rescan_cpus(void);
 extern void __noreturn cpu_die(void);
 extern void __cpu_die(unsigned int cpu);
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -395,6 +395,7 @@ void __init arch_call_rest_init(void)
 {
 	unsigned long stack;
 
+	smp_reinit_ipl_cpu();
 	stack = stack_alloc();
 	if (!stack)
 		panic("Couldn't allocate kernel stack");
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1256,7 +1256,7 @@ static __always_inline void set_new_lowcore(struct lowcore *lc)
 	: "memory", "cc");
 }
 
-static int __init smp_reinit_ipl_cpu(void)
+int __init smp_reinit_ipl_cpu(void)
 {
 	unsigned long async_stack, nodat_stack, mcck_stack;
 	struct lowcore *lc, *lc_ipl;
@@ -1291,4 +1291,3 @@ static int __init smp_reinit_ipl_cpu(void)
 	return 0;
 }
-early_initcall(smp_reinit_ipl_cpu);