Commit a052096b authored by Sven Schnelle's avatar Sven Schnelle Committed by Heiko Carstens

s390/topology: fix topology information when calling cpu hotplug notifiers

The cpu hotplug notifiers are called without updating the core/thread
masks when a new CPU is added. This causes problems with code setting
up data structures in a cpu hotplug notifier, and relying on that later
in normal code.

This caused a crash in the new core scheduling code (SCHED_CORE),
where rq->core was set up in a notifier depending on cpu masks.

To fix this, add a cpu_setup_mask which is used in update_cpu_masks()
instead of the cpu_online_mask to determine whether the cpu masks should
be set for a certain cpu. Also move update_cpu_masks() to update the
masks before calling notify_cpu_starting() so that the notifiers
see the updated masks.
Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
Cc: <stable@vger.kernel.org>
[hca@linux.ibm.com: get rid of cpu_online_mask handling]
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent 88b60426
...@@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex; ...@@ -18,6 +18,7 @@ extern struct mutex smp_cpu_state_mutex;
extern unsigned int smp_cpu_mt_shift; extern unsigned int smp_cpu_mt_shift;
extern unsigned int smp_cpu_mtid; extern unsigned int smp_cpu_mtid;
extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS]; extern __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
extern cpumask_t cpu_setup_mask;
extern int __cpu_up(unsigned int cpu, struct task_struct *tidle); extern int __cpu_up(unsigned int cpu, struct task_struct *tidle);
......
...@@ -95,6 +95,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS]; ...@@ -95,6 +95,7 @@ __vector128 __initdata boot_cpu_vector_save_area[__NUM_VXRS];
#endif #endif
static unsigned int smp_max_threads __initdata = -1U; static unsigned int smp_max_threads __initdata = -1U;
cpumask_t cpu_setup_mask;
static int __init early_nosmt(char *s) static int __init early_nosmt(char *s)
{ {
...@@ -902,13 +903,14 @@ static void smp_start_secondary(void *cpuvoid) ...@@ -902,13 +903,14 @@ static void smp_start_secondary(void *cpuvoid)
vtime_init(); vtime_init();
vdso_getcpu_init(); vdso_getcpu_init();
pfault_init(); pfault_init();
cpumask_set_cpu(cpu, &cpu_setup_mask);
update_cpu_masks();
notify_cpu_starting(cpu); notify_cpu_starting(cpu);
if (topology_cpu_dedicated(cpu)) if (topology_cpu_dedicated(cpu))
set_cpu_flag(CIF_DEDICATED_CPU); set_cpu_flag(CIF_DEDICATED_CPU);
else else
clear_cpu_flag(CIF_DEDICATED_CPU); clear_cpu_flag(CIF_DEDICATED_CPU);
set_cpu_online(cpu, true); set_cpu_online(cpu, true);
update_cpu_masks();
inc_irq_stat(CPU_RST); inc_irq_stat(CPU_RST);
local_irq_enable(); local_irq_enable();
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
...@@ -950,10 +952,13 @@ early_param("possible_cpus", _setup_possible_cpus); ...@@ -950,10 +952,13 @@ early_param("possible_cpus", _setup_possible_cpus);
int __cpu_disable(void) int __cpu_disable(void)
{ {
unsigned long cregs[16]; unsigned long cregs[16];
int cpu;
/* Handle possible pending IPIs */ /* Handle possible pending IPIs */
smp_handle_ext_call(); smp_handle_ext_call();
set_cpu_online(smp_processor_id(), false); cpu = smp_processor_id();
set_cpu_online(cpu, false);
cpumask_clear_cpu(cpu, &cpu_setup_mask);
update_cpu_masks(); update_cpu_masks();
/* Disable pseudo page faults on this cpu. */ /* Disable pseudo page faults on this cpu. */
pfault_fini(); pfault_fini();
......
...@@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c ...@@ -67,7 +67,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
static cpumask_t mask; static cpumask_t mask;
cpumask_clear(&mask); cpumask_clear(&mask);
if (!cpu_online(cpu)) if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out; goto out;
cpumask_set_cpu(cpu, &mask); cpumask_set_cpu(cpu, &mask);
switch (topology_mode) { switch (topology_mode) {
...@@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c ...@@ -88,7 +88,7 @@ static void cpu_group_map(cpumask_t *dst, struct mask_info *info, unsigned int c
case TOPOLOGY_MODE_SINGLE: case TOPOLOGY_MODE_SINGLE:
break; break;
} }
cpumask_and(&mask, &mask, cpu_online_mask); cpumask_and(&mask, &mask, &cpu_setup_mask);
out: out:
cpumask_copy(dst, &mask); cpumask_copy(dst, &mask);
} }
...@@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu) ...@@ -99,16 +99,16 @@ static void cpu_thread_map(cpumask_t *dst, unsigned int cpu)
int i; int i;
cpumask_clear(&mask); cpumask_clear(&mask);
if (!cpu_online(cpu)) if (!cpumask_test_cpu(cpu, &cpu_setup_mask))
goto out; goto out;
cpumask_set_cpu(cpu, &mask); cpumask_set_cpu(cpu, &mask);
if (topology_mode != TOPOLOGY_MODE_HW) if (topology_mode != TOPOLOGY_MODE_HW)
goto out; goto out;
cpu -= cpu % (smp_cpu_mtid + 1); cpu -= cpu % (smp_cpu_mtid + 1);
for (i = 0; i <= smp_cpu_mtid; i++) for (i = 0; i <= smp_cpu_mtid; i++) {
if (cpu_present(cpu + i)) if (cpumask_test_cpu(cpu + i, &cpu_setup_mask))
cpumask_set_cpu(cpu + i, &mask); cpumask_set_cpu(cpu + i, &mask);
cpumask_and(&mask, &mask, cpu_online_mask); }
out: out:
cpumask_copy(dst, &mask); cpumask_copy(dst, &mask);
} }
...@@ -569,6 +569,7 @@ void __init topology_init_early(void) ...@@ -569,6 +569,7 @@ void __init topology_init_early(void)
alloc_masks(info, &book_info, 2); alloc_masks(info, &book_info, 2);
alloc_masks(info, &drawer_info, 3); alloc_masks(info, &drawer_info, 3);
out: out:
cpumask_set_cpu(0, &cpu_setup_mask);
__arch_update_cpu_topology(); __arch_update_cpu_topology();
__arch_update_dedicated_flag(NULL); __arch_update_dedicated_flag(NULL);
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment