Commit 576a9e87 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] ppc64 cpu spinup fixes

From: Rusty Russell <rusty@rustcorp.com.au>

1) Check for cpuids >= NR_CPUS in prom_hold_cpus: we previously
   overflowed arrays when this happened, and checking it here ensures
   that it doesn't happen elsewhere.  Still move processors to
   secondary_hold (out of OF), but we won't release them.

2) smp_iSeries_message_pass and smp_xics_message_pass don't need to
   iterate if given a specific CPU: cleanup and fix.

3) kick_cpu variants don't need to check cpu range (caller is screwed
   anyway if this happened): replaced with BUG_ON for now.

4) smp_openpic_probe and smp_xics_probe can use cpus_weight(): it's
   faster and simpler.

5) Use for_each_cpu/for_each_online_cpu as appropriate (a condensed sketch
   of these idioms follows below).
parent fbd7abeb
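For orientation before the diff, here is a condensed, self-contained sketch of the idioms the patch moves to. It is illustrative only: NR_CPUS, cpu_online_map, for_each_online_cpu, cpus_weight, MSG_ALL/MSG_ALL_BUT_SELF, do_message and message_pass are all simplified stand-ins defined here, not the real ppc64/kernel symbols, and the sketch assumes the broadcast message codes sit above any valid CPU id so that a "target < NR_CPUS" test distinguishes a single-CPU IPI from a broadcast.

/*
 * Illustrative sketch only -- not the kernel code.  Every name below is a
 * simplified stand-in for the corresponding ppc64 definition.
 */
#include <stdio.h>

#define NR_CPUS			8
#define MSG_ALL_BUT_SELF	0x8000	/* assumed: broadcast codes sit above any CPU id */
#define MSG_ALL			0x8001

static unsigned long cpu_online_map = 0x0f;	/* pretend CPUs 0-3 are online */
static int this_cpu;				/* pretend we run on CPU 0 */

/* Stand-in for for_each_online_cpu(): walk the online bitmask. */
#define for_each_online_cpu(i) \
	for ((i) = 0; (i) < NR_CPUS; (i)++) \
		if (cpu_online_map & (1UL << (i)))

/* Stand-in for cpus_weight(): count the bits set in a cpu mask. */
static int cpus_weight(unsigned long map)
{
	int n = 0;

	for (; map; map >>= 1)
		n += map & 1;
	return n;
}

/* Per-CPU delivery helper, in the spirit of the new smp_*_do_message(). */
static void do_message(int cpu, int msg)
{
	printf("IPI msg %d -> cpu %d\n", msg, cpu);
}

/*
 * After the patch, a specific target CPU is handled directly; only the
 * broadcast cases still iterate, and they walk online CPUs only.
 */
static void message_pass(int target, int msg)
{
	int i;

	if (target < NR_CPUS)
		do_message(target, msg);
	else {
		for_each_online_cpu(i) {
			if (target == MSG_ALL_BUT_SELF && i == this_cpu)
				continue;
			do_message(i, msg);
		}
	}
}

int main(void)
{
	printf("online cpus: %d\n", cpus_weight(cpu_online_map));
	message_pass(2, 5);			/* unicast: no loop at all */
	message_pass(MSG_ALL_BUT_SELF, 5);	/* broadcast: skips cpu 0 */
	return 0;
}

The real functions in the diff follow the same shape: smp_iSeries_message_pass() and smp_xics_message_pass() no longer scan every NR_CPUS slot for a single-target IPI, and the probe routines replace their hand-rolled counting loops with a single cpus_weight(cpu_possible_map) call.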
@@ -30,6 +30,7 @@
 #include <linux/types.h>
 #include <linux/pci.h>
 #include <linux/proc_fs.h>
+#include <linux/stringify.h>
 #include <linux/delay.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
@@ -1073,6 +1074,10 @@ prom_hold_cpus(unsigned long mem)
                 if (*acknowledge == cpuid) {
                         prom_print(RELOC("ok\n"));
+                        /* We have to get every CPU out of OF,
+                         * even if we never start it. */
+                        if (cpuid >= NR_CPUS)
+                                goto next;
 #ifdef CONFIG_SMP
                         /* Set the number of active processors. */
                         _systemcfg->processorCount++;
@@ -1099,9 +1104,12 @@ prom_hold_cpus(unsigned long mem)
                         cpu_set(cpuid, RELOC(cpu_present_at_boot));
                 }
 
+next:
                 /* Init paca for secondary threads. They start later. */
                 for (i=1; i < cpu_threads; i++) {
                         cpuid++;
+                        if (cpuid >= NR_CPUS)
+                                continue;
                         _xPaca[cpuid].xHwProcNum = interrupt_server[i];
                         prom_print_hex(interrupt_server[i]);
                         prom_print(RELOC(" : preparing thread ... "));
@@ -1147,6 +1155,10 @@ prom_hold_cpus(unsigned long mem)
         }
 #endif
 
+        if (cpuid >= NR_CPUS)
+                prom_print(RELOC("WARNING: maximum CPUs (" __stringify(NR_CPUS)
+                                 ") exceeded: ignoring extras\n"));
+
 #ifdef DEBUG_PROM
         prom_print(RELOC("prom_hold_cpus: end...\n"));
 #endif
......
@@ -95,20 +95,25 @@ void iSeries_smp_message_recv( struct pt_regs * regs )
         smp_message_recv( msg, regs );
 }
 
-static void smp_iSeries_message_pass(int target, int msg, unsigned long data, int wait)
+static inline void smp_iSeries_do_message(int cpu, int msg)
 {
-        int i;
+        set_bit(msg, &iSeries_smp_message[cpu]);
+        HvCall_sendIPI(&(paca[cpu]));
+}
 
-        for (i = 0; i < NR_CPUS; ++i) {
-                if (!cpu_online(i))
-                        continue;
+static void
+smp_iSeries_message_pass(int target, int msg, long data, int wait)
+{
+        int i;
 
-                if ((target == MSG_ALL) ||
-                    (target == i) ||
-                    ((target == MSG_ALL_BUT_SELF) &&
-                     (i != smp_processor_id())) ) {
-                        set_bit(msg, &iSeries_smp_message[i]);
-                        HvCall_sendIPI(&(paca[i]));
+        if (target < NR_CPUS)
+                smp_iSeries_do_message(target, msg);
+        else {
+                for_each_online_cpu(i) {
+                        if (target == MSG_ALL_BUT_SELF
+                            && i == smp_processor_id())
+                                continue;
+                        smp_iSeries_do_message(i, msg);
                 }
         }
 }
@@ -151,21 +156,14 @@ static int smp_iSeries_probe(void)
 static void smp_iSeries_kick_cpu(int nr)
 {
         struct ItLpPaca * lpPaca;
 
-        /* Verify we have a Paca for processor nr */
-        if ( ( nr <= 0 ) ||
-             ( nr >= NR_CPUS ) )
-                return;
+        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
         /* Verify that our partition has a processor nr */
         lpPaca = paca[nr].xLpPacaPtr;
-        if ( lpPaca->xDynProcStatus >= 2 )
+        if (lpPaca->xDynProcStatus >= 2)
                 return;
 
-        /* The information for processor bringup must
-         * be written out to main store before we release
-         * the processor.
-         */
-        mb();
-
         /* The processor is currently spinning, waiting
          * for the xProcStart field to become non-zero
          * After we set xProcStart, the processor will
@@ -219,13 +217,9 @@ void smp_openpic_message_pass(int target, int msg, unsigned long data, int wait)
 static int __init smp_openpic_probe(void)
 {
-        int i;
-        int nr_cpus = 0;
+        int nr_cpus;
 
-        for (i = 0; i < NR_CPUS; i++) {
-                if (cpu_possible(i))
-                        nr_cpus++;
-        }
+        nr_cpus = cpus_weight(cpu_possible_map);
 
         if (nr_cpus > 1)
                 openpic_request_IPIs();
@@ -240,16 +234,7 @@ static void __devinit smp_openpic_setup_cpu(int cpu)
 static void smp_pSeries_kick_cpu(int nr)
 {
-        /* Verify we have a Paca for processor nr */
-        if ( ( nr <= 0 ) ||
-             ( nr >= NR_CPUS ) )
-                return;
-
-        /* The information for processor bringup must
-         * be written out to main store before we release
-         * the processor.
-         */
-        mb();
+        BUG_ON(nr < 0 || nr >= NR_CPUS);
 
         /* The processor is currently spinning, waiting
          * for the xProcStart field to become non-zero
@@ -266,8 +251,8 @@ static void __init smp_space_timers(unsigned int max_cpus)
         unsigned long offset = tb_ticks_per_jiffy / max_cpus;
         unsigned long previous_tb = paca[boot_cpuid].next_jiffy_update_tb;
 
-        for (i = 0; i < NR_CPUS; i++) {
-                if (cpu_possible(i) && i != boot_cpuid) {
+        for_each_cpu(i) {
+                if (i != boot_cpuid) {
                         paca[i].next_jiffy_update_tb =
                                 previous_tb + offset;
                         previous_tb = paca[i].next_jiffy_update_tb;
@@ -287,20 +272,25 @@ void vpa_init(int cpu)
         register_vpa(flags, cpu, __pa((unsigned long)&(paca[cpu].xLpPaca)));
 }
 
+static inline void smp_xics_do_message(int cpu, int msg)
+{
+        set_bit(msg, &xics_ipi_message[cpu].value);
+        mb();
+        xics_cause_IPI(cpu);
+}
+
 static void smp_xics_message_pass(int target, int msg, unsigned long data, int wait)
 {
-        int i;
+        unsigned int i;
 
-        for (i = 0; i < NR_CPUS; ++i) {
-                if (!cpu_online(i))
-                        continue;
-
-                if (target == MSG_ALL || target == i
-                    || (target == MSG_ALL_BUT_SELF
-                        && i != smp_processor_id())) {
-                        set_bit(msg, &xics_ipi_message[i].value);
-                        mb();
-                        xics_cause_IPI(i);
+        if (target < NR_CPUS) {
+                smp_xics_do_message(target, msg);
+        } else {
+                for_each_online_cpu(i) {
+                        if (target == MSG_ALL_BUT_SELF
+                            && i == smp_processor_id())
+                                continue;
+                        smp_xics_do_message(i, msg);
                 }
         }
 }
@@ -309,18 +299,11 @@ extern void xics_request_IPIs(void);
 static int __init smp_xics_probe(void)
 {
-        int i;
-        int nr_cpus = 0;
-
-        for (i = 0; i < NR_CPUS; i++) {
-                if (cpu_possible(i))
-                        nr_cpus++;
-        }
-
 #ifdef CONFIG_SMP
         xics_request_IPIs();
 #endif
 
-        return nr_cpus;
+        return cpus_weight(cpu_possible_map);
 }
 
 static void __devinit smp_xics_setup_cpu(int cpu)
@@ -660,6 +643,12 @@ int __devinit __cpu_up(unsigned int cpu)
         paca[cpu].xCurrent = (u64)p;
         current_set[cpu] = p->thread_info;
 
+        /* The information for processor bringup must
+         * be written out to main store before we release
+         * the processor.
+         */
+        mb();
+
         /* wake up cpus */
         smp_ops->kick_cpu(cpu);
......
@@ -475,9 +475,7 @@ void xics_init_IRQ(void)
         if (systemcfg->platform == PLATFORM_PSERIES) {
 #ifdef CONFIG_SMP
-                for (i = 0; i < NR_CPUS; ++i) {
-                        if (!cpu_possible(i))
-                                continue;
+                for_each_cpu(i) {
                         xics_per_cpu[i] = __ioremap((ulong)inodes[get_hard_smp_processor_id(i)].addr,
                                                     (ulong)inodes[get_hard_smp_processor_id(i)].size,
                                                     _PAGE_NO_CACHE);
......