Commit 87353d8a authored by Ralf Baechle

[MIPS] SMP: Call platform methods via ops structure.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 19388fb0
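The change is easiest to read as a pattern: each platform fills in a struct plat_smp_ops and registers it early from its prom_init()/board setup code, and the generic code in arch/mips/kernel/smp.c then dispatches through mp_ops instead of calling the old prom_*()/plat_*() hooks directly. Below is a minimal sketch of what a platform port provides under the new interface; it is not part of the commit, the foo_* names are hypothetical, and only struct plat_smp_ops and register_smp_ops() are taken from the diff that follows.

/* Hypothetical platform glue, assuming the <asm/smp-ops.h> added by this commit. */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/smp-ops.h>

static void foo_send_ipi_single(int cpu, unsigned int action)
{
	/* poke the platform's mailbox/interrupt register for @cpu here */
}

static void foo_send_ipi_mask(cpumask_t mask, unsigned int action)
{
	unsigned int i;

	for_each_cpu_mask(i, mask)
		foo_send_ipi_single(i, action);
}

static void __cpuinit foo_init_secondary(void) { /* per-CPU IRQ setup */ }
static void __cpuinit foo_smp_finish(void) { /* last step before "online" */ }
static void foo_cpus_done(void) { }
static void __cpuinit foo_boot_secondary(int cpu, struct task_struct *idle) { }
static void __init foo_smp_setup(void) { /* fill phys_cpu_present_map */ }
static void __init foo_prepare_cpus(unsigned int max_cpus) { }

struct plat_smp_ops foo_smp_ops = {
	.send_ipi_single	= foo_send_ipi_single,
	.send_ipi_mask		= foo_send_ipi_mask,
	.init_secondary		= foo_init_secondary,
	.smp_finish		= foo_smp_finish,
	.cpus_done		= foo_cpus_done,
	.boot_secondary		= foo_boot_secondary,
	.smp_setup		= foo_smp_setup,
	.prepare_cpus		= foo_prepare_cpus,
};

/* Called from the board's prom_init() before setup_arch() reaches plat_smp_setup(): */
void __init foo_register_smp(void)
{
	register_smp_ops(&foo_smp_ops);
}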
@@ -1441,6 +1441,7 @@ config MIPS_MT_SMP
	select SMP
	select SYS_SUPPORTS_SCHED_SMT if SMP
	select SYS_SUPPORTS_SMP
+	select SMP_UP
	help
	  This is a kernel model which is also known a VSMP or lately
	  has been marketesed into SMVP.
@@ -1457,6 +1458,7 @@ config MIPS_MT_SMTC
	select NR_CPUS_DEFAULT_8
	select SMP
	select SYS_SUPPORTS_SMP
+	select SMP_UP
	help
	  This is a kernel model which is known a SMTC or lately has been
	  marketesed into SMVP.
@@ -1735,6 +1737,9 @@ config SMP
	  If you don't know what to do here, say N.

+config SMP_UP
+	bool
+
config SYS_SUPPORTS_SMP
	bool
...
@@ -12,6 +12,7 @@
#include <asm/bootinfo.h>
#include <asm/sgialib.h>
+#include <asm/smp-ops.h>

#undef DEBUG_PROM_INIT
@@ -48,4 +49,11 @@ void __init prom_init(void)
		ArcRead(0, &c, 1, &cnt);
		ArcEnterInteractiveMode();
#endif
+#ifdef CONFIG_SGI_IP27
+	{
+		extern struct plat_smp_ops ip27_smp_ops;
+
+		register_smp_ops(&ip27_smp_ops);
+	}
+#endif
}
@@ -17,7 +17,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
#include <asm/mipsmtregs.h>
#include <asm/r4kcache.h>
#include <asm/cacheflush.h>
...
@@ -29,6 +29,7 @@
#include <asm/cpu.h>
#include <asm/sections.h>
#include <asm/setup.h>
+#include <asm/smp-ops.h>
#include <asm/system.h>

struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
@@ -575,9 +576,7 @@ void __init setup_arch(char **cmdline_p)
	arch_mem_init(cmdline_p);

	resource_init();

-#ifdef CONFIG_SMP
	plat_smp_setup();
-#endif
}

static int __init fpu_disable(char *s)
...
@@ -215,72 +215,67 @@ static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
	write_tc_c0_tchalt(TCHALT_H);
}

-/*
- * Common setup before any secondaries are started
- * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
- */
-void __init plat_smp_setup(void)
-{
-	unsigned int mvpconf0, ntc, tc, ncpu = 0;
-	unsigned int nvpe;
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(0, mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-	if (!cpu_has_mipsmt)
-		return;
-
-	/* disable MT so we can configure */
-	dvpe();
-	dmt();
-
-	/* Put MVPE's into 'configuration state' */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	mvpconf0 = read_c0_mvpconf0();
-	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
-
-	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-	smp_num_siblings = nvpe;
-
-	/* we'll always have more TC's than VPE's, so loop setting everything
-	   to a sensible state */
-	for (tc = 0; tc <= ntc; tc++) {
-		settc(tc);
-
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
-	}
-
-	/* Release config state */
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	/* We'll wait until starting the secondaries before starting MVPE */
-
-	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
-}
-
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
-	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
-
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
-
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
-}
+static void vsmp_send_ipi_single(int cpu, unsigned int action)
+{
+	int i;
+	unsigned long flags;
+	int vpflags;
+
+	local_irq_save(flags);
+
+	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
+
+	switch (action) {
+	case SMP_CALL_FUNCTION:
+		i = C_SW1;
+		break;
+
+	case SMP_RESCHEDULE_YOURSELF:
+	default:
+		i = C_SW0;
+		break;
+	}
+
+	/* 1:1 mapping of vpe and tc... */
+	settc(cpu);
+	write_vpe_c0_cause(read_vpe_c0_cause() | i);
+	evpe(vpflags);
+
+	local_irq_restore(flags);
+}
+
+static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		vsmp_send_ipi_single(i, action);
+}
+
+static void __cpuinit vsmp_init_secondary(void)
+{
+	/* Enable per-cpu interrupts */
+
+	/* This is Malta specific: IPI,performance and timer inetrrupts */
+	write_c0_status((read_c0_status() & ~ST0_IM ) |
+		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+}
+
+static void __cpuinit vsmp_smp_finish(void)
+{
+	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(smp_processor_id(), mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	local_irq_enable();
+}
+
+static void vsmp_cpus_done(void)
+{
+}

/*
@@ -291,7 +286,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 * (unsigned long)idle->thread_info the gp
 * assumes a 1:1 mapping of TC => VPE
 */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
{
	struct thread_info *gp = task_thread_info(idle);
	dvpe();
@@ -325,57 +320,81 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
	evpe(EVPE_ENABLE);
}

-void __cpuinit prom_init_secondary(void)
-{
-	/* Enable per-cpu interrupts */
-
-	/* This is Malta specific: IPI,performance and timer inetrrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
-}
-
-void __cpuinit prom_smp_finish(void)
-{
-	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
-
-#ifdef CONFIG_MIPS_MT_FPAFF
-	/* If we have an FPU, enroll ourselves in the FPU-full mask */
-	if (cpu_has_fpu)
-		cpu_set(smp_processor_id(), mt_fpu_cpumask);
-#endif /* CONFIG_MIPS_MT_FPAFF */
-
-	local_irq_enable();
-}
-
-void prom_cpus_done(void)
-{
-}
-
-void core_send_ipi(int cpu, unsigned int action)
-{
-	int i;
-	unsigned long flags;
-	int vpflags;
-
-	local_irq_save(flags);
-
-	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
-
-	switch (action) {
-	case SMP_CALL_FUNCTION:
-		i = C_SW1;
-		break;
-
-	case SMP_RESCHEDULE_YOURSELF:
-	default:
-		i = C_SW0;
-		break;
-	}
-
-	/* 1:1 mapping of vpe and tc... */
-	settc(cpu);
-	write_vpe_c0_cause(read_vpe_c0_cause() | i);
-	evpe(vpflags);
-
-	local_irq_restore(flags);
-}
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondarys
+ */
+static void __init vsmp_smp_setup(void)
+{
+	unsigned int mvpconf0, ntc, tc, ncpu = 0;
+	unsigned int nvpe;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* If we have an FPU, enroll ourselves in the FPU-full mask */
+	if (cpu_has_fpu)
+		cpu_set(0, mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+	if (!cpu_has_mipsmt)
+		return;
+
+	/* disable MT so we can configure */
+	dvpe();
+	dmt();
+
+	/* Put MVPE's into 'configuration state' */
+	set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	mvpconf0 = read_c0_mvpconf0();
+	ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	smp_num_siblings = nvpe;
+
+	/* we'll always have more TC's than VPE's, so loop setting everything
+	   to a sensible state */
+	for (tc = 0; tc <= ntc; tc++) {
+		settc(tc);
+
+		smp_tc_init(tc, mvpconf0);
+		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+	}
+
+	/* Release config state */
+	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+	/* We'll wait until starting the secondaries before starting MVPE */
+
+	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+	mips_mt_set_cpuoptions();
+
+	/* set up ipi interrupts */
+	if (cpu_has_vint) {
+		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
+		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
+	}
+
+	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
+	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
+
+	setup_irq(cpu_ipi_resched_irq, &irq_resched);
+	setup_irq(cpu_ipi_call_irq, &irq_call);
+
+	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
+	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
+}
+
+struct plat_smp_ops vsmp_smp_ops = {
+	.send_ipi_single	= vsmp_send_ipi_single,
+	.send_ipi_mask		= vsmp_send_ipi_mask,
+	.init_secondary		= vsmp_init_secondary,
+	.smp_finish		= vsmp_smp_finish,
+	.cpus_done		= vsmp_cpus_done,
+	.boot_secondary		= vsmp_boot_secondary,
+	.smp_setup		= vsmp_smp_setup,
+	.prepare_cpus		= vsmp_prepare_cpus,
+};
@@ -37,7 +37,6 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
#include <asm/time.h>

#ifdef CONFIG_MIPS_MT_SMTC
@@ -84,6 +83,16 @@ static inline void set_cpu_sibling_map(int cpu)
		cpu_set(cpu, cpu_sibling_map[cpu]);
}

+struct plat_smp_ops *mp_ops;
+
+__cpuinit void register_smp_ops(struct plat_smp_ops *ops)
+{
+	if (ops)
+		printk(KERN_WARNING "Overriding previous set SMP ops\n");
+
+	mp_ops = ops;
+}
/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
@@ -100,7 +109,7 @@ asmlinkage __cpuinit void start_secondary(void)
	cpu_report();
	per_cpu_trap_init();
	mips_clockevent_init();
-	prom_init_secondary();
+	mp_ops->init_secondary();

	/*
	 * XXX parity protection should be folded in here when it's converted
@@ -112,7 +121,7 @@ asmlinkage __cpuinit void start_secondary(void)
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

-	prom_smp_finish();
+	mp_ops->smp_finish();
	set_cpu_sibling_map(cpu);

	cpu_set(cpu, cpu_callin_map);
@@ -184,7 +193,7 @@ int smp_call_function_mask(cpumask_t mask, void (*func) (void *info),
	smp_mb();

	/* Send a message to all other CPUs and wait for them to respond */
-	core_send_ipi_mask(mask, SMP_CALL_FUNCTION);
+	mp_ops->send_ipi_mask(mask, SMP_CALL_FUNCTION);

	/* Wait for response */
	/* FIXME: lock-up detection, backtrace on lock-up */
@@ -278,7 +287,7 @@ void smp_send_stop(void)

void __init smp_cpus_done(unsigned int max_cpus)
{
-	prom_cpus_done();
+	mp_ops->cpus_done();
}

/* called from main before smp_init() */
@@ -286,7 +295,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
-	plat_prepare_cpus(max_cpus);
+	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
#ifndef CONFIG_HOTPLUG_CPU
	cpu_present_map = cpu_possible_map;
@@ -325,7 +334,7 @@ int __cpuinit __cpu_up(unsigned int cpu)
	if (IS_ERR(idle))
		panic(KERN_ERR "Fork failed for CPU %d", cpu);

-	prom_boot_secondary(cpu, idle);
+	mp_ops->boot_secondary(cpu, idle);

	/*
	 * Trust is futile. We should really have timeouts ...
...
@@ -14,7 +14,6 @@
#include <asm/system.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
...
@@ -16,7 +16,6 @@
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/mmu_context.h>
-#include <asm/smp.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
...
@@ -250,6 +250,8 @@ void __init mips_ejtag_setup(void)
	flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
}

+extern struct plat_smp_ops msmtc_smp_ops;
+
void __init prom_init(void)
{
	prom_argc = fw_arg0;
@@ -416,4 +418,10 @@ void __init prom_init(void)
#ifdef CONFIG_SERIAL_8250_CONSOLE
	console_config();
#endif
+#ifdef CONFIG_MIPS_MT_SMP
+	register_smp_ops(&vsmp_smp_ops);
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+	register_smp_ops(&msmtc_smp_ops);
+#endif
}
@@ -15,26 +15,24 @@
 * Cause the specified action to be performed on a targeted "CPU"
 */
-void core_send_ipi(int cpu, unsigned int action)
+static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}

-/*
- * Platform "CPU" startup hook
- */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void msmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
{
-	smtc_boot_secondary(cpu, idle);
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		msmtc_send_ipi_single(i, action);
}

/*
 * Post-config but pre-boot cleanup entry point
 */
-void __cpuinit prom_init_secondary(void)
+static void __cpuinit msmtc_init_secondary(void)
{
	void smtc_init_secondary(void);
	int myvpe;
@@ -50,45 +48,61 @@ void __cpuinit prom_init_secondary(void)
		set_c0_status(0x100 << cp0_perfcount_irq);
	}

	smtc_init_secondary();
}

/*
- * Platform SMP pre-initialization
- *
- * As noted above, we can assume a single CPU for now
- * but it may be multithreaded.
+ * Platform "CPU" startup hook
 */
-void __cpuinit plat_smp_setup(void)
+static void __cpuinit msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
-	if (read_c0_config3() & (1<<2))
-		mipsmt_build_cpu_map(0);
+	smtc_boot_secondary(cpu, idle);
}

-void __init plat_prepare_cpus(unsigned int max_cpus)
+/*
+ * SMP initialization finalization entry point
+ */
+static void __cpuinit msmtc_smp_finish(void)
{
-	if (read_c0_config3() & (1<<2))
-		mipsmt_prepare_cpus();
+	smtc_smp_finish();
}

/*
- * SMP initialization finalization entry point
+ * Hook for after all CPUs are online
 */
-void __cpuinit prom_smp_finish(void)
+static void msmtc_cpus_done(void)
{
-	smtc_smp_finish();
}

/*
- * Hook for after all CPUs are online
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
 */
-void prom_cpus_done(void)
+static void __init msmtc_smp_setup(void)
{
+	mipsmt_build_cpu_map(0);
}

+static void __init msmtc_prepare_cpus(unsigned int max_cpus)
+{
+	mipsmt_prepare_cpus();
+}
+
+struct plat_smp_ops msmtc_smp_ops = {
+	.send_ipi_single	= msmtc_send_ipi_single,
+	.send_ipi_mask		= msmtc_send_ipi_mask,
+	.init_secondary		= msmtc_init_secondary,
+	.smp_finish		= msmtc_smp_finish,
+	.cpus_done		= msmtc_cpus_done,
+	.boot_secondary		= msmtc_boot_secondary,
+	.smp_setup		= msmtc_smp_setup,
+	.prepare_cpus		= msmtc_prepare_cpus,
+};

#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
 * IRQ affinity hook
...
@@ -21,6 +21,6 @@ obj-y := sim_platform.o sim_setup.o sim_mem.o sim_time.o sim_int.o \
	 sim_cmdline.o

obj-$(CONFIG_EARLY_PRINTK) += sim_console.o
-obj-$(CONFIG_SMP) += sim_smp.o
+obj-$(CONFIG_MIPS_MT_SMTC) += sim_smtc.o

EXTRA_CFLAGS += -Werror
@@ -60,6 +60,8 @@ void __init plat_mem_setup(void)
#endif
}

+extern struct plat_smp_ops ssmtc_smp_ops;
+
void __init prom_init(void)
{
	set_io_port_base(0xbfd00000);
@@ -67,8 +69,20 @@ void __init prom_init(void)
	pr_info("\nLINUX started...\n");

	prom_init_cmdline();
	prom_meminit();
+#ifdef CONFIG_MIPS_MT_SMP
+	if (cpu_has_mipsmt)
+		register_smp_ops(&vsmp_smp_ops);
+	else
+		register_smp_ops(&up_smp_ops);
+#endif
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (cpu_has_mipsmt)
+		register_smp_ops(&ssmtc_smp_ops);
+	else
+		register_smp_ops(&up_smp_ops);
+#endif
}

static void __init serial_init(void)
{
...
@@ -16,7 +16,7 @@
 *
 */
/*
- * Simulator Platform-specific hooks for SMP operation
+ * Simulator Platform-specific hooks for SMTC operation
 */

#include <linux/kernel.h>
#include <linux/sched.h>
@@ -29,65 +29,72 @@
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/mmu_context.h>
-#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/smtc_ipi.h>
-#endif /* CONFIG_MIPS_MT_SMTC */

/* VPE/SMP Prototype implements platform interfaces directly */
-#if !defined(CONFIG_MIPS_MT_SMP)

/*
 * Cause the specified action to be performed on a targeted "CPU"
 */
-void core_send_ipi(int cpu, unsigned int action)
+static void ssmtc_send_ipi_single(int cpu, unsigned int action)
{
-#ifdef CONFIG_MIPS_MT_SMTC
	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
-#endif /* CONFIG_MIPS_MT_SMTC */
	/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
}

+static inline void ssmtc_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		ssmtc_send_ipi_single(i, action);
+}
+
/*
- * Platform "CPU" startup hook
+ * Post-config but pre-boot cleanup entry point
 */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit ssmtc_init_secondary(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
-	smtc_boot_secondary(cpu, idle);
-#endif /* CONFIG_MIPS_MT_SMTC */
+	void smtc_init_secondary(void);
+
+	smtc_init_secondary();
}

/*
- * Post-config but pre-boot cleanup entry point
+ * SMP initialization finalization entry point
 */
-void __cpuinit prom_init_secondary(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_init_secondary(void);
-
-	smtc_init_secondary();
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
+static void __cpuinit ssmtc_smp_finish(void)
+{
+	smtc_smp_finish();
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+static void ssmtc_cpus_done(void)
+{
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+static void __cpuinit ssmtc_boot_secondary(int cpu, struct task_struct *idle)
+{
+	smtc_boot_secondary(cpu, idle);
+}

-void plat_smp_setup(void)
+static void __init ssmtc_smp_setup(void)
{
-#ifdef CONFIG_MIPS_MT_SMTC
	if (read_c0_config3() & (1 << 2))
		mipsmt_build_cpu_map(0);
-#endif /* CONFIG_MIPS_MT_SMTC */
}

/*
 * Platform SMP pre-initialization
 */
-void plat_prepare_cpus(unsigned int max_cpus)
+static void ssmtc_prepare_cpus(unsigned int max_cpus)
{
-#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * As noted above, we can assume a single CPU for now
	 * but it may be multithreaded.
@@ -96,28 +103,15 @@ void plat_prepare_cpus(unsigned int max_cpus)
	if (read_c0_config3() & (1 << 2)) {
		mipsmt_prepare_cpus();
	}
-#endif /* CONFIG_MIPS_MT_SMTC */
}

-/*
- * SMP initialization finalization entry point
- */
-void __cpuinit prom_smp_finish(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-	smtc_smp_finish();
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-/*
- * Hook for after all CPUs are online
- */
-void prom_cpus_done(void)
-{
-#ifdef CONFIG_MIPS_MT_SMTC
-#endif /* CONFIG_MIPS_MT_SMTC */
-}
-
-#endif /* CONFIG_MIPS32R2_MT_SMP */
+struct plat_smp_ops ssmtc_smp_ops = {
+	.send_ipi_single	= ssmtc_send_ipi_single,
+	.send_ipi_mask		= ssmtc_send_ipi_mask,
+	.init_secondary		= ssmtc_init_secondary,
+	.smp_finish		= ssmtc_smp_finish,
+	.cpus_done		= ssmtc_cpus_done,
+	.boot_secondary		= ssmtc_boot_secondary,
+	.smp_setup		= ssmtc_smp_setup,
+	.prepare_cpus		= ssmtc_prepare_cpus,
+};
@@ -19,6 +19,7 @@
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/reboot.h>
+#include <asm/smp-ops.h>
#include <asm/system.h>
#include <asm/bootinfo.h>
#include <asm/pmon.h>
@@ -78,6 +79,8 @@ static void prom_halt(void)
	__asm__(".set\tmips3\n\t" "wait\n\t" ".set\tmips0");
}

+extern struct plat_smp_ops yos_smp_ops;
+
/*
 * Init routine which accepts the variables from PMON
 */
@@ -127,6 +130,8 @@ void __init prom_init(void)
	}

	prom_grab_secondary();
+
+	register_smp_ops(&yos_smp_ops);
}

void __init prom_free_prom_memory(void)
...
@@ -42,70 +42,6 @@ void __init prom_grab_secondary(void)
		       launchstack + LAUNCHSTACK_SIZE, 0);
}
/*
* Detect available CPUs, populate phys_cpu_present_map before smp_init
*
* We don't want to start the secondary CPU yet nor do we have a nice probing
* feature in PMON so we just assume presence of the secondary core.
*/
void __init plat_smp_setup(void)
{
int i;
cpus_clear(phys_cpu_present_map);
for (i = 0; i < 2; i++) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
/*
* Be paranoid. Enable the IPI only if we're really about to go SMP.
*/
if (cpus_weight(cpu_possible_map))
set_c0_status(STATUSF_IP5);
}
/*
* Firmware CPU startup hook
* Complicated by PMON's weird interface which tries to minimic the UNIX fork.
* It launches the next * available CPU and copies some information on the
* stack so the first thing we do is throw away that stuff and load useful
* values into the registers ...
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long) task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
secondary_sp = sp;
secondary_gp = gp;
spin_unlock(&launch_lock);
}
/* Hook for after all CPUs are online */
void prom_cpus_done(void)
{
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
void __cpuinit prom_init_secondary(void)
{
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
void __cpuinit prom_smp_finish(void)
{
}
void titan_mailbox_irq(void)
{
	int cpu = smp_processor_id();
@@ -133,7 +69,7 @@ void titan_mailbox_irq(void)

/*
 * Send inter-processor interrupt
 */
-void core_send_ipi(int cpu, unsigned int action)
+static void yos_send_ipi_single(int cpu, unsigned int action)
{
	/*
	 * Generate an INTMSG so that it can be sent over to the
@@ -159,3 +95,86 @@ void core_send_ipi(int cpu, unsigned int action)
		break;
	}
}
static void yos_send_ipi_mask(cpumask_t mask, unsigned int action)
{
unsigned int i;
for_each_cpu_mask(i, mask)
yos_send_ipi_single(i, action);
}
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
static void __cpuinit yos_init_secondary(void)
{
set_c0_status(ST0_CO | ST0_IE | ST0_IM);
}
static void __cpuinit yos_smp_finish(void)
{
}
/* Hook for after all CPUs are online */
static void yos_cpus_done(void)
{
}
/*
* Firmware CPU startup hook
* Complicated by PMON's weird interface which tries to minimic the UNIX fork.
* It launches the next * available CPU and copies some information on the
* stack so the first thing we do is throw away that stuff and load useful
* values into the registers ...
*/
static void __cpuinit yos_boot_secondary(int cpu, struct task_struct *idle)
{
unsigned long gp = (unsigned long) task_thread_info(idle);
unsigned long sp = __KSTK_TOS(idle);
secondary_sp = sp;
secondary_gp = gp;
spin_unlock(&launch_lock);
}
/*
* Detect available CPUs, populate phys_cpu_present_map before smp_init
*
* We don't want to start the secondary CPU yet nor do we have a nice probing
* feature in PMON so we just assume presence of the secondary core.
*/
static void __init yos_smp_setup(void)
{
int i;
cpus_clear(phys_cpu_present_map);
for (i = 0; i < 2; i++) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
}
static void __init yos_prepare_cpus(unsigned int max_cpus)
{
/*
* Be paranoid. Enable the IPI only if we're really about to go SMP.
*/
if (cpus_weight(cpu_possible_map))
set_c0_status(STATUSF_IP5);
}
struct plat_smp_ops yos_smp_ops = {
.send_ipi_single = yos_send_ipi_single,
.send_ipi_mask = yos_send_ipi_mask,
.init_secondary = yos_init_secondary,
.smp_finish = yos_smp_finish,
.cpus_done = yos_cpus_done,
.boot_secondary = yos_boot_secondary,
.smp_setup = yos_smp_setup,
.prepare_cpus = yos_prepare_cpus,
};
@@ -3,7 +3,7 @@
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
- * Copyright (C) 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
 *
 * Symmetric Uniprocessor (TM) Support
 */
@@ -13,43 +13,55 @@
/*
 * Send inter-processor interrupt
 */
-void core_send_ipi(int cpu, unsigned int action)
+void up_send_ipi_single(int cpu, unsigned int action)
{
-	panic(KERN_ERR "%s called", __FUNCTION__);
+	panic(KERN_ERR "%s called", __func__);
+}
+
+static inline void up_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	panic(KERN_ERR "%s called", __func__);
}

/*
 * After we've done initial boot, this function is called to allow the
 * board code to clean up state, if needed
 */
-void __cpuinit prom_init_secondary(void)
+void __cpuinit up_init_secondary(void)
{
}

-void __cpuinit prom_smp_finish(void)
+void __cpuinit up_smp_finish(void)
{
}

/* Hook for after all CPUs are online */
-void prom_cpus_done(void)
-{
-}
-
-void __init prom_prepare_cpus(unsigned int max_cpus)
+void up_cpus_done(void)
{
-	cpus_clear(phys_cpu_present_map);
}

/*
 * Firmware CPU startup hook
 */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+void __cpuinit up_boot_secondary(int cpu, struct task_struct *idle)
{
}

-void __init plat_smp_setup(void)
+void __init up_smp_setup(void)
{
}

-void __init plat_prepare_cpus(unsigned int max_cpus)
+void __init up_prepare_cpus(unsigned int max_cpus)
{
}

+struct plat_smp_ops up_smp_ops = {
+	.send_ipi_single	= up_send_ipi_single,
+	.send_ipi_mask		= up_send_ipi_mask,
+	.init_secondary		= up_init_secondary,
+	.smp_finish		= up_smp_finish,
+	.cpus_done		= up_cpus_done,
+	.boot_secondary		= up_boot_secondary,
+	.smp_setup		= up_smp_setup,
+	.prepare_cpus		= up_prepare_cpus,
+};
@@ -27,7 +27,6 @@
#include <asm/sn/hub.h>
#include <asm/sn/intr.h>
#include <asm/current.h>
-#include <asm/smp.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
...
@@ -11,7 +11,6 @@
#include <asm/page.h>
#include <asm/sections.h>
-#include <asm/smp.h>
#include <asm/sn/types.h>
#include <asm/sn/arch.h>
#include <asm/sn/gda.h>
...
@@ -140,30 +140,51 @@ static __init void intr_clear_all(nasid_t nasid)
		REMOTE_HUB_CLR_INTR(nasid, i);
}

-void __init plat_smp_setup(void)
-{
-	cnodeid_t cnode;
-
-	for_each_online_node(cnode) {
-		if (cnode == 0)
-			continue;
-		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
-	}
-
-	replicate_kernel_text();
-
-	/*
-	 * Assumption to be fixed: we're always booted on logical / physical
-	 * processor 0. While we're always running on logical processor 0
-	 * this still means this is physical processor zero; it might for
-	 * example be disabled in the firwware.
-	 */
-	alloc_cpupda(0, 0);
-}
-
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
-	/* We already did everything necessary earlier */
-}
+static void ip27_send_ipi_single(int destid, unsigned int action)
+{
+	int irq;
+
+	switch (action) {
+	case SMP_RESCHEDULE_YOURSELF:
+		irq = CPU_RESCHED_A_IRQ;
+		break;
+	case SMP_CALL_FUNCTION:
+		irq = CPU_CALL_A_IRQ;
+		break;
+	default:
+		panic("sendintr");
+	}
+
+	irq += cputoslice(destid);
+
+	/*
+	 * Convert the compact hub number to the NASID to get the correct
+	 * part of the address space. Then set the interrupt bit associated
+	 * with the CPU we want to send the interrupt to.
+	 */
+	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
+}
+
+static void ip27_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		ip27_send_ipi_single(i, action);
+}
+
+static void __cpuinit ip27_init_secondary(void)
+{
+	per_cpu_init();
+	local_irq_enable();
+}
+
+static void __cpuinit ip27_smp_finish(void)
+{
+}
+
+static void __init ip27_cpus_done(void)
+{
+}

/*
@@ -171,7 +192,7 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
 * set sp to the kernel stack of the newly created idle process, gp to the proc
 * struct so that current_thread_info() will work.
 */
-void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
+static void __cpuinit ip27_boot_secondary(int cpu, struct task_struct *idle)
{
	unsigned long gp = (unsigned long)task_thread_info(idle);
	unsigned long sp = __KSTK_TOS(idle);
@@ -181,41 +202,39 @@ void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
			  0, (void *) sp, (void *) gp);
}

-void __cpuinit prom_init_secondary(void)
-{
-	per_cpu_init();
-	local_irq_enable();
-}
-
-void __init prom_cpus_done(void)
-{
-}
-
-void __cpuinit prom_smp_finish(void)
-{
-}
-
-void core_send_ipi(int destid, unsigned int action)
-{
-	int irq;
-
-	switch (action) {
-	case SMP_RESCHEDULE_YOURSELF:
-		irq = CPU_RESCHED_A_IRQ;
-		break;
-	case SMP_CALL_FUNCTION:
-		irq = CPU_CALL_A_IRQ;
-		break;
-	default:
-		panic("sendintr");
-	}
-
-	irq += cputoslice(destid);
-
-	/*
-	 * Convert the compact hub number to the NASID to get the correct
-	 * part of the address space. Then set the interrupt bit associated
-	 * with the CPU we want to send the interrupt to.
-	 */
-	REMOTE_HUB_SEND_INTR(COMPACT_TO_NASID_NODEID(cpu_to_node(destid)), irq);
-}
+static void __init ip27_smp_setup(void)
+{
+	cnodeid_t cnode;
+
+	for_each_online_node(cnode) {
+		if (cnode == 0)
+			continue;
+		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));
+	}
+
+	replicate_kernel_text();
+
+	/*
+	 * Assumption to be fixed: we're always booted on logical / physical
+	 * processor 0. While we're always running on logical processor 0
+	 * this still means this is physical processor zero; it might for
+	 * example be disabled in the firwware.
+	 */
+	alloc_cpupda(0, 0);
+}
+
+static void __init ip27_prepare_cpus(unsigned int max_cpus)
+{
+	/* We already did everything necessary earlier */
+}
+
+struct plat_smp_ops ip27_smp_ops = {
+	.send_ipi_single	= ip27_send_ipi_single,
+	.send_ipi_mask		= ip27_send_ipi_mask,
+	.init_secondary		= ip27_init_secondary,
+	.smp_finish		= ip27_smp_finish,
+	.cpus_done		= ip27_cpus_done,
+	.boot_secondary		= ip27_boot_secondary,
+	.smp_setup		= ip27_smp_setup,
+	.prepare_cpus		= ip27_prepare_cpus,
+};
@@ -23,6 +23,7 @@
#include <asm/mmu_context.h>
#include <asm/io.h>
+#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/bcm1480_regs.h>
#include <asm/sibyte/bcm1480_int.h>
@@ -67,28 +68,114 @@ void __cpuinit bcm1480_smp_init(void)
	change_c0_status(ST0_IM, imask);
}

-void __cpuinit bcm1480_smp_finish(void)
+/*
+ * These are routines for dealing with the sb1250 smp capabilities
+ * independent of board/firmware
+ */
+
+/*
+ * Simple enough; everything is set up, so just poke the appropriate mailbox
+ * register, and we should be set
+ */
+static void bcm1480_send_ipi_single(int cpu, unsigned int action)
+{
+	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
+}
+
+static void bcm1480_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		bcm1480_send_ipi_single(i, action);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void __cpuinit bcm1480_init_secondary(void)
+{
+	extern void bcm1480_smp_init(void);
+
+	bcm1480_smp_init();
+}
+
+/*
+ * Do any tidying up before marking online and running the idle
+ * loop
+ */
+static void __cpuinit bcm1480_smp_finish(void)
{
	extern void sb1480_clockevent_init(void);

	sb1480_clockevent_init();
	local_irq_enable();
+	bcm1480_smp_finish();
}

/*
- * These are routines for dealing with the sb1250 smp capabilities
- * independent of board/firmware
+ * Final cleanup after all secondaries booted
 */
+static void bcm1480_cpus_done(void)
+{
+}

/*
- * Simple enough; everything is set up, so just poke the appropriate mailbox
- * register, and we should be set
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
 */
-void core_send_ipi(int cpu, unsigned int action)
+static void __cpuinit bcm1480_boot_secondary(int cpu, struct task_struct *idle)
{
-	__raw_writeq((((u64)action)<< 48), mailbox_0_set_regs[cpu]);
+	int retval;
+
+	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
+			       __KSTK_TOS(idle),
+			       (unsigned long)task_thread_info(idle), 0);
+	if (retval != 0)
+		printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
/*
* Use CFE to find out how many CPUs are available, setting up
* phys_cpu_present_map and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
static void __init bcm1480_smp_setup(void)
{
int i, num;
cpus_clear(phys_cpu_present_map);
cpu_set(0, phys_cpu_present_map);
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void __init bcm1480_prepare_cpus(unsigned int max_cpus)
{
}
struct plat_smp_ops bcm1480_smp_ops = {
.send_ipi_single = bcm1480_send_ipi_single,
.send_ipi_mask = bcm1480_send_ipi_mask,
.init_secondary = bcm1480_init_secondary,
.smp_finish = bcm1480_smp_finish,
.cpus_done = bcm1480_cpus_done,
.boot_secondary = bcm1480_boot_secondary,
.smp_setup = bcm1480_smp_setup,
.prepare_cpus = bcm1480_prepare_cpus,
};
void bcm1480_mailbox_interrupt(void)
{
	int cpu = smp_processor_id();
...
lib-y = setup.o
-lib-$(CONFIG_SMP) += smp.o
lib-$(CONFIG_SIBYTE_CFE_CONSOLE) += console.o
@@ -28,6 +28,7 @@
#include <asm/bootinfo.h>
#include <asm/reboot.h>
#include <asm/sibyte/board.h>
+#include <asm/smp-ops.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
@@ -232,6 +233,9 @@ static int __init initrd_setup(char *str)
#endif

+extern struct plat_smp_ops sb_smp_ops;
+extern struct plat_smp_ops bcm1480_smp_ops;
+
/*
 * prom_init is called just after the cpu type is determined, from setup_arch()
 */
@@ -340,6 +344,13 @@ void __init prom_init(void)
	arcs_cmdline[CL_SIZE-1] = 0;

	prom_meminit();
+
+#if defined(CONFIG_SIBYTE_BCM112X) || defined(CONFIG_SIBYTE_SB1250)
+	register_smp_ops(&sb_smp_ops);
+#endif
+#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
+	register_smp_ops(&bcm1480_smp_ops);
+#endif
}

void __init prom_free_prom_memory(void)
...
/*
* Copyright (C) 2000, 2001, 2002, 2003 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/processor.h>
#include <asm/fw/cfe/cfe_api.h>
#include <asm/fw/cfe/cfe_error.h>
/*
* Use CFE to find out how many CPUs are available, setting up
* phys_cpu_present_map and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
void __init plat_smp_setup(void)
{
int i, num;
cpus_clear(phys_cpu_present_map);
cpu_set(0, phys_cpu_present_map);
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
void __init plat_prepare_cpus(unsigned int max_cpus)
{
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
*/
void __cpuinit prom_boot_secondary(int cpu, struct task_struct *idle)
{
int retval;
retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
__KSTK_TOS(idle),
(unsigned long)task_thread_info(idle), 0);
if (retval != 0)
printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
/*
* Code to run on secondary just after probing the CPU
*/
void __cpuinit prom_init_secondary(void)
{
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
extern void bcm1480_smp_init(void);
bcm1480_smp_init();
#elif defined(CONFIG_SIBYTE_SB1250)
extern void sb1250_smp_init(void);
sb1250_smp_init();
#else
#error invalid SMP configuration
#endif
}
/*
* Do any tidying up before marking online and running the idle
* loop
*/
void __cpuinit prom_smp_finish(void)
{
#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
extern void bcm1480_smp_finish(void);
bcm1480_smp_finish();
#elif defined(CONFIG_SIBYTE_SB1250)
extern void sb1250_smp_finish(void);
sb1250_smp_finish();
#else
#error invalid SMP configuration
#endif
}
/*
* Final cleanup after all secondaries booted
*/
void prom_cpus_done(void)
{
}
@@ -24,6 +24,7 @@
#include <asm/mmu_context.h>
#include <asm/io.h>
+#include <asm/fw/cfe/cfe_api.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
@@ -55,7 +56,43 @@ void __cpuinit sb1250_smp_init(void)
	change_c0_status(ST0_IM, imask);
}
-void __cpuinit sb1250_smp_finish(void)
+/*
+ * These are routines for dealing with the sb1250 smp capabilities
+ * independent of board/firmware
+ */
+
+/*
+ * Simple enough; everything is set up, so just poke the appropriate mailbox
+ * register, and we should be set
+ */
+static void sb1250_send_ipi_single(int cpu, unsigned int action)
+{
+	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
+}
+
+static inline void sb1250_send_ipi_mask(cpumask_t mask, unsigned int action)
+{
+	unsigned int i;
+
+	for_each_cpu_mask(i, mask)
+		sb1250_send_ipi_single(i, action);
+}
+
+/*
+ * Code to run on secondary just after probing the CPU
+ */
+static void __cpuinit sb1250_init_secondary(void)
+{
+	extern void sb1250_smp_init(void);
+
+	sb1250_smp_init();
+}
+
+/*
+ * Do any tidying up before marking online and running the idle
+ * loop
+ */
+static void __cpuinit sb1250_smp_finish(void)
{
	extern void sb1250_clockevent_init(void);
@@ -64,19 +101,68 @@ void __cpuinit sb1250_smp_finish(void)
}

/*
- * These are routines for dealing with the sb1250 smp capabilities
- * independent of board/firmware
+ * Final cleanup after all secondaries booted
 */
+static void sb1250_cpus_done(void)
+{
+}

/*
- * Simple enough; everything is set up, so just poke the appropriate mailbox
- * register, and we should be set
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
 */
-void core_send_ipi(int cpu, unsigned int action)
+static void __cpuinit sb1250_boot_secondary(int cpu, struct task_struct *idle)
{
-	__raw_writeq((((u64)action) << 48), mailbox_set_regs[cpu]);
+	int retval;
+
+	retval = cfe_cpu_start(cpu_logical_map(cpu), &smp_bootstrap,
+			       __KSTK_TOS(idle),
+			       (unsigned long)task_thread_info(idle), 0);
+	if (retval != 0)
+		printk("cfe_start_cpu(%i) returned %i\n" , cpu, retval);
}
/*
* Use CFE to find out how many CPUs are available, setting up
* phys_cpu_present_map and the logical/physical mappings.
* XXXKW will the boot CPU ever not be physical 0?
*
* Common setup before any secondaries are started
*/
static void __init sb1250_smp_setup(void)
{
int i, num;
cpus_clear(phys_cpu_present_map);
cpu_set(0, phys_cpu_present_map);
__cpu_number_map[0] = 0;
__cpu_logical_map[0] = 0;
for (i = 1, num = 0; i < NR_CPUS; i++) {
if (cfe_cpu_stop(i) == 0) {
cpu_set(i, phys_cpu_present_map);
__cpu_number_map[i] = ++num;
__cpu_logical_map[num] = i;
}
}
printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
}
static void __init sb1250_prepare_cpus(unsigned int max_cpus)
{
}
struct plat_smp_ops sb_smp_ops = {
.send_ipi_single = sb1250_send_ipi_single,
.send_ipi_mask = sb1250_send_ipi_mask,
.init_secondary = sb1250_init_secondary,
.smp_finish = sb1250_smp_finish,
.cpus_done = sb1250_cpus_done,
.boot_secondary = sb1250_boot_secondary,
.smp_setup = sb1250_smp_setup,
.prepare_cpus = sb1250_prepare_cpus,
};
void sb1250_mailbox_interrupt(void)
{
	int cpu = smp_processor_id();
...
@@ -48,12 +48,10 @@ extern unsigned int zbbus_mhz;
extern void sb1250_time_init(void);
extern void sb1250_mask_irq(int cpu, int irq);
extern void sb1250_unmask_irq(int cpu, int irq);
-extern void sb1250_smp_finish(void);

extern void bcm1480_time_init(void);
extern void bcm1480_mask_irq(int cpu, int irq);
extern void bcm1480_unmask_irq(int cpu, int irq);
-extern void bcm1480_smp_finish(void);

#define AT_spin \
	__asm__ __volatile__ ( \
...
/*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file "COPYING" in the main directory of this
* archive for more details.
*
* Copyright (C) 2000 - 2001 by Kanoj Sarcar (kanoj@sgi.com)
* Copyright (C) 2000 - 2001 by Silicon Graphics, Inc.
* Copyright (C) 2000, 2001, 2002 Ralf Baechle
* Copyright (C) 2000, 2001 Broadcom Corporation
*/
#ifndef __ASM_SMP_OPS_H
#define __ASM_SMP_OPS_H
#ifdef CONFIG_SMP
#include <linux/cpumask.h>
struct plat_smp_ops {
void (*send_ipi_single)(int cpu, unsigned int action);
void (*send_ipi_mask)(cpumask_t mask, unsigned int action);
void (*init_secondary)(void);
void (*smp_finish)(void);
void (*cpus_done)(void);
void (*boot_secondary)(int cpu, struct task_struct *idle);
void (*smp_setup)(void);
void (*prepare_cpus)(unsigned int max_cpus);
};
extern void register_smp_ops(struct plat_smp_ops *ops);
static inline void plat_smp_setup(void)
{
extern struct plat_smp_ops *mp_ops; /* private */
mp_ops->smp_setup();
}
#else /* !CONFIG_SMP */
struct plat_smp_ops;
static inline void plat_smp_setup(void)
{
/* UP, nothing to do ... */
}
static inline void register_smp_ops(struct plat_smp_ops *ops)
{
}
#endif /* !CONFIG_SMP */
extern struct plat_smp_ops up_smp_ops;
extern struct plat_smp_ops vsmp_smp_ops;
#endif /* __ASM_SMP_OPS_H */
@@ -11,14 +11,13 @@
#ifndef __ASM_SMP_H
#define __ASM_SMP_H

-#ifdef CONFIG_SMP
-
#include <linux/bitops.h>
#include <linux/linkage.h>
#include <linux/threads.h>
#include <linux/cpumask.h>

#include <asm/atomic.h>
+#include <asm/smp-ops.h>

extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
@@ -52,56 +51,6 @@ extern struct call_data_struct *call_data;
extern cpumask_t phys_cpu_present_map;
#define cpu_possible_map	phys_cpu_present_map
/*
* These are defined by the board-specific code.
*/
/*
* Cause the function described by call_data to be executed on the passed
* cpu. When the function has finished, increment the finished field of
* call_data.
*/
extern void core_send_ipi(int cpu, unsigned int action);
static inline void core_send_ipi_mask(cpumask_t mask, unsigned int action)
{
unsigned int i;
for_each_cpu_mask(i, mask)
core_send_ipi(i, action);
}
/*
* Firmware CPU startup hook
*/
extern void prom_boot_secondary(int cpu, struct task_struct *idle);
/*
* After we've done initial boot, this function is called to allow the
* board code to clean up state, if needed
*/
extern void prom_init_secondary(void);
/*
* Populate cpu_possible_map before smp_init, called from setup_arch.
*/
extern void plat_smp_setup(void);
/*
* Called in smp_prepare_cpus.
*/
extern void plat_prepare_cpus(unsigned int max_cpus);
/*
* Last chance for the board code to finish SMP initialization before
* the CPU is "online".
*/
extern void prom_smp_finish(void);
/* Hook for after all CPUs are online */
extern void prom_cpus_done(void);
extern void asmlinkage smp_bootstrap(void);

/*
@@ -111,11 +60,11 @@ extern void asmlinkage smp_bootstrap(void);
 */
static inline void smp_send_reschedule(int cpu)
{
-	core_send_ipi(cpu, SMP_RESCHEDULE_YOURSELF);
+	extern struct plat_smp_ops *mp_ops;	/* private */
+
+	mp_ops->send_ipi_single(cpu, SMP_RESCHEDULE_YOURSELF);
}

extern asmlinkage void smp_call_function_interrupt(void);

-#endif /* CONFIG_SMP */
-
#endif /* __ASM_SMP_H */