Commit 88b3aac6 authored by Helge Deller's avatar Helge Deller

parisc: Implement __cpu_die() and __cpu_disable() for CPU hotplugging

Add relevant code to __cpu_die() and __cpu_disable() to finally enable
the CPU hotplugging features. Reset the irq count values in smp_callin()
to zero before bringing up the CPU.

It seems that the firmware may need up to 8 seconds to fully stop a CPU,
during which no other PDC calls are allowed to be made. Use a timeout in
__cpu_die() to accommodate for this.

Use "chcpu -d 1" to bring CPU1 down, and "chcpu -e 1" to bring it up.
Signed-off-by: Helge Deller <deller@gmx.de>
parent db2b0d76
......@@ -56,6 +56,7 @@ config PARISC
select HAVE_ARCH_TRACEHOOK
select HAVE_REGS_AND_STACK_ACCESS_API
select GENERIC_SCHED_CLOCK
select GENERIC_IRQ_MIGRATION if SMP
select HAVE_UNSTABLE_SCHED_CLOCK if SMP
select LEGACY_TIMER_TICK
select CPU_NO_EFFICIENT_FFS
......
......@@ -44,12 +44,7 @@ static inline void smp_send_all_nop(void) { return; }
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
/*
 * CPU hotplug entry points, implemented in arch/parisc/kernel/smp.c.
 * __cpu_disable() runs on the CPU being taken down; __cpu_die() runs on
 * the CPU requesting the shutdown and waits for it to complete.
 *
 * NOTE(review): the scraped diff left both the old inline stubs and the
 * new prototypes in place; only the prototypes belong in the header, as
 * real implementations now live in smp.c.
 */
int __cpu_disable(void);
void __cpu_die(unsigned int cpu);
#endif /* __ASM_SMP_H */
......@@ -30,6 +30,7 @@
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/kgdb.h>
#include <linux/sched/hotplug.h>
#include <linux/atomic.h>
#include <asm/current.h>
......@@ -60,8 +61,6 @@ volatile struct task_struct *smp_init_current_idle_task;
/* track which CPU is booting */
static volatile int cpu_now_booting;
static int parisc_max_cpus = 1;
static DEFINE_PER_CPU(spinlock_t, ipi_lock);
enum ipi_message_type {
......@@ -269,7 +268,7 @@ void arch_send_call_function_single_ipi(int cpu)
/*
* Called by secondaries to update state and initialize CPU registers.
*/
static void __init
static void
smp_cpu_init(int cpunum)
{
extern void init_IRQ(void); /* arch/parisc/kernel/irq.c */
......@@ -309,7 +308,7 @@ smp_cpu_init(int cpunum)
* Slaves start using C here. Indirectly called from smp_slave_stext.
* Do what start_kernel() and main() do for boot strap processor (aka monarch)
*/
void __init smp_callin(unsigned long pdce_proc)
void smp_callin(unsigned long pdce_proc)
{
int slave_id = cpu_now_booting;
......@@ -334,11 +333,28 @@ void __init smp_callin(unsigned long pdce_proc)
/*
* Bring one cpu online.
*/
int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
static int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
{
const struct cpuinfo_parisc *p = &per_cpu(cpu_data, cpuid);
long timeout;
#ifdef CONFIG_HOTPLUG_CPU
int i;
/* reset irq statistics for this CPU */
memset(&per_cpu(irq_stat, cpuid), 0, sizeof(irq_cpustat_t));
for (i = 0; i < NR_IRQS; i++) {
struct irq_desc *desc = irq_to_desc(i);
if (desc && desc->kstat_irqs)
*per_cpu_ptr(desc->kstat_irqs, cpuid) = 0;
}
#endif
/* wait until last booting CPU has started. */
while (cpu_now_booting)
;
/* Let _start know what logical CPU we're booting
** (offset into init_tasks[],cpu_data[])
*/
......@@ -374,7 +390,6 @@ int smp_boot_one_cpu(int cpuid, struct task_struct *idle)
if(cpu_online(cpuid)) {
/* Which implies Slave has started up */
cpu_now_booting = 0;
smp_init_current_idle_task = NULL;
goto alive ;
}
udelay(100);
......@@ -415,25 +430,82 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
spin_lock_init(&per_cpu(ipi_lock, cpu));
init_cpu_present(cpumask_of(0));
parisc_max_cpus = max_cpus;
if (!max_cpus)
printk(KERN_INFO "SMP mode deactivated.\n");
}
/*
 * Called by the SMP core once all secondary CPUs have been brought up.
 * Nothing to do on parisc.  (The duplicated non-__init signature was
 * stripped-diff residue: only the __init definition is valid C.)
 */
void __init smp_cpus_done(unsigned int cpu_max)
{
}
/*
 * Bring one secondary CPU online; called by the CPU hotplug core.
 *
 * Returns 0 when the CPU ends up online, -EIO otherwise.
 *
 * NOTE(review): the stripped diff had left the pre-patch check
 * (cpu < parisc_max_cpus / -ENOSYS) interleaved with the new logic,
 * which would have attempted to boot the CPU twice and returned the
 * wrong error code; only the new logic is kept.
 */
int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	/* Nothing to do if the CPU is already running. */
	if (cpu_online(cpu))
		return 0;

	/* Respect the maxcpus= boot limit before starting the slave CPU. */
	if (num_online_cpus() < setup_max_cpus && smp_boot_one_cpu(cpu, tidle))
		return -EIO;

	return cpu_online(cpu) ? 0 : -EIO;
}
/*
* __cpu_disable runs on the processor to be shutdown.
*/
/*
 * __cpu_disable runs on the processor to be shutdown.
 *
 * Marks the CPU offline, migrates its interrupts away, flushes local
 * caches/TLB and quiesces external interrupts so the firmware can stop
 * the CPU.  Always returns 0; compiled to a no-op without
 * CONFIG_HOTPLUG_CPU.
 */
int __cpu_disable(void)
{
#ifdef CONFIG_HOTPLUG_CPU
	unsigned int cpu = smp_processor_id();

	/* Remove this CPU from the topology maps. */
	remove_cpu_topology(cpu);

	/*
	 * Take this CPU offline.  Once we clear this, we can't return,
	 * and we must not schedule until we're ready to give up the cpu.
	 */
	set_cpu_online(cpu, false);

	/* Stop accepting IPIs and move all other irqs to the remaining CPUs. */
	disable_percpu_irq(IPI_IRQ);
	irq_migrate_all_off_this_cpu();

	/* Write back this CPU's dirty cache lines and drop its local TLB. */
	flush_cache_all_local();
	flush_tlb_all_local(NULL);

	/* disable all irqs, including timer irq */
	local_irq_disable();

	/* wait for next timer irq ... */
	mdelay(1000/HZ+100);

	/* ... and then clear all pending external irqs */
	set_eiem(0);
	mtctl(~0UL, CR_EIRR);
	mfctl(CR_EIRR);
	mtctl(0, CR_EIRR);
#endif
	return 0;
}
/*
* called on the thread which is asking for a CPU to be shutdown -
* waits until shutdown has completed, or it is timed out.
*/
/*
 * Called on the thread which is asking for a CPU to be shutdown -
 * waits until shutdown has completed, or it is timed out.
 *
 * NOTE(review): a stray pre-patch line ("return cpu_online(cpu) ? 0 :
 * -ENOSYS;") left over from the stripped diff was removed - a void
 * function must not return a value.  The failure path now also drops
 * the rendezvous lock instead of leaking it.
 */
void __cpu_die(unsigned int cpu)
{
	/*
	 * Per the commit message, firmware may need up to 8 seconds to
	 * fully stop a CPU, during which no other PDC calls may be made -
	 * serialize via the rendezvous lock.
	 */
	pdc_cpu_rendezvous_lock();

	if (!cpu_wait_death(cpu, 5)) {
		pr_crit("CPU%u: cpu didn't die\n", cpu);
		/* Don't leave the rendezvous lock held on failure. */
		pdc_cpu_rendezvous_unlock();
		return;
	}
	pr_info("CPU%u: is shutting down\n", cpu);

	/* set task's state to interruptible sleep */
	set_current_state(TASK_INTERRUPTIBLE);
	schedule_timeout((IS_ENABLED(CONFIG_64BIT) ? 8:2) * HZ);

	pdc_cpu_rendezvous_unlock();
}
#ifdef CONFIG_PROC_FS
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment