Commit c6b5b847 authored by Heiko Carstens, committed by Martin Schwidefsky

[S390] cpu shutdown rework

Let one master cpu kill all other cpus instead of sending an external
interrupt to all other cpus so they can kill themselves.
Simplifies reipl/shutdown functions a lot.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 740b5706
...@@ -576,23 +576,6 @@ static struct subsys_attribute dump_type_attr = ...@@ -576,23 +576,6 @@ static struct subsys_attribute dump_type_attr =
static decl_subsys(dump, NULL, NULL); static decl_subsys(dump, NULL, NULL);
#ifdef CONFIG_SMP
/*
 * Stop every online CPU except the one running this function, so the
 * system is quiesced before a dump is started.  The sigp stop order is
 * retried while the target CPU reports busy.
 */
static void dump_smp_stop_all(void)
{
	int cpu;

	/* Disable preemption so smp_processor_id() stays stable below. */
	preempt_disable();
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		/* A CPU may transiently reject sigp orders; retry with backoff. */
		while (signal_processor(cpu, sigp_stop) == sigp_busy)
			udelay(10);
	}
	preempt_enable();
}
#else
#define dump_smp_stop_all() do { } while (0)
#endif
/* /*
* Shutdown actions section * Shutdown actions section
*/ */
...@@ -724,13 +707,13 @@ static void do_dump(void) ...@@ -724,13 +707,13 @@ static void do_dump(void)
switch (dump_method) { switch (dump_method) {
case IPL_METHOD_CCW_CIO: case IPL_METHOD_CCW_CIO:
dump_smp_stop_all(); smp_send_stop();
devid.devno = dump_block_ccw->ipl_info.ccw.devno; devid.devno = dump_block_ccw->ipl_info.ccw.devno;
devid.ssid = 0; devid.ssid = 0;
reipl_ccw_dev(&devid); reipl_ccw_dev(&devid);
break; break;
case IPL_METHOD_CCW_VM: case IPL_METHOD_CCW_VM:
dump_smp_stop_all(); smp_send_stop();
sprintf(buf, "STORE STATUS"); sprintf(buf, "STORE STATUS");
__cpcmd(buf, NULL, 0, NULL); __cpcmd(buf, NULL, 0, NULL);
sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno); sprintf(buf, "IPL %X", dump_block_ccw->ipl_info.ccw.devno);
...@@ -1059,9 +1042,6 @@ void s390_reset_system(void) ...@@ -1059,9 +1042,6 @@ void s390_reset_system(void)
{ {
struct _lowcore *lc; struct _lowcore *lc;
/* Disable all interrupts/machine checks */
__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
/* Stack for interrupt/machine check handler */ /* Stack for interrupt/machine check handler */
lc = (struct _lowcore *)(unsigned long) store_prefix(); lc = (struct _lowcore *)(unsigned long) store_prefix();
lc->panic_stack = S390_lowcore.panic_stack; lc->panic_stack = S390_lowcore.panic_stack;
......
/* /*
* arch/s390/kernel/machine_kexec.c * arch/s390/kernel/machine_kexec.c
* *
* (C) Copyright IBM Corp. 2005 * Copyright IBM Corp. 2005,2006
* *
* Author(s): Rolf Adelsberger <adelsberger@de.ibm.com> * Author(s): Rolf Adelsberger,
* * Heiko Carstens <heiko.carstens@de.ibm.com>
*/
/*
* s390_machine_kexec.c - handle the transition of Linux booting another kernel
* on the S390 architecture.
*/ */
#include <linux/device.h> #include <linux/device.h>
...@@ -24,81 +19,53 @@ ...@@ -24,81 +19,53 @@
#include <asm/smp.h> #include <asm/smp.h>
#include <asm/reset.h> #include <asm/reset.h>
static void kexec_halt_all_cpus(void *); typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
typedef void (*relocate_kernel_t) (kimage_entry_t *, unsigned long);
extern const unsigned char relocate_kernel[]; extern const unsigned char relocate_kernel[];
extern const unsigned long long relocate_kernel_len; extern const unsigned long long relocate_kernel_len;
int int machine_kexec_prepare(struct kimage *image)
machine_kexec_prepare(struct kimage *image)
{ {
unsigned long reboot_code_buffer; void *reboot_code_buffer;
/* We don't support anything but the default image type for now. */ /* We don't support anything but the default image type for now. */
if (image->type != KEXEC_TYPE_DEFAULT) if (image->type != KEXEC_TYPE_DEFAULT)
return -EINVAL; return -EINVAL;
/* Get the destination where the assembler code should be copied to.*/ /* Get the destination where the assembler code should be copied to.*/
reboot_code_buffer = page_to_pfn(image->control_code_page)<<PAGE_SHIFT; reboot_code_buffer = (void *) page_to_phys(image->control_code_page);
/* Then copy it */ /* Then copy it */
memcpy((void *) reboot_code_buffer, relocate_kernel, memcpy(reboot_code_buffer, relocate_kernel, relocate_kernel_len);
relocate_kernel_len);
return 0; return 0;
} }
void void machine_kexec_cleanup(struct kimage *image)
machine_kexec_cleanup(struct kimage *image)
{ {
} }
void void machine_shutdown(void)
machine_shutdown(void)
{ {
printk(KERN_INFO "kexec: machine_shutdown called\n"); printk(KERN_INFO "kexec: machine_shutdown called\n");
} }
/*
 * Kick off kexec: run kexec_halt_all_cpus() on every CPU (including
 * this one) via on_each_cpu(), then spin forever.
 * NOTE(review): the jump into the new kernel appears to happen inside
 * the per-CPU handler (it invokes the relocation data mover) — confirm
 * against kexec_halt_all_cpus().  Marked NORET_TYPE: never returns.
 */
NORET_TYPE void
machine_kexec(struct kimage *image)
{
	on_each_cpu(kexec_halt_all_cpus, image, 0, 0);
	for (;;);
}
extern void pfault_fini(void); extern void pfault_fini(void);
static void void machine_kexec(struct kimage *image)
kexec_halt_all_cpus(void *kernel_image)
{ {
static atomic_t cpuid = ATOMIC_INIT(-1);
int cpu;
struct kimage *image;
relocate_kernel_t data_mover; relocate_kernel_t data_mover;
preempt_disable();
#ifdef CONFIG_PFAULT #ifdef CONFIG_PFAULT
if (MACHINE_IS_VM) if (MACHINE_IS_VM)
pfault_fini(); pfault_fini();
#endif #endif
smp_send_stop();
if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
for_each_online_cpu(cpu) {
if (cpu == smp_processor_id())
continue;
while (!smp_cpu_not_running(cpu))
cpu_relax();
}
s390_reset_system(); s390_reset_system();
image = (struct kimage *) kernel_image; data_mover = (relocate_kernel_t) page_to_phys(image->control_code_page);
data_mover = (relocate_kernel_t)
(page_to_pfn(image->control_code_page) << PAGE_SHIFT);
/* Call the moving routine */ /* Call the moving routine */
(*data_mover) (&image->head, image->start); (*data_mover)(&image->head, image->start);
for (;;);
} }
...@@ -230,18 +230,37 @@ static inline void do_store_status(void) ...@@ -230,18 +230,37 @@ static inline void do_store_status(void)
} }
} }
/*
 * Busy-wait until every online CPU other than the current one has
 * entered the stopped state.
 */
static inline void do_wait_for_stop(void)
{
	int i;

	for_each_online_cpu(i) {
		if (i != smp_processor_id()) {
			while (!smp_cpu_not_running(i))
				cpu_relax();
		}
	}
}
/* /*
* this function sends a 'stop' sigp to all other CPUs in the system. * this function sends a 'stop' sigp to all other CPUs in the system.
* it goes straight through. * it goes straight through.
*/ */
void smp_send_stop(void) void smp_send_stop(void)
{ {
/* Disable all interrupts/machine checks */
__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
/* write magic number to zero page (absolute 0) */ /* write magic number to zero page (absolute 0) */
lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC; lowcore_ptr[smp_processor_id()]->panic_magic = __PANIC_MAGIC;
/* stop other processors. */ /* stop other processors. */
do_send_stop(); do_send_stop();
/* wait until other processors are stopped */
do_wait_for_stop();
/* store status of other processors. */ /* store status of other processors. */
do_store_status(); do_store_status();
} }
...@@ -250,88 +269,28 @@ void smp_send_stop(void) ...@@ -250,88 +269,28 @@ void smp_send_stop(void)
* Reboot, halt and power_off routines for SMP. * Reboot, halt and power_off routines for SMP.
*/ */
/*
 * Restart handler, executed on every CPU via on_each_cpu() (see
 * machine_restart_smp).  Exactly one CPU wins the cmpxchg race and
 * performs the re-IPL; all others stop themselves.
 */
static void do_machine_restart(void * __unused)
{
	int cpu;
	static atomic_t cpuid = ATOMIC_INIT(-1);

	/* First CPU to claim cpuid continues; every loser stops itself. */
	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);

	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while(!smp_cpu_not_running(cpu))
			cpu_relax();
	}

	/* Store status of other cpus. */
	do_store_status();
	/*
	 * Finally call reipl. Because we waited for all other
	 * cpus to enter this function we know that they do
	 * not hold any s390irq-locks (the cpus have been
	 * interrupted by an external interrupt and s390irq
	 * locks are always held disabled).
	 */
	do_reipl();
}
void machine_restart_smp(char * __unused) void machine_restart_smp(char * __unused)
{ {
on_each_cpu(do_machine_restart, NULL, 0, 0); smp_send_stop();
} do_reipl();
/*
 * Park the current CPU forever: mask external-interruption subclasses
 * in CR0 and clear CR6, then loop in enabled wait state so the CPU
 * stays quiet for the remainder of the shutdown.
 */
static void do_wait_for_stop(void)
{
	unsigned long cr[16];

	__ctl_store(cr, 0, 15);
	cr[0] &= ~0xffff;	/* clear external-interrupt subclass mask bits */
	cr[6] = 0;		/* NOTE(review): presumably disables I/O interruption subclasses — confirm */
	__ctl_load(cr, 0, 15);
	for (;;)
		enabled_wait();
}
static void do_machine_halt(void * __unused)
{
static atomic_t cpuid = ATOMIC_INIT(-1);
if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(),
sigp_stop_and_store_status);
}
do_wait_for_stop();
} }
void machine_halt_smp(void) void machine_halt_smp(void)
{ {
on_each_cpu(do_machine_halt, NULL, 0, 0); smp_send_stop();
} if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
__cpcmd(vmhalt_cmd, NULL, 0, NULL);
static void do_machine_power_off(void * __unused) signal_processor(smp_processor_id(), sigp_stop_and_store_status);
{ for (;;);
static atomic_t cpuid = ATOMIC_INIT(-1);
if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) == -1) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(),
sigp_stop_and_store_status);
}
do_wait_for_stop();
} }
void machine_power_off_smp(void) void machine_power_off_smp(void)
{ {
on_each_cpu(do_machine_power_off, NULL, 0, 0); smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
__cpcmd(vmpoff_cmd, NULL, 0, NULL);
signal_processor(smp_processor_id(), sigp_stop_and_store_status);
for (;;);
} }
/* /*
...@@ -860,4 +819,3 @@ EXPORT_SYMBOL(smp_ctl_clear_bit); ...@@ -860,4 +819,3 @@ EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function); EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu); EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu); EXPORT_SYMBOL(smp_put_cpu);
...@@ -19,52 +19,17 @@ ...@@ -19,52 +19,17 @@
#include "sclp.h" #include "sclp.h"
#ifdef CONFIG_SMP
/* Signal completion of shutdown process. All CPUs except the first to enter
 * this function: go to stopped state. First CPU: wait until all other
 * CPUs are in stopped or check stop state. Afterwards, load special PSW
 * to indicate completion. */
static void
do_load_quiesce_psw(void * __unused)
{
	static atomic_t cpuid = ATOMIC_INIT(-1);
	psw_t quiesce_psw;
	int cpu;

	/* Only the first CPU to swap cpuid continues; the rest stop themselves. */
	if (atomic_cmpxchg(&cpuid, -1, smp_processor_id()) != -1)
		signal_processor(smp_processor_id(), sigp_stop);
	/* Wait for all other cpus to enter stopped state */
	for_each_online_cpu(cpu) {
		if (cpu == smp_processor_id())
			continue;
		while(!smp_cpu_not_running(cpu))
			cpu_relax();
	}
	/* Quiesce the last cpu with the special psw */
	quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
	quiesce_psw.addr = 0xfff;	/* NOTE(review): 0xfff looks like the agreed quiesce-done marker — confirm */
	__load_psw(quiesce_psw);
}
/*
 * Shutdown handler for the quiesce event: execute the quiesce-PSW
 * loader on every CPU in the system.
 */
static void do_machine_quiesce(void)
{
	on_each_cpu(do_load_quiesce_psw, NULL, 0, 0);
}
#else
/* Shutdown handler. Signal completion of shutdown by loading special PSW. */ /* Shutdown handler. Signal completion of shutdown by loading special PSW. */
static void static void
do_machine_quiesce(void) do_machine_quiesce(void)
{ {
psw_t quiesce_psw; psw_t quiesce_psw;
smp_send_stop();
quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT; quiesce_psw.mask = PSW_BASE_BITS | PSW_MASK_WAIT;
quiesce_psw.addr = 0xfff; quiesce_psw.addr = 0xfff;
__load_psw(quiesce_psw); __load_psw(quiesce_psw);
} }
#endif
/* Handler for quiesce event. Start shutdown procedure. */ /* Handler for quiesce event. Start shutdown procedure. */
static void static void
......
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <asm/lowcore.h> #include <asm/lowcore.h>
#include <asm/sigp.h> #include <asm/sigp.h>
#include <asm/ptrace.h>
/* /*
s390 specific smp.c headers s390 specific smp.c headers
...@@ -101,6 +102,13 @@ smp_call_function_on(void (*func) (void *info), void *info, ...@@ -101,6 +102,13 @@ smp_call_function_on(void (*func) (void *info), void *info,
func(info); func(info);
return 0; return 0;
} }
/*
 * Uniprocessor stub of smp_send_stop(): there are no other CPUs to
 * stop, so only perform the first step of the SMP variant — disabling
 * all interrupts and machine checks on the current CPU.
 */
static inline void smp_send_stop(void)
{
	/* Disable all interrupts/machine checks */
	__load_psw_mask(PSW_KERNEL_BITS & ~PSW_MASK_MCHECK);
}
#define smp_cpu_not_running(cpu) 1 #define smp_cpu_not_running(cpu) 1
#define smp_get_cpu(cpu) ({ 0; }) #define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; }) #define smp_put_cpu(cpu) ({ 0; })
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment