Commit 9baeca57 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: cpu hotplug support

From: Ursula Braun-Krahl <braunu@de.ibm.com>
From: Heiko Carstens <heiko.carstens@de.ibm.com>
From: Gerald Schaefer <geraldsc@de.ibm.com>
From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Add s390 architecture support for cpu hotplug.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 1ef8b835
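
The new CONFIG_HOTPLUG_CPU option below exposes each CPU under /sys/devices/system/cpu/cpu#. As a hedged illustration of the resulting userspace interface (assuming the standard per-cpu "online" attribute; the helper name set_cpu_online here is illustrative, not part of this patch):

    /* Toggle a cpu via sysfs: write '0' to take it offline, '1' to bring it back. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    static int set_cpu_online(int cpu, int online)
    {
            char path[64];
            int fd, rc;

            snprintf(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%d/online", cpu);
            fd = open(path, O_WRONLY);
            if (fd < 0)
                    return -1;
            rc = (write(fd, online ? "1" : "0", 1) == 1) ? 0 : -1;
            close(fd);
            return rc;
    }

    int main(void)
    {
            if (set_cpu_online(1, 0) == 0)          /* take cpu1 offline */
                    return set_cpu_online(1, 1);    /* and bring it back */
            return 1;
    }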
@@ -107,6 +107,15 @@ config NR_CPUS
 	  This is purely to save memory - each supported CPU adds
 	  approximately sixteen kilobytes to the kernel image.
 
+config HOTPLUG_CPU
+	bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
+	depends on SMP && HOTPLUG && EXPERIMENTAL
+	default n
+	help
+	  Say Y here to experiment with turning CPUs off and on. CPUs
+	  can be controlled through /sys/devices/system/cpu/cpu#.
+	  Say N if you want to disable CPU hotplug.
+
 config MATHEMU
 	bool "IEEE FPU emulation"
 	depends on MARCH_G5
...
@@ -25,6 +25,8 @@
 #include <linux/sysctl.h>
 #include <asm/timer.h>
 //#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
 #include "appldata.h"
@@ -124,10 +126,6 @@ static struct ctl_table appldata_dir_table[] = {
  */
 DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
 static atomic_t appldata_expire_count = ATOMIC_INIT(0);
-static struct appldata_mod_vtimer_args {
-        struct vtimer_list *timer;
-        u64 expires;
-} appldata_mod_vtimer_args;
 static spinlock_t appldata_timer_lock = SPIN_LOCK_UNLOCKED;
 static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -154,7 +152,7 @@ static LIST_HEAD(appldata_ops_list);
 static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
 {
         P_DEBUG(" -= Timer =-\n");
-        P_DEBUG("CPU: %i, expire: %i\n", smp_processor_id(),
+        P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
                 atomic_read(&appldata_expire_count));
         if (atomic_dec_and_test(&appldata_expire_count)) {
                 atomic_set(&appldata_expire_count, num_online_cpus());
@@ -187,17 +185,6 @@ static void appldata_tasklet_function(unsigned long data)
         spin_unlock(&appldata_ops_lock);
 }
 
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
- * accepts only one parameter.
- */
-static void appldata_mod_vtimer_wrap(void *p) {
-        struct appldata_mod_vtimer_args *args = p;
-        mod_virt_timer(args->timer, args->expires);
-}
-
 /*
  * appldata_diag()
  *
@@ -247,6 +234,79 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
 /****************************** /proc stuff **********************************/
 
+/*
+ * __appldata_mod_vtimer_wrap()
+ *
+ * wrapper function for mod_virt_timer(), because smp_call_function_on()
+ * accepts only one parameter.
+ */
+static void __appldata_mod_vtimer_wrap(void *p) {
+        struct {
+                struct vtimer_list *timer;
+                u64 expires;
+        } *args = p;
+        mod_virt_timer(args->timer, args->expires);
+}
+
+#define APPLDATA_ADD_TIMER      0
+#define APPLDATA_DEL_TIMER      1
+#define APPLDATA_MOD_TIMER      2
+
+/*
+ * __appldata_vtimer_setup()
+ *
+ * Add, delete or modify virtual timers on all online cpus.
+ * The caller needs to get the appldata_timer_lock spinlock.
+ */
+static void
+__appldata_vtimer_setup(int cmd)
+{
+        u64 per_cpu_interval;
+        int i;
+
+        switch (cmd) {
+        case APPLDATA_ADD_TIMER:
+                if (appldata_timer_active)
+                        break;
+                per_cpu_interval = (u64) (appldata_interval*1000 /
+                                          num_online_cpus()) * TOD_MICRO;
+                for_each_online_cpu(i) {
+                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
+                        smp_call_function_on(add_virt_timer_periodic,
+                                             &per_cpu(appldata_timer, i),
+                                             0, 1, i);
+                }
+                appldata_timer_active = 1;
+                P_INFO("Monitoring timer started.\n");
+                break;
+        case APPLDATA_DEL_TIMER:
+                for_each_online_cpu(i)
+                        del_virt_timer(&per_cpu(appldata_timer, i));
+                if (!appldata_timer_active)
+                        break;
+                appldata_timer_active = 0;
+                atomic_set(&appldata_expire_count, num_online_cpus());
+                P_INFO("Monitoring timer stopped.\n");
+                break;
+        case APPLDATA_MOD_TIMER:
+                per_cpu_interval = (u64) (appldata_interval*1000 /
+                                          num_online_cpus()) * TOD_MICRO;
+                if (!appldata_timer_active)
+                        break;
+                for_each_online_cpu(i) {
+                        struct {
+                                struct vtimer_list *timer;
+                                u64 expires;
+                        } args;
+
+                        args.timer = &per_cpu(appldata_timer, i);
+                        args.expires = per_cpu_interval;
+                        smp_call_function_on(__appldata_mod_vtimer_wrap,
+                                             &args, 0, 1, i);
+                }
+        }
+}
+
 /*
  * appldata_timer_handler()
  *
@@ -256,9 +316,8 @@ static int
 appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                        void __user *buffer, size_t *lenp)
 {
-        int len, i;
+        int len;
         char buf[2];
-        u64 per_cpu_interval;
 
         if (!*lenp || filp->f_pos) {
                 *lenp = 0;
@@ -272,30 +331,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
                         return -EFAULT;
                 goto out;
         }
         len = *lenp;
         if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
                 return -EFAULT;
         spin_lock(&appldata_timer_lock);
-        per_cpu_interval = (u64) (appldata_interval*1000 /
-                                  num_online_cpus()) * TOD_MICRO;
-        if ((buf[0] == '1') && (!appldata_timer_active)) {
-                for (i = 0; i < num_online_cpus(); i++) {
-                        per_cpu(appldata_timer, i).expires = per_cpu_interval;
-                        smp_call_function_on(add_virt_timer_periodic,
-                                             &per_cpu(appldata_timer, i),
-                                             0, 1, i);
-                }
-                appldata_timer_active = 1;
-                P_INFO("Monitoring timer started.\n");
-        } else if ((buf[0] == '0') && (appldata_timer_active)) {
-                for (i = 0; i < num_online_cpus(); i++) {
-                        del_virt_timer(&per_cpu(appldata_timer, i));
-                }
-                appldata_timer_active = 0;
-                P_INFO("Monitoring timer stopped.\n");
-        }
+        if (buf[0] == '1')
+                __appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+        else if (buf[0] == '0')
+                __appldata_vtimer_setup(APPLDATA_DEL_TIMER);
         spin_unlock(&appldata_timer_lock);
 out:
         *lenp = len;
@@ -313,9 +356,8 @@ static int
 appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
                           void __user *buffer, size_t *lenp)
 {
-        int len, i, interval;
+        int len, interval;
         char buf[16];
-        u64 per_cpu_interval;
 
         if (!*lenp || filp->f_pos) {
                 *lenp = 0;
@@ -340,20 +382,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
         }
         spin_lock(&appldata_timer_lock);
-        per_cpu_interval = (u64) (interval*1000 / num_online_cpus()) * TOD_MICRO;
         appldata_interval = interval;
-        if (appldata_timer_active) {
-                for (i = 0; i < num_online_cpus(); i++) {
-                        appldata_mod_vtimer_args.timer =
-                                &per_cpu(appldata_timer, i);
-                        appldata_mod_vtimer_args.expires =
-                                per_cpu_interval;
-                        smp_call_function_on(
-                                appldata_mod_vtimer_wrap,
-                                &appldata_mod_vtimer_args,
-                                0, 1, i);
-                }
-        }
+        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
         spin_unlock(&appldata_timer_lock);
 
         P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
@@ -564,6 +594,56 @@ void appldata_unregister_ops(struct appldata_ops *ops)
 /******************************* init / exit *********************************/
 
+static void
+appldata_online_cpu(int cpu)
+{
+        init_virt_timer(&per_cpu(appldata_timer, cpu));
+        per_cpu(appldata_timer, cpu).function = appldata_timer_function;
+        per_cpu(appldata_timer, cpu).data = (unsigned long)
+                &appldata_tasklet_struct;
+        atomic_inc(&appldata_expire_count);
+        spin_lock(&appldata_timer_lock);
+        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+        spin_unlock(&appldata_timer_lock);
+}
+
+static void
+appldata_offline_cpu(int cpu)
+{
+        del_virt_timer(&per_cpu(appldata_timer, cpu));
+        if (atomic_dec_and_test(&appldata_expire_count)) {
+                atomic_set(&appldata_expire_count, num_online_cpus());
+                tasklet_schedule(&appldata_tasklet_struct);
+        }
+        spin_lock(&appldata_timer_lock);
+        __appldata_vtimer_setup(APPLDATA_MOD_TIMER);
+        spin_unlock(&appldata_timer_lock);
+}
+
+static int
+appldata_cpu_notify(struct notifier_block *self,
+                    unsigned long action, void *hcpu)
+{
+        switch (action) {
+        case CPU_ONLINE:
+                appldata_online_cpu((long) hcpu);
+                break;
+#ifdef CONFIG_HOTPLUG_CPU
+        case CPU_DEAD:
+                appldata_offline_cpu((long) hcpu);
+                break;
+#endif
+        default:
+                break;
+        }
+        return NOTIFY_OK;
+}
+
+static struct notifier_block __devinitdata appldata_nb = {
+        .notifier_call = appldata_cpu_notify,
+};
+
 /*
  * appldata_init()
  *
@@ -576,13 +656,11 @@ static int __init appldata_init(void)
         P_DEBUG("sizeof(parameter_list) = %lu\n",
                 sizeof(struct appldata_parameter_list));
 
-        for (i = 0; i < num_online_cpus(); i++) {
-                init_virt_timer(&per_cpu(appldata_timer, i));
-                per_cpu(appldata_timer, i).function = appldata_timer_function;
-                per_cpu(appldata_timer, i).data = (unsigned long)
-                        &appldata_tasklet_struct;
-        }
-        atomic_set(&appldata_expire_count, num_online_cpus());
+        for_each_online_cpu(i)
+                appldata_online_cpu(i);
+
+        /* Register cpu hotplug notifier */
+        register_cpu_notifier(&appldata_nb);
 
         appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
 #ifdef MODULE
@@ -623,9 +701,9 @@ static void __exit appldata_exit(void)
         }
         spin_unlock_bh(&appldata_ops_lock);
 
-        for (i = 0; i < num_online_cpus(); i++) {
-                del_virt_timer(&per_cpu(appldata_timer, i));
-        }
+        for_each_online_cpu(i)
+                appldata_offline_cpu(i);
+
         appldata_timer_active = 0;
 
         unregister_sysctl_table(appldata_sysctl_header);
...
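
The appldata changes above follow the generic CPU-hotplug notifier pattern: a notifier_block is registered once, per-cpu state is armed on CPU_ONLINE and torn down on CPU_DEAD. A minimal sketch of that pattern against this 2.6-era API (the my_* hooks are hypothetical placeholders, not part of the patch):

    #include <linux/cpu.h>
    #include <linux/notifier.h>

    static void my_online_cpu(int cpu)  { /* arm per-cpu resources */ }
    static void my_offline_cpu(int cpu) { /* release per-cpu resources */ }

    static int my_cpu_notify(struct notifier_block *self,
                             unsigned long action, void *hcpu)
    {
            switch (action) {
            case CPU_ONLINE:
                    my_online_cpu((long) hcpu);
                    break;
    #ifdef CONFIG_HOTPLUG_CPU
            case CPU_DEAD:
                    my_offline_cpu((long) hcpu);
                    break;
    #endif
            default:
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block my_nb = {
            .notifier_call = my_cpu_notify,
    };

    /* in the init path: register_cpu_notifier(&my_nb); */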
@@ -98,8 +98,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
                 LOAD_INT(a2), LOAD_FRAC(a2));
         P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
 
-        for (i = 0; i < NR_CPUS; i++) {
-                if (!cpu_online(i)) continue;
+        for (i = 0; i < os_data->nr_cpus; i++) {
                 P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
                         "idle = %u, irq = %u, softirq = %u, iowait = %u\n",
                         i,
@@ -124,7 +123,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
  */
 static void appldata_get_os_data(void *data)
 {
-        int i;
+        int i, j;
         struct appldata_os_data *os_data;
 
         os_data = data;
@@ -139,21 +138,23 @@ static void appldata_get_os_data(void *data)
         os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
         os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
 
-        for (i = 0; i < num_online_cpus(); i++) {
-                os_data->os_cpu[i].per_cpu_user =
+        j = 0;
+        for_each_online_cpu(i) {
+                os_data->os_cpu[j].per_cpu_user =
                         kstat_cpu(i).cpustat.user;
-                os_data->os_cpu[i].per_cpu_nice =
+                os_data->os_cpu[j].per_cpu_nice =
                         kstat_cpu(i).cpustat.nice;
-                os_data->os_cpu[i].per_cpu_system =
+                os_data->os_cpu[j].per_cpu_system =
                         kstat_cpu(i).cpustat.system;
-                os_data->os_cpu[i].per_cpu_idle =
+                os_data->os_cpu[j].per_cpu_idle =
                         kstat_cpu(i).cpustat.idle;
-                os_data->os_cpu[i].per_cpu_irq =
+                os_data->os_cpu[j].per_cpu_irq =
                         kstat_cpu(i).cpustat.irq;
-                os_data->os_cpu[i].per_cpu_softirq =
+                os_data->os_cpu[j].per_cpu_softirq =
                         kstat_cpu(i).cpustat.softirq;
-                os_data->os_cpu[i].per_cpu_iowait =
+                os_data->os_cpu[j].per_cpu_iowait =
                         kstat_cpu(i).cpustat.iowait;
+                j++;
         }
 
         os_data->timestamp = get_clock();
...
@@ -46,7 +46,6 @@ CONFIG_MODULES=y
 CONFIG_OBSOLETE_MODPARM=y
 # CONFIG_MODVERSIONS is not set
 CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
 
 #
 # Base setup
@@ -63,6 +62,7 @@ CONFIG_MARCH_G5=y
 # CONFIG_MARCH_Z990 is not set
 CONFIG_SMP=y
 CONFIG_NR_CPUS=32
+# CONFIG_HOTPLUG_CPU is not set
 CONFIG_MATHEMU=y
 
 #
@@ -510,6 +510,7 @@ CONFIG_CRYPTO=y
 # CONFIG_CRYPTO_AES is not set
 # CONFIG_CRYPTO_CAST5 is not set
 # CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
 # CONFIG_CRYPTO_ARC4 is not set
 # CONFIG_CRYPTO_DEFLATE is not set
 # CONFIG_CRYPTO_MICHAEL_MIC is not set
...
@@ -17,6 +17,7 @@
 #include <linux/config.h>
 #include <linux/compiler.h>
+#include <linux/cpu.h>
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -122,6 +123,11 @@ void default_idle(void)
         /* enable monitor call class 0 */
         __ctl_set_bit(8, 15);
 
+#ifdef CONFIG_HOTPLUG_CPU
+        if (cpu_is_offline(smp_processor_id()))
+                cpu_die();
+#endif
+
         /*
          * Wait for external, I/O or machine check interrupt and
          * switch off machine check bit after the wait has ended.
...
@@ -50,17 +50,6 @@ EXPORT_SYMBOL(overflowuid);
 EXPORT_SYMBOL(overflowgid);
 EXPORT_SYMBOL(empty_zero_page);
 
-/*
- * virtual CPU timer
- */
-#ifdef CONFIG_VIRT_TIMER
-EXPORT_SYMBOL(init_virt_timer);
-EXPORT_SYMBOL(add_virt_timer);
-EXPORT_SYMBOL(add_virt_timer_periodic);
-EXPORT_SYMBOL(mod_virt_timer);
-EXPORT_SYMBOL(del_virt_timer);
-#endif
-
 /*
  * misc.
  */
...
@@ -58,8 +58,6 @@ struct {
 } memory_chunk[MEMORY_CHUNKS] = { { 0 } };
 #define CHUNK_READ_WRITE 0
 #define CHUNK_READ_ONLY 1
-int cpus_initialized = 0;
-static cpumask_t cpu_initialized;
 volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
 
 /*
@@ -85,15 +83,8 @@ static struct resource data_resource = { "Kernel data", 0, 0 };
  */
 void __devinit cpu_init (void)
 {
-        int nr = smp_processor_id();
         int addr = hard_smp_processor_id();
 
-        if (cpu_test_and_set(nr,cpu_initialized)) {
-                printk("CPU#%d ALREADY INITIALIZED!!!!!!!!!\n", nr);
-                for (;;) local_irq_enable();
-        }
-        cpus_initialized++;
-
         /*
          * Store processor id in lowcore (used e.g. in timer_interrupt)
          */
...
@@ -5,6 +5,7 @@
  * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *            Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  * based on other smp stuff by
  * (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
@@ -57,6 +58,8 @@ cpumask_t cpu_online_map;
 cpumask_t cpu_possible_map;
 unsigned long cache_decay_ticks = 0;
 
+static struct task_struct *current_set[NR_CPUS];
+
 EXPORT_SYMBOL(cpu_online_map);
 
 /*
@@ -124,7 +127,6 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
         struct call_data_struct data;
         int cpus = num_online_cpus()-1;
 
-        /* FIXME: get cpu lock -hc */
         if (cpus <= 0)
                 return 0;
@@ -211,7 +213,6 @@ EXPORT_SYMBOL(smp_call_function_on);
 static inline void do_send_stop(void)
 {
-        unsigned long dummy;
         int i, rc;
 
         /* stop all processors */
@@ -219,24 +220,22 @@ static inline void do_send_stop(void)
                 if (!cpu_online(i) || smp_processor_id() == i)
                         continue;
                 do {
-                        rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+                        rc = signal_processor(i, sigp_stop);
                 } while (rc == sigp_busy);
         }
 }
 
 static inline void do_store_status(void)
 {
-        unsigned long low_core_addr;
-        unsigned long dummy;
         int i, rc;
 
         /* store status of all processors in their lowcores (real 0) */
         for (i = 0; i < NR_CPUS; i++) {
                 if (!cpu_online(i) || smp_processor_id() == i)
                         continue;
-                low_core_addr = (unsigned long) lowcore_ptr[i];
                 do {
-                        rc = signal_processor_ps(&dummy, low_core_addr, i,
+                        rc = signal_processor_p(
+                                (__u32)(unsigned long) lowcore_ptr[i], i,
                                 sigp_store_status_at_address);
                 } while(rc == sigp_busy);
         }
@@ -265,8 +264,10 @@ static cpumask_t cpu_restart_map;
 static void do_machine_restart(void * __unused)
 {
+        static atomic_t cpuid = ATOMIC_INIT(-1);
+
         cpu_clear(smp_processor_id(), cpu_restart_map);
-        if (smp_processor_id() == 0) {
+        if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                 /* Wait for all other cpus to enter do_machine_restart. */
                 while (!cpus_empty(cpu_restart_map))
                         cpu_relax();
@@ -307,7 +308,9 @@ static void do_wait_for_stop(void)
 static void do_machine_halt(void * __unused)
 {
-        if (smp_processor_id() == 0) {
+        static atomic_t cpuid = ATOMIC_INIT(-1);
+
+        if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                 smp_send_stop();
                 if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
                         cpcmd(vmhalt_cmd, NULL, 0);
@@ -324,7 +327,9 @@ void machine_halt_smp(void)
 static void do_machine_power_off(void * __unused)
 {
-        if (smp_processor_id() == 0) {
+        static atomic_t cpuid = ATOMIC_INIT(-1);
+
+        if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                 smp_send_stop();
                 if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
                         cpcmd(vmpoff_cmd, NULL, 0);
@@ -482,7 +487,24 @@ void smp_ctl_clear_bit(int cr, int bit) {
  * Lets check how many CPUs we have.
  */
 
-void __init smp_check_cpus(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+        int cpu;
+
+        /*
+         * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+         */
+        for (cpu = 1; cpu < max_cpus; cpu++)
+                cpu_set(cpu, cpu_possible_map);
+}
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
 {
         int curr_cpu, num_cpus;
         __u16 boot_cpu_addr;
@@ -505,6 +527,8 @@ void __init smp_check_cpus(unsigned int max_cpus)
         printk("Boot cpu address %2X\n", boot_cpu_addr);
 }
 
+#endif /* CONFIG_HOTPLUG_CPU */
+
 /*
  * Activate a secondary processor.
  */
@@ -536,26 +560,95 @@ int __devinit start_secondary(void *cpuvoid)
         return cpu_idle(NULL);
 }
 
-static struct task_struct *__devinit fork_by_hand(void)
+static void __init smp_create_idle(unsigned int cpu)
 {
         struct pt_regs regs;
-        /* don't care about the psw and regs settings since we'll never
-           reschedule the forked task. */
-        memset(&regs,0,sizeof(struct pt_regs));
-        return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+        struct task_struct *p;
+
+        /*
+         * don't care about the psw and regs settings since we'll never
+         * reschedule the forked task.
+         */
+        memset(&regs, 0, sizeof(struct pt_regs));
+        p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+        if (IS_ERR(p))
+                panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+        wake_up_forked_process(p);
+        init_idle(p, cpu);
+        unhash_process(p);
+        current_set[cpu] = p;
+}
+
+/* Reserving and releasing of CPUs */
+
+static atomic_t smp_cpu_reserved[NR_CPUS];
+
+int
+smp_get_cpu(cpumask_t cpu_mask)
+{
+        int val, cpu;
+
+        /* Try to find an already reserved cpu. */
+        for_each_cpu_mask(cpu, cpu_mask) {
+                while ((val = atomic_read(&smp_cpu_reserved[cpu])) != 0) {
+                        if (!atomic_compare_and_swap(val, val + 1,
+                                                     &smp_cpu_reserved[cpu]))
+                                /* Found one. */
+                                goto out;
+                }
+        }
+        /* Reserve a new cpu from cpu_mask. */
+        for_each_cpu_mask(cpu, cpu_mask) {
+                atomic_inc(&smp_cpu_reserved[cpu]);
+                if (cpu_online(cpu))
+                        goto out;
+                atomic_dec(&smp_cpu_reserved[cpu]);
+        }
+        cpu = -ENODEV;
+out:
+        return cpu;
+}
+
+void
+smp_put_cpu(int cpu)
+{
+        atomic_dec(&smp_cpu_reserved[cpu]);
 }
-int __cpu_up(unsigned int cpu)
+static inline int
+cpu_stopped(int cpu)
+{
+        __u32 status;
+
+        /* Check for stopped state */
+        if (signal_processor_ps(&status, 0, cpu, sigp_sense) ==
+            sigp_status_stored) {
+                if (status & 0x40)
+                        return 1;
+        }
+        return 0;
+}
+
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
 {
         struct task_struct *idle;
         struct _lowcore *cpu_lowcore;
         sigp_ccode ccode;
+        int curr_cpu;
 
-        /*
-         * Set prefix page for new cpu
-         */
-        ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
+        for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+                __cpu_logical_map[cpu] = (__u16) curr_cpu;
+                if (cpu_stopped(cpu))
+                        break;
+        }
+
+        if (!cpu_stopped(cpu))
+                return -ENODEV;
+
+        ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
                                    cpu, sigp_set_prefix);
         if (ccode){
                 printk("sigp_set_prefix failed for cpu %d "
@@ -564,23 +657,7 @@ int __cpu_up(unsigned int cpu)
                 return -EIO;
         }
 
-        /* We can't use kernel_thread since we must _avoid_ to reschedule
-           the child. */
-        idle = fork_by_hand();
-        if (IS_ERR(idle)){
-                printk("failed fork for CPU %d", cpu);
-                return -EIO;
-        }
-        wake_up_forked_process(idle);
-
-        /*
-         * We remove it from the pidhash and the runqueue
-         * once we got the process:
-         */
-        init_idle(idle, cpu);
-        unhash_process(idle);
-
+        idle = current_set[cpu];
         cpu_lowcore = lowcore_ptr[cpu];
         cpu_lowcore->save_area[15] = idle->thread.ksp;
         cpu_lowcore->kernel_stack = (unsigned long)
@@ -599,6 +676,65 @@ int __cpu_up(unsigned int cpu)
         return 0;
 }
 
+int
+__cpu_disable(void)
+{
+        unsigned long flags;
+        ec_creg_mask_parms cr_parms;
+
+        local_irq_save(flags);
+        if (atomic_read(&smp_cpu_reserved[smp_processor_id()])) {
+                local_irq_restore(flags);
+                return -EBUSY;
+        }
+
+        /* disable all external interrupts */
+
+        cr_parms.start_ctl = 0;
+        cr_parms.end_ctl = 0;
+        cr_parms.orvals[0] = 0;
+        cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
+                                1<<11 | 1<<10 | 1<< 6 | 1<< 4);
+        smp_ctl_bit_callback(&cr_parms);
+
+        /* disable all I/O interrupts */
+
+        cr_parms.start_ctl = 6;
+        cr_parms.end_ctl = 6;
+        cr_parms.orvals[6] = 0;
+        cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
+                                1<<27 | 1<<26 | 1<<25 | 1<<24);
+        smp_ctl_bit_callback(&cr_parms);
+
+        /* disable most machine checks */
+
+        cr_parms.start_ctl = 14;
+        cr_parms.end_ctl = 14;
+        cr_parms.orvals[14] = 0;
+        cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
+        smp_ctl_bit_callback(&cr_parms);
+
+        local_irq_restore(flags);
+        return 0;
+}
+
+void
+__cpu_die(unsigned int cpu)
+{
+        /* Wait until target cpu is down */
+        while (!cpu_stopped(cpu));
+        printk("Processor %d spun down\n", cpu);
+}
+
+void
+cpu_die(void)
+{
+        signal_processor(smp_processor_id(), sigp_stop);
+        BUG();
+        for(;;);
+}
+
 /*
  * Cycle through the processors and setup structures.
  */
@@ -606,6 +742,7 @@ int __cpu_up(unsigned int cpu)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
         unsigned long async_stack;
+        unsigned int cpu;
         int i;
 
         /* request the 0x1202 external interrupt */
@@ -632,13 +769,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
                 lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
         }
         set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+
+        for_each_cpu(cpu)
+                if (cpu != smp_processor_id())
+                        smp_create_idle(cpu);
 }
 
 void __devinit smp_prepare_boot_cpu(void)
 {
-        cpu_set(smp_processor_id(), cpu_online_map);
-        cpu_set(smp_processor_id(), cpu_possible_map);
-        S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
+        BUG_ON(smp_processor_id() != 0);
+
+        cpu_set(0, cpu_online_map);
+        cpu_set(0, cpu_possible_map);
+        S390_lowcore.percpu_offset = __per_cpu_offset[0];
+        current_set[0] = current;
 }
 
 void smp_cpus_done(unsigned int max_cpus)
@@ -679,3 +823,6 @@ EXPORT_SYMBOL(lowcore_ptr);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
 EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
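
smp_get_cpu() and smp_put_cpu() above keep a per-cpu reference count; __cpu_disable() returns -EBUSY for a reserved cpu, so a reservation pins a logical cpu online. A hedged sketch of the intended usage (the cmm hunk below does exactly this for cpu 0; my_pinned_work is a hypothetical caller):

    #include <linux/smp.h>
    #include <linux/cpumask.h>

    static int my_pinned_work(void)
    {
            int cpu;

            /* smp_get_cpu() returns the reserved cpu number, or -ENODEV. */
            cpu = smp_get_cpu(cpumask_of_cpu(0));
            if (cpu < 0)
                    return cpu;
            /* ... work that must not lose this cpu to hotplug ... */
            smp_put_cpu(cpu);       /* allow the cpu to go offline again */
            return 0;
    }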
@@ -407,6 +407,14 @@ struct ctl_table_header *cmm_sysctl_header;
 static int
 cmm_init (void)
 {
+        int rc;
+
+        /* Prevent logical cpu 0 from being set offline. */
+        rc = smp_get_cpu(cpumask_of_cpu(0));
+        if (rc) {
+                printk(KERN_ERR "CMM: unable to reserve cpu 0\n");
+                return rc;
+        }
 #ifdef CONFIG_CMM_PROC
         cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
 #endif
@@ -430,6 +438,8 @@ cmm_exit(void)
 #ifdef CONFIG_CMM_IUCV
         smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
 #endif
+        /* Allow logical cpu 0 to be set offline again. */
+        smp_put_cpu(0);
 }
 
 module_init(cmm_init);
...
@@ -494,11 +494,12 @@ static struct sclp_register sclp_state_change_event = {
 static void
 do_load_quiesce_psw(void * __unused)
 {
+        static atomic_t cpuid = ATOMIC_INIT(-1);
         psw_t quiesce_psw;
-        unsigned long status;
+        __u32 status;
         int i;
 
-        if (smp_processor_id() != 0)
+        if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
                 signal_processor(smp_processor_id(), sigp_stop);
         /* Wait for all other cpus to enter stopped state */
         i = 1;
@@ -511,7 +512,7 @@ do_load_quiesce_psw(void * __unused)
                 case sigp_order_code_accepted:
                 case sigp_status_stored:
                         /* Check for stopped and check stop state */
-                        if (test_bit(6, &status) || test_bit(4, &status))
+                        if (status & 0x50)
                                 i++;
                         break;
                 case sigp_busy:
...
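
The recurring "static atomic_t cpuid = ATOMIC_INIT(-1)" idiom in the smp.c and sclp hunks above elects a single leader cpu instead of hard-coding cpu 0, presumably because the code can no longer assume logical cpu 0 is online once hotplug exists. The first cpu whose compare-and-swap of -1 to its own id succeeds (s390's atomic_compare_and_swap() returns 0 on success) does the final work; the rest stop themselves. Sketched in isolation (my_final_work is hypothetical):

    static void my_final_work(void *unused)
    {
            static atomic_t cpuid = ATOMIC_INIT(-1);

            /* First cpu to swap -1 -> its own id becomes the leader. */
            if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
                    /* leader: wait for the others, then restart/halt/... */
            } else {
                    /* everyone else parks itself */
                    signal_processor(smp_processor_id(), sigp_stop);
            }
    }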
 /*
- * $Id: iucv.c,v 1.33 2004/05/24 10:19:18 braunu Exp $
+ * $Id: iucv.c,v 1.34 2004/06/24 10:53:48 braunu Exp $
  *
  * IUCV network driver
  *
@@ -29,7 +29,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  *
- * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.33 $
+ * RELEASE-TAG: IUCV lowlevel driver $Revision: 1.34 $
  *
  */
@@ -177,9 +177,11 @@ static handler **iucv_pathid_table;
 static unsigned long max_connections;
 
 /**
- * declare_flag: is 0 when iucv_declare_buffer has not been called
+ * iucv_cpuid: contains the logical cpu number of the cpu which
+ * has declared the iucv buffer by issuing DECLARE_BUFFER.
+ * If no cpu has done the initialization iucv_cpuid contains -1.
  */
-static int declare_flag;
+static int iucv_cpuid = -1;
+
 /**
  * register_flag: is 0 when external interrupt has not been registered
  */
@@ -352,7 +354,7 @@ do { \
 static void
 iucv_banner(void)
 {
-        char vbuf[] = "$Revision: 1.33 $";
+        char vbuf[] = "$Revision: 1.34 $";
         char *version = vbuf;
 
         if ((version = strchr(version, ':'))) {
@@ -631,16 +633,16 @@ iucv_remove_pathid(__u16 pathid)
 }
 
 /**
- * iucv_declare_buffer_cpu0
- * Register at VM for subsequent IUCV operations. This is always
- * executed on CPU 0. Called from iucv_declare_buffer().
+ * iucv_declare_buffer_cpuid
+ * Register at VM for subsequent IUCV operations. This is executed
+ * on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
  */
 static void
-iucv_declare_buffer_cpu0 (void *result)
+iucv_declare_buffer_cpuid (void *result)
 {
         iparml_db *parm;
 
-        if (!(result && (smp_processor_id() == 0)))
+        if (smp_processor_id() != iucv_cpuid)
                 return;
         parm = (iparml_db *)grab_param();
         parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
@@ -650,16 +652,17 @@ iucv_declare_buffer_cpuid (void *result)
 }
 
 /**
- * iucv_retrieve_buffer_cpu0:
- * Unregister IUCV usage at VM. This is always executed on CPU 0.
+ * iucv_retrieve_buffer_cpuid:
+ * Unregister IUCV usage at VM. This is always executed on the same
+ * cpu that registered the buffer to VM.
  * Called from iucv_retrieve_buffer().
  */
 static void
-iucv_retrieve_buffer_cpu0 (void *result)
+iucv_retrieve_buffer_cpuid (void *cpu)
 {
         iparml_control *parm;
 
-        if (smp_processor_id() != 0)
+        if (smp_processor_id() != iucv_cpuid)
                 return;
         parm = (iparml_control *)grab_param();
         b2f0(RETRIEVE_BUFFER, parm);
@@ -676,18 +679,22 @@ iucv_retrieve_buffer_cpuid (void *cpu)
 static int
 iucv_declare_buffer (void)
 {
-        ulong b2f0_result = 0x0deadbeef;
+        unsigned long flags;
+        ulong b2f0_result;
 
         iucv_debug(1, "entering");
-        preempt_disable();
-        if (smp_processor_id() == 0)
-                iucv_declare_buffer_cpu0(&b2f0_result);
-        else
-                smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
-        preempt_enable();
-        iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
-        if (b2f0_result == 0x0deadbeef)
-                b2f0_result = 0xaa;
+        spin_lock_irqsave (&iucv_lock, flags);
+        if (iucv_cpuid == -1) {
+                /* Reserve any cpu for use by iucv. */
+                iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
+                spin_unlock_irqrestore (&iucv_lock, flags);
+                smp_call_function(iucv_declare_buffer_cpuid,
+                                  &b2f0_result, 0, 1);
+                iucv_debug(1, "Address of EIB = %p",
+                           iucv_external_int_buffer);
+        } else {
+                spin_unlock_irqrestore (&iucv_lock, flags);
+                b2f0_result = 0;
+        }
         iucv_debug(1, "exiting");
         return b2f0_result;
 }
@@ -702,14 +709,11 @@ static int
 iucv_retrieve_buffer (void)
 {
         iucv_debug(1, "entering");
-        if (declare_flag) {
-                preempt_disable();
-                if (smp_processor_id() == 0)
-                        iucv_retrieve_buffer_cpu0(0);
-                else
-                        smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
-                declare_flag = 0;
-                preempt_enable();
+        if (iucv_cpuid != -1) {
+                smp_call_function(iucv_retrieve_buffer_cpuid, 0, 0, 1);
+                /* Release the cpu reserved by iucv_declare_buffer. */
+                smp_put_cpu(iucv_cpuid);
+                iucv_cpuid = -1;
         }
         iucv_debug(1, "exiting");
         return 0;
@@ -862,7 +866,6 @@ iucv_register_program (__u8 pgmname[16],
                 return NULL;
         }
 
-        if (declare_flag == 0) {
         rc = iucv_declare_buffer();
         if (rc) {
                 char *err = "Unknown";
@@ -884,17 +887,11 @@ iucv_register_program (__u8 pgmname[16],
                 case 0x5c:
                         err = "Paging or storage error";
                         break;
-                case 0xaa:
-                        err = "Function not called";
-                        break;
                 }
                 printk(KERN_WARNING "%s: iucv_declare_buffer "
-                       "returned error 0x%02lx (%s)\n", __FUNCTION__, rc,
-                       err);
+                       "returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
                 return NULL;
         }
-                declare_flag = 1;
-        }
 
         if (register_flag == 0) {
                 /* request the 0x4000 external interrupt */
                 rc = register_external_interrupt (0x4000, iucv_irq_handler);
@@ -2190,11 +2187,11 @@ iucv_send2way_prmmsg_array (__u16 pathid,
 }
 
 void
-iucv_setmask_cpu0 (void *result)
+iucv_setmask_cpuid (void *result)
 {
         iparml_set_mask *parm;
 
-        if (smp_processor_id() != 0)
+        if (smp_processor_id() != iucv_cpuid)
                 return;
         iucv_debug(1, "entering");
@@ -2228,14 +2225,15 @@ iucv_setmask (int SetMaskFlag)
                 ulong result;
                 __u8 param;
         } u;
+        int cpu;
 
         u.param = SetMaskFlag;
-        preempt_disable();
-        if (smp_processor_id() == 0)
-                iucv_setmask_cpu0(&u);
+        cpu = get_cpu();
+        if (cpu == iucv_cpuid)
+                iucv_setmask_cpuid(&u);
         else
-                smp_call_function(iucv_setmask_cpu0, &u, 0, 1);
-        preempt_enable();
+                smp_call_function(iucv_setmask_cpuid, &u, 0, 1);
+        put_cpu();
 
         return u.result;
 }
...
@@ -5,6 +5,7 @@
  * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *            Heiko Carstens (heiko.carstens@de.ibm.com)
  *
  * sigp.h by D.J. Barrow (c) IBM 1999
  * contains routines / structures for signalling other S/390 processors in an
@@ -72,17 +73,10 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
         sigp_ccode ccode;
 
         __asm__ __volatile__(
-#ifndef __s390x__
                 "    sr     1,1\n"        /* parameter=0 in gpr 1 */
                 "    sigp   1,%1,0(%2)\n"
                 "    ipm    %0\n"
                 "    srl    %0,28\n"
-#else /* __s390x__ */
-                "    sgr    1,1\n"        /* parameter=0 in gpr 1 */
-                "    sigp   1,%1,0(%2)\n"
-                "    ipm    %0\n"
-                "    srl    %0,28"
-#endif /* __s390x__ */
                 : "=d"  (ccode)
                 : "d"   (__cpu_logical_map[cpu_addr]), "a" (order_code)
                 : "cc" , "memory", "1" );
@@ -93,23 +87,16 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
  * Signal processor with parameter
  */
 extern __inline__ sigp_ccode
-signal_processor_p(unsigned long parameter,__u16 cpu_addr,
+signal_processor_p(__u32 parameter, __u16 cpu_addr,
                    sigp_order_code order_code)
 {
         sigp_ccode ccode;
 
         __asm__ __volatile__(
-#ifndef __s390x__
                 "    lr     1,%1\n"       /* parameter in gpr 1 */
                 "    sigp   1,%2,0(%3)\n"
                 "    ipm    %0\n"
                 "    srl    %0,28\n"
-#else /* __s390x__ */
-                "    lgr    1,%1\n"       /* parameter in gpr 1 */
-                "    sigp   1,%2,0(%3)\n"
-                "    ipm    %0\n"
-                "    srl    %0,28\n"
-#endif /* __s390x__ */
                 : "=d"  (ccode)
                 : "d"   (parameter), "d" (__cpu_logical_map[cpu_addr]),
                   "a" (order_code)
@@ -121,27 +108,18 @@ signal_processor_p(unsigned long parameter,__u16 cpu_addr,
  * Signal processor with parameter and return status
  */
 extern __inline__ sigp_ccode
-signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
+signal_processor_ps(__u32 *statusptr, __u32 parameter,
                     __u16 cpu_addr, sigp_order_code order_code)
 {
         sigp_ccode ccode;
 
         __asm__ __volatile__(
-#ifndef __s390x__
-                "    sr     2,2\n"        /* clear status so it doesn't contain rubbish if not saved. */
+                "    sr     2,2\n"        /* clear status */
                 "    lr     3,%2\n"       /* parameter in gpr 3 */
                 "    sigp   2,%3,0(%4)\n"
                 "    st     2,%1\n"
                 "    ipm    %0\n"
                 "    srl    %0,28\n"
-#else /* __s390x__ */
-                "    sgr    2,2\n"        /* clear status so it doesn't contain rubbish if not saved. */
-                "    lgr    3,%2\n"       /* parameter in gpr 3 */
-                "    sigp   2,%3,0(%4)\n"
-                "    stg    2,%1\n"
-                "    ipm    %0\n"
-                "    srl    %0,28\n"
-#endif /* __s390x__ */
                 : "=d"  (ccode), "=m" (*statusptr)
                 : "d"   (parameter), "d" (__cpu_logical_map[cpu_addr]),
                   "a" (order_code)
@@ -151,5 +129,3 @@ signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
 }
 
 #endif /* __SIGP__ */
@@ -5,6 +5,7 @@
  * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
  *            Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *            Heiko Carstens (heiko.carstens@de.ibm.com)
  */
 #ifndef __ASM_SMP_H
 #define __ASM_SMP_H
@@ -47,6 +48,9 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 #define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
+extern int smp_get_cpu(cpumask_t cpu_map);
+extern void smp_put_cpu(int cpu);
+
 extern __inline__ __u16 hard_smp_processor_id(void)
 {
         __u16 cpu_address;
@@ -57,10 +61,17 @@ extern __inline__ __u16 hard_smp_processor_id(void)
 #define cpu_logical_map(cpu) (cpu)
 
+extern int __cpu_disable (void);
+extern void __cpu_die (unsigned int cpu);
+extern void cpu_die (void) __attribute__ ((noreturn));
+extern int __cpu_up (unsigned int cpu);
+
 #endif
 
 #ifndef CONFIG_SMP
 #define smp_call_function_on(func,info,nonatomic,wait,cpu)      ({ 0; })
+#define smp_get_cpu(cpu) ({ 0; })
+#define smp_put_cpu(cpu) ({ 0; })
 #endif
 
 #endif