Commit 0ac3fc9f authored by Linus Torvalds's avatar Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 2453a83f b6d199cc
......@@ -107,6 +107,15 @@ config NR_CPUS
This is purely to save memory - each supported CPU adds
approximately sixteen kilobytes to the kernel image.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
default n
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
config MATHEMU
bool "IEEE FPU emulation"
depends on MARCH_G5
......
......@@ -25,6 +25,8 @@
#include <linux/sysctl.h>
#include <asm/timer.h>
//#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include "appldata.h"
......@@ -124,10 +126,6 @@ static struct ctl_table appldata_dir_table[] = {
*/
DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);
static struct appldata_mod_vtimer_args {
struct vtimer_list *timer;
u64 expires;
} appldata_mod_vtimer_args;
static spinlock_t appldata_timer_lock = SPIN_LOCK_UNLOCKED;
static int appldata_interval = APPLDATA_CPU_INTERVAL;
......@@ -154,7 +152,7 @@ static LIST_HEAD(appldata_ops_list);
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
P_DEBUG(" -= Timer =-\n");
P_DEBUG("CPU: %i, expire: %i\n", smp_processor_id(),
P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
atomic_read(&appldata_expire_count));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
......@@ -187,17 +185,6 @@ static void appldata_tasklet_function(unsigned long data)
spin_unlock(&appldata_ops_lock);
}
/*
* appldata_mod_vtimer_wrap()
*
* wrapper function for mod_virt_timer(), because smp_call_function_on()
* accepts only one parameter.
*/
static void appldata_mod_vtimer_wrap(void *p) {
struct appldata_mod_vtimer_args *args = p;
mod_virt_timer(args->timer, args->expires);
}
/*
* appldata_diag()
*
......@@ -247,6 +234,79 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
/****************************** /proc stuff **********************************/
/*
 * __appldata_mod_vtimer_wrap()
 *
 * Wrapper function for mod_virt_timer(), because smp_call_function_on()
 * accepts only one parameter (a single void *), while mod_virt_timer()
 * needs both the timer and the new expiry value.
 *
 * NOTE(review): the anonymous struct layout here must stay in sync with
 * the identical anonymous struct built in __appldata_vtimer_setup()'s
 * APPLDATA_MOD_TIMER case — they are matched only by layout, not by type.
 */
static void __appldata_mod_vtimer_wrap(void *p) {
/* Reinterpret the opaque argument as {timer, expires}. */
struct {
struct vtimer_list *timer;
u64 expires;
} *args = p;
mod_virt_timer(args->timer, args->expires);
}
#define APPLDATA_ADD_TIMER 0
#define APPLDATA_DEL_TIMER 1
#define APPLDATA_MOD_TIMER 2
/*
* __appldata_vtimer_setup()
*
* Add, delete or modify virtual timers on all online cpus.
* The caller needs to get the appldata_timer_lock spinlock.
*/
static void
__appldata_vtimer_setup(int cmd)
{
u64 per_cpu_interval;
int i;
switch (cmd) {
case APPLDATA_ADD_TIMER:
/* Nothing to do if the periodic timers are already running. */
if (appldata_timer_active)
break;
/*
 * The configured interval (milliseconds) is split evenly across
 * all online CPUs and converted to TOD clock units.
 */
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
/* Start one periodic virtual timer on each online CPU. */
for_each_online_cpu(i) {
per_cpu(appldata_timer, i).expires = per_cpu_interval;
smp_call_function_on(add_virt_timer_periodic,
&per_cpu(appldata_timer, i),
0, 1, i);
}
appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
break;
case APPLDATA_DEL_TIMER:
/* Delete the per-CPU timers unconditionally... */
for_each_online_cpu(i)
del_virt_timer(&per_cpu(appldata_timer, i));
/* ...but only reset bookkeeping if they were active. */
if (!appldata_timer_active)
break;
appldata_timer_active = 0;
/* Re-arm the expire counter for the next APPLDATA_ADD_TIMER. */
atomic_set(&appldata_expire_count, num_online_cpus());
P_INFO("Monitoring timer stopped.\n");
break;
case APPLDATA_MOD_TIMER:
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
if (!appldata_timer_active)
break;
/* Re-program each CPU's timer with the new per-CPU interval. */
for_each_online_cpu(i) {
/*
 * Layout must match the anonymous struct consumed by
 * __appldata_mod_vtimer_wrap(). Passing stack storage is
 * presumably safe because smp_call_function_on() is called
 * with wait != 0 — TODO confirm against its definition.
 */
struct {
struct vtimer_list *timer;
u64 expires;
} args;
args.timer = &per_cpu(appldata_timer, i);
args.expires = per_cpu_interval;
smp_call_function_on(__appldata_mod_vtimer_wrap,
&args, 0, 1, i);
}
}
}
/*
* appldata_timer_handler()
*
......@@ -256,9 +316,8 @@ static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
void __user *buffer, size_t *lenp)
{
int len, i;
int len;
char buf[2];
u64 per_cpu_interval;
if (!*lenp || filp->f_pos) {
*lenp = 0;
......@@ -272,30 +331,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
return -EFAULT;
goto out;
}
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
len = *lenp;
if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
return -EFAULT;
spin_lock(&appldata_timer_lock);
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
if ((buf[0] == '1') && (!appldata_timer_active)) {
for (i = 0; i < num_online_cpus(); i++) {
per_cpu(appldata_timer, i).expires = per_cpu_interval;
smp_call_function_on(add_virt_timer_periodic,
&per_cpu(appldata_timer, i),
0, 1, i);
}
appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
} else if ((buf[0] == '0') && (appldata_timer_active)) {
for (i = 0; i < num_online_cpus(); i++) {
del_virt_timer(&per_cpu(appldata_timer, i));
}
appldata_timer_active = 0;
P_INFO("Monitoring timer stopped.\n");
}
if (buf[0] == '1')
__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
else if (buf[0] == '0')
__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
out:
*lenp = len;
......@@ -313,9 +356,8 @@ static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
void __user *buffer, size_t *lenp)
{
int len, i, interval;
int len, interval;
char buf[16];
u64 per_cpu_interval;
if (!*lenp || filp->f_pos) {
*lenp = 0;
......@@ -340,20 +382,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
}
spin_lock(&appldata_timer_lock);
per_cpu_interval = (u64) (interval*1000 / num_online_cpus()) * TOD_MICRO;
appldata_interval = interval;
if (appldata_timer_active) {
for (i = 0; i < num_online_cpus(); i++) {
appldata_mod_vtimer_args.timer =
&per_cpu(appldata_timer, i);
appldata_mod_vtimer_args.expires =
per_cpu_interval;
smp_call_function_on(
appldata_mod_vtimer_wrap,
&appldata_mod_vtimer_args,
0, 1, i);
}
}
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
......@@ -564,6 +594,56 @@ void appldata_unregister_ops(struct appldata_ops *ops)
/******************************* init / exit *********************************/
/*
 * appldata_online_cpu()
 *
 * Set up the per-CPU virtual timer for a CPU that has come online and
 * rebalance the timer interval across all online CPUs.
 */
static void
appldata_online_cpu(int cpu)
{
init_virt_timer(&per_cpu(appldata_timer, cpu));
per_cpu(appldata_timer, cpu).function = appldata_timer_function;
per_cpu(appldata_timer, cpu).data = (unsigned long)
&appldata_tasklet_struct;
/* One more CPU must now fire before the tasklet is scheduled. */
atomic_inc(&appldata_expire_count);
spin_lock(&appldata_timer_lock);
/* Redistribute the interval over the new number of online CPUs. */
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
}
/*
 * appldata_offline_cpu()
 *
 * Tear down the per-CPU virtual timer of a CPU that went offline and
 * rebalance the timer interval across the remaining online CPUs.
 */
static void
appldata_offline_cpu(int cpu)
{
del_virt_timer(&per_cpu(appldata_timer, cpu));
/*
 * If this CPU was the last one outstanding in the current period,
 * complete the period on its behalf: reset the counter and run the
 * data-gathering tasklet.
 */
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
tasklet_schedule(&appldata_tasklet_struct);
}
spin_lock(&appldata_timer_lock);
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
}
/*
 * appldata_cpu_notify()
 *
 * CPU hotplug notifier callback: attach/detach appldata's per-CPU timer
 * when CPUs come online or die. Always returns NOTIFY_OK.
 */
static int
appldata_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_ONLINE:
appldata_online_cpu((long) hcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
/* CPU_DEAD is only delivered when CPU hotplug is configured. */
case CPU_DEAD:
appldata_offline_cpu((long) hcpu);
break;
#endif
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __devinitdata appldata_nb = {
.notifier_call = appldata_cpu_notify,
};
/*
* appldata_init()
*
......@@ -576,13 +656,11 @@ static int __init appldata_init(void)
P_DEBUG("sizeof(parameter_list) = %lu\n",
sizeof(struct appldata_parameter_list));
for (i = 0; i < num_online_cpus(); i++) {
init_virt_timer(&per_cpu(appldata_timer, i));
per_cpu(appldata_timer, i).function = appldata_timer_function;
per_cpu(appldata_timer, i).data = (unsigned long)
&appldata_tasklet_struct;
}
atomic_set(&appldata_expire_count, num_online_cpus());
for_each_online_cpu(i)
appldata_online_cpu(i);
/* Register cpu hotplug notifier */
register_cpu_notifier(&appldata_nb);
appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
......@@ -623,9 +701,9 @@ static void __exit appldata_exit(void)
}
spin_unlock_bh(&appldata_ops_lock);
for (i = 0; i < num_online_cpus(); i++) {
del_virt_timer(&per_cpu(appldata_timer, i));
}
for_each_online_cpu(i)
appldata_offline_cpu(i);
appldata_timer_active = 0;
unregister_sysctl_table(appldata_sysctl_header);
......
......@@ -98,8 +98,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
LOAD_INT(a2), LOAD_FRAC(a2));
P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i)) continue;
for (i = 0; i < os_data->nr_cpus; i++) {
P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
"idle = %u, irq = %u, softirq = %u, iowait = %u\n",
i,
......@@ -124,7 +123,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
*/
static void appldata_get_os_data(void *data)
{
int i;
int i, j;
struct appldata_os_data *os_data;
os_data = data;
......@@ -139,21 +138,23 @@ static void appldata_get_os_data(void *data)
os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
for (i = 0; i < num_online_cpus(); i++) {
os_data->os_cpu[i].per_cpu_user =
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
kstat_cpu(i).cpustat.user;
os_data->os_cpu[i].per_cpu_nice =
os_data->os_cpu[j].per_cpu_nice =
kstat_cpu(i).cpustat.nice;
os_data->os_cpu[i].per_cpu_system =
os_data->os_cpu[j].per_cpu_system =
kstat_cpu(i).cpustat.system;
os_data->os_cpu[i].per_cpu_idle =
os_data->os_cpu[j].per_cpu_idle =
kstat_cpu(i).cpustat.idle;
os_data->os_cpu[i].per_cpu_irq =
os_data->os_cpu[j].per_cpu_irq =
kstat_cpu(i).cpustat.irq;
os_data->os_cpu[i].per_cpu_softirq =
os_data->os_cpu[j].per_cpu_softirq =
kstat_cpu(i).cpustat.softirq;
os_data->os_cpu[i].per_cpu_iowait =
os_data->os_cpu[j].per_cpu_iowait =
kstat_cpu(i).cpustat.iowait;
j++;
}
os_data->timestamp = get_clock();
......
......@@ -46,7 +46,6 @@ CONFIG_MODULES=y
CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
#
# Base setup
......@@ -63,6 +62,7 @@ CONFIG_MARCH_G5=y
# CONFIG_MARCH_Z990 is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=32
# CONFIG_HOTPLUG_CPU is not set
CONFIG_MATHEMU=y
#
......@@ -510,6 +510,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
......
......@@ -23,6 +23,8 @@ obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o
obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o
obj-$(CONFIG_VIRT_TIMER) += vtime.o
#
# This is just to get the dependencies...
#
......
......@@ -17,6 +17,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
......@@ -34,6 +35,8 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
......@@ -41,9 +44,7 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#if defined(CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
#include <asm/timer.h>
#endif
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
......@@ -68,13 +69,39 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
}
/*
* The idle loop on a S390...
* Need to know about CPUs going idle?
*/
/* Head of the notifier chain called on CPU idle state transitions. */
static struct notifier_block *idle_chain;
/* Register a callback to be notified of CPU_IDLE/CPU_NOT_IDLE events. */
int register_idle_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
/* Remove a previously registered idle notifier. */
int unregister_idle_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
/*
 * do_monitor_call()
 *
 * Program-check handler for monitor calls (pgm_check_table[0x40], see
 * trap_init). A monitor call here signals that an idle CPU is waking up:
 * disable monitor call class 0 again and tell the idle notifier chain
 * that this CPU is no longer idle.
 */
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
}
/*
* The idle loop on a S390...
*/
void default_idle(void)
{
psw_t wait_psw;
unsigned long reg;
int cpu, rc;
local_irq_disable();
if (need_resched()) {
......@@ -83,14 +110,22 @@ void default_idle(void)
return;
}
#if defined(CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
/*
* hook to stop timers that should not tick while CPU is idle
*/
if (stop_timers()) {
/* CPU is going idle. */
cpu = smp_processor_id();
rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
BUG();
if (rc != NOTIFY_OK) {
local_irq_enable();
return;
}
/* enable monitor call class 0 */
__ctl_set_bit(8, 15);
#ifdef CONFIG_HOTPLUG_CPU
if (cpu_is_offline(smp_processor_id()))
cpu_die();
#endif
/*
......
......@@ -19,9 +19,6 @@
#ifdef CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
/*
* memory management
......@@ -53,17 +50,6 @@ EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
EXPORT_SYMBOL(empty_zero_page);
/*
* virtual CPU timer
*/
#ifdef CONFIG_VIRT_TIMER
EXPORT_SYMBOL(init_virt_timer);
EXPORT_SYMBOL(add_virt_timer);
EXPORT_SYMBOL(add_virt_timer_periodic);
EXPORT_SYMBOL(mod_virt_timer);
EXPORT_SYMBOL(del_virt_timer);
#endif
/*
* misc.
*/
......
......@@ -58,8 +58,6 @@ struct {
} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
int cpus_initialized = 0;
static cpumask_t cpu_initialized;
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
/*
......@@ -85,15 +83,8 @@ static struct resource data_resource = { "Kernel data", 0, 0 };
*/
void __devinit cpu_init (void)
{
int nr = smp_processor_id();
int addr = hard_smp_processor_id();
if (cpu_test_and_set(nr,cpu_initialized)) {
printk("CPU#%d ALREADY INITIALIZED!!!!!!!!!\n", nr);
for (;;) local_irq_enable();
}
cpus_initialized++;
/*
* Store processor id in lowcore (used e.g. in timer_interrupt)
*/
......
......@@ -5,6 +5,7 @@
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
......@@ -57,6 +58,8 @@ cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
unsigned long cache_decay_ticks = 0;
static struct task_struct *current_set[NR_CPUS];
EXPORT_SYMBOL(cpu_online_map);
/*
......@@ -124,7 +127,6 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
struct call_data_struct data;
int cpus = num_online_cpus()-1;
/* FIXME: get cpu lock -hc */
if (cpus <= 0)
return 0;
......@@ -211,7 +213,6 @@ EXPORT_SYMBOL(smp_call_function_on);
static inline void do_send_stop(void)
{
unsigned long dummy;
int i, rc;
/* stop all processors */
......@@ -219,25 +220,23 @@ static inline void do_send_stop(void)
if (!cpu_online(i) || smp_processor_id() == i)
continue;
do {
rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
rc = signal_processor(i, sigp_stop);
} while (rc == sigp_busy);
}
}
static inline void do_store_status(void)
{
unsigned long low_core_addr;
unsigned long dummy;
int i, rc;
/* store status of all processors in their lowcores (real 0) */
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i) || smp_processor_id() == i)
continue;
low_core_addr = (unsigned long) lowcore_ptr[i];
do {
rc = signal_processor_ps(&dummy, low_core_addr, i,
sigp_store_status_at_address);
rc = signal_processor_p(
(__u32)(unsigned long) lowcore_ptr[i], i,
sigp_store_status_at_address);
} while(rc == sigp_busy);
}
}
......@@ -265,8 +264,10 @@ static cpumask_t cpu_restart_map;
static void do_machine_restart(void * __unused)
{
static atomic_t cpuid = ATOMIC_INIT(-1);
cpu_clear(smp_processor_id(), cpu_restart_map);
if (smp_processor_id() == 0) {
if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
/* Wait for all other cpus to enter do_machine_restart. */
while (!cpus_empty(cpu_restart_map))
cpu_relax();
......@@ -307,7 +308,9 @@ static void do_wait_for_stop(void)
static void do_machine_halt(void * __unused)
{
if (smp_processor_id() == 0) {
static atomic_t cpuid = ATOMIC_INIT(-1);
if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
......@@ -324,7 +327,9 @@ void machine_halt_smp(void)
static void do_machine_power_off(void * __unused)
{
if (smp_processor_id() == 0) {
static atomic_t cpuid = ATOMIC_INIT(-1);
if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
......@@ -482,7 +487,24 @@ void smp_ctl_clear_bit(int cr, int bit) {
* Lets check how many CPUs we have.
*/
void __init smp_check_cpus(unsigned int max_cpus)
#ifdef CONFIG_HOTPLUG_CPU
void
__init smp_check_cpus(unsigned int max_cpus)
{
int cpu;
/*
* cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
*/
for (cpu = 1; cpu < max_cpus; cpu++)
cpu_set(cpu, cpu_possible_map);
}
#else /* CONFIG_HOTPLUG_CPU */
void
__init smp_check_cpus(unsigned int max_cpus)
{
int curr_cpu, num_cpus;
__u16 boot_cpu_addr;
......@@ -505,10 +527,13 @@ void __init smp_check_cpus(unsigned int max_cpus)
printk("Boot cpu address %2X\n", boot_cpu_addr);
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
* Activate a secondary processor.
*/
extern void init_cpu_timer(void);
extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern int pfault_token(void);
......@@ -518,6 +543,9 @@ int __devinit start_secondary(void *cpuvoid)
cpu_init();
/* init per CPU timer */
init_cpu_timer();
#ifdef CONFIG_VIRT_TIMER
init_cpu_vtimer();
#endif
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
......@@ -532,26 +560,95 @@ int __devinit start_secondary(void *cpuvoid)
return cpu_idle(NULL);
}
static struct task_struct *__devinit fork_by_hand(void)
/*
 * smp_create_idle()
 *
 * Fork the idle task for @cpu at boot time and park it in current_set[]
 * so __cpu_up() can later hand it to the secondary processor.
 */
static void __init smp_create_idle(unsigned int cpu)
{
struct pt_regs regs;
struct task_struct *p;
/*
 * don't care about the psw and regs settings since we'll never
 * reschedule the forked task.
 */
memset(&regs, 0, sizeof(struct pt_regs));
p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
if (IS_ERR(p))
panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
/* Make the task runnable, then convert it into the CPU's idle task. */
wake_up_forked_process(p);
init_idle(p, cpu);
/* Idle tasks are not visible via the pid hash. */
unhash_process(p);
current_set[cpu] = p;
}
/* Reserving and releasing of CPUs */
static atomic_t smp_cpu_reserved[NR_CPUS];
/*
 * smp_get_cpu()
 *
 * Reserve a CPU from @cpu_mask so it cannot be taken offline
 * (__cpu_disable refuses while smp_cpu_reserved[cpu] != 0).
 * Returns the reserved logical CPU number, or -ENODEV if no online
 * CPU in the mask could be reserved. Pair with smp_put_cpu().
 */
int
smp_get_cpu(cpumask_t cpu_mask)
{
int val, cpu;
/* Try to find an already reserved cpu. */
for_each_cpu_mask(cpu, cpu_mask) {
/*
 * Lock-free increment of a non-zero refcount: retry the
 * compare-and-swap until it succeeds or the count drops to 0.
 */
while ((val = atomic_read(&smp_cpu_reserved[cpu])) != 0) {
if (!atomic_compare_and_swap(val, val + 1,
&smp_cpu_reserved[cpu]))
/* Found one. */
goto out;
}
}
/* Reserve a new cpu from cpu_mask. */
for_each_cpu_mask(cpu, cpu_mask) {
atomic_inc(&smp_cpu_reserved[cpu]);
/*
 * Only keep the reservation if the CPU is actually online;
 * otherwise roll it back and try the next one.
 */
if (cpu_online(cpu))
goto out;
atomic_dec(&smp_cpu_reserved[cpu]);
}
cpu = -ENODEV;
out:
return cpu;
}
/*
 * smp_put_cpu()
 *
 * Drop a reservation taken by smp_get_cpu(), allowing the CPU to be
 * set offline again once the count reaches zero.
 *
 * NOTE(review): the pt_regs/memset/copy_process lines below appear to be
 * residue of the removed fork_by_hand() interleaved into this function by
 * the diff rendering (a `return <value>;` in a void function, followed by
 * unreachable code, cannot be the intended body). The real function is
 * presumably just the atomic_dec() — verify against the original patch.
 */
void
smp_put_cpu(int cpu)
{
struct pt_regs regs;
/* don't care about the psw and regs settings since we'll never
reschedule the forked task. */
memset(&regs,0,sizeof(struct pt_regs));
return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
atomic_dec(&smp_cpu_reserved[cpu]);
}
int __cpu_up(unsigned int cpu)
/*
 * cpu_stopped()
 *
 * Returns 1 if the CPU at logical number @cpu is in the stopped state
 * (sigp_sense status bit 0x40), 0 otherwise.
 */
static inline int
cpu_stopped(int cpu)
{
/*
 * NOTE(review): `idle` looks like diff residue from the removed
 * __cpu_up() lines rendered without +/- markers — it is unused here;
 * verify against the original patch.
 */
struct task_struct *idle;
__u32 status;
/* Check for stopped state */
if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
if (status & 0x40)
return 1;
}
return 0;
}
/* Upping and downing of CPUs */
int
__cpu_up(unsigned int cpu)
{
struct task_struct *idle;
struct _lowcore *cpu_lowcore;
sigp_ccode ccode;
int curr_cpu;
/*
* Set prefix page for new cpu
*/
for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
__cpu_logical_map[cpu] = (__u16) curr_cpu;
if (cpu_stopped(cpu))
break;
}
if (!cpu_stopped(cpu))
return -ENODEV;
ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
if (ccode){
printk("sigp_set_prefix failed for cpu %d "
......@@ -560,23 +657,7 @@ int __cpu_up(unsigned int cpu)
return -EIO;
}
/* We can't use kernel_thread since we must _avoid_ to reschedule
the child. */
idle = fork_by_hand();
if (IS_ERR(idle)){
printk("failed fork for CPU %d", cpu);
return -EIO;
}
wake_up_forked_process(idle);
/*
* We remove it from the pidhash and the runqueue
* once we got the process:
*/
init_idle(idle, cpu);
unhash_process(idle);
idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->save_area[15] = idle->thread.ksp;
cpu_lowcore->kernel_stack = (unsigned long)
......@@ -595,6 +676,65 @@ int __cpu_up(unsigned int cpu)
return 0;
}
/*
 * __cpu_disable()
 *
 * Prepare the current CPU for going offline: refuse with -EBUSY if the
 * CPU is pinned via smp_get_cpu(), otherwise mask its external, I/O and
 * most machine-check interrupt subclasses by clearing the corresponding
 * control-register bits. Returns 0 on success.
 */
int
__cpu_disable(void)
{
unsigned long flags;
ec_creg_mask_parms cr_parms;
local_irq_save(flags);
/* A reserved CPU (see smp_get_cpu) must not be taken offline. */
if (atomic_read(&smp_cpu_reserved[smp_processor_id()])) {
local_irq_restore(flags);
return -EBUSY;
}
/* disable all external interrupts */
cr_parms.start_ctl = 0;
cr_parms.end_ctl = 0;
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
1<<11 | 1<<10 | 1<< 6 | 1<< 4);
smp_ctl_bit_callback(&cr_parms);
/* disable all I/O interrupts */
cr_parms.start_ctl = 6;
cr_parms.end_ctl = 6;
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
1<<27 | 1<<26 | 1<<25 | 1<<24);
smp_ctl_bit_callback(&cr_parms);
/* disable most machine checks */
cr_parms.start_ctl = 14;
cr_parms.end_ctl = 14;
cr_parms.orvals[14] = 0;
cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
smp_ctl_bit_callback(&cr_parms);
local_irq_restore(flags);
return 0;
}
/*
 * __cpu_die()
 *
 * Called on a surviving CPU: busy-wait until the dying @cpu has reached
 * the stopped state, then report it.
 */
void
__cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
while (!cpu_stopped(cpu));
printk("Processor %d spun down\n", cpu);
}
/*
 * cpu_die()
 *
 * Called on the dying CPU itself (from the idle loop): stop this
 * processor via SIGP. Never returns; BUG()/the infinite loop only run
 * if the stop order somehow fails to take effect.
 */
void
cpu_die(void)
{
signal_processor(smp_processor_id(), sigp_stop);
BUG();
for(;;);
}
/*
* Cycle through the processors and setup structures.
*/
......@@ -602,6 +742,7 @@ int __cpu_up(unsigned int cpu)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned long async_stack;
unsigned int cpu;
int i;
/* request the 0x1202 external interrupt */
......@@ -628,13 +769,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
}
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
for_each_cpu(cpu)
if (cpu != smp_processor_id())
smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
cpu_set(smp_processor_id(), cpu_online_map);
cpu_set(smp_processor_id(), cpu_possible_map);
S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
BUG_ON(smp_processor_id() != 0);
cpu_set(0, cpu_online_map);
cpu_set(0, cpu_possible_map);
S390_lowcore.percpu_offset = __per_cpu_offset[0];
current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
......@@ -675,3 +823,6 @@ EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
EXPORT_SYMBOL(smp_get_cpu);
EXPORT_SYMBOL(smp_put_cpu);
This diff is collapsed.
......@@ -64,9 +64,7 @@ extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_VIRT_TIMER)
extern pgm_check_handler_t do_monitor_call;
#endif
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
......@@ -620,9 +618,8 @@ void __init trap_init(void)
#endif /* CONFIG_ARCH_S390X */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &privileged_op;
#if defined(CONFIG_VIRT_TIMER) || defined(CONFIG_NO_IDLE_HZ)
pgm_check_table[0x40] = &do_monitor_call;
#endif
if (MACHINE_IS_VM) {
/*
* First try to get pfault pseudo page faults going.
......
This diff is collapsed.
......@@ -407,6 +407,14 @@ struct ctl_table_header *cmm_sysctl_header;
static int
cmm_init (void)
{
int rc;
/* Prevent logical cpu 0 from being set offline. */
rc = smp_get_cpu(cpumask_of_cpu(0));
if (rc) {
printk(KERN_ERR "CMM: unable to reserve cpu 0\n");
return rc;
}
#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
#endif
......@@ -430,6 +438,8 @@ cmm_exit(void)
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
/* Allow logical cpu 0 to be set offline again. */
smp_put_cpu(0);
}
module_init(cmm_init);
......
......@@ -2711,6 +2711,10 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
save_screen(i);
old_was_color = vc_cons[i].d->vc_can_do_color;
vc_cons[i].d->vc_sw->con_deinit(vc_cons[i].d);
origin = (unsigned long) screenbuf;
visible_origin = origin;
scr_end = origin + screenbuf_size;
pos = origin + video_size_row*y + 2*x;
visual_init(i, 0);
update_attr(i);
......
......@@ -54,7 +54,7 @@ static void ap_hdlc_like_ctrl_char_list (u32 ctrl_char) {
}
void init_CRC() {
void init_CRC(void) {
ap_hdlc_like_ctrl_char_list(0xffffffff);
}
......
......@@ -112,11 +112,11 @@ config VIDEO_CPIA_USB
It is also available as a module (cpia_usb).
config VIDEO_SAA5246A
tristate "SAA5246A Teletext processor"
tristate "SAA5246A, SAA5281 Teletext processor"
depends on VIDEO_DEV && I2C
help
Support for I2C bus based teletext using the SAA5246A chip. Useful
only if you live in Europe.
Support for I2C bus based teletext using the SAA5246A or SAA5281
chip. Useful only if you live in Europe.
To compile this driver as a module, choose M here: the
module will be called saa5246a.
......
/*
* Driver for the SAA5246A videotext decoder chip from Philips.
* Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
* Philips.
*
* Only capturing of videotext pages is tested. The SAA5246A chip also has
* a TV output but my hardware doesn't use it. For this reason this driver
* does not support changing any TV display settings.
* Only capturing of Teletext pages is tested. The videotext chips also have a
* TV output but my hardware doesn't use it. For this reason this driver does
* not support changing any TV display settings.
*
* Copyright (C) 2004 Michael Geng <linux@MichaelGeng.de>
*
......@@ -47,6 +48,10 @@
#include <linux/videodev.h>
#include "saa5246a.h"
MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
MODULE_DESCRIPTION("Philips SAA5246A, SAA5281 Teletext decoder driver");
MODULE_LICENSE("GPL");
struct saa5246a_device
{
u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE];
......@@ -764,8 +769,8 @@ static int saa5246a_release(struct inode *inode, struct file *file)
static int __init init_saa_5246a (void)
{
printk(KERN_INFO "SAA5246A driver (" IF_NAME
" interface) for VideoText version %d.%d\n",
printk(KERN_INFO
"SAA5246A (or compatible) Teletext decoder driver version %d.%d\n",
MAJOR_VERSION, MINOR_VERSION);
return i2c_add_driver(&i2c_driver_videotext);
}
......@@ -796,5 +801,3 @@ static struct video_device saa_template =
.release = video_device_release,
.minor = -1,
};
MODULE_LICENSE("GPL");
/*
Driver for the SAA5246A videotext decoder chip from Philips.
Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
Philips.
Copyright (C) 2004 Michael Geng (linux@MichaelGeng.de)
This program is free software; you can redistribute it and/or modify
......@@ -21,7 +23,7 @@
#define __SAA5246A_H__
#define MAJOR_VERSION 1 /* driver major version number */
#define MINOR_VERSION 6 /* driver minor version number */
#define MINOR_VERSION 7 /* driver minor version number */
#define IF_NAME "SAA5246A"
......
......@@ -7,7 +7,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
* $Revision: 1.146 $
* $Revision: 1.147 $
*/
#include <linux/config.h>
......@@ -739,8 +739,16 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
if (rc)
return rc;
device = (struct dasd_device *) cqr->device;
if (cqr->retries < 0) {
DEV_MESSAGE(KERN_DEBUG, device,
"start_IO: request %p (%02x/%i) - no retry left.",
cqr, cqr->status, cqr->retries);
cqr->status = DASD_CQR_FAILED;
return -EIO;
}
cqr->startclk = get_clock();
cqr->starttime = jiffies;
cqr->retries--;
rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
cqr->lpm, 0);
switch (rc) {
......@@ -1067,7 +1075,6 @@ __dasd_process_ccw_queue(struct dasd_device * device,
break;
/* Process requests with DASD_CQR_ERROR */
if (cqr->status == DASD_CQR_ERROR) {
cqr->retries--;
if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
......
......@@ -494,11 +494,12 @@ static struct sclp_register sclp_state_change_event = {
static void
do_load_quiesce_psw(void * __unused)
{
static atomic_t cpuid = ATOMIC_INIT(-1);
psw_t quiesce_psw;
unsigned long status;
__u32 status;
int i;
if (smp_processor_id() != 0)
if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
i = 1;
......@@ -511,7 +512,7 @@ do_load_quiesce_psw(void * __unused)
case sigp_order_code_accepted:
case sigp_status_stored:
/* Check for stopped and check stop state */
if (test_bit(6, &status) || test_bit(4, &status))
if (status & 0x50)
i++;
break;
case sigp_busy:
......
......@@ -165,8 +165,6 @@ ccw_device_handle_oper(struct ccw_device *cdev)
return;
}
cdev->private->flags.donotify = 1;
/* Get device online again. */
ccw_device_online(cdev);
}
/*
......@@ -233,15 +231,23 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
cdev->private->devno, sch->irq);
break;
case DEV_STATE_OFFLINE:
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
ccw_device_handle_oper(cdev);
notify = 1;
else /* fill out sense information */
cdev->id = (struct ccw_device_id) {
.cu_type = cdev->private->senseid.cu_type,
.cu_model = cdev->private->senseid.cu_model,
.dev_type = cdev->private->senseid.dev_type,
.dev_model = cdev->private->senseid.dev_model,
};
}
/* fill out sense information */
cdev->id = (struct ccw_device_id) {
.cu_type = cdev->private->senseid.cu_type,
.cu_model = cdev->private->senseid.cu_model,
.dev_type = cdev->private->senseid.dev_type,
.dev_model = cdev->private->senseid.dev_model,
};
if (notify) {
/* Get device online again. */
ccw_device_online(cdev);
wake_up(&cdev->private->wait_q);
return;
}
/* Issue device info message. */
CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
......@@ -256,10 +262,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
break;
}
cdev->private->state = state;
if (notify && state == DEV_STATE_OFFLINE)
ccw_device_handle_oper(cdev);
else
io_subchannel_recog_done(cdev);
io_subchannel_recog_done(cdev);
if (state != DEV_STATE_NOT_OPER)
wake_up(&cdev->private->wait_q);
}
......
......@@ -2,7 +2,7 @@
# S/390 network devices
#
ctc-objs := ctcmain.o ctctty.o
ctc-objs := ctcmain.o ctctty.o ctcdbug.o
obj-$(CONFIG_IUCV) += iucv.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
......
/*
*
* linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
* $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "ctcdbug.h"
/**
* Debug Facility Stuff
*/
debug_info_t *dbf_setup = NULL;
debug_info_t *dbf_data = NULL;
debug_info_t *dbf_trace = NULL;
DEFINE_PER_CPU(char[256], dbf_txt_buf);
void
unregister_dbf_views(void)
{
if (dbf_setup)
debug_unregister(dbf_setup);
if (dbf_data)
debug_unregister(dbf_data);
if (dbf_trace)
debug_unregister(dbf_trace);
}
/*
 * register_dbf_views() - set up the CTC debug facility.
 *
 * Allocates the three debug areas (setup, data, trace), attaches the
 * hex/ascii view to each of them and applies the initial debug level.
 *
 * Returns 0 on success or -ENOMEM if any area could not be allocated;
 * in the failure case all partially allocated areas are released again.
 */
int
register_dbf_views(void)
{
	dbf_setup = debug_register(CTC_DBF_SETUP_NAME, CTC_DBF_SETUP_INDEX,
				   CTC_DBF_SETUP_NR_AREAS, CTC_DBF_SETUP_LEN);
	dbf_data = debug_register(CTC_DBF_DATA_NAME, CTC_DBF_DATA_INDEX,
				  CTC_DBF_DATA_NR_AREAS, CTC_DBF_DATA_LEN);
	dbf_trace = debug_register(CTC_DBF_TRACE_NAME, CTC_DBF_TRACE_INDEX,
				   CTC_DBF_TRACE_NR_AREAS, CTC_DBF_TRACE_LEN);

	/* All three areas must exist; otherwise roll back completely. */
	if (!dbf_setup || !dbf_data || !dbf_trace) {
		unregister_dbf_views();
		return -ENOMEM;
	}

	debug_register_view(dbf_setup, &debug_hex_ascii_view);
	debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL);
	debug_register_view(dbf_data, &debug_hex_ascii_view);
	debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL);
	debug_register_view(dbf_trace, &debug_hex_ascii_view);
	debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL);
	return 0;
}
/*
*
* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
* $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/debug.h>
/**
* Debug Facility stuff
*/
#define CTC_DBF_SETUP_NAME "ctc_setup"
#define CTC_DBF_SETUP_LEN 16
#define CTC_DBF_SETUP_INDEX 3
#define CTC_DBF_SETUP_NR_AREAS 1
#define CTC_DBF_SETUP_LEVEL 3
#define CTC_DBF_DATA_NAME "ctc_data"
#define CTC_DBF_DATA_LEN 128
#define CTC_DBF_DATA_INDEX 3
#define CTC_DBF_DATA_NR_AREAS 1
#define CTC_DBF_DATA_LEVEL 2
#define CTC_DBF_TRACE_NAME "ctc_trace"
#define CTC_DBF_TRACE_LEN 16
#define CTC_DBF_TRACE_INDEX 2
#define CTC_DBF_TRACE_NR_AREAS 2
#define CTC_DBF_TRACE_LEVEL 3
/*
 * DBF_TEXT() - log the string @text as a text event at @level into the
 * debug area dbf_<name> (name is one of: setup, data, trace).
 */
#define DBF_TEXT(name,level,text) \
	do { \
		debug_text_event(dbf_##name,level,text); \
	} while (0)
/*
 * DBF_HEX() - log @len raw bytes starting at @addr at @level into the
 * debug area dbf_<name>.
 */
#define DBF_HEX(name,level,addr,len) \
	do { \
		debug_event(dbf_##name,level,(void*)(addr),len); \
	} while (0)
extern DEFINE_PER_CPU(char[256], dbf_txt_buf);
extern debug_info_t *dbf_setup;
extern debug_info_t *dbf_data;
extern debug_info_t *dbf_trace;
/*
 * DBF_TEXT_() - sprintf-format a message into the per-cpu text buffer
 * and log it as a text event into the debug area dbf_<name>.
 * NOTE(review): the local variable deliberately shadows the per-cpu
 * symbol dbf_txt_buf; get_cpu_var()/put_cpu_var() bracket the use so
 * the buffer cannot be switched away mid-format — confirm put_cpu_var
 * here only re-enables preemption.
 */
#define DBF_TEXT_(name,level,text...) \
	do { \
		char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \
		sprintf(dbf_txt_buf, text); \
		debug_text_event(dbf_##name,level,dbf_txt_buf); \
		put_cpu_var(dbf_txt_buf); \
	} while (0)
/*
 * DBF_SPRINTF() - emit a sprintf-formatted event into the trace debug
 * area at @level.  Note: @name is currently unused; output always goes
 * to dbf_trace.  (Fixed: the event was previously emitted twice, which
 * logged every message in duplicate.)
 */
#define DBF_SPRINTF(name,level,text...) \
	do { \
		debug_sprintf_event(dbf_trace, level, text ); \
	} while (0)
int register_dbf_views(void);
void unregister_dbf_views(void);
/**
* some more debug stuff
*/
#define HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
*(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
*(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
*(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
*(((char*)ptr)+12),*(((char*)ptr)+13), \
*(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)+16),*(((char*)ptr)+17), \
*(((char*)ptr)+18),*(((char*)ptr)+19), \
*(((char*)ptr)+20),*(((char*)ptr)+21), \
*(((char*)ptr)+22),*(((char*)ptr)+23), \
*(((char*)ptr)+24),*(((char*)ptr)+25), \
*(((char*)ptr)+26),*(((char*)ptr)+27), \
*(((char*)ptr)+28),*(((char*)ptr)+29), \
*(((char*)ptr)+30),*(((char*)ptr)+31));
/*
 * hex_dump() - dump @len bytes at @buf to the console as two-digit hex
 * values, 16 bytes per line, followed by a final newline.
 */
static inline void
hex_dump(unsigned char *buf, size_t len)
{
	size_t pos;

	for (pos = 0; pos < len; pos++) {
		/* Break the line before every 16th byte (but not the first). */
		if (pos && (pos % 16) == 0)
			printk("\n");
		printk("%02x ", buf[pos]);
	}
	printk("\n");
}
This diff is collapsed.
/*
* $Id: ctctty.c,v 1.17 2004/03/31 17:06:34 ptiedem Exp $
* $Id: ctctty.c,v 1.21 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver, tty interface.
*
......@@ -30,6 +30,7 @@
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
#include "ctctty.h"
#include "ctcdbug.h"
#define CTC_TTY_MAJOR 43
#define CTC_TTY_MAX_DEVICES 64
......@@ -103,6 +104,7 @@ ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
int len;
struct tty_struct *tty;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
c = TTY_FLIPBUF_SIZE - tty->flip.count;
......@@ -132,6 +134,7 @@ ctc_tty_readmodem(ctc_tty_info *info)
int ret = 1;
struct tty_struct *tty;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
int c = TTY_FLIPBUF_SIZE - tty->flip.count;
......@@ -165,6 +168,7 @@ ctc_tty_setcarrier(struct net_device *netdev, int on)
{
int i;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((!driver) || ctc_tty_shuttingdown)
return;
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
......@@ -185,6 +189,7 @@ ctc_tty_netif_rx(struct sk_buff *skb)
int i;
ctc_tty_info *info = NULL;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!skb)
return;
if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
......@@ -249,6 +254,7 @@ ctc_tty_tint(ctc_tty_info * info)
int wake = 1;
int rc;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->netdev) {
if (skb)
kfree_skb(skb);
......@@ -341,6 +347,7 @@ ctc_tty_inject(ctc_tty_info *info, char c)
int skb_res;
struct sk_buff *skb;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
......@@ -361,6 +368,7 @@ ctc_tty_inject(ctc_tty_info *info, char c)
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
info->flags |= CTC_ASYNC_TX_LINESTAT;
......@@ -374,6 +382,7 @@ ctc_tty_change_speed(ctc_tty_info * info)
unsigned int quot;
int i;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->tty || !info->tty->termios)
return;
cflag = info->tty->termios->c_cflag;
......@@ -412,6 +421,7 @@ ctc_tty_change_speed(ctc_tty_info * info)
static int
ctc_tty_startup(ctc_tty_info * info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (info->flags & CTC_ASYNC_INITIALIZED)
return 0;
#ifdef CTC_DEBUG_MODEM_OPEN
......@@ -454,6 +464,7 @@ ctc_tty_stopdev(unsigned long data)
static void
ctc_tty_shutdown(ctc_tty_info * info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (!(info->flags & CTC_ASYNC_INITIALIZED))
return;
#ifdef CTC_DEBUG_MODEM_OPEN
......@@ -486,14 +497,17 @@ ctc_tty_write(struct tty_struct *tty, int from_user, const u_char * buf, int cou
int total = 0;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return 0;
goto ex;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
return 0;
goto ex;
if (!tty)
return 0;
if (!info->netdev)
return -ENODEV;
goto ex;
if (!info->netdev) {
total = -ENODEV;
goto ex;
}
if (from_user)
down(&info->write_sem);
while (1) {
......@@ -530,6 +544,8 @@ ctc_tty_write(struct tty_struct *tty, int from_user, const u_char * buf, int cou
}
if (from_user)
up(&info->write_sem);
ex:
DBF_TEXT(trace, 6, __FUNCTION__);
return total;
}
......@@ -559,13 +575,14 @@ ctc_tty_flush_buffer(struct tty_struct *tty)
ctc_tty_info *info;
unsigned long flags;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!tty)
return;
goto ex;
spin_lock_irqsave(&ctc_tty_lock, flags);
info = (ctc_tty_info *) tty->driver_data;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
spin_unlock_irqrestore(&ctc_tty_lock, flags);
return;
goto ex;
}
skb_queue_purge(&info->tx_queue);
info->lsr |= UART_LSR_TEMT;
......@@ -574,6 +591,9 @@ ctc_tty_flush_buffer(struct tty_struct *tty)
if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
tty->ldisc.write_wakeup)
(tty->ldisc.write_wakeup) (tty);
ex:
DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
return;
}
static void
......@@ -783,7 +803,6 @@ ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
ctc_tty_change_speed(info);
/* Handle transition to B0 */
......@@ -1032,8 +1051,10 @@ ctc_tty_close(struct tty_struct *tty, struct file *filp)
}
}
ctc_tty_shutdown(info);
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
if (tty->driver->flush_buffer) {
skb_queue_purge(&info->tx_queue);
info->lsr |= UART_LSR_TEMT;
}
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
info->tty = 0;
......@@ -1059,7 +1080,6 @@ ctc_tty_hangup(struct tty_struct *tty)
{
ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
unsigned long saveflags;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
return;
ctc_tty_shutdown(info);
......@@ -1185,6 +1205,21 @@ ctc_tty_register_netdev(struct net_device *dev) {
"with NULL dev or NULL dev-name\n");
return -1;
}
/*
* If the name is a format string the caller wants us to
* do a name allocation : format string must end with %d
*/
if (strchr(dev->name, '%'))
{
int err = dev_alloc_name(dev, dev->name); // dev->name is changed by this
if (err < 0) {
printk(KERN_DEBUG "dev_alloc returned error %d\n", err);
return err;
}
}
for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
ttynum = simple_strtoul(p, &err, 0);
if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
......
/*
* $Id: iucv.c,v 1.33 2004/05/24 10:19:18 braunu Exp $
* $Id: iucv.c,v 1.34 2004/06/24 10:53:48 braunu Exp $
*
* IUCV network driver
*
......@@ -29,7 +29,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.33 $
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.34 $
*
*/
......@@ -177,9 +177,11 @@ static handler **iucv_pathid_table;
static unsigned long max_connections;
/**
* declare_flag: is 0 when iucv_declare_buffer has not been called
* iucv_cpuid: contains the logical cpu number of the cpu which
* has declared the iucv buffer by issuing DECLARE_BUFFER.
* If no cpu has done the initialization iucv_cpuid contains -1.
*/
static int declare_flag;
static int iucv_cpuid = -1;
/**
* register_flag: is 0 when external interrupt has not been registered
*/
......@@ -352,7 +354,7 @@ do { \
static void
iucv_banner(void)
{
char vbuf[] = "$Revision: 1.33 $";
char vbuf[] = "$Revision: 1.34 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
......@@ -631,16 +633,16 @@ iucv_remove_pathid(__u16 pathid)
}
/**
* iucv_declare_buffer_cpu0
* Register at VM for subsequent IUCV operations. This is always
* executed on CPU 0. Called from iucv_declare_buffer().
* iucv_declare_buffer_cpuid
* Register at VM for subsequent IUCV operations. This is executed
* on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
*/
static void
iucv_declare_buffer_cpu0 (void *result)
iucv_declare_buffer_cpuid (void *result)
{
iparml_db *parm;
if (!(result && (smp_processor_id() == 0)))
if (smp_processor_id() != iucv_cpuid)
return;
parm = (iparml_db *)grab_param();
parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
......@@ -650,16 +652,17 @@ iucv_declare_buffer_cpu0 (void *result)
}
/**
* iucv_retrieve_buffer_cpu0:
* Unregister IUCV usage at VM. This is always executed on CPU 0.
* iucv_retrieve_buffer_cpuid:
* Unregister IUCV usage at VM. This is always executed on the same
* cpu that registered the buffer to VM.
* Called from iucv_retrieve_buffer().
*/
static void
iucv_retrieve_buffer_cpu0 (void *result)
iucv_retrieve_buffer_cpuid (void *cpu)
{
iparml_control *parm;
if (smp_processor_id() != 0)
if (smp_processor_id() != iucv_cpuid)
return;
parm = (iparml_control *)grab_param();
b2f0(RETRIEVE_BUFFER, parm);
......@@ -676,18 +679,22 @@ iucv_retrieve_buffer_cpu0 (void *result)
static int
iucv_declare_buffer (void)
{
ulong b2f0_result = 0x0deadbeef;
unsigned long flags;
ulong b2f0_result;
iucv_debug(1, "entering");
preempt_disable();
if (smp_processor_id() == 0)
iucv_declare_buffer_cpu0(&b2f0_result);
else
smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
preempt_enable();
iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
if (b2f0_result == 0x0deadbeef)
b2f0_result = 0xaa;
spin_lock_irqsave (&iucv_lock, flags);
if (iucv_cpuid == -1) {
/* Reserve any cpu for use by iucv. */
iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
spin_unlock_irqrestore (&iucv_lock, flags);
smp_call_function(iucv_declare_buffer_cpuid,
&b2f0_result, 0, 1);
iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
} else {
spin_unlock_irqrestore (&iucv_lock, flags);
b2f0_result = 0;
}
iucv_debug(1, "exiting");
return b2f0_result;
}
......@@ -702,14 +709,11 @@ static int
iucv_retrieve_buffer (void)
{
iucv_debug(1, "entering");
if (declare_flag) {
preempt_disable();
if (smp_processor_id() == 0)
iucv_retrieve_buffer_cpu0(0);
else
smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
declare_flag = 0;
preempt_enable();
if (iucv_cpuid != -1) {
smp_call_function(iucv_retrieve_buffer_cpuid, 0, 0, 1);
/* Release the cpu reserved by iucv_declare_buffer. */
smp_put_cpu(iucv_cpuid);
iucv_cpuid = -1;
}
iucv_debug(1, "exiting");
return 0;
......@@ -862,38 +866,31 @@ iucv_register_program (__u8 pgmname[16],
return NULL;
}
if (declare_flag == 0) {
rc = iucv_declare_buffer();
if (rc) {
char *err = "Unknown";
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
case 0x03:
err = "Directory error";
break;
case 0x0a:
err = "Invalid length";
break;
case 0x13:
err = "Buffer already exists";
break;
case 0x3e:
err = "Buffer overlap";
break;
case 0x5c:
err = "Paging or storage error";
break;
case 0xaa:
err = "Function not called";
break;
}
printk(KERN_WARNING "%s: iucv_declare_buffer "
"returned error 0x%02lx (%s)\n", __FUNCTION__, rc,
err);
return NULL;
rc = iucv_declare_buffer();
if (rc) {
char *err = "Unknown";
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
case 0x03:
err = "Directory error";
break;
case 0x0a:
err = "Invalid length";
break;
case 0x13:
err = "Buffer already exists";
break;
case 0x3e:
err = "Buffer overlap";
break;
case 0x5c:
err = "Paging or storage error";
break;
}
declare_flag = 1;
printk(KERN_WARNING "%s: iucv_declare_buffer "
"returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
return NULL;
}
if (register_flag == 0) {
/* request the 0x4000 external interrupt */
......@@ -2190,11 +2187,11 @@ iucv_send2way_prmmsg_array (__u16 pathid,
}
void
iucv_setmask_cpu0 (void *result)
iucv_setmask_cpuid (void *result)
{
iparml_set_mask *parm;
if (smp_processor_id() != 0)
if (smp_processor_id() != iucv_cpuid)
return;
iucv_debug(1, "entering");
......@@ -2228,14 +2225,15 @@ iucv_setmask (int SetMaskFlag)
ulong result;
__u8 param;
} u;
int cpu;
u.param = SetMaskFlag;
preempt_disable();
if (smp_processor_id() == 0)
iucv_setmask_cpu0(&u);
cpu = get_cpu();
if (cpu == iucv_cpuid)
iucv_setmask_cpuid(&u);
else
smp_call_function(iucv_setmask_cpu0, &u, 0, 1);
preempt_enable();
smp_call_function(iucv_setmask_cpuid, &u, 0, 1);
put_cpu();
return u.result;
}
......
......@@ -788,7 +788,7 @@ ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
}
static __inline void
ahd_list_lockinit()
ahd_list_lockinit(void)
{
spin_lock_init(&ahd_list_spinlock);
}
......
This diff is collapsed.
......@@ -69,6 +69,35 @@ extern int set_con2fb_map(int unit, int newidx);
/* There are several methods fbcon can use to move text around the screen:
*
* Operation Pan Wrap
*---------------------------------------------
* SCROLL_MOVE copyarea No No
* SCROLL_PAN_MOVE copyarea Yes No
* SCROLL_WRAP_MOVE copyarea No Yes
* SCROLL_REDRAW imageblit No No
* SCROLL_PAN_REDRAW imageblit Yes No
* SCROLL_WRAP_REDRAW imageblit No Yes
*
* (SCROLL_WRAP_REDRAW is not implemented yet)
*
* In general, fbcon will choose the best scrolling
* method based on the rule below:
*
* Pan/Wrap > accel imageblit > accel copyarea >
* soft imageblit > (soft copyarea)
*
* Exception to the rule: Pan + accel copyarea is
* preferred over Pan + accel imageblit.
*
* The above is typical for PCI/AGP cards. Unless
* overridden, fbcon will never use soft copyarea.
*
* If you need to override the above rule, set the
* appropriate flags in fb_info->flags. For example,
* to prefer copyarea over imageblit, set
* FBINFO_READS_FAST.
*
* Other notes:
* + use the hardware engine to move the text
* (hw-accelerated copyarea() and fillrect())
* + use hardware-supported panning on a large virtual screen
......@@ -84,10 +113,11 @@ extern int set_con2fb_map(int unit, int newidx);
*
*/
#define SCROLL_ACCEL 0x001
#define SCROLL_PAN 0x002
#define SCROLL_WRAP 0x003
#define SCROLL_REDRAW 0x004
#define SCROLL_MOVE 0x001
#define SCROLL_PAN_MOVE 0x002
#define SCROLL_WRAP_MOVE 0x003
#define SCROLL_REDRAW 0x004
#define SCROLL_PAN_REDRAW 0x005
extern int fb_console_init(void);
......
......@@ -1681,7 +1681,8 @@ static int __devinit riva_set_fbinfo(struct fb_info *info)
| FBINFO_HWACCEL_YPAN
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_IMAGEBLIT;
| FBINFO_HWACCEL_IMAGEBLIT
| FBINFO_MISC_MODESWITCHLATE;
info->var = rivafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
......
......@@ -378,7 +378,8 @@ static int __init vesafb_probe(struct device *device)
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
info->flags = FBINFO_FLAG_DEFAULT;
info->flags = FBINFO_FLAG_DEFAULT |
(ypan) ? FBINFO_HWACCEL_YPAN : 0;
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENXIO;
......
......@@ -1370,7 +1370,8 @@ int __init vga16fb_init(void)
vga16fb.var = vga16fb_defined;
vga16fb.fix = vga16fb_fix;
vga16fb.par = &vga16_par;
vga16fb.flags = FBINFO_FLAG_DEFAULT;
vga16fb.flags = FBINFO_FLAG_DEFAULT |
FBINFO_HWACCEL_YPAN;
i = (vga16fb_defined.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&vga16fb.cmap, i, 0);
......
......@@ -716,7 +716,7 @@ static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
dentry->d_op = &eventpollfs_dentry_operations;
d_add(dentry, inode);
file->f_vfsmnt = mntget(eventpoll_mnt);
file->f_dentry = dget(dentry);
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_pos = 0;
......
......@@ -65,7 +65,7 @@ void smbiod_wake_up(void)
/*
* start smbiod if none is running
*/
static int smbiod_start()
static int smbiod_start(void)
{
pid_t pid;
if (smbiod_state != SMBIOD_DEAD)
......
......@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/time.h>
#include <linux/compiler.h>
/* Avoid too many header ordering problems. */
struct siginfo;
......@@ -128,7 +129,11 @@ typedef unsigned long sigset_t;
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
typedef void __signalfn_t(int);
typedef __signalfn_t __user *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t __user *__sigrestore_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
......@@ -139,13 +144,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
......@@ -171,7 +176,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void *ss_sp;
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
......
/*
* Architecture dependent definitions
* for NEC uPD4990A serial I/O real-time clock.
*
* Copyright 2001 TAKAI Kousuke <tak@kmc.kyoto-u.ac.jp>
* Kyoto University Microcomputer Club (KMC).
*
* References:
* uPD4990A serial I/O real-time clock users' manual (Japanese)
* No. S12828JJ4V0UM00 (4th revision), NEC Corporation, 1999.
*/
#ifndef _ASM_I386_uPD4990A_H
#define _ASM_I386_uPD4990A_H
#include <asm/io.h>
#define UPD4990A_IO (0x0020)
#define UPD4990A_IO_DATAOUT (0x0033)
#define UPD4990A_OUTPUT_DATA_CLK(data, clk) \
outb((((data) & 1) << 5) | (((clk) & 1) << 4) \
| UPD4990A_PAR_SERIAL_MODE, UPD4990A_IO)
#define UPD4990A_OUTPUT_CLK(clk) UPD4990A_OUTPUT_DATA_CLK(0, (clk))
#define UPD4990A_OUTPUT_STROBE(stb) \
outb(((stb) << 3) | UPD4990A_PAR_SERIAL_MODE, UPD4990A_IO)
/*
* Note: udelay() is *not* usable for UPD4990A_DELAY because
* the Linux kernel reads uPD4990A to set up system clock
* before calibrating delay...
*/
#define UPD4990A_DELAY(usec) \
do { \
if (__builtin_constant_p((usec)) && (usec) < 5) \
__asm__ (".rept %c1\n\toutb %%al,%0\n\t.endr" \
: : "N" (0x5F), \
"i" (((usec) * 10 + 5) / 6)); \
else { \
int _count = ((usec) * 10 + 5) / 6; \
__asm__ volatile ("1: outb %%al,%1\n\tloop 1b" \
: "=c" (_count) \
: "N" (0x5F), "0" (_count)); \
} \
} while (0)
/* Caller should ignore all bits except bit0 */
#define UPD4990A_READ_DATA() inb(UPD4990A_IO_DATAOUT)
#endif
......@@ -2,6 +2,7 @@
#define _ASMPPC64_SIGNAL_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/siginfo.h>
/* Avoid too many header ordering problems. */
......@@ -114,7 +115,12 @@ typedef struct {
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
typedef void __sigfunction(int);
typedef __sigfunction __user * __sighandler_t;
/* Type of the restorer function */
typedef void __sigrestorer(void);
typedef __sigrestorer __user * __sigrestorer_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
......@@ -124,13 +130,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestorer_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestorer_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
......@@ -139,7 +145,7 @@ struct k_sigaction {
};
typedef struct sigaltstack {
void *ss_sp;
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
......
......@@ -319,6 +319,16 @@ static inline void disabled_wait(unsigned long code)
#endif /* __s390x__ */
}
/*
* CPU idle notifier chain.
*/
#define CPU_IDLE 0
#define CPU_NOT_IDLE 1
struct notifier_block;
int register_idle_notifier(struct notifier_block *nb);
int unregister_idle_notifier(struct notifier_block *nb);
#endif
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -5,6 +5,7 @@
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*
* sigp.h by D.J. Barrow (c) IBM 1999
* contains routines / structures for signalling other S/390 processors in an
......@@ -72,17 +73,10 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" sr 1,1\n" /* parameter=0 in gpr 1 */
" sigp 1,%1,0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" sgr 1,1\n" /* parameter=0 in gpr 1 */
" sigp 1,%1,0(%2)\n"
" ipm %0\n"
" srl %0,28"
#endif /* __s390x__ */
: "=d" (ccode)
: "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
: "cc" , "memory", "1" );
......@@ -93,23 +87,16 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
* Signal processor with parameter
*/
extern __inline__ sigp_ccode
signal_processor_p(unsigned long parameter,__u16 cpu_addr,
signal_processor_p(__u32 parameter, __u16 cpu_addr,
sigp_order_code order_code)
{
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" lr 1,%1\n" /* parameter in gpr 1 */
" sigp 1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" lgr 1,%1\n" /* parameter in gpr 1 */
" sigp 1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
#endif /* __s390x__ */
: "=d" (ccode)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
"a" (order_code)
......@@ -121,27 +108,18 @@ signal_processor_p(unsigned long parameter,__u16 cpu_addr,
* Signal processor with parameter and return status
*/
extern __inline__ sigp_ccode
signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
signal_processor_ps(__u32 *statusptr, __u32 parameter,
__u16 cpu_addr, sigp_order_code order_code)
{
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" sr 2,2\n" /* clear status so it doesn't contain rubbish if not saved. */
" sr 2,2\n" /* clear status */
" lr 3,%2\n" /* parameter in gpr 3 */
" sigp 2,%3,0(%4)\n"
" st 2,%1\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" sgr 2,2\n" /* clear status so it doesn't contain rubbish if not saved. */
" lgr 3,%2\n" /* parameter in gpr 3 */
" sigp 2,%3,0(%4)\n"
" stg 2,%1\n"
" ipm %0\n"
" srl %0,28\n"
#endif /* __s390x__ */
: "=d" (ccode), "=m" (*statusptr)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
"a" (order_code)
......@@ -151,5 +129,3 @@ signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
}
#endif /* __SIGP__ */
......@@ -5,6 +5,7 @@
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
......@@ -47,6 +48,9 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
extern int smp_get_cpu(cpumask_t cpu_map);
extern void smp_put_cpu(int cpu);
extern __inline__ __u16 hard_smp_processor_id(void)
{
__u16 cpu_address;
......@@ -57,10 +61,17 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
#endif
#ifndef CONFIG_SMP
#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
#define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; })
#endif
#endif
......@@ -533,6 +533,7 @@ struct fb_ops {
#define FBINFO_MISC_MODECHANGEUSER 0x10000 /* mode change request
from userspace */
#define FBINFO_MISC_MODESWITCH 0x20000 /* mode switch */
#define FBINFO_MISC_MODESWITCHLATE 0x40000 /* init hardware later */
struct fb_info {
int node;
......
/*
* Constant and architecture independent procedures
* for NEC uPD4990A serial I/O real-time clock.
*
* Copyright 2001 TAKAI Kousuke <tak@kmc.kyoto-u.ac.jp>
* Kyoto University Microcomputer Club (KMC).
*
* References:
* uPD4990A serial I/O real-time clock users' manual (Japanese)
* No. S12828JJ4V0UM00 (4th revision), NEC Corporation, 1999.
*/
#ifndef _LINUX_uPD4990A_H
#define _LINUX_uPD4990A_H
#include <asm/byteorder.h>
#include <asm/upd4990a.h>
/* Serial commands (4 bits) */
#define UPD4990A_REGISTER_HOLD (0x0)
#define UPD4990A_REGISTER_SHIFT (0x1)
#define UPD4990A_TIME_SET_AND_COUNTER_HOLD (0x2)
#define UPD4990A_TIME_READ (0x3)
#define UPD4990A_TP_64HZ (0x4)
#define UPD4990A_TP_256HZ (0x5)
#define UPD4990A_TP_2048HZ (0x6)
#define UPD4990A_TP_4096HZ (0x7)
#define UPD4990A_TP_1S (0x8)
#define UPD4990A_TP_10S (0x9)
#define UPD4990A_TP_30S (0xA)
#define UPD4990A_TP_60S (0xB)
#define UPD4990A_INTERRUPT_RESET (0xC)
#define UPD4990A_INTERRUPT_TIMER_START (0xD)
#define UPD4990A_INTERRUPT_TIMER_STOP (0xE)
#define UPD4990A_TEST_MODE_SET (0xF)
/* Parallel commands (3 bits)
0-6 are same with serial commands. */
#define UPD4990A_PAR_SERIAL_MODE 7
#ifndef UPD4990A_DELAY
# include <linux/delay.h>
# define UPD4990A_DELAY(usec) udelay((usec))
#endif
#ifndef UPD4990A_OUTPUT_DATA
# define UPD4990A_OUTPUT_DATA(bit) \
do { \
UPD4990A_OUTPUT_DATA_CLK((bit), 0); \
UPD4990A_DELAY(1); /* t-DSU */ \
UPD4990A_OUTPUT_DATA_CLK((bit), 1); \
UPD4990A_DELAY(1); /* t-DHLD */ \
} while (0)
#endif
/*
 * Shift a 4-bit serial command into the uPD4990A, LSB first, then
 * latch it with a strobe pulse.  Only bit 0 of each argument to
 * UPD4990A_OUTPUT_DATA() is used (the default implementation masks
 * with &1 inside UPD4990A_OUTPUT_DATA_CLK).  The t-HLD/t-STB delays
 * follow the uPD4990A users' manual timing.
 */
static __inline__ void upd4990a_serial_command(int command)
{
	UPD4990A_OUTPUT_DATA(command >> 0);
	UPD4990A_OUTPUT_DATA(command >> 1);
	UPD4990A_OUTPUT_DATA(command >> 2);
	UPD4990A_OUTPUT_DATA(command >> 3);
	UPD4990A_DELAY(1);	/* t-HLD */
	UPD4990A_OUTPUT_STROBE(1);
	UPD4990A_DELAY(1);	/* t-STB & t-d1 */
	UPD4990A_OUTPUT_STROBE(0);
	/* 19 microseconds extra delay is needed
	   iff previous mode is TIME READ command */
}
struct upd4990a_raw_data {
u8 sec; /* BCD */
u8 min; /* BCD */
u8 hour; /* BCD */
u8 mday; /* BCD */
#if defined __LITTLE_ENDIAN_BITFIELD
unsigned wday :4; /* 0-6 */
unsigned mon :4; /* 1-based */
#elif defined __BIG_ENDIAN_BITFIELD
unsigned mon :4; /* 1-based */
unsigned wday :4; /* 0-6 */
#else
# error Unknown bitfield endian!
#endif
u8 year; /* BCD */
};
/*
 * Read the current time from the uPD4990A into @buf.
 *
 * Issues TIME READ (latches the counters into the shift register)
 * followed by REGISTER SHIFT, then clocks the six data bytes of
 * struct upd4990a_raw_data out of the chip, LSB first within each
 * byte.
 *
 * If @leave_register_hold is zero, a trailing REGISTER HOLD command is
 * issued as the users' manual requires (see comment below); pass
 * non-zero only if the caller issues it itself.
 */
static __inline__ void upd4990a_get_time(struct upd4990a_raw_data *buf,
					 int leave_register_hold)
{
	int byte;

	upd4990a_serial_command(UPD4990A_TIME_READ);
	upd4990a_serial_command(UPD4990A_REGISTER_SHIFT);
	UPD4990A_DELAY(19);	/* t-d2 - t-d1 */

	for (byte = 0; byte < 6; byte++) {
		u8 tmp;
		int bit;

		for (tmp = 0, bit = 0; bit < 8; bit++) {
			/* Sample DATA OUT and shift it in from the top:
			 * bit 0 of the port value lands in bit 7 of tmp;
			 * any higher port bits are truncated away by the
			 * u8 assignment each iteration. */
			tmp = (tmp | (UPD4990A_READ_DATA() << 8)) >> 1;
			UPD4990A_OUTPUT_CLK(1);
			UPD4990A_DELAY(1);
			UPD4990A_OUTPUT_CLK(0);
			UPD4990A_DELAY(1);
		}
		((u8 *) buf)[byte] = tmp;
	}

	/* The uPD4990A users' manual says that we should issue `Register
	   Hold' command after each data retrieval, or next `Time Read'
	   command may not work correctly. */
	if (!leave_register_hold)
		upd4990a_serial_command(UPD4990A_REGISTER_HOLD);
}
/*
 * Write a new time to the uPD4990A.
 *
 * Shifts the six raw bytes of @data into the chip, LSB first within
 * each byte, then latches them with TIME SET & COUNTER HOLD (which
 * also holds the counter).
 *
 * If @time_set_only is zero, REGISTER SHIFT is issued first and a
 * final REGISTER HOLD releases the counter hold so the clock starts
 * running again; pass non-zero when the caller manages the register
 * mode itself.
 */
static __inline__ void upd4990a_set_time(const struct upd4990a_raw_data *data,
					 int time_set_only)
{
	int byte;

	if (!time_set_only)
		upd4990a_serial_command(UPD4990A_REGISTER_SHIFT);

	for (byte = 0; byte < 6; byte++) {
		int bit;
		u8 tmp = ((const u8 *) data)[byte];

		/* Shift each byte out bit by bit, least significant first. */
		for (bit = 0; bit < 8; bit++, tmp >>= 1)
			UPD4990A_OUTPUT_DATA(tmp);
	}

	upd4990a_serial_command(UPD4990A_TIME_SET_AND_COUNTER_HOLD);

	/* Release counter hold and start the clock. */
	if (!time_set_only)
		upd4990a_serial_command(UPD4990A_REGISTER_HOLD);
}
#endif /* _LINUX_uPD4990A_H */
......@@ -828,9 +828,6 @@ asmlinkage NORET_TYPE void do_exit(long code)
__exit_fs(tsk);
exit_namespace(tsk);
exit_thread();
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
#endif
if (tsk->signal->leader)
disassociate_ctty(1);
......@@ -841,6 +838,10 @@ asmlinkage NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
schedule();
BUG();
/* Avoid "noreturn function does return". */
......
......@@ -157,7 +157,7 @@ static kmem_cache_t *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
void * handler;
void __user * handler;
/*
* Tracers always want to know about signals..
......@@ -2362,13 +2362,13 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
int error;
if (uoss) {
oss.ss_sp = (void *) current->sas_ss_sp;
oss.ss_sp = (void __user *) current->sas_ss_sp;
oss.ss_size = current->sas_ss_size;
oss.ss_flags = sas_ss_flags(sp);
}
if (uss) {
void *ss_sp;
void __user *ss_sp;
size_t ss_size;
int ss_flags;
......
......@@ -1116,7 +1116,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto out;
goto __end;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
......@@ -1137,7 +1137,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(snd_ctl_event_t))) {
err = -EFAULT;
goto __end;
goto out;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(snd_ctl_event_t);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment