Commit 0ac3fc9f authored by Linus Torvalds

Merge bk://kernel.bkbits.net/davem/net-2.6

into ppc970.osdl.org:/home/torvalds/v2.6/linux
parents 2453a83f b6d199cc
@@ -107,6 +107,15 @@ config NR_CPUS
This is purely to save memory - each supported CPU adds
approximately sixteen kilobytes to the kernel image.
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs (EXPERIMENTAL)"
depends on SMP && HOTPLUG && EXPERIMENTAL
default n
help
Say Y here to experiment with turning CPUs off and on. CPUs
can be controlled through /sys/devices/system/cpu/cpu#.
Say N if you want to disable CPU hotplug.
config MATHEMU
bool "IEEE FPU emulation"
depends on MARCH_G5
......
@@ -25,6 +25,8 @@
#include <linux/sysctl.h>
#include <asm/timer.h>
//#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
#include "appldata.h"
@@ -124,10 +126,6 @@ static struct ctl_table appldata_dir_table[] = {
*/
DEFINE_PER_CPU(struct vtimer_list, appldata_timer);
static atomic_t appldata_expire_count = ATOMIC_INIT(0);
-static struct appldata_mod_vtimer_args {
-	struct vtimer_list *timer;
-	u64 expires;
-} appldata_mod_vtimer_args;
static spinlock_t appldata_timer_lock = SPIN_LOCK_UNLOCKED;
static int appldata_interval = APPLDATA_CPU_INTERVAL;
@@ -154,7 +152,7 @@ static LIST_HEAD(appldata_ops_list);
static void appldata_timer_function(unsigned long data, struct pt_regs *regs)
{
P_DEBUG(" -= Timer =-\n");
P_DEBUG("CPU: %i, expire: %i\n", smp_processor_id(),
P_DEBUG("CPU: %i, expire_count: %i\n", smp_processor_id(),
atomic_read(&appldata_expire_count));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
@@ -187,17 +185,6 @@ static void appldata_tasklet_function(unsigned long data)
spin_unlock(&appldata_ops_lock);
}
-/*
- * appldata_mod_vtimer_wrap()
- *
- * wrapper function for mod_virt_timer(), because smp_call_function_on()
- * accepts only one parameter.
- */
-static void appldata_mod_vtimer_wrap(void *p) {
-	struct appldata_mod_vtimer_args *args = p;
-	mod_virt_timer(args->timer, args->expires);
-}
/*
* appldata_diag()
*
@@ -247,6 +234,79 @@ static int appldata_diag(char record_nr, u16 function, unsigned long buffer,
/****************************** /proc stuff **********************************/
/*
* appldata_mod_vtimer_wrap()
*
* wrapper function for mod_virt_timer(), because smp_call_function_on()
* accepts only one parameter.
*/
static void __appldata_mod_vtimer_wrap(void *p) {
struct {
struct vtimer_list *timer;
u64 expires;
} *args = p;
mod_virt_timer(args->timer, args->expires);
}
#define APPLDATA_ADD_TIMER 0
#define APPLDATA_DEL_TIMER 1
#define APPLDATA_MOD_TIMER 2
/*
* __appldata_vtimer_setup()
*
* Add, delete or modify virtual timers on all online cpus.
 * The caller must hold the appldata_timer_lock spinlock.
*/
static void
__appldata_vtimer_setup(int cmd)
{
u64 per_cpu_interval;
int i;
switch (cmd) {
case APPLDATA_ADD_TIMER:
if (appldata_timer_active)
break;
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
for_each_online_cpu(i) {
per_cpu(appldata_timer, i).expires = per_cpu_interval;
smp_call_function_on(add_virt_timer_periodic,
&per_cpu(appldata_timer, i),
0, 1, i);
}
appldata_timer_active = 1;
P_INFO("Monitoring timer started.\n");
break;
case APPLDATA_DEL_TIMER:
for_each_online_cpu(i)
del_virt_timer(&per_cpu(appldata_timer, i));
if (!appldata_timer_active)
break;
appldata_timer_active = 0;
atomic_set(&appldata_expire_count, num_online_cpus());
P_INFO("Monitoring timer stopped.\n");
break;
case APPLDATA_MOD_TIMER:
per_cpu_interval = (u64) (appldata_interval*1000 /
num_online_cpus()) * TOD_MICRO;
if (!appldata_timer_active)
break;
for_each_online_cpu(i) {
struct {
struct vtimer_list *timer;
u64 expires;
} args;
args.timer = &per_cpu(appldata_timer, i);
args.expires = per_cpu_interval;
smp_call_function_on(__appldata_mod_vtimer_wrap,
&args, 0, 1, i);
}
}
}
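/*
 * Usage sketch (illustrative, not part of the patch itself): every
 * caller serializes timer setup with appldata_timer_lock, e.g.
 *
 *	spin_lock(&appldata_timer_lock);
 *	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
 *	spin_unlock(&appldata_timer_lock);
 */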
/*
* appldata_timer_handler()
*
@@ -256,9 +316,8 @@ static int
appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
void __user *buffer, size_t *lenp)
{
-	int len, i;
+	int len;
	char buf[2];
-	u64 per_cpu_interval;
if (!*lenp || filp->f_pos) {
*lenp = 0;
@@ -272,30 +331,14 @@ appldata_timer_handler(ctl_table *ctl, int write, struct file *filp,
return -EFAULT;
goto out;
}
	len = *lenp;
	if (copy_from_user(buf, buffer, len > sizeof(buf) ? sizeof(buf) : len))
		return -EFAULT;
	spin_lock(&appldata_timer_lock);
-	per_cpu_interval = (u64) (appldata_interval*1000 /
-				  num_online_cpus()) * TOD_MICRO;
-	if ((buf[0] == '1') && (!appldata_timer_active)) {
-		for (i = 0; i < num_online_cpus(); i++) {
-			per_cpu(appldata_timer, i).expires = per_cpu_interval;
-			smp_call_function_on(add_virt_timer_periodic,
-					     &per_cpu(appldata_timer, i),
-					     0, 1, i);
-		}
-		appldata_timer_active = 1;
-		P_INFO("Monitoring timer started.\n");
-	} else if ((buf[0] == '0') && (appldata_timer_active)) {
-		for (i = 0; i < num_online_cpus(); i++) {
-			del_virt_timer(&per_cpu(appldata_timer, i));
-		}
-		appldata_timer_active = 0;
-		P_INFO("Monitoring timer stopped.\n");
-	}
+	if (buf[0] == '1')
+		__appldata_vtimer_setup(APPLDATA_ADD_TIMER);
+	else if (buf[0] == '0')
+		__appldata_vtimer_setup(APPLDATA_DEL_TIMER);
spin_unlock(&appldata_timer_lock);
out:
*lenp = len;
@@ -313,9 +356,8 @@ static int
appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
void __user *buffer, size_t *lenp)
{
-	int len, i, interval;
+	int len, interval;
	char buf[16];
-	u64 per_cpu_interval;
if (!*lenp || filp->f_pos) {
*lenp = 0;
@@ -340,20 +382,8 @@ appldata_interval_handler(ctl_table *ctl, int write, struct file *filp,
}
	spin_lock(&appldata_timer_lock);
-	per_cpu_interval = (u64) (interval*1000 / num_online_cpus()) * TOD_MICRO;
	appldata_interval = interval;
-	if (appldata_timer_active) {
-		for (i = 0; i < num_online_cpus(); i++) {
-			appldata_mod_vtimer_args.timer =
-				&per_cpu(appldata_timer, i);
-			appldata_mod_vtimer_args.expires =
-				per_cpu_interval;
-			smp_call_function_on(
-				appldata_mod_vtimer_wrap,
-				&appldata_mod_vtimer_args,
-				0, 1, i);
-		}
-	}
+	__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
P_INFO("Monitoring CPU interval set to %u milliseconds.\n",
@@ -564,6 +594,56 @@ void appldata_unregister_ops(struct appldata_ops *ops)
/******************************* init / exit *********************************/
static void
appldata_online_cpu(int cpu)
{
init_virt_timer(&per_cpu(appldata_timer, cpu));
per_cpu(appldata_timer, cpu).function = appldata_timer_function;
per_cpu(appldata_timer, cpu).data = (unsigned long)
&appldata_tasklet_struct;
atomic_inc(&appldata_expire_count);
spin_lock(&appldata_timer_lock);
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
}
static void
appldata_offline_cpu(int cpu)
{
del_virt_timer(&per_cpu(appldata_timer, cpu));
if (atomic_dec_and_test(&appldata_expire_count)) {
atomic_set(&appldata_expire_count, num_online_cpus());
tasklet_schedule(&appldata_tasklet_struct);
}
spin_lock(&appldata_timer_lock);
__appldata_vtimer_setup(APPLDATA_MOD_TIMER);
spin_unlock(&appldata_timer_lock);
}
static int
appldata_cpu_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_ONLINE:
appldata_online_cpu((long) hcpu);
break;
#ifdef CONFIG_HOTPLUG_CPU
case CPU_DEAD:
appldata_offline_cpu((long) hcpu);
break;
#endif
default:
break;
}
return NOTIFY_OK;
}
static struct notifier_block __devinitdata appldata_nb = {
.notifier_call = appldata_cpu_notify,
};
/*
* appldata_init()
*
@@ -576,13 +656,11 @@ static int __init appldata_init(void)
P_DEBUG("sizeof(parameter_list) = %lu\n",
sizeof(struct appldata_parameter_list));
-	for (i = 0; i < num_online_cpus(); i++) {
-		init_virt_timer(&per_cpu(appldata_timer, i));
-		per_cpu(appldata_timer, i).function = appldata_timer_function;
-		per_cpu(appldata_timer, i).data = (unsigned long)
-			&appldata_tasklet_struct;
-	}
-	atomic_set(&appldata_expire_count, num_online_cpus());
+	for_each_online_cpu(i)
+		appldata_online_cpu(i);
+
+	/* Register cpu hotplug notifier */
+	register_cpu_notifier(&appldata_nb);
appldata_sysctl_header = register_sysctl_table(appldata_dir_table, 1);
#ifdef MODULE
@@ -623,9 +701,9 @@ static void __exit appldata_exit(void)
}
spin_unlock_bh(&appldata_ops_lock);
-	for (i = 0; i < num_online_cpus(); i++) {
-		del_virt_timer(&per_cpu(appldata_timer, i));
-	}
+	for_each_online_cpu(i)
+		appldata_offline_cpu(i);
appldata_timer_active = 0;
unregister_sysctl_table(appldata_sysctl_header);
......
@@ -98,8 +98,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
LOAD_INT(a2), LOAD_FRAC(a2));
P_DEBUG("nr_cpus = %u\n", os_data->nr_cpus);
-	for (i = 0; i < NR_CPUS; i++) {
-		if (!cpu_online(i)) continue;
+	for (i = 0; i < os_data->nr_cpus; i++) {
P_DEBUG("cpu%u : user = %u, nice = %u, system = %u, "
"idle = %u, irq = %u, softirq = %u, iowait = %u\n",
i,
@@ -124,7 +123,7 @@ static inline void appldata_print_debug(struct appldata_os_data *os_data)
*/
static void appldata_get_os_data(void *data)
{
-	int i;
+	int i, j;
struct appldata_os_data *os_data;
os_data = data;
@@ -139,21 +138,23 @@ static void appldata_get_os_data(void *data)
os_data->avenrun[1] = avenrun[1] + (FIXED_1/200);
os_data->avenrun[2] = avenrun[2] + (FIXED_1/200);
-	for (i = 0; i < num_online_cpus(); i++) {
-		os_data->os_cpu[i].per_cpu_user =
+	j = 0;
+	for_each_online_cpu(i) {
+		os_data->os_cpu[j].per_cpu_user =
			kstat_cpu(i).cpustat.user;
-		os_data->os_cpu[i].per_cpu_nice =
+		os_data->os_cpu[j].per_cpu_nice =
			kstat_cpu(i).cpustat.nice;
-		os_data->os_cpu[i].per_cpu_system =
+		os_data->os_cpu[j].per_cpu_system =
			kstat_cpu(i).cpustat.system;
-		os_data->os_cpu[i].per_cpu_idle =
+		os_data->os_cpu[j].per_cpu_idle =
			kstat_cpu(i).cpustat.idle;
-		os_data->os_cpu[i].per_cpu_irq =
+		os_data->os_cpu[j].per_cpu_irq =
			kstat_cpu(i).cpustat.irq;
-		os_data->os_cpu[i].per_cpu_softirq =
+		os_data->os_cpu[j].per_cpu_softirq =
			kstat_cpu(i).cpustat.softirq;
-		os_data->os_cpu[i].per_cpu_iowait =
+		os_data->os_cpu[j].per_cpu_iowait =
			kstat_cpu(i).cpustat.iowait;
+		j++;
	}
os_data->timestamp = get_clock();
......
@@ -46,7 +46,6 @@ CONFIG_MODULES=y
CONFIG_OBSOLETE_MODPARM=y
# CONFIG_MODVERSIONS is not set
CONFIG_KMOD=y
-CONFIG_STOP_MACHINE=y
#
# Base setup
@@ -63,6 +62,7 @@ CONFIG_MARCH_G5=y
# CONFIG_MARCH_Z990 is not set
CONFIG_SMP=y
CONFIG_NR_CPUS=32
+# CONFIG_HOTPLUG_CPU is not set
CONFIG_MATHEMU=y
#
@@ -510,6 +510,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_AES is not set
# CONFIG_CRYPTO_CAST5 is not set
# CONFIG_CRYPTO_CAST6 is not set
+# CONFIG_CRYPTO_TEA is not set
# CONFIG_CRYPTO_ARC4 is not set
# CONFIG_CRYPTO_DEFLATE is not set
# CONFIG_CRYPTO_MICHAEL_MIC is not set
......
@@ -23,6 +23,8 @@ obj-$(CONFIG_BINFMT_ELF32) += binfmt_elf32.o
obj-$(CONFIG_ARCH_S390_31) += entry.o reipl.o
obj-$(CONFIG_ARCH_S390X) += entry64.o reipl64.o
+obj-$(CONFIG_VIRT_TIMER) += vtime.o
#
# This is just to get the dependencies...
#
......
@@ -17,6 +17,7 @@
#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
+#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
@@ -34,6 +35,8 @@
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
@@ -41,9 +44,7 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
-#if defined(CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
 #include <asm/timer.h>
-#endif
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -68,13 +69,39 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
}
/*
- * The idle loop on a S390...
+ * Need to know about CPUs going idle?
*/
static struct notifier_block *idle_chain;
int register_idle_notifier(struct notifier_block *nb)
{
return notifier_chain_register(&idle_chain, nb);
}
EXPORT_SYMBOL(register_idle_notifier);
int unregister_idle_notifier(struct notifier_block *nb)
{
return notifier_chain_unregister(&idle_chain, nb);
}
EXPORT_SYMBOL(unregister_idle_notifier);
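/*
 * Consumer sketch (illustrative, not part of the patch itself): users
 * of the idle chain register a notifier_block and react to CPU_IDLE /
 * CPU_NOT_IDLE, cf. vtimer_idle_notify in vtime.c and nohz_idle_notify
 * in time.c below; my_idle_notify is a hypothetical handler:
 *
 *	static int my_idle_notify(struct notifier_block *self,
 *				  unsigned long action, void *hcpu)
 *	{
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_idle_nb = {
 *		.notifier_call = my_idle_notify,
 *	};
 *
 *	register_idle_notifier(&my_idle_nb);
 */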
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
notifier_call_chain(&idle_chain, CPU_NOT_IDLE,
(void *)(long) smp_processor_id());
}
/*
* The idle loop on a S390...
*/
void default_idle(void)
{
psw_t wait_psw;
unsigned long reg;
+	int cpu, rc;
local_irq_disable();
if (need_resched()) {
@@ -83,14 +110,22 @@ void default_idle(void)
return;
}
-#if defined(CONFIG_VIRT_TIMER) || defined (CONFIG_NO_IDLE_HZ)
-	/*
-	 * hook to stop timers that should not tick while CPU is idle
-	 */
-	if (stop_timers()) {
+	/* CPU is going idle. */
+	cpu = smp_processor_id();
+	rc = notifier_call_chain(&idle_chain, CPU_IDLE, (void *)(long) cpu);
+	if (rc != NOTIFY_OK && rc != NOTIFY_DONE)
+		BUG();
+	if (rc != NOTIFY_OK) {
		local_irq_enable();
		return;
	}

+	/* enable monitor call class 0 */
+	__ctl_set_bit(8, 15);
+
+#ifdef CONFIG_HOTPLUG_CPU
+	if (cpu_is_offline(smp_processor_id()))
+		cpu_die();
+#endif
/*
......
@@ -19,9 +19,6 @@
#ifdef CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
-#ifdef CONFIG_VIRT_TIMER
-#include <asm/timer.h>
-#endif
/*
* memory management
@@ -53,17 +50,6 @@ EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
EXPORT_SYMBOL(empty_zero_page);
-/*
- * virtual CPU timer
- */
-#ifdef CONFIG_VIRT_TIMER
-EXPORT_SYMBOL(init_virt_timer);
-EXPORT_SYMBOL(add_virt_timer);
-EXPORT_SYMBOL(add_virt_timer_periodic);
-EXPORT_SYMBOL(mod_virt_timer);
-EXPORT_SYMBOL(del_virt_timer);
-#endif
/*
* misc.
*/
......
@@ -58,8 +58,6 @@ struct {
} memory_chunk[MEMORY_CHUNKS] = { { 0 } };
#define CHUNK_READ_WRITE 0
#define CHUNK_READ_ONLY 1
-int cpus_initialized = 0;
-static cpumask_t cpu_initialized;
volatile int __cpu_logical_map[NR_CPUS]; /* logical cpu to cpu address */
/*
@@ -85,15 +83,8 @@ static struct resource data_resource = { "Kernel data", 0, 0 };
*/
void __devinit cpu_init (void)
{
-	int nr = smp_processor_id();
	int addr = hard_smp_processor_id();

-	if (cpu_test_and_set(nr,cpu_initialized)) {
-		printk("CPU#%d ALREADY INITIALIZED!!!!!!!!!\n", nr);
-		for (;;) local_irq_enable();
-	}
-	cpus_initialized++;
/*
* Store processor id in lowcore (used e.g. in timer_interrupt)
*/
......
@@ -5,6 +5,7 @@
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
+ *		 Heiko Carstens (heiko.carstens@de.ibm.com)
*
* based on other smp stuff by
* (c) 1995 Alan Cox, CymruNET Ltd <alan@cymru.net>
@@ -57,6 +58,8 @@ cpumask_t cpu_online_map;
cpumask_t cpu_possible_map;
unsigned long cache_decay_ticks = 0;
static struct task_struct *current_set[NR_CPUS];
+EXPORT_SYMBOL(cpu_online_map);
/*
@@ -124,7 +127,6 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
struct call_data_struct data;
int cpus = num_online_cpus()-1;
-	/* FIXME: get cpu lock -hc */
if (cpus <= 0)
return 0;
@@ -211,7 +213,6 @@ EXPORT_SYMBOL(smp_call_function_on);
static inline void do_send_stop(void)
{
-	unsigned long dummy;
int i, rc;
/* stop all processors */
@@ -219,25 +220,23 @@ static inline void do_send_stop(void)
if (!cpu_online(i) || smp_processor_id() == i)
continue;
do {
-			rc = signal_processor_ps(&dummy, 0, i, sigp_stop);
+			rc = signal_processor(i, sigp_stop);
} while (rc == sigp_busy);
}
}
static inline void do_store_status(void)
{
-	unsigned long low_core_addr;
-	unsigned long dummy;
int i, rc;
/* store status of all processors in their lowcores (real 0) */
for (i = 0; i < NR_CPUS; i++) {
if (!cpu_online(i) || smp_processor_id() == i)
continue;
-		low_core_addr = (unsigned long) lowcore_ptr[i];
		do {
-			rc = signal_processor_ps(&dummy, low_core_addr, i,
-						 sigp_store_status_at_address);
+			rc = signal_processor_p(
+				(__u32)(unsigned long) lowcore_ptr[i], i,
+				sigp_store_status_at_address);
} while(rc == sigp_busy);
}
}
@@ -265,8 +264,10 @@ static cpumask_t cpu_restart_map;
static void do_machine_restart(void * __unused)
{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
	cpu_clear(smp_processor_id(), cpu_restart_map);
-	if (smp_processor_id() == 0) {
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
/* Wait for all other cpus to enter do_machine_restart. */
while (!cpus_empty(cpu_restart_map))
cpu_relax();
@@ -307,7 +308,9 @@ static void do_wait_for_stop(void)
static void do_machine_halt(void * __unused)
{
-	if (smp_processor_id() == 0) {
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmhalt_cmd) > 0)
cpcmd(vmhalt_cmd, NULL, 0);
@@ -324,7 +327,9 @@ void machine_halt_smp(void)
static void do_machine_power_off(void * __unused)
{
-	if (smp_processor_id() == 0) {
+	static atomic_t cpuid = ATOMIC_INIT(-1);
+
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid) == 0) {
smp_send_stop();
if (MACHINE_IS_VM && strlen(vmpoff_cmd) > 0)
cpcmd(vmpoff_cmd, NULL, 0);
@@ -482,7 +487,24 @@ void smp_ctl_clear_bit(int cr, int bit) {
 * Let's check how many CPUs we have.
*/
-void __init smp_check_cpus(unsigned int max_cpus)
+#ifdef CONFIG_HOTPLUG_CPU
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
+{
+	int cpu;
+
+	/*
+	 * cpu 0 is the boot cpu. See smp_prepare_boot_cpu.
+	 */
+	for (cpu = 1; cpu < max_cpus; cpu++)
+		cpu_set(cpu, cpu_possible_map);
+}
+
+#else /* CONFIG_HOTPLUG_CPU */
+
+void
+__init smp_check_cpus(unsigned int max_cpus)
{
int curr_cpu, num_cpus;
__u16 boot_cpu_addr;
@@ -505,10 +527,13 @@ void __init smp_check_cpus(unsigned int max_cpus)
printk("Boot cpu address %2X\n", boot_cpu_addr);
}
+#endif /* CONFIG_HOTPLUG_CPU */
/*
* Activate a secondary processor.
*/
extern void init_cpu_timer(void);
+extern void init_cpu_vtimer(void);
extern int pfault_init(void);
extern int pfault_token(void);
@@ -518,6 +543,9 @@ int __devinit start_secondary(void *cpuvoid)
cpu_init();
/* init per CPU timer */
init_cpu_timer();
+#ifdef CONFIG_VIRT_TIMER
+	init_cpu_vtimer();
+#endif
#ifdef CONFIG_PFAULT
/* Enable pfault pseudo page faults on this cpu. */
pfault_init();
@@ -532,26 +560,95 @@ int __devinit start_secondary(void *cpuvoid)
return cpu_idle(NULL);
}
-static struct task_struct *__devinit fork_by_hand(void)
+static void __init smp_create_idle(unsigned int cpu)
 {
+	struct pt_regs regs;
+	struct task_struct *p;
+
+	/*
+	 * don't care about the psw and regs settings since we'll never
+	 * reschedule the forked task.
+	 */
+	memset(&regs, 0, sizeof(struct pt_regs));
+	p = copy_process(CLONE_VM | CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	if (IS_ERR(p))
+		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
+	wake_up_forked_process(p);
+	init_idle(p, cpu);
+	unhash_process(p);
+	current_set[cpu] = p;
+}
/* Reserving and releasing of CPUs */
static atomic_t smp_cpu_reserved[NR_CPUS];
int
smp_get_cpu(cpumask_t cpu_mask)
{
int val, cpu;
/* Try to find an already reserved cpu. */
for_each_cpu_mask(cpu, cpu_mask) {
while ((val = atomic_read(&smp_cpu_reserved[cpu])) != 0) {
if (!atomic_compare_and_swap(val, val + 1,
&smp_cpu_reserved[cpu]))
/* Found one. */
goto out;
}
}
/* Reserve a new cpu from cpu_mask. */
for_each_cpu_mask(cpu, cpu_mask) {
atomic_inc(&smp_cpu_reserved[cpu]);
if (cpu_online(cpu))
goto out;
atomic_dec(&smp_cpu_reserved[cpu]);
}
cpu = -ENODEV;
out:
return cpu;
}
+void
+smp_put_cpu(int cpu)
+{
-	struct pt_regs regs;
-	/* don't care about the psw and regs settings since we'll never
-	   reschedule the forked task. */
-	memset(&regs,0,sizeof(struct pt_regs));
-	return copy_process(CLONE_VM|CLONE_IDLETASK, 0, &regs, 0, NULL, NULL);
+	atomic_dec(&smp_cpu_reserved[cpu]);
}
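/*
 * Usage sketch (illustrative, not part of the patch itself):
 * smp_get_cpu() returns the number of a reserved cpu from the given
 * mask (or -ENODEV) and keeps it from being set offline until the
 * matching smp_put_cpu(); cf. cmm_init further down in this patch:
 *
 *	if (smp_get_cpu(cpumask_of_cpu(0)) != 0)
 *		return -ENODEV;
 *	...
 *	smp_put_cpu(0);
 */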
-int __cpu_up(unsigned int cpu)
+static inline int
+cpu_stopped(int cpu)
 {
-	struct task_struct *idle;
+	__u32 status;
+
+	/* Check for stopped state */
+	if (signal_processor_ps(&status, 0, cpu, sigp_sense) == sigp_status_stored) {
+		if (status & 0x40)
+			return 1;
+	}
+	return 0;
+}
+
+/* Upping and downing of CPUs */
+
+int
+__cpu_up(unsigned int cpu)
+{
+	struct task_struct *idle;
	struct _lowcore *cpu_lowcore;
	sigp_ccode ccode;
+	int curr_cpu;
/*
* Set prefix page for new cpu
*/
+	for (curr_cpu = 0; curr_cpu <= 65535; curr_cpu++) {
+		__cpu_logical_map[cpu] = (__u16) curr_cpu;
+		if (cpu_stopped(cpu))
+			break;
+	}
+	if (!cpu_stopped(cpu))
+		return -ENODEV;
-	ccode = signal_processor_p((unsigned long)(lowcore_ptr[cpu]),
+	ccode = signal_processor_p((__u32)(unsigned long)(lowcore_ptr[cpu]),
cpu, sigp_set_prefix);
if (ccode){
printk("sigp_set_prefix failed for cpu %d "
@@ -560,23 +657,7 @@ int __cpu_up(unsigned int cpu)
return -EIO;
}
-	/* We can't use kernel_thread since we must _avoid_ to reschedule
-	   the child. */
-	idle = fork_by_hand();
-	if (IS_ERR(idle)){
-		printk("failed fork for CPU %d", cpu);
-		return -EIO;
-	}
-	wake_up_forked_process(idle);
-
-	/*
-	 * We remove it from the pidhash and the runqueue
-	 * once we got the process:
-	 */
-	init_idle(idle, cpu);
-	unhash_process(idle);
+	idle = current_set[cpu];
cpu_lowcore = lowcore_ptr[cpu];
cpu_lowcore->save_area[15] = idle->thread.ksp;
cpu_lowcore->kernel_stack = (unsigned long)
@@ -595,6 +676,65 @@ int __cpu_up(unsigned int cpu)
return 0;
}
int
__cpu_disable(void)
{
unsigned long flags;
ec_creg_mask_parms cr_parms;
local_irq_save(flags);
if (atomic_read(&smp_cpu_reserved[smp_processor_id()])) {
local_irq_restore(flags);
return -EBUSY;
}
/* disable all external interrupts */
cr_parms.start_ctl = 0;
cr_parms.end_ctl = 0;
cr_parms.orvals[0] = 0;
cr_parms.andvals[0] = ~(1<<15 | 1<<14 | 1<<13 | 1<<12 |
1<<11 | 1<<10 | 1<< 6 | 1<< 4);
smp_ctl_bit_callback(&cr_parms);
/* disable all I/O interrupts */
cr_parms.start_ctl = 6;
cr_parms.end_ctl = 6;
cr_parms.orvals[6] = 0;
cr_parms.andvals[6] = ~(1<<31 | 1<<30 | 1<<29 | 1<<28 |
1<<27 | 1<<26 | 1<<25 | 1<<24);
smp_ctl_bit_callback(&cr_parms);
/* disable most machine checks */
cr_parms.start_ctl = 14;
cr_parms.end_ctl = 14;
cr_parms.orvals[14] = 0;
cr_parms.andvals[14] = ~(1<<28 | 1<<27 | 1<<26 | 1<<25 | 1<<24);
smp_ctl_bit_callback(&cr_parms);
local_irq_restore(flags);
return 0;
}
void
__cpu_die(unsigned int cpu)
{
/* Wait until target cpu is down */
while (!cpu_stopped(cpu));
printk("Processor %d spun down\n", cpu);
}
void
cpu_die(void)
{
signal_processor(smp_processor_id(), sigp_stop);
BUG();
for(;;);
}
/*
* Cycle through the processors and setup structures.
*/
@@ -602,6 +742,7 @@ int __cpu_up(unsigned int cpu)
void __init smp_prepare_cpus(unsigned int max_cpus)
{
unsigned long async_stack;
+	unsigned int cpu;
int i;
/* request the 0x1202 external interrupt */
@@ -628,13 +769,20 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
lowcore_ptr[i]->async_stack = async_stack + (ASYNC_SIZE);
}
set_prefix((u32)(unsigned long) lowcore_ptr[smp_processor_id()]);
+	for_each_cpu(cpu)
+		if (cpu != smp_processor_id())
+			smp_create_idle(cpu);
}
void __devinit smp_prepare_boot_cpu(void)
{
-	cpu_set(smp_processor_id(), cpu_online_map);
-	cpu_set(smp_processor_id(), cpu_possible_map);
-	S390_lowcore.percpu_offset = __per_cpu_offset[smp_processor_id()];
+	BUG_ON(smp_processor_id() != 0);
+
+	cpu_set(0, cpu_online_map);
+	cpu_set(0, cpu_possible_map);
+	S390_lowcore.percpu_offset = __per_cpu_offset[0];
+	current_set[0] = current;
}
void smp_cpus_done(unsigned int max_cpus)
@@ -675,3 +823,6 @@ EXPORT_SYMBOL(lowcore_ptr);
EXPORT_SYMBOL(smp_ctl_set_bit);
EXPORT_SYMBOL(smp_ctl_clear_bit);
EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_get_cpu);
+EXPORT_SYMBOL(smp_put_cpu);
/*
* arch/s390/kernel/time.c
* Time of day based timer functions.
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
@@ -11,6 +12,7 @@
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*/
#include <linux/config.h>
+#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
@@ -26,16 +28,14 @@
#include <linux/types.h>
#include <linux/profile.h>
#include <linux/timex.h>
-#include <linux/config.h>
+#include <linux/notifier.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>
#include <asm/irq.h>
-#ifdef CONFIG_VIRT_TIMER
 #include <asm/timer.h>
-#endif
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@@ -60,12 +60,6 @@ static u64 xtime_cc;
extern unsigned long wall_jiffies;
-#ifdef CONFIG_VIRT_TIMER
-#define VTIMER_MAGIC (0x4b87ad6e + 1)
-static ext_int_info_t ext_int_info_timer;
-DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
-#endif
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -236,6 +230,8 @@ void account_ticks(struct pt_regs *regs)
__u32 ticks;
/* Calculate how many ticks have passed. */
+	if (S390_lowcore.int_clock < S390_lowcore.jiffy_timer)
+		return;
tmp = S390_lowcore.int_clock - S390_lowcore.jiffy_timer;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than two ticks ? */
ticks = __calculate_ticks(tmp) + 1;
@@ -283,174 +279,6 @@ void account_ticks(struct pt_regs *regs)
s390_do_profile(regs);
}
#ifdef CONFIG_VIRT_TIMER
void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
set_vtimer(vt_list->idle);
}
int stop_cpu_timer(void)
{
__u64 done;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
/* nothing to do */
if (list_empty(&vt_list->list)) {
vt_list->idle = VTIMER_MAX_SLICE;
goto fire;
}
/* store progress */
asm volatile ("STPT %0" : "=m" (done));
/*
* If done is negative we do not stop the CPU timer
* because we will get instantly an interrupt that
* will start the CPU timer again.
*/
if (done & 1LL<<63)
return 1;
else
vt_list->offset += vt_list->to_expire - done;
/* save the actual expire value */
vt_list->idle = done;
/*
* We cannot halt the CPU timer, we just write a value that
* nearly never expires (only after 71 years) and re-write
* the stored expire value if we continue the timer
*/
fire:
set_vtimer(VTIMER_MAX_SLICE);
return 0;
}
void set_vtimer(__u64 expires)
{
asm volatile ("SPT %0" : : "m" (expires));
/* store expire time for this CPU timer */
per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
/*
* Sorted add to a list. List is linear searched until first bigger
* element is found.
*/
void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
struct vtimer_list *event;
list_for_each_entry(event, head, entry) {
if (event->expires > timer->expires) {
list_add_tail(&timer->entry, &event->entry);
return;
}
}
list_add_tail(&timer->entry, head);
}
/*
* Do the callback functions of expired vtimer events.
* Called from within the interrupt handler.
*/
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
void (*fn)(unsigned long, struct pt_regs*);
unsigned long data;
if (list_empty(cb_list))
return;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
data = event->data;
fn(data, regs);
if (!event->interval)
/* delete one shot timer */
list_del_init(&event->entry);
else {
/* move interval timer back to list */
spin_lock(&vt_list->lock);
list_del_init(&event->entry);
list_add_sorted(event, &vt_list->list);
spin_unlock(&vt_list->lock);
}
}
}
/*
* Handler for the virtual CPU timer.
*/
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
int cpu;
__u64 next, delta;
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
struct list_head *ptr;
/* the callback queue */
struct list_head cb_list;
INIT_LIST_HEAD(&cb_list);
cpu = smp_processor_id();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* walk timer list, fire all expired events */
spin_lock(&vt_list->lock);
if (vt_list->to_expire < VTIMER_MAX_SLICE)
vt_list->offset += vt_list->to_expire;
list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
if (event->expires > vt_list->offset)
/* found first unexpired event, leave */
break;
/* re-charge interval timer, we have to add the offset */
if (event->interval)
event->expires = event->interval + vt_list->offset;
/* move expired timer to the callback queue */
list_move_tail(&event->entry, &cb_list);
}
spin_unlock(&vt_list->lock);
do_callbacks(&cb_list, regs);
/* next event is first in list */
spin_lock(&vt_list->lock);
if (!list_empty(&vt_list->list)) {
ptr = vt_list->list.next;
event = list_entry(ptr, struct vtimer_list, entry);
next = event->expires - vt_list->offset;
/* add the expired time from this interrupt handler
* and the callback functions
*/
asm volatile ("STPT %0" : "=m" (delta));
delta = 0xffffffffffffffffLL - delta + 1;
vt_list->offset += delta;
next -= delta;
} else {
vt_list->offset = 0;
next = VTIMER_MAX_SLICE;
}
spin_unlock(&vt_list->lock);
set_vtimer(next);
}
#endif
#ifdef CONFIG_NO_IDLE_HZ
#ifdef CONFIG_NO_IDLE_HZ_INIT
@@ -459,66 +287,11 @@ int sysctl_hz_timer = 0;
int sysctl_hz_timer = 1;
#endif
/*
* Start the HZ tick on the current CPU.
* Only cpu_idle may call this function.
*/
void start_hz_timer(struct pt_regs *regs)
{
__u64 tmp;
__u32 ticks;
if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
return;
/* Calculate how many ticks have passed */
asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
tmp = tmp + CLK_TICKS_PER_JIFFY - S390_lowcore.jiffy_timer;
ticks = __calculate_ticks(tmp);
S390_lowcore.jiffy_timer += CLK_TICKS_PER_JIFFY * (__u64) ticks;
/* Set the clock comparator to the next tick. */
tmp = S390_lowcore.jiffy_timer + CPU_DEVIATION;
asm volatile ("SCKC %0" : : "m" (tmp));
/* Charge the ticks. */
if (ticks > 0) {
#ifdef CONFIG_SMP
/*
* Do not rely on the boot cpu to do the calls to do_timer.
* Spread it over all cpus instead.
*/
write_seqlock(&xtime_lock);
if (S390_lowcore.jiffy_timer > xtime_cc) {
__u32 xticks;
tmp = S390_lowcore.jiffy_timer - xtime_cc;
if (tmp >= 2*CLK_TICKS_PER_JIFFY) {
xticks = __calculate_ticks(tmp);
xtime_cc += (__u64) xticks*CLK_TICKS_PER_JIFFY;
} else {
xticks = 1;
xtime_cc += CLK_TICKS_PER_JIFFY;
}
while (xticks--)
do_timer(regs);
}
write_sequnlock(&xtime_lock);
while (ticks--)
update_process_times(user_mode(regs));
#else
while (ticks--)
do_timer(regs);
#endif
}
cpu_clear(smp_processor_id(), nohz_cpu_mask);
}
/*
* Stop the HZ tick on the current CPU.
* Only cpu_idle may call this function.
*/
-void stop_hz_timer(void)
+static inline void stop_hz_timer(void)
{
__u64 timer;
@@ -541,57 +314,52 @@ void stop_hz_timer(void)
timer = jiffies_timer_cc + timer * CLK_TICKS_PER_JIFFY;
asm volatile ("SCKC %0" : : "m" (timer));
}
#endif
#if defined(CONFIG_VIRT_TIMER) || defined(CONFIG_NO_IDLE_HZ)
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
#ifdef CONFIG_VIRT_TIMER
start_cpu_timer();
#endif
#ifdef CONFIG_NO_IDLE_HZ
start_hz_timer(regs);
#endif
}
/*
- * called from cpu_idle to stop any timers
- * returns 1 if CPU should not be stopped
+ * Start the HZ tick on the current CPU.
+ * Only cpu_idle may call this function.
 */
-int stop_timers(void)
+static inline void start_hz_timer(void)
{
-#ifdef CONFIG_VIRT_TIMER
-	if (stop_cpu_timer())
-		return 1;
-#endif
+	if (!cpu_isset(smp_processor_id(), nohz_cpu_mask))
+		return;
+	account_ticks(__KSTK_PTREGS(current));
+	cpu_clear(smp_processor_id(), nohz_cpu_mask);
}

-#ifdef CONFIG_NO_IDLE_HZ
-	stop_hz_timer();
-#endif
static int nohz_idle_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_IDLE:
stop_hz_timer();
break;
case CPU_NOT_IDLE:
start_hz_timer();
break;
}
return NOTIFY_OK;
}
-	/* enable monitor call class 0 */
-	__ctl_set_bit(8, 15);
-
-	return 0;
+static struct notifier_block nohz_idle_nb = {
+	.notifier_call = nohz_idle_notify,
+};
void __init nohz_init(void)
{
if (register_idle_notifier(&nohz_idle_nb))
panic("Couldn't register idle notifier");
}
#endif
/*
- * Start the clock comparator and the virtual CPU timer
- * on the current CPU.
+ * Start the clock comparator on the current CPU.
*/
void init_cpu_timer(void)
{
unsigned long cr0;
__u64 timer;
-#ifdef CONFIG_VIRT_TIMER
-	struct vtimer_queue *vt_list;
-#endif
timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
@@ -601,24 +369,10 @@ void init_cpu_timer(void)
__ctl_store(cr0, 0, 0);
cr0 |= 0x800;
__ctl_load(cr0, 0, 0);
-#ifdef CONFIG_VIRT_TIMER
-	/* kick the virtual timer */
-	timer = VTIMER_MAX_SLICE;
-	asm volatile ("SPT %0" : : "m" (timer));
-	__ctl_store(cr0, 0, 0);
-	cr0 |= 0x400;
-	__ctl_load(cr0, 0, 0);
-
-	vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
-	INIT_LIST_HEAD(&vt_list->list);
-	spin_lock_init(&vt_list->lock);
-	vt_list->to_expire = 0;
-	vt_list->offset = 0;
-	vt_list->idle = 0;
-#endif
}
+extern void vtime_init(void);
/*
* Initialize the TOD clock and the CPU timer of
* the boot cpu.
@@ -661,247 +415,14 @@ void __init time_init(void)
&ext_int_info_cc) != 0)
panic("Couldn't request external interrupt 0x1004");
-#ifdef CONFIG_VIRT_TIMER
-	/* request the cpu timer external interrupt */
-	if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
-					      &ext_int_info_timer) != 0)
-		panic("Couldn't request external interrupt 0x1005");
-#endif
init_cpu_timer();
}
#ifdef CONFIG_VIRT_TIMER
void init_virt_timer(struct vtimer_list *timer)
{
timer->magic = VTIMER_MAGIC;
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
spin_lock_init(&timer->lock);
}
static inline int check_vtimer(struct vtimer_list *timer)
{
if (timer->magic != VTIMER_MAGIC)
return -EINVAL;
return 0;
}
static inline int vtimer_pending(struct vtimer_list *timer)
{
return (!list_empty(&timer->entry));
}
/*
* this function should only run on the specified CPU
*/
static void internal_add_vtimer(struct vtimer_list *timer)
{
unsigned long flags;
__u64 done;
struct vtimer_list *event;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
if (timer->cpu != smp_processor_id())
printk("internal_add_vtimer: BUG, running on wrong CPU");
/* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) {
/* reset the offset, this may happen if the last timer was
* just deleted by mod_virt_timer and the interrupt
* didn't happen until here
*/
vt_list->offset = 0;
goto fire;
}
/* save progress */
asm volatile ("STPT %0" : "=m" (done));
/* calculate completed work */
done = vt_list->to_expire - done + vt_list->offset;
vt_list->offset = 0;
list_for_each_entry(event, &vt_list->list, entry)
event->expires -= done;
fire:
list_add_sorted(timer, &vt_list->list);
/* get first element, which is the next vtimer slice */
event = list_entry(vt_list->list.next, struct vtimer_list, entry);
set_vtimer(event->expires);
spin_unlock_irqrestore(&vt_list->lock, flags);
/* release CPU aquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
static inline int prepare_vtimer(struct vtimer_list *timer)
{
if (check_vtimer(timer) || !timer->function) {
printk("add_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
printk("add_virt_timer: invalid timer expire value!\n");
return -EINVAL;
}
if (vtimer_pending(timer)) {
printk("add_virt_timer: timer pending\n");
return -EBUSY;
}
timer->cpu = get_cpu();
return 0;
}
/*
* add_virt_timer - add an oneshot virtual CPU timer
*/
void add_virt_timer(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = 0;
internal_add_vtimer(timer);
}
/*
* add_virt_timer_int - add an interval virtual CPU timer
*/
void add_virt_timer_periodic(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = timer->expires;
internal_add_vtimer(timer);
}
/*
* If we change a pending timer the function must be called on the CPU
* where the timer is running on, e.g. by smp_call_function_on()
*
* The original mod_timer adds the timer if it is not pending. For compatibility
* we do the same. The timer will be added on the current CPU as a oneshot timer.
*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
struct vtimer_queue *vt_list;
unsigned long flags;
int cpu;
if (check_vtimer(timer) || !timer->function) {
printk("mod_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!expires || expires > VTIMER_MAX_SLICE) {
printk("mod_virt_timer: invalid expire range\n");
return -EINVAL;
}
/*
* This is a common optimization triggered by the
* networking code - if the timer is re-modified
* to be the same thing then just return:
*/
if (timer->expires == expires && vtimer_pending(timer))
return 1;
cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags);
/* if timer isn't pending add it on the current CPU */
if (!vtimer_pending(timer)) {
spin_unlock_irqrestore(&vt_list->lock, flags);
/* we do not activate an interval timer with mod_virt_timer */
timer->interval = 0;
timer->expires = expires;
timer->cpu = cpu;
internal_add_vtimer(timer);
return 0;
}
/* check if we run on the right CPU */
if (timer->cpu != cpu) {
printk("mod_virt_timer: running on wrong CPU, check your code\n");
spin_unlock_irqrestore(&vt_list->lock, flags);
put_cpu();
return -EINVAL;
}
list_del_init(&timer->entry);
timer->expires = expires;
/* also change the interval if we have an interval timer */
if (timer->interval)
timer->interval = expires;
/* the timer can't expire anymore so we can release the lock */
spin_unlock_irqrestore(&vt_list->lock, flags);
internal_add_vtimer(timer);
return 1;
}
/*
* delete a virtual timer
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
struct vtimer_queue *vt_list;
if (check_vtimer(timer)) {
printk("del_virt_timer: timer not initialized\n");
return -EINVAL;
}
/* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
if (!cpu_online(timer->cpu)) {
printk("del_virt_timer: CPU not present!\n");
return -1;
}
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
/* we don't interrupt a running timer, just let it expire! */
list_del_init(&timer->entry);
/* last timer removed */
if (list_empty(&vt_list->list)) {
vt_list->to_expire = 0;
vt_list->offset = 0;
}
+#ifdef CONFIG_NO_IDLE_HZ
+	nohz_init();
+#endif
spin_unlock_irqrestore(&vt_list->lock, flags);
return 1;
}
+#ifdef CONFIG_VIRT_TIMER
+	vtime_init();
+#endif
}
@@ -64,9 +64,7 @@ extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
-#if defined(CONFIG_NO_IDLE_HZ) || defined(CONFIG_VIRT_TIMER)
 extern pgm_check_handler_t do_monitor_call;
-#endif
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -620,9 +618,8 @@ void __init trap_init(void)
#endif /* CONFIG_ARCH_S390X */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &privileged_op;
-#if defined(CONFIG_VIRT_TIMER) || defined(CONFIG_NO_IDLE_HZ)
	pgm_check_table[0x40] = &do_monitor_call;
-#endif
if (MACHINE_IS_VM) {
/*
* First try to get pfault pseudo page faults going.
......
/*
* arch/s390/kernel/vtime.c
* Virtual cpu timer based timer functions.
*
* S390 version
* Copyright (C) 2004 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Jan Glauber <jan.glauber@de.ibm.com>
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
#include <linux/timex.h>
#include <linux/notifier.h>
#include <asm/s390_ext.h>
#include <asm/timer.h>
#define VTIMER_MAGIC (0x4b87ad6e + 1)
static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
set_vtimer(vt_list->idle);
}
void stop_cpu_timer(void)
{
__u64 done;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
/* nothing to do */
if (list_empty(&vt_list->list)) {
vt_list->idle = VTIMER_MAX_SLICE;
goto fire;
}
/* store progress */
asm volatile ("STPT %0" : "=m" (done));
/*
* If done is negative we do not stop the CPU timer
 * because we would immediately get an interrupt that
 * would start the CPU timer again.
*/
if (done & 1LL<<63)
return;
else
vt_list->offset += vt_list->to_expire - done;
/* save the actual expire value */
vt_list->idle = done;
/*
 * We cannot halt the CPU timer, so we just write a value that
 * nearly never expires (only after about 71 years) and rewrite
 * the stored expire value when the timer is continued.
*/
fire:
set_vtimer(VTIMER_MAX_SLICE);
}
void set_vtimer(__u64 expires)
{
asm volatile ("SPT %0" : : "m" (expires));
/* store expire time for this CPU timer */
per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
/*
 * Sorted add to a list. The list is searched linearly until the
 * first element with a bigger expiry value is found.
*/
void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
struct vtimer_list *event;
list_for_each_entry(event, head, entry) {
if (event->expires > timer->expires) {
list_add_tail(&timer->entry, &event->entry);
return;
}
}
list_add_tail(&timer->entry, head);
}
/*
* Do the callback functions of expired vtimer events.
* Called from within the interrupt handler.
*/
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
void (*fn)(unsigned long, struct pt_regs*);
unsigned long data;
if (list_empty(cb_list))
return;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
data = event->data;
fn(data, regs);
if (!event->interval)
/* delete one shot timer */
list_del_init(&event->entry);
else {
/* move interval timer back to list */
spin_lock(&vt_list->lock);
list_del_init(&event->entry);
list_add_sorted(event, &vt_list->list);
spin_unlock(&vt_list->lock);
}
}
}
/*
* Handler for the virtual CPU timer.
*/
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
int cpu;
__u64 next, delta;
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
struct list_head *ptr;
/* the callback queue */
struct list_head cb_list;
INIT_LIST_HEAD(&cb_list);
cpu = smp_processor_id();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* walk timer list, fire all expired events */
spin_lock(&vt_list->lock);
if (vt_list->to_expire < VTIMER_MAX_SLICE)
vt_list->offset += vt_list->to_expire;
list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
if (event->expires > vt_list->offset)
/* found first unexpired event, leave */
break;
/* re-charge interval timer, we have to add the offset */
if (event->interval)
event->expires = event->interval + vt_list->offset;
/* move expired timer to the callback queue */
list_move_tail(&event->entry, &cb_list);
}
spin_unlock(&vt_list->lock);
do_callbacks(&cb_list, regs);
/* next event is first in list */
spin_lock(&vt_list->lock);
if (!list_empty(&vt_list->list)) {
ptr = vt_list->list.next;
event = list_entry(ptr, struct vtimer_list, entry);
next = event->expires - vt_list->offset;
/* add the expired time from this interrupt handler
* and the callback functions
*/
asm volatile ("STPT %0" : "=m" (delta));
delta = 0xffffffffffffffffLL - delta + 1;
vt_list->offset += delta;
next -= delta;
} else {
vt_list->offset = 0;
next = VTIMER_MAX_SLICE;
}
spin_unlock(&vt_list->lock);
set_vtimer(next);
}
void init_virt_timer(struct vtimer_list *timer)
{
timer->magic = VTIMER_MAGIC;
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
spin_lock_init(&timer->lock);
}
EXPORT_SYMBOL(init_virt_timer);
static inline int check_vtimer(struct vtimer_list *timer)
{
if (timer->magic != VTIMER_MAGIC)
return -EINVAL;
return 0;
}
static inline int vtimer_pending(struct vtimer_list *timer)
{
return (!list_empty(&timer->entry));
}
/*
* this function should only run on the specified CPU
*/
static void internal_add_vtimer(struct vtimer_list *timer)
{
unsigned long flags;
__u64 done;
struct vtimer_list *event;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
if (timer->cpu != smp_processor_id())
printk("internal_add_vtimer: BUG, running on wrong CPU");
/* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) {
/* reset the offset, this may happen if the last timer was
* just deleted by mod_virt_timer and the interrupt
* didn't happen until here
*/
vt_list->offset = 0;
goto fire;
}
/* save progress */
asm volatile ("STPT %0" : "=m" (done));
/* calculate completed work */
done = vt_list->to_expire - done + vt_list->offset;
vt_list->offset = 0;
list_for_each_entry(event, &vt_list->list, entry)
event->expires -= done;
fire:
list_add_sorted(timer, &vt_list->list);
/* get first element, which is the next vtimer slice */
event = list_entry(vt_list->list.next, struct vtimer_list, entry);
set_vtimer(event->expires);
spin_unlock_irqrestore(&vt_list->lock, flags);
	/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
static inline int prepare_vtimer(struct vtimer_list *timer)
{
if (check_vtimer(timer) || !timer->function) {
printk("add_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
printk("add_virt_timer: invalid timer expire value!\n");
return -EINVAL;
}
if (vtimer_pending(timer)) {
printk("add_virt_timer: timer pending\n");
return -EBUSY;
}
timer->cpu = get_cpu();
return 0;
}
/*
 * add_virt_timer - add a one-shot virtual CPU timer
*/
void add_virt_timer(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = 0;
internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer);
/*
 * add_virt_timer_periodic - add an interval virtual CPU timer
*/
void add_virt_timer_periodic(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = timer->expires;
internal_add_vtimer(timer);
}
EXPORT_SYMBOL(add_virt_timer_periodic);
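/*
 * Usage sketch (illustrative, mirrors appldata_online_cpu in this
 * patch); my_timer_fn and my_data are hypothetical placeholders:
 *
 *	static struct vtimer_list timer;
 *
 *	init_virt_timer(&timer);
 *	timer.function = my_timer_fn;
 *	timer.data = (unsigned long) my_data;
 *	timer.expires = interval;
 *	add_virt_timer_periodic(&timer);
 */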
/*
 * If we change a pending timer, the function must be called on the CPU
 * where the timer runs, e.g. by smp_call_function_on().
 *
 * The original mod_timer adds the timer if it is not pending. For compatibility
 * we do the same. The timer will be added on the current CPU as a one-shot timer.
*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
struct vtimer_queue *vt_list;
unsigned long flags;
int cpu;
if (check_vtimer(timer) || !timer->function) {
printk("mod_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!expires || expires > VTIMER_MAX_SLICE) {
printk("mod_virt_timer: invalid expire range\n");
return -EINVAL;
}
/*
* This is a common optimization triggered by the
* networking code - if the timer is re-modified
* to be the same thing then just return:
*/
if (timer->expires == expires && vtimer_pending(timer))
return 1;
cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags);
/* if timer isn't pending add it on the current CPU */
if (!vtimer_pending(timer)) {
spin_unlock_irqrestore(&vt_list->lock, flags);
/* we do not activate an interval timer with mod_virt_timer */
timer->interval = 0;
timer->expires = expires;
timer->cpu = cpu;
internal_add_vtimer(timer);
return 0;
}
/* check if we run on the right CPU */
if (timer->cpu != cpu) {
printk("mod_virt_timer: running on wrong CPU, check your code\n");
spin_unlock_irqrestore(&vt_list->lock, flags);
put_cpu();
return -EINVAL;
}
list_del_init(&timer->entry);
timer->expires = expires;
/* also change the interval if we have an interval timer */
if (timer->interval)
timer->interval = expires;
/* the timer can't expire anymore so we can release the lock */
spin_unlock_irqrestore(&vt_list->lock, flags);
internal_add_vtimer(timer);
return 1;
}
EXPORT_SYMBOL(mod_virt_timer);
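/*
 * Cross-cpu sketch (illustrative): since a pending timer must be
 * modified on the cpu it belongs to, callers wrap mod_virt_timer()
 * for smp_call_function_on(), cf. __appldata_mod_vtimer_wrap in
 * appldata_base.c earlier in this patch:
 *
 *	smp_call_function_on(__appldata_mod_vtimer_wrap, &args, 0, 1, i);
 */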
/*
* delete a virtual timer
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
struct vtimer_queue *vt_list;
if (check_vtimer(timer)) {
printk("del_virt_timer: timer not initialized\n");
return -EINVAL;
}
/* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
/* we don't interrupt a running timer, just let it expire! */
list_del_init(&timer->entry);
/* last timer removed */
if (list_empty(&vt_list->list)) {
vt_list->to_expire = 0;
vt_list->offset = 0;
}
spin_unlock_irqrestore(&vt_list->lock, flags);
return 1;
}
EXPORT_SYMBOL(del_virt_timer);
/*
* Start the virtual CPU timer on the current CPU.
*/
void init_cpu_vtimer(void)
{
struct vtimer_queue *vt_list;
unsigned long cr0;
__u64 timer;
/* kick the virtual timer */
timer = VTIMER_MAX_SLICE;
asm volatile ("SPT %0" : : "m" (timer));
__ctl_store(cr0, 0, 0);
cr0 |= 0x400;
__ctl_load(cr0, 0, 0);
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
INIT_LIST_HEAD(&vt_list->list);
spin_lock_init(&vt_list->lock);
vt_list->to_expire = 0;
vt_list->offset = 0;
vt_list->idle = 0;
}
static int vtimer_idle_notify(struct notifier_block *self,
unsigned long action, void *hcpu)
{
switch (action) {
case CPU_IDLE:
stop_cpu_timer();
break;
case CPU_NOT_IDLE:
start_cpu_timer();
break;
}
return NOTIFY_OK;
}
static struct notifier_block vtimer_idle_nb = {
.notifier_call = vtimer_idle_notify,
};
void __init vtime_init(void)
{
/* request the cpu timer external interrupt */
if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
&ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005");
if (register_idle_notifier(&vtimer_idle_nb))
panic("Couldn't register idle notifier");
init_cpu_vtimer();
}
@@ -407,6 +407,14 @@ struct ctl_table_header *cmm_sysctl_header;
static int
cmm_init (void)
{
+	int rc;
+
+	/* Prevent logical cpu 0 from being set offline. */
+	rc = smp_get_cpu(cpumask_of_cpu(0));
+	if (rc) {
+		printk(KERN_ERR "CMM: unable to reserve cpu 0\n");
+		return rc;
+	}
#ifdef CONFIG_CMM_PROC
cmm_sysctl_header = register_sysctl_table(cmm_dir_table, 1);
#endif
@@ -430,6 +438,8 @@ cmm_exit(void)
#ifdef CONFIG_CMM_IUCV
smsg_unregister_callback(SMSG_PREFIX, cmm_smsg_target);
#endif
+	/* Allow logical cpu 0 to be set offline again. */
+	smp_put_cpu(0);
}
module_init(cmm_init);
......
@@ -2711,6 +2711,10 @@ int take_over_console(const struct consw *csw, int first, int last, int deflt)
save_screen(i);
old_was_color = vc_cons[i].d->vc_can_do_color;
vc_cons[i].d->vc_sw->con_deinit(vc_cons[i].d);
+		origin = (unsigned long) screenbuf;
+		visible_origin = origin;
+		scr_end = origin + screenbuf_size;
+		pos = origin + video_size_row*y + 2*x;
visual_init(i, 0);
update_attr(i);
......
@@ -54,7 +54,7 @@ static void ap_hdlc_like_ctrl_char_list (u32 ctrl_char) {
}
-void init_CRC() {
+void init_CRC(void) {
ap_hdlc_like_ctrl_char_list(0xffffffff);
}
......
@@ -112,11 +112,11 @@ config VIDEO_CPIA_USB
It is also available as a module (cpia_usb).
config VIDEO_SAA5246A
tristate "SAA5246A Teletext processor"
tristate "SAA5246A, SAA5281 Teletext processor"
depends on VIDEO_DEV && I2C
help
-	  Support for I2C bus based teletext using the SAA5246A chip. Useful
-	  only if you live in Europe.
+	  Support for I2C bus based teletext using the SAA5246A or SAA5281
+	  chip. Useful only if you live in Europe.
To compile this driver as a module, choose M here: the
module will be called saa5246a.
......
/*
- * Driver for the SAA5246A videotext decoder chip from Philips.
+ * Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
+ * Philips.
 *
- * Only capturing of videotext pages is tested. The SAA5246A chip also has
- * a TV output but my hardware doesn't use it. For this reason this driver
- * does not support changing any TV display settings.
+ * Only capturing of Teletext pages is tested. The videotext chips also have a
+ * TV output but my hardware doesn't use it. For this reason this driver does
+ * not support changing any TV display settings.
*
* Copyright (C) 2004 Michael Geng <linux@MichaelGeng.de>
*
@@ -47,6 +48,10 @@
#include <linux/videodev.h>
#include "saa5246a.h"
+MODULE_AUTHOR("Michael Geng <linux@MichaelGeng.de>");
+MODULE_DESCRIPTION("Philips SAA5246A, SAA5281 Teletext decoder driver");
+MODULE_LICENSE("GPL");
struct saa5246a_device
{
u8 pgbuf[NUM_DAUS][VTX_VIRTUALSIZE];
@@ -764,8 +769,8 @@ static int saa5246a_release(struct inode *inode, struct file *file)
static int __init init_saa_5246a (void)
{
printk(KERN_INFO "SAA5246A driver (" IF_NAME
" interface) for VideoText version %d.%d\n",
printk(KERN_INFO
"SAA5246A (or compatible) Teletext decoder driver version %d.%d\n",
MAJOR_VERSION, MINOR_VERSION);
return i2c_add_driver(&i2c_driver_videotext);
}
@@ -796,5 +801,3 @@ static struct video_device saa_template =
.release = video_device_release,
.minor = -1,
};
MODULE_LICENSE("GPL");
/*
-    Driver for the SAA5246A videotext decoder chip from Philips.
+    Driver for the SAA5246A or SAA5281 Teletext (=Videotext) decoder chips from
+    Philips.
Copyright (C) 2004 Michael Geng (linux@MichaelGeng.de)
This program is free software; you can redistribute it and/or modify
@@ -21,7 +23,7 @@
#define __SAA5246A_H__
#define MAJOR_VERSION 1 /* driver major version number */
-#define MINOR_VERSION 6 /* driver minor version number */
+#define MINOR_VERSION 7 /* driver minor version number */
#define IF_NAME "SAA5246A"
......
@@ -7,7 +7,7 @@
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001
*
- * $Revision: 1.146 $
+ * $Revision: 1.147 $
*/
#include <linux/config.h>
@@ -739,8 +739,16 @@ dasd_start_IO(struct dasd_ccw_req * cqr)
if (rc)
return rc;
device = (struct dasd_device *) cqr->device;
+	if (cqr->retries < 0) {
+		DEV_MESSAGE(KERN_DEBUG, device,
+			    "start_IO: request %p (%02x/%i) - no retry left.",
+			    cqr, cqr->status, cqr->retries);
+		cqr->status = DASD_CQR_FAILED;
+		return -EIO;
+	}
cqr->startclk = get_clock();
cqr->starttime = jiffies;
+	cqr->retries--;
rc = ccw_device_start(device->cdev, cqr->cpaddr, (long) cqr,
cqr->lpm, 0);
switch (rc) {
@@ -1067,7 +1075,6 @@ __dasd_process_ccw_queue(struct dasd_device * device,
break;
/* Process requests with DASD_CQR_ERROR */
if (cqr->status == DASD_CQR_ERROR) {
-			cqr->retries--;
if (cqr->irb.scsw.fctl & SCSW_FCTL_HALT_FUNC) {
cqr->status = DASD_CQR_FAILED;
cqr->stopclk = get_clock();
......
@@ -494,11 +494,12 @@ static struct sclp_register sclp_state_change_event = {
static void
do_load_quiesce_psw(void * __unused)
{
+	static atomic_t cpuid = ATOMIC_INIT(-1);
	psw_t quiesce_psw;
-	unsigned long status;
+	__u32 status;
	int i;

-	if (smp_processor_id() != 0)
+	if (atomic_compare_and_swap(-1, smp_processor_id(), &cpuid))
signal_processor(smp_processor_id(), sigp_stop);
/* Wait for all other cpus to enter stopped state */
i = 1;
@@ -511,7 +512,7 @@ do_load_quiesce_psw(void * __unused)
case sigp_order_code_accepted:
case sigp_status_stored:
/* Check for stopped and check stop state */
-			if (test_bit(6, &status) || test_bit(4, &status))
+			if (status & 0x50)
i++;
break;
case sigp_busy:
......
@@ -165,8 +165,6 @@ ccw_device_handle_oper(struct ccw_device *cdev)
return;
}
cdev->private->flags.donotify = 1;
-	/* Get device online again. */
-	ccw_device_online(cdev);
}
/*
@@ -233,15 +231,23 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
cdev->private->devno, sch->irq);
break;
case DEV_STATE_OFFLINE:
-		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
+		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
+			ccw_device_handle_oper(cdev);
			notify = 1;
-		else  /* fill out sense information */
-			cdev->id = (struct ccw_device_id) {
-				.cu_type   = cdev->private->senseid.cu_type,
-				.cu_model  = cdev->private->senseid.cu_model,
-				.dev_type  = cdev->private->senseid.dev_type,
-				.dev_model = cdev->private->senseid.dev_model,
-			};
+		}
+		/* fill out sense information */
+		cdev->id = (struct ccw_device_id) {
+			.cu_type   = cdev->private->senseid.cu_type,
+			.cu_model  = cdev->private->senseid.cu_model,
+			.dev_type  = cdev->private->senseid.dev_type,
+			.dev_model = cdev->private->senseid.dev_model,
+		};
+		if (notify) {
+			/* Get device online again. */
+			ccw_device_online(cdev);
+			wake_up(&cdev->private->wait_q);
+			return;
+		}
/* Issue device info message. */
CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
"CU Type/Mod = %04X/%02X, Dev Type/Mod = "
@@ -256,10 +262,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
break;
}
cdev->private->state = state;
-	if (notify && state == DEV_STATE_OFFLINE)
-		ccw_device_handle_oper(cdev);
-	else
-		io_subchannel_recog_done(cdev);
+	io_subchannel_recog_done(cdev);
if (state != DEV_STATE_NOT_OPER)
wake_up(&cdev->private->wait_q);
}
......
......@@ -2,7 +2,7 @@
# S/390 network devices
#
ctc-objs := ctcmain.o ctctty.o
ctc-objs := ctcmain.o ctctty.o ctcdbug.o
obj-$(CONFIG_IUCV) += iucv.o
obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
......
/*
*
* linux/drivers/s390/net/ctcdbug.c ($Revision: 1.1 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
* $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "ctcdbug.h"
/**
* Debug Facility Stuff
*/
debug_info_t *dbf_setup = NULL;
debug_info_t *dbf_data = NULL;
debug_info_t *dbf_trace = NULL;
DEFINE_PER_CPU(char[256], dbf_txt_buf);
void
unregister_dbf_views(void)
{
if (dbf_setup)
debug_unregister(dbf_setup);
if (dbf_data)
debug_unregister(dbf_data);
if (dbf_trace)
debug_unregister(dbf_trace);
}
int
register_dbf_views(void)
{
dbf_setup = debug_register(CTC_DBF_SETUP_NAME,
CTC_DBF_SETUP_INDEX,
CTC_DBF_SETUP_NR_AREAS,
CTC_DBF_SETUP_LEN);
dbf_data = debug_register(CTC_DBF_DATA_NAME,
CTC_DBF_DATA_INDEX,
CTC_DBF_DATA_NR_AREAS,
CTC_DBF_DATA_LEN);
dbf_trace = debug_register(CTC_DBF_TRACE_NAME,
CTC_DBF_TRACE_INDEX,
CTC_DBF_TRACE_NR_AREAS,
CTC_DBF_TRACE_LEN);
if ((dbf_setup == NULL) || (dbf_data == NULL) ||
(dbf_trace == NULL)) {
unregister_dbf_views();
return -ENOMEM;
}
debug_register_view(dbf_setup, &debug_hex_ascii_view);
debug_set_level(dbf_setup, CTC_DBF_SETUP_LEVEL);
debug_register_view(dbf_data, &debug_hex_ascii_view);
debug_set_level(dbf_data, CTC_DBF_DATA_LEVEL);
debug_register_view(dbf_trace, &debug_hex_ascii_view);
debug_set_level(dbf_trace, CTC_DBF_TRACE_LEVEL);
return 0;
}
/*
*
* linux/drivers/s390/net/ctcdbug.h ($Revision: 1.1 $)
*
* Linux on zSeries OSA Express and HiperSockets support
*
* Copyright 2000,2003 IBM Corporation
*
* Author(s): Original Code written by
* Peter Tiedemann (ptiedem@de.ibm.com)
*
* $Revision: 1.1 $ $Date: 2004/07/02 16:31:22 $
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <asm/debug.h>
/**
* Debug Facility stuff
*/
#define CTC_DBF_SETUP_NAME "ctc_setup"
#define CTC_DBF_SETUP_LEN 16
#define CTC_DBF_SETUP_INDEX 3
#define CTC_DBF_SETUP_NR_AREAS 1
#define CTC_DBF_SETUP_LEVEL 3
#define CTC_DBF_DATA_NAME "ctc_data"
#define CTC_DBF_DATA_LEN 128
#define CTC_DBF_DATA_INDEX 3
#define CTC_DBF_DATA_NR_AREAS 1
#define CTC_DBF_DATA_LEVEL 2
#define CTC_DBF_TRACE_NAME "ctc_trace"
#define CTC_DBF_TRACE_LEN 16
#define CTC_DBF_TRACE_INDEX 2
#define CTC_DBF_TRACE_NR_AREAS 2
#define CTC_DBF_TRACE_LEVEL 3
#define DBF_TEXT(name,level,text) \
do { \
debug_text_event(dbf_##name,level,text); \
} while (0)
#define DBF_HEX(name,level,addr,len) \
do { \
debug_event(dbf_##name,level,(void*)(addr),len); \
} while (0)
extern DEFINE_PER_CPU(char[256], dbf_txt_buf);
extern debug_info_t *dbf_setup;
extern debug_info_t *dbf_data;
extern debug_info_t *dbf_trace;
#define DBF_TEXT_(name,level,text...) \
do { \
char* dbf_txt_buf = get_cpu_var(dbf_txt_buf); \
sprintf(dbf_txt_buf, text); \
debug_text_event(dbf_##name,level,dbf_txt_buf); \
put_cpu_var(dbf_txt_buf); \
} while (0)
/* Note: always logs to the trace area, regardless of the name argument. */
#define DBF_SPRINTF(name,level,text...) \
do { \
	debug_sprintf_event(dbf_trace, level, text); \
} while (0)
int register_dbf_views(void);
void unregister_dbf_views(void);
/**
* some more debug stuff
*/
#define HEXDUMP16(importance,header,ptr) \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)),*(((char*)ptr)+1),*(((char*)ptr)+2), \
*(((char*)ptr)+3),*(((char*)ptr)+4),*(((char*)ptr)+5), \
*(((char*)ptr)+6),*(((char*)ptr)+7),*(((char*)ptr)+8), \
*(((char*)ptr)+9),*(((char*)ptr)+10),*(((char*)ptr)+11), \
*(((char*)ptr)+12),*(((char*)ptr)+13), \
*(((char*)ptr)+14),*(((char*)ptr)+15)); \
PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \
"%02x %02x %02x %02x %02x %02x %02x %02x\n", \
*(((char*)ptr)+16),*(((char*)ptr)+17), \
*(((char*)ptr)+18),*(((char*)ptr)+19), \
*(((char*)ptr)+20),*(((char*)ptr)+21), \
*(((char*)ptr)+22),*(((char*)ptr)+23), \
*(((char*)ptr)+24),*(((char*)ptr)+25), \
*(((char*)ptr)+26),*(((char*)ptr)+27), \
*(((char*)ptr)+28),*(((char*)ptr)+29), \
*(((char*)ptr)+30),*(((char*)ptr)+31));
static inline void
hex_dump(unsigned char *buf, size_t len)
{
size_t i;
for (i = 0; i < len; i++) {
if (i && !(i % 16))
printk("\n");
printk("%02x ", *(buf + i));
}
printk("\n");
}
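For orientation, a hypothetical caller of the debug facility declared above might look like this (example_open() and the argument values are invented for illustration; the macros and views are the ones defined in ctcdbug.h):
/* Sketch only: assumes ctcdbug.h is included and register_dbf_views()
 * has already succeeded in the module init path. */
static int example_open(void)
{
	DBF_TEXT(trace, 2, __FUNCTION__);   /* fixed string into ctc_trace */
	DBF_TEXT_(setup, 2, "rc=%d", 0);    /* formatted via the per-cpu buffer */
	DBF_HEX(data, 3, "abc", 3);         /* raw bytes into ctc_data */
	return 0;
}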
/*
* $Id: ctcmain.c,v 1.60 2004/06/18 15:13:51 ptiedem Exp $
* $Id: ctcmain.c,v 1.61 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver
*
......@@ -36,7 +36,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.60 $
* RELEASE-TAG: CTC/ESCON network driver $Revision: 1.61 $
*
*/
......@@ -73,6 +73,7 @@
#include "ctctty.h"
#include "fsm.h"
#include "cu3088.h"
#include "ctcdbug.h"
MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
MODULE_DESCRIPTION("Linux for S/390 CTC/Escon Driver");
......@@ -319,7 +320,7 @@ static void
print_banner(void)
{
static int printed = 0;
char vbuf[] = "$Revision: 1.60 $";
char vbuf[] = "$Revision: 1.61 $";
char *version = vbuf;
if (printed)
......@@ -616,8 +617,9 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
struct net_device *dev = ch->netdev;
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
__u16 len = *((__u16 *) pskb->data);
DBF_TEXT(trace, 2, __FUNCTION__);
skb_put(pskb, 2 + LL_HEADER_LENGTH);
skb_pull(pskb, 2);
pskb->dev = dev;
......@@ -759,6 +761,7 @@ ctc_unpack_skb(struct channel *ch, struct sk_buff *pskb)
static void inline
ccw_check_return_code(struct channel *ch, int return_code, char *msg)
{
DBF_TEXT(trace, 2, __FUNCTION__);
switch (return_code) {
case 0:
fsm_event(ch->fsm, CH_EVENT_IO_SUCCESS, ch);
......@@ -793,6 +796,7 @@ ccw_check_return_code(struct channel *ch, int return_code, char *msg)
static void inline
ccw_unit_check(struct channel *ch, unsigned char sense)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (sense & SNS0_INTERVENTION_REQ) {
if (sense & 0x01) {
if (ch->protocol != CTC_PROTO_LINUX_TTY)
......@@ -838,6 +842,8 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
{
struct sk_buff *skb;
DBF_TEXT(trace, 2, __FUNCTION__);
while ((skb = skb_dequeue(q))) {
atomic_dec(&skb->users);
dev_kfree_skb_irq(skb);
......@@ -847,6 +853,7 @@ ctc_purge_skb_queue(struct sk_buff_head *q)
static __inline__ int
ctc_checkalloc_buffer(struct channel *ch, int warn)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if ((ch->trans_skb == NULL) ||
(ch->flags & CHANNEL_FLAGS_BUFSIZE_CHANGED)) {
if (ch->trans_skb != NULL)
......@@ -913,9 +920,12 @@ ch_action_txdone(fsm_instance * fi, int event, void *arg)
struct sk_buff *skb;
int first = 1;
int i;
unsigned long duration;
struct timespec done_stamp = xtime;
unsigned long duration =
DBF_TEXT(trace, 2, __FUNCTION__);
duration =
(done_stamp.tv_sec - ch->prof.send_stamp.tv_sec) * 1000000 +
(done_stamp.tv_nsec - ch->prof.send_stamp.tv_nsec) / 1000;
if (duration > ch->prof.tx_time)
......@@ -996,6 +1006,7 @@ ch_action_txidle(fsm_instance * fi, int event, void *arg)
{
struct channel *ch = (struct channel *) arg;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_TXIDLE);
fsm_event(((struct ctc_priv *) ch->netdev->priv)->fsm, DEV_EVENT_TXUP,
......@@ -1022,6 +1033,7 @@ ch_action_rx(fsm_instance * fi, int event, void *arg)
int check_len;
int rc;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (len < 8) {
ctc_pr_debug("%s: got packet with length %d < 8\n",
......@@ -1092,6 +1104,8 @@ ch_action_firstio(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
int rc;
DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(fi) == CH_STATE_TXIDLE)
ctc_pr_debug("%s: remote side issued READ?, init ...\n", ch->id);
fsm_deltimer(&ch->timer);
......@@ -1166,6 +1180,7 @@ ch_action_rxidle(fsm_instance * fi, int event, void *arg)
__u16 buflen;
int rc;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
buflen = *((__u16 *) ch->trans_skb->data);
#ifdef DEBUG
......@@ -1205,6 +1220,7 @@ ch_action_setmode(fsm_instance * fi, int event, void *arg)
int rc;
unsigned long saveflags;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
fsm_newstate(fi, CH_STATE_SETUPWAIT);
......@@ -1236,6 +1252,7 @@ ch_action_start(fsm_instance * fi, int event, void *arg)
int rc;
struct net_device *dev;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ch == NULL) {
ctc_pr_warn("ch_action_start ch=NULL\n");
return;
......@@ -1315,6 +1332,7 @@ ch_action_haltio(fsm_instance * fi, int event, void *arg)
int rc;
int oldstate;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_addtimer(&ch->timer, CTC_TIMEOUT_5SEC, CH_EVENT_TIMER, ch);
if (event == CH_EVENT_STOP)
......@@ -1347,6 +1365,7 @@ ch_action_stopped(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_STOPPED);
if (ch->trans_skb != NULL) {
......@@ -1398,6 +1417,7 @@ ch_action_fail(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
fsm_newstate(fi, CH_STATE_NOTOP);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
......@@ -1428,6 +1448,7 @@ ch_action_setuperr(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(setup, 2, __FUNCTION__);
/**
* Special case: Got UC_RCRESET on setmode.
* This means that remote side isn't setup. In this case
......@@ -1480,6 +1501,7 @@ ch_action_restart(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: %s channel restart\n", dev->name,
(CHANNEL_DIRECTION(ch->flags) == READ) ? "RX" : "TX");
......@@ -1514,6 +1536,7 @@ ch_action_rxiniterr(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(setup, 2, __FUNCTION__);
if (event == CH_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Timeout during RX init handshake\n", dev->name);
......@@ -1542,6 +1565,7 @@ ch_action_rxinitfail(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(setup, 2, __FUNCTION__);
fsm_newstate(fi, CH_STATE_RXERR);
ctc_pr_warn("%s: RX initialization failed\n", dev->name);
ctc_pr_warn("%s: RX <-> RX connection detected\n", dev->name);
......@@ -1562,6 +1586,7 @@ ch_action_rxdisc(fsm_instance * fi, int event, void *arg)
struct channel *ch2;
struct net_device *dev = ch->netdev;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Got remote disconnect, re-initializing ...\n",
dev->name);
......@@ -1593,6 +1618,7 @@ ch_action_txiniterr(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(setup, 2, __FUNCTION__);
if (event == CH_EVENT_TIMER) {
fsm_deltimer(&ch->timer);
ctc_pr_debug("%s: Timeout during TX init handshake\n", dev->name);
......@@ -1621,6 +1647,7 @@ ch_action_txretry(fsm_instance * fi, int event, void *arg)
struct net_device *dev = ch->netdev;
unsigned long saveflags;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (ch->retry++ > 3) {
ctc_pr_debug("%s: TX retry failed, restarting channel\n",
......@@ -1678,6 +1705,7 @@ ch_action_iofatal(fsm_instance * fi, int event, void *arg)
struct channel *ch = (struct channel *) arg;
struct net_device *dev = ch->netdev;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_deltimer(&ch->timer);
if (CHANNEL_DIRECTION(ch->flags) == READ) {
ctc_pr_debug("%s: RX I/O error\n", dev->name);
......@@ -1699,6 +1727,7 @@ ch_action_reinit(fsm_instance *fi, int event, void *arg)
struct net_device *dev = ch->netdev;
struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
ch_action_iofatal(fi, event, arg);
fsm_addtimer(&privptr->restart_timer, 1000, DEV_EVENT_RESTART, dev);
}
......@@ -1849,6 +1878,7 @@ add_channel(struct ccw_device *cdev, enum channel_types type)
struct channel **c = &channels;
struct channel *ch;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((ch =
(struct channel *) kmalloc(sizeof (struct channel),
GFP_KERNEL)) == NULL) {
......@@ -1954,6 +1984,7 @@ channel_remove(struct channel *ch)
{
struct channel **c = &channels;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ch == NULL)
return;
......@@ -1990,6 +2021,7 @@ channel_get(enum channel_types type, char *id, int direction)
{
struct channel *ch = channels;
DBF_TEXT(trace, 2, __FUNCTION__);
#ifdef DEBUG
ctc_pr_debug("ctc: %s(): searching for ch with id %s and type %d\n",
__func__, id, type);
......@@ -2085,6 +2117,7 @@ ctc_irq_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
struct net_device *dev;
struct ctc_priv *priv;
DBF_TEXT(trace, 2, __FUNCTION__);
if (__ctc_check_irb_error(cdev, irb))
return;
......@@ -2178,6 +2211,7 @@ dev_action_start(fsm_instance * fi, int event, void *arg)
struct ctc_priv *privptr = dev->priv;
int direction;
DBF_TEXT(setup, 2, __FUNCTION__);
fsm_deltimer(&privptr->restart_timer);
fsm_newstate(fi, DEV_STATE_STARTWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
......@@ -2200,6 +2234,7 @@ dev_action_stop(fsm_instance * fi, int event, void *arg)
struct ctc_priv *privptr = dev->priv;
int direction;
DBF_TEXT(trace, 2, __FUNCTION__);
fsm_newstate(fi, DEV_STATE_STOPWAIT_RXTX);
for (direction = READ; direction <= WRITE; direction++) {
struct channel *ch = privptr->channel[direction];
......@@ -2212,6 +2247,7 @@ dev_action_restart(fsm_instance *fi, int event, void *arg)
struct net_device *dev = (struct net_device *)arg;
struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
ctc_pr_debug("%s: Restarting\n", dev->name);
dev_action_stop(fi, event, arg);
fsm_event(privptr->fsm, DEV_EVENT_STOP, dev);
......@@ -2233,6 +2269,7 @@ dev_action_chup(fsm_instance * fi, int event, void *arg)
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_STARTWAIT_RXTX:
if (event == DEV_EVENT_RXUP)
......@@ -2285,6 +2322,7 @@ dev_action_chdown(fsm_instance * fi, int event, void *arg)
struct net_device *dev = (struct net_device *) arg;
struct ctc_priv *privptr = dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
switch (fsm_getstate(fi)) {
case DEV_STATE_RUNNING:
if (privptr->protocol == CTC_PROTO_LINUX_TTY)
......@@ -2386,6 +2424,7 @@ transmit_skb(struct channel *ch, struct sk_buff *skb)
struct ll_header header;
int rc = 0;
DBF_TEXT(trace, 2, __FUNCTION__);
if (fsm_getstate(ch->fsm) != CH_STATE_TXIDLE) {
int l = skb->len + LL_HEADER_LENGTH;
......@@ -2558,6 +2597,7 @@ ctc_tx(struct sk_buff *skb, struct net_device * dev)
int rc = 0;
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
/**
* Some sanity checks ...
*/
......@@ -2615,6 +2655,7 @@ ctc_change_mtu(struct net_device * dev, int new_mtu)
{
struct ctc_priv *privptr = (struct ctc_priv *) dev->priv;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((new_mtu < 576) || (new_mtu > 65527) ||
(new_mtu > (privptr->channel[READ]->max_bufsize -
LL_HEADER_LENGTH - 2)))
......@@ -2659,6 +2700,7 @@ buffer_write(struct device *dev, const char *buf, size_t count)
struct net_device *ndev;
int bs1;
DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
......@@ -2703,6 +2745,7 @@ loglevel_write(struct device *dev, const char *buf, size_t count)
struct ctc_priv *priv;
int ll1;
DBF_TEXT(trace, 2, __FUNCTION__);
priv = dev->driver_data;
if (!priv)
return -ENODEV;
......@@ -2720,6 +2763,7 @@ ctc_print_statistics(struct ctc_priv *priv)
char *sbuf;
char *p;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!priv)
return;
sbuf = (char *)kmalloc(2048, GFP_KERNEL);
......@@ -2849,6 +2893,7 @@ ctc_init_netdevice(struct net_device * dev, int alloc_device,
if (!privptr)
return NULL;
DBF_TEXT(setup, 2, __FUNCTION__);
if (alloc_device) {
dev = kmalloc(sizeof (struct net_device), GFP_KERNEL);
if (!dev)
......@@ -2900,6 +2945,7 @@ ctc_proto_store(struct device *dev, const char *buf, size_t count)
struct ctc_priv *priv;
int value;
DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = dev->driver_data;
......@@ -2971,6 +3017,7 @@ ctc_probe_device(struct ccwgroup_device *cgdev)
int rc;
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(trace, 2, __FUNCTION__);
if (!get_device(&cgdev->dev))
return -ENODEV;
......@@ -3017,6 +3064,7 @@ ctc_new_device(struct ccwgroup_device *cgdev)
int ret;
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(setup, 2, __FUNCTION__);
privptr = cgdev->dev.driver_data;
if (!privptr)
......@@ -3110,7 +3158,7 @@ ctc_shutdown_device(struct ccwgroup_device *cgdev)
struct ctc_priv *priv;
struct net_device *ndev;
DBF_TEXT(trace, 2, __FUNCTION__);
pr_debug("%s() called\n", __FUNCTION__);
priv = cgdev->dev.driver_data;
......@@ -3161,6 +3209,7 @@ ctc_remove_device(struct ccwgroup_device *cgdev)
struct ctc_priv *priv;
pr_debug("%s() called\n", __FUNCTION__);
DBF_TEXT(trace, 2, __FUNCTION__);
priv = cgdev->dev.driver_data;
if (!priv)
......@@ -3199,6 +3248,7 @@ ctc_exit(void)
{
unregister_cu3088_discipline(&ctc_group_driver);
ctc_tty_cleanup();
unregister_dbf_views();
ctc_pr_info("CTC driver unloaded\n");
}
......@@ -3215,10 +3265,17 @@ ctc_init(void)
print_banner();
ret = register_dbf_views();
if (ret) {
	ctc_pr_crit("ctc_init failed with register_dbf_views rc = %d\n", ret);
	return ret;
}
ctc_tty_init();
ret = register_cu3088_discipline(&ctc_group_driver);
if (ret)
if (ret) {
ctc_tty_cleanup();
unregister_dbf_views();
}
return ret;
}
......
/*
* $Id: ctctty.c,v 1.17 2004/03/31 17:06:34 ptiedem Exp $
* $Id: ctctty.c,v 1.21 2004/07/02 16:31:22 ptiedem Exp $
*
* CTC / ESCON network driver, tty interface.
*
......@@ -30,6 +30,7 @@
#include <asm/uaccess.h>
#include <linux/devfs_fs_kernel.h>
#include "ctctty.h"
#include "ctcdbug.h"
#define CTC_TTY_MAJOR 43
#define CTC_TTY_MAX_DEVICES 64
......@@ -103,6 +104,7 @@ ctc_tty_try_read(ctc_tty_info * info, struct sk_buff *skb)
int len;
struct tty_struct *tty;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
c = TTY_FLIPBUF_SIZE - tty->flip.count;
......@@ -132,6 +134,7 @@ ctc_tty_readmodem(ctc_tty_info *info)
int ret = 1;
struct tty_struct *tty;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((tty = info->tty)) {
if (info->mcr & UART_MCR_RTS) {
int c = TTY_FLIPBUF_SIZE - tty->flip.count;
......@@ -165,6 +168,7 @@ ctc_tty_setcarrier(struct net_device *netdev, int on)
{
int i;
DBF_TEXT(trace, 2, __FUNCTION__);
if ((!driver) || ctc_tty_shuttingdown)
return;
for (i = 0; i < CTC_TTY_MAX_DEVICES; i++)
......@@ -185,6 +189,7 @@ ctc_tty_netif_rx(struct sk_buff *skb)
int i;
ctc_tty_info *info = NULL;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!skb)
return;
if ((!skb->dev) || (!driver) || ctc_tty_shuttingdown) {
......@@ -249,6 +254,7 @@ ctc_tty_tint(ctc_tty_info * info)
int wake = 1;
int rc;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->netdev) {
if (skb)
kfree_skb(skb);
......@@ -341,6 +347,7 @@ ctc_tty_inject(ctc_tty_info *info, char c)
int skb_res;
struct sk_buff *skb;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
skb_res = info->netdev->hard_header_len + sizeof(info->mcr) +
......@@ -361,6 +368,7 @@ ctc_tty_inject(ctc_tty_info *info, char c)
static void
ctc_tty_transmit_status(ctc_tty_info *info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return;
info->flags |= CTC_ASYNC_TX_LINESTAT;
......@@ -374,6 +382,7 @@ ctc_tty_change_speed(ctc_tty_info * info)
unsigned int quot;
int i;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!info->tty || !info->tty->termios)
return;
cflag = info->tty->termios->c_cflag;
......@@ -412,6 +421,7 @@ ctc_tty_change_speed(ctc_tty_info * info)
static int
ctc_tty_startup(ctc_tty_info * info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (info->flags & CTC_ASYNC_INITIALIZED)
return 0;
#ifdef CTC_DEBUG_MODEM_OPEN
......@@ -454,6 +464,7 @@ ctc_tty_stopdev(unsigned long data)
static void
ctc_tty_shutdown(ctc_tty_info * info)
{
DBF_TEXT(trace, 2, __FUNCTION__);
if (!(info->flags & CTC_ASYNC_INITIALIZED))
return;
#ifdef CTC_DEBUG_MODEM_OPEN
......@@ -486,14 +497,17 @@ ctc_tty_write(struct tty_struct *tty, int from_user, const u_char * buf, int cou
int total = 0;
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
DBF_TEXT(trace, 2, __FUNCTION__);
if (ctc_tty_shuttingdown)
return 0;
goto ex;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_write"))
return 0;
goto ex;
if (!tty)
return 0;
if (!info->netdev)
return -ENODEV;
goto ex;
if (!info->netdev) {
total = -ENODEV;
goto ex;
}
if (from_user)
down(&info->write_sem);
while (1) {
......@@ -530,6 +544,8 @@ ctc_tty_write(struct tty_struct *tty, int from_user, const u_char * buf, int cou
}
if (from_user)
up(&info->write_sem);
ex:
DBF_TEXT(trace, 6, __FUNCTION__);
return total;
}
......@@ -559,13 +575,14 @@ ctc_tty_flush_buffer(struct tty_struct *tty)
ctc_tty_info *info;
unsigned long flags;
DBF_TEXT(trace, 2, __FUNCTION__);
if (!tty)
return;
goto ex;
spin_lock_irqsave(&ctc_tty_lock, flags);
info = (ctc_tty_info *) tty->driver_data;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_flush_buffer")) {
spin_unlock_irqrestore(&ctc_tty_lock, flags);
return;
goto ex;
}
skb_queue_purge(&info->tx_queue);
info->lsr |= UART_LSR_TEMT;
......@@ -574,6 +591,9 @@ ctc_tty_flush_buffer(struct tty_struct *tty)
if ((tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
tty->ldisc.write_wakeup)
(tty->ldisc.write_wakeup) (tty);
ex:
DBF_TEXT_(trace, 2, "ex: %s ", __FUNCTION__);
return;
}
static void
......@@ -783,7 +803,6 @@ ctc_tty_set_termios(struct tty_struct *tty, struct termios *old_termios)
{
ctc_tty_info *info = (ctc_tty_info *) tty->driver_data;
unsigned int cflag = tty->termios->c_cflag;
ctc_tty_change_speed(info);
/* Handle transition to B0 */
......@@ -1032,8 +1051,10 @@ ctc_tty_close(struct tty_struct *tty, struct file *filp)
}
}
ctc_tty_shutdown(info);
if (tty->driver->flush_buffer)
tty->driver->flush_buffer(tty);
if (tty->driver->flush_buffer) {
skb_queue_purge(&info->tx_queue);
info->lsr |= UART_LSR_TEMT;
}
if (tty->ldisc.flush_buffer)
tty->ldisc.flush_buffer(tty);
info->tty = 0;
......@@ -1059,7 +1080,6 @@ ctc_tty_hangup(struct tty_struct *tty)
{
ctc_tty_info *info = (ctc_tty_info *)tty->driver_data;
unsigned long saveflags;
if (ctc_tty_paranoia_check(info, tty->name, "ctc_tty_hangup"))
return;
ctc_tty_shutdown(info);
......@@ -1185,6 +1205,21 @@ ctc_tty_register_netdev(struct net_device *dev) {
"with NULL dev or NULL dev-name\n");
return -1;
}
/*
 * If the name is a format string, the caller wants us to
 * do a name allocation: the format string must end with %d
 */
if (strchr(dev->name, '%'))
{
int err = dev_alloc_name(dev, dev->name); /* dev->name is changed by this */
if (err < 0) {
printk(KERN_DEBUG "dev_alloc returned error %d\n", err);
return err;
}
}
for (p = dev->name; p && ((*p < '0') || (*p > '9')); p++);
ttynum = simple_strtoul(p, &err, 0);
if ((ttynum < 0) || (ttynum >= CTC_TTY_MAX_DEVICES) ||
......
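A hedged illustration of the name-template convention described in the comment above (example_register() is invented; dev_alloc_name() is the real helper):
static int example_register(struct net_device *dev)
{
	/* A '%' marks the name as a template; dev_alloc_name() rewrites
	 * dev->name in place to the first free instance, e.g. "ctctty0". */
	strlcpy(dev->name, "ctctty%d", IFNAMSIZ);
	return dev_alloc_name(dev, dev->name);
}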
/*
* $Id: iucv.c,v 1.33 2004/05/24 10:19:18 braunu Exp $
* $Id: iucv.c,v 1.34 2004/06/24 10:53:48 braunu Exp $
*
* IUCV network driver
*
......@@ -29,7 +29,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.33 $
* RELEASE-TAG: IUCV lowlevel driver $Revision: 1.34 $
*
*/
......@@ -177,9 +177,11 @@ static handler **iucv_pathid_table;
static unsigned long max_connections;
/**
* declare_flag: is 0 when iucv_declare_buffer has not been called
* iucv_cpuid: contains the logical cpu number of the cpu which
* has declared the iucv buffer by issuing DECLARE_BUFFER.
* If no cpu has done the initialization iucv_cpuid contains -1.
*/
static int declare_flag;
static int iucv_cpuid = -1;
/**
* register_flag: is 0 when external interrupt has not been registered
*/
......@@ -352,7 +354,7 @@ do { \
static void
iucv_banner(void)
{
char vbuf[] = "$Revision: 1.33 $";
char vbuf[] = "$Revision: 1.34 $";
char *version = vbuf;
if ((version = strchr(version, ':'))) {
......@@ -631,16 +633,16 @@ iucv_remove_pathid(__u16 pathid)
}
/**
* iucv_declare_buffer_cpu0
* Register at VM for subsequent IUCV operations. This is always
* executed on CPU 0. Called from iucv_declare_buffer().
* iucv_declare_buffer_cpuid
* Register at VM for subsequent IUCV operations. This is executed
* on the reserved CPU iucv_cpuid. Called from iucv_declare_buffer().
*/
static void
iucv_declare_buffer_cpu0 (void *result)
iucv_declare_buffer_cpuid (void *result)
{
iparml_db *parm;
if (!(result && (smp_processor_id() == 0)))
if (smp_processor_id() != iucv_cpuid)
return;
parm = (iparml_db *)grab_param();
parm->ipbfadr1 = virt_to_phys(iucv_external_int_buffer);
......@@ -650,16 +652,17 @@ iucv_declare_buffer_cpu0 (void *result)
}
/**
* iucv_retrieve_buffer_cpu0:
* Unregister IUCV usage at VM. This is always executed on CPU 0.
* iucv_retrieve_buffer_cpuid:
* Unregister IUCV usage at VM. This is always executed on the same
* cpu that registered the buffer to VM.
* Called from iucv_retrieve_buffer().
*/
static void
iucv_retrieve_buffer_cpu0 (void *result)
iucv_retrieve_buffer_cpuid (void *cpu)
{
iparml_control *parm;
if (smp_processor_id() != 0)
if (smp_processor_id() != iucv_cpuid)
return;
parm = (iparml_control *)grab_param();
b2f0(RETRIEVE_BUFFER, parm);
......@@ -676,18 +679,22 @@ iucv_retrieve_buffer_cpu0 (void *result)
static int
iucv_declare_buffer (void)
{
ulong b2f0_result = 0x0deadbeef;
unsigned long flags;
ulong b2f0_result;
iucv_debug(1, "entering");
preempt_disable();
if (smp_processor_id() == 0)
iucv_declare_buffer_cpu0(&b2f0_result);
else
smp_call_function(iucv_declare_buffer_cpu0, &b2f0_result, 0, 1);
preempt_enable();
iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
if (b2f0_result == 0x0deadbeef)
b2f0_result = 0xaa;
spin_lock_irqsave (&iucv_lock, flags);
if (iucv_cpuid == -1) {
/* Reserve any cpu for use by iucv. */
iucv_cpuid = smp_get_cpu(CPU_MASK_ALL);
spin_unlock_irqrestore (&iucv_lock, flags);
smp_call_function(iucv_declare_buffer_cpuid,
&b2f0_result, 0, 1);
iucv_debug(1, "Address of EIB = %p", iucv_external_int_buffer);
} else {
spin_unlock_irqrestore (&iucv_lock, flags);
b2f0_result = 0;
}
iucv_debug(1, "exiting");
return b2f0_result;
}
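The pattern used here deserves a sketch: reserve one cpu, broadcast a helper that self-filters on the reserved cpu id, and release the reservation afterwards. This is an assumed-name outline, not patch code; note that smp_call_function() skips the calling cpu, which is why iucv_setmask() below checks get_cpu() against iucv_cpuid first.
static int reserved_cpu = -1;            /* assumed name */
static void run_on_reserved(void *arg)
{
	if (smp_processor_id() != reserved_cpu)
		return;                  /* every other cpu drops out */
	/* ... work that must run on the reserved cpu ... */
}
static void example(void)
{
	reserved_cpu = smp_get_cpu(CPU_MASK_ALL);  /* pin any online cpu */
	/* Caller must not itself be the reserved cpu, or must handle that
	 * case separately, since smp_call_function() skips this cpu. */
	smp_call_function(run_on_reserved, NULL, 0, 1);
	smp_put_cpu(reserved_cpu);                 /* drop the reservation */
	reserved_cpu = -1;
}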
......@@ -702,14 +709,11 @@ static int
iucv_retrieve_buffer (void)
{
iucv_debug(1, "entering");
if (declare_flag) {
preempt_disable();
if (smp_processor_id() == 0)
iucv_retrieve_buffer_cpu0(0);
else
smp_call_function(iucv_retrieve_buffer_cpu0, 0, 0, 1);
declare_flag = 0;
preempt_enable();
if (iucv_cpuid != -1) {
smp_call_function(iucv_retrieve_buffer_cpuid, 0, 0, 1);
/* Release the cpu reserved by iucv_declare_buffer. */
smp_put_cpu(iucv_cpuid);
iucv_cpuid = -1;
}
iucv_debug(1, "exiting");
return 0;
......@@ -862,38 +866,31 @@ iucv_register_program (__u8 pgmname[16],
return NULL;
}
if (declare_flag == 0) {
rc = iucv_declare_buffer();
if (rc) {
char *err = "Unknown";
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
case 0x03:
err = "Directory error";
break;
case 0x0a:
err = "Invalid length";
break;
case 0x13:
err = "Buffer already exists";
break;
case 0x3e:
err = "Buffer overlap";
break;
case 0x5c:
err = "Paging or storage error";
break;
case 0xaa:
err = "Function not called";
break;
}
printk(KERN_WARNING "%s: iucv_declare_buffer "
"returned error 0x%02lx (%s)\n", __FUNCTION__, rc,
err);
return NULL;
rc = iucv_declare_buffer();
if (rc) {
char *err = "Unknown";
iucv_remove_handler(new_handler);
kfree(new_handler);
switch(rc) {
case 0x03:
err = "Directory error";
break;
case 0x0a:
err = "Invalid length";
break;
case 0x13:
err = "Buffer already exists";
break;
case 0x3e:
err = "Buffer overlap";
break;
case 0x5c:
err = "Paging or storage error";
break;
}
declare_flag = 1;
printk(KERN_WARNING "%s: iucv_declare_buffer "
"returned error 0x%02lx (%s)\n", __FUNCTION__, rc, err);
return NULL;
}
if (register_flag == 0) {
/* request the 0x4000 external interrupt */
......@@ -2190,11 +2187,11 @@ iucv_send2way_prmmsg_array (__u16 pathid,
}
void
iucv_setmask_cpu0 (void *result)
iucv_setmask_cpuid (void *result)
{
iparml_set_mask *parm;
if (smp_processor_id() != 0)
if (smp_processor_id() != iucv_cpuid)
return;
iucv_debug(1, "entering");
......@@ -2228,14 +2225,15 @@ iucv_setmask (int SetMaskFlag)
ulong result;
__u8 param;
} u;
int cpu;
u.param = SetMaskFlag;
preempt_disable();
if (smp_processor_id() == 0)
iucv_setmask_cpu0(&u);
cpu = get_cpu();
if (cpu == iucv_cpuid)
iucv_setmask_cpuid(&u);
else
smp_call_function(iucv_setmask_cpu0, &u, 0, 1);
preempt_enable();
smp_call_function(iucv_setmask_cpuid, &u, 0, 1);
put_cpu();
return u.result;
}
......
......@@ -788,7 +788,7 @@ ahd_done_unlock(struct ahd_softc *ahd, unsigned long *flags)
}
static __inline void
ahd_list_lockinit()
ahd_list_lockinit(void)
{
spin_lock_init(&ahd_list_spinlock);
}
......
......@@ -183,6 +183,8 @@ static __inline__ void ypan_down(struct vc_data *vc, int count);
static void fbcon_bmove_rec(struct vc_data *vc, struct display *p, int sy, int sx,
int dy, int dx, int height, int width, u_int y_break);
static void fbcon_set_disp(struct fb_info *info, struct vc_data *vc);
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy);
#ifdef CONFIG_MAC
/*
......@@ -763,7 +765,7 @@ static void fbcon_init(struct vc_data *vc, int init)
if ((cap & FBINFO_HWACCEL_COPYAREA) &&
!(cap & FBINFO_HWACCEL_DISABLED))
p->scrollmode = SCROLL_ACCEL;
p->scrollmode = SCROLL_MOVE;
else /* default to something safe */
p->scrollmode = SCROLL_REDRAW;
......@@ -815,14 +817,14 @@ static void fbcon_init(struct vc_data *vc, int init)
vc->vc_pos += logo_lines * vc->vc_size_row;
}
}
if (CON_IS_VISIBLE(vc) && vt_cons[vc->vc_num]->vc_mode == KD_TEXT) {
accel_clear_margins(vc, info, 0);
update_screen(vc->vc_num);
}
scr_memsetw((unsigned short *) vc->vc_origin,
vc->vc_video_erase_char,
vc->vc_size_row * logo_lines);
if (CON_IS_VISIBLE(vc) && vt_cons[vc->vc_num]->vc_mode == KD_TEXT) {
accel_clear_margins(vc, info, 0);
update_screen(vc->vc_num);
}
if (save) {
q = (unsigned short *) (vc->vc_origin +
vc->vc_size_row *
......@@ -1226,6 +1228,31 @@ static __inline__ void ypan_up(struct vc_data *vc, int count)
scrollback_current = 0;
}
static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int redraw = 0;
p->yscroll += count;
if (p->yscroll > p->vrows - vc->vc_rows) {
p->yscroll -= p->vrows - vc->vc_rows;
redraw = 1;
}
info->var.xoffset = 0;
info->var.yoffset = p->yscroll * vc->vc_font.height;
info->var.vmode &= ~FB_VMODE_YWRAP;
if (redraw)
fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t);
update_var(vc->vc_num, info);
accel_clear_margins(vc, info, 1);
scrollback_max += count;
if (scrollback_max > scrollback_phys_max)
scrollback_max = scrollback_phys_max;
scrollback_current = 0;
}
static __inline__ void ypan_down(struct vc_data *vc, int count)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
......@@ -1248,6 +1275,30 @@ static __inline__ void ypan_down(struct vc_data *vc, int count)
scrollback_current = 0;
}
static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
int redraw = 0;
p->yscroll -= count;
if (p->yscroll < 0) {
p->yscroll += p->vrows - vc->vc_rows;
redraw = 1;
}
info->var.xoffset = 0;
info->var.yoffset = p->yscroll * vc->vc_font.height;
info->var.vmode &= ~FB_VMODE_YWRAP;
if (redraw)
fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count);
update_var(vc->vc_num, info);
accel_clear_margins(vc, info, 1);
scrollback_max -= count;
if (scrollback_max < 0)
scrollback_max = 0;
scrollback_current = 0;
}
static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
long delta)
{
......@@ -1343,6 +1394,42 @@ static void fbcon_redraw_softback(struct vc_data *vc, struct display *p,
}
}
static void fbcon_redraw_move(struct vc_data *vc, struct display *p,
int line, int count, int dy)
{
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
unsigned short *s = (unsigned short *)
(vc->vc_origin + vc->vc_size_row * line);
while (count--) {
unsigned short *start = s;
unsigned short *le = advance_row(s, 1);
unsigned short c;
int x = 0;
unsigned short attr = 1;
do {
c = scr_readw(s);
if (attr != (c & 0xff00)) {
attr = c & 0xff00;
if (s > start) {
accel_putcs(vc, info, start, s - start,
real_y(p, dy), x);
x += s - start;
start = s;
}
}
console_conditional_schedule();
s++;
} while (s < le);
if (s > start)
accel_putcs(vc, info, start, s - start,
real_y(p, dy), x);
console_conditional_schedule();
dy++;
}
}
static void fbcon_redraw(struct vc_data *vc, struct display *p,
int line, int count, int offset)
{
......@@ -1455,14 +1542,14 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
if (logo_shown >= 0)
goto redraw_up;
switch (p->scrollmode) {
case SCROLL_ACCEL:
case SCROLL_MOVE:
accel_bmove(vc, info, t + count, 0, t, 0,
b - t - count, vc->vc_cols);
accel_clear(vc, info, b - count, 0, count,
vc->vc_cols);
break;
case SCROLL_WRAP:
case SCROLL_WRAP_MOVE:
if (b - t - count > 3 * vc->vc_rows >> 2) {
if (t > 0)
fbcon_bmove(vc, 0, 0, count, 0, t,
......@@ -1480,7 +1567,25 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN:
case SCROLL_PAN_REDRAW:
if ((p->yscroll + count <=
2 * (p->vrows - vc->vc_rows))
&& ((!scroll_partial && (b - t == vc->vc_rows))
|| (scroll_partial
&& (b - t - count >
3 * vc->vc_rows >> 2)))) {
if (t > 0)
fbcon_redraw_move(vc, p, 0, t, count);
ypan_up_redraw(vc, t, count);
if (vc->vc_rows - b > 0)
fbcon_redraw_move(vc, p, b - count,
vc->vc_rows - b, b);
} else
fbcon_redraw_move(vc, p, t + count, b - t - count, t);
fbcon_clear(vc, b - count, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_MOVE:
if ((p->yscroll + count <=
2 * (p->vrows - vc->vc_rows))
&& ((!scroll_partial && (b - t == vc->vc_rows))
......@@ -1522,13 +1627,13 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
if (count > vc->vc_rows) /* Maximum realistic size */
count = vc->vc_rows;
switch (p->scrollmode) {
case SCROLL_ACCEL:
case SCROLL_MOVE:
accel_bmove(vc, info, t, 0, t + count, 0,
b - t - count, vc->vc_cols);
accel_clear(vc, info, t, 0, count, vc->vc_cols);
break;
case SCROLL_WRAP:
case SCROLL_WRAP_MOVE:
if (b - t - count > 3 * vc->vc_rows >> 2) {
if (vc->vc_rows - b > 0)
fbcon_bmove(vc, b, 0, b - count, 0,
......@@ -1546,7 +1651,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN:
case SCROLL_PAN_MOVE:
if ((count - p->yscroll <= p->vrows - vc->vc_rows)
&& ((!scroll_partial && (b - t == vc->vc_rows))
|| (scroll_partial
......@@ -1568,6 +1673,23 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_PAN_REDRAW:
if ((count - p->yscroll <= p->vrows - vc->vc_rows)
&& ((!scroll_partial && (b - t == vc->vc_rows))
|| (scroll_partial
&& (b - t - count >
3 * vc->vc_rows >> 2)))) {
if (vc->vc_rows - b > 0)
fbcon_redraw_move(vc, p, b, vc->vc_rows - b,
b - count);
ypan_down_redraw(vc, t, count);
if (t > 0)
fbcon_redraw_move(vc, p, count, t, 0);
} else
fbcon_redraw_move(vc, p, t, b - t - count, t + count);
fbcon_clear(vc, t, 0, count, vc->vc_cols);
break;
case SCROLL_REDRAW:
redraw_down:
fbcon_redraw(vc, p, b - 1, b - t - count,
......@@ -1657,12 +1779,13 @@ static __inline__ void updatescrollmode(struct display *p, struct fb_info *info,
int cap = info->flags;
int good_pan = (cap & FBINFO_HWACCEL_YPAN)
&& divides(info->fix.ypanstep, vc->vc_font.height)
&& info->var.yres_virtual >= 2*info->var.yres;
&& info->var.yres_virtual > info->var.yres;
int good_wrap = (cap & FBINFO_HWACCEL_YWRAP)
&& divides(info->fix.ywrapstep, vc->vc_font.height)
&& divides(vc->vc_font.height, info->var.yres_virtual);
int reading_fast = cap & FBINFO_READS_FAST;
int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && !(cap & FBINFO_HWACCEL_DISABLED);
int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && !(cap & FBINFO_HWACCEL_DISABLED);
p->vrows = info->var.yres_virtual/fh;
if (info->var.yres > (fh * (vc->vc_rows + 1)))
......@@ -1673,12 +1796,13 @@ static __inline__ void updatescrollmode(struct display *p, struct fb_info *info,
if (good_wrap || good_pan) {
if (reading_fast || fast_copyarea)
p->scrollmode = good_wrap ? SCROLL_WRAP : SCROLL_PAN;
p->scrollmode = good_wrap ? SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
else
p->scrollmode = SCROLL_REDRAW;
p->scrollmode = good_wrap ? SCROLL_REDRAW :
SCROLL_PAN_REDRAW;
} else {
if (reading_fast || fast_copyarea)
p->scrollmode = SCROLL_ACCEL;
if (reading_fast || (fast_copyarea && !fast_imageblit))
p->scrollmode = SCROLL_MOVE;
else
p->scrollmode = SCROLL_REDRAW;
}
......@@ -1698,14 +1822,10 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
var.yres = height * fh;
x_diff = info->var.xres - var.xres;
y_diff = info->var.yres - var.yres;
if (x_diff < 0 || x_diff > fw || (y_diff < 0 || y_diff > fh) ||
(info->flags & FBINFO_MISC_MODESWITCH)) {
if (x_diff < 0 || x_diff > fw || (y_diff < 0 || y_diff > fh)) {
char mode[40];
DPRINTK("attempting resize %ix%i\n", var.xres, var.yres);
if (!info->fbops->fb_set_par)
return -EINVAL;
snprintf(mode, 40, "%ix%i", var.xres, var.yres);
err = fb_find_mode(&var, info, mode, info->monspecs.modedb,
info->monspecs.modedb_len, NULL,
......@@ -1715,11 +1835,10 @@ static int fbcon_resize(struct vc_data *vc, unsigned int width,
DPRINTK("resize now %ix%i\n", var.xres, var.yres);
if (CON_IS_VISIBLE(vc)) {
var.activate = FB_ACTIVATE_NOW |
(info->flags & FBINFO_MISC_MODESWITCH) ?
FB_ACTIVATE_FORCE : 0;
FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
info->flags &= ~FBINFO_MISC_MODESWITCH;
}
info->flags &= ~FBINFO_MISC_MODESWITCH;
}
updatescrollmode(p, info, vc);
return 0;
......@@ -1771,11 +1890,19 @@ static int fbcon_switch(struct vc_data *vc)
}
fbcon_resize(vc, vc->vc_cols, vc->vc_rows);
if (info->flags & FBINFO_MISC_MODESWITCH) {
if (info->fbops->fb_set_par)
info->fbops->fb_set_par(info);
info->flags &= ~FBINFO_MISC_MODESWITCH;
}
switch (p->scrollmode) {
case SCROLL_WRAP:
case SCROLL_WRAP_MOVE:
scrollback_phys_max = p->vrows - vc->vc_rows;
break;
case SCROLL_PAN:
case SCROLL_PAN_MOVE:
case SCROLL_PAN_REDRAW:
scrollback_phys_max = p->vrows - 2 * vc->vc_rows;
if (scrollback_phys_max < 0)
scrollback_phys_max = 0;
......@@ -1793,16 +1920,6 @@ static int fbcon_switch(struct vc_data *vc)
if (vt_cons[vc->vc_num]->vc_mode == KD_TEXT)
accel_clear_margins(vc, info, 0);
if (logo_shown == -2) {
struct fb_fillrect rect;
int bgshift = (vc->vc_hi_font_mask) ? 13 : 12;
rect.color = attr_bgcol_ec(bgshift, vc);
rect.rop = ROP_COPY;
rect.dx = rect.dy = 0;
rect.width = info->var.xres;
rect.height = logo_lines * vc->vc_font.height;
info->fbops->fb_fillrect(info, &rect);
logo_shown = fg_console;
/* This is protected above by initmem_freed */
fb_show_logo(info);
......@@ -1821,8 +1938,31 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch)
struct fb_info *info = registered_fb[(int) con2fb_map[vc->vc_num]];
struct display *p = &fb_display[vc->vc_num];
if (mode_switch)
info->flags |= FBINFO_MISC_MODESWITCH;
if (mode_switch) {
struct fb_var_screeninfo var = info->var;
/*
* HACK ALERT: Some hardware will require reinitialization at this stage,
* others will require it to be done as late as possible.
* For now, we differentiate this with the
* FBINFO_MISC_MODESWITCHLATE bitflag. Worst case will be
* hardware that requires it here and another one later.
* A definitive solution may require fixing X or the VT
* system.
*/
if (info->flags & FBINFO_MISC_MODESWITCHLATE)
info->flags |= FBINFO_MISC_MODESWITCH;
if (blank) {
fbcon_cursor(vc, CM_ERASE);
return 0;
}
if (!(info->flags & FBINFO_MISC_MODESWITCHLATE)) {
var.activate = FB_ACTIVATE_NOW | FB_ACTIVATE_FORCE;
fb_set_var(info, &var);
}
}
fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW);
......@@ -2348,10 +2488,11 @@ static int fbcon_scrolldelta(struct vc_data *vc, int lines)
offset = p->yscroll - scrollback_current;
limit = p->vrows;
switch (p->scrollmode) {
case SCROLL_WRAP:
case SCROLL_WRAP_MOVE:
info->var.vmode |= FB_VMODE_YWRAP;
break;
case SCROLL_PAN:
case SCROLL_PAN_MOVE:
case SCROLL_PAN_REDRAW:
limit -= vc->vc_rows;
info->var.vmode &= ~FB_VMODE_YWRAP;
break;
......
......@@ -69,6 +69,35 @@ extern int set_con2fb_map(int unit, int newidx);
/* There are several methods fbcon can use to move text around the screen:
*
*                     Operation   Pan   Wrap
*---------------------------------------------
* SCROLL_MOVE         copyarea    No    No
* SCROLL_PAN_MOVE     copyarea    Yes   No
* SCROLL_WRAP_MOVE    copyarea    No    Yes
* SCROLL_REDRAW       imageblit   No    No
* SCROLL_PAN_REDRAW   imageblit   Yes   No
* SCROLL_WRAP_REDRAW  imageblit   No    Yes
*
* (SCROLL_WRAP_REDRAW is not implemented yet)
*
* In general, fbcon will choose the best scrolling
* method based on the rule below:
*
* Pan/Wrap > accel imageblit > accel copyarea >
* soft imageblit > (soft copyarea)
*
* Exception to the rule: Pan + accel copyarea is
* preferred over Pan + accel imageblit.
*
* The above is typical for PCI/AGP cards. Unless
* overridden, fbcon will never use soft copyarea.
*
* If you need to override the above rule, set the
* appropriate flags in fb_info->flags. For example,
* to prefer copyarea over imageblit, set
* FBINFO_READS_FAST.
*
* Other notes:
* + use the hardware engine to move the text
* (hw-accelerated copyarea() and fillrect())
* + use hardware-supported panning on a large virtual screen
......@@ -84,10 +113,11 @@ extern int set_con2fb_map(int unit, int newidx);
*
*/
#define SCROLL_ACCEL 0x001
#define SCROLL_PAN 0x002
#define SCROLL_WRAP 0x003
#define SCROLL_REDRAW 0x004
#define SCROLL_MOVE 0x001
#define SCROLL_PAN_MOVE 0x002
#define SCROLL_WRAP_MOVE 0x003
#define SCROLL_REDRAW 0x004
#define SCROLL_PAN_REDRAW 0x005
extern int fb_console_init(void);
......
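The selection rule spelled out in the comment block above can be condensed into one helper; this is a sketch of the decision order as updatescrollmode() applies it (pick_scrollmode() and its flag parameters are invented names):
static int pick_scrollmode(int good_pan, int good_wrap, int reading_fast,
			   int fast_copyarea, int fast_imageblit)
{
	if (good_wrap || good_pan) {
		/* panning/wrapping wins; fall back to redraw if reads are slow */
		if (reading_fast || fast_copyarea)
			return good_wrap ? SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
		return good_wrap ? SCROLL_REDRAW : SCROLL_PAN_REDRAW;
	}
	/* no pan/wrap: prefer imageblit redraw unless copyarea is the
	 * only accelerated path */
	if (reading_fast || (fast_copyarea && !fast_imageblit))
		return SCROLL_MOVE;
	return SCROLL_REDRAW;
}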
......@@ -1681,7 +1681,8 @@ static int __devinit riva_set_fbinfo(struct fb_info *info)
| FBINFO_HWACCEL_YPAN
| FBINFO_HWACCEL_COPYAREA
| FBINFO_HWACCEL_FILLRECT
| FBINFO_HWACCEL_IMAGEBLIT;
| FBINFO_HWACCEL_IMAGEBLIT
| FBINFO_MISC_MODESWITCHLATE;
info->var = rivafb_default_var;
info->fix.visual = (info->var.bits_per_pixel == 8) ?
FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR;
......
......@@ -378,7 +378,8 @@ static int __init vesafb_probe(struct device *device)
info->fbops = &vesafb_ops;
info->var = vesafb_defined;
info->fix = vesafb_fix;
info->flags = FBINFO_FLAG_DEFAULT;
info->flags = FBINFO_FLAG_DEFAULT |
	(ypan ? FBINFO_HWACCEL_YPAN : 0); /* parentheses needed: | binds tighter than ?: */
if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
err = -ENXIO;
......
......@@ -1370,7 +1370,8 @@ int __init vga16fb_init(void)
vga16fb.var = vga16fb_defined;
vga16fb.fix = vga16fb_fix;
vga16fb.par = &vga16_par;
vga16fb.flags = FBINFO_FLAG_DEFAULT;
vga16fb.flags = FBINFO_FLAG_DEFAULT |
FBINFO_HWACCEL_YPAN;
i = (vga16fb_defined.bits_per_pixel == 8) ? 256 : 16;
ret = fb_alloc_cmap(&vga16fb.cmap, i, 0);
......
......@@ -716,7 +716,7 @@ static int ep_getfd(int *efd, struct inode **einode, struct file **efile)
dentry->d_op = &eventpollfs_dentry_operations;
d_add(dentry, inode);
file->f_vfsmnt = mntget(eventpoll_mnt);
file->f_dentry = dget(dentry);
file->f_dentry = dentry;
file->f_mapping = inode->i_mapping;
file->f_pos = 0;
......
......@@ -65,7 +65,7 @@ void smbiod_wake_up(void)
/*
* start smbiod if none is running
*/
static int smbiod_start()
static int smbiod_start(void)
{
pid_t pid;
if (smbiod_state != SMBIOD_DEAD)
......
......@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/linkage.h>
#include <linux/time.h>
#include <linux/compiler.h>
/* Avoid too many header ordering problems. */
struct siginfo;
......@@ -128,7 +129,11 @@ typedef unsigned long sigset_t;
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
typedef void __signalfn_t(int);
typedef __signalfn_t __user *__sighandler_t;
typedef void __restorefn_t(void);
typedef __restorefn_t __user *__sigrestore_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
......@@ -139,13 +144,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestore_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestore_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
......@@ -171,7 +176,7 @@ struct sigaction {
#endif /* __KERNEL__ */
typedef struct sigaltstack {
void *ss_sp;
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
......
/*
* Architecture dependent definitions
* for NEC uPD4990A serial I/O real-time clock.
*
* Copyright 2001 TAKAI Kousuke <tak@kmc.kyoto-u.ac.jp>
* Kyoto University Microcomputer Club (KMC).
*
* References:
* uPD4990A serial I/O real-time clock users' manual (Japanese)
* No. S12828JJ4V0UM00 (4th revision), NEC Corporation, 1999.
*/
#ifndef _ASM_I386_uPD4990A_H
#define _ASM_I386_uPD4990A_H
#include <asm/io.h>
#define UPD4990A_IO (0x0020)
#define UPD4990A_IO_DATAOUT (0x0033)
#define UPD4990A_OUTPUT_DATA_CLK(data, clk) \
outb((((data) & 1) << 5) | (((clk) & 1) << 4) \
| UPD4990A_PAR_SERIAL_MODE, UPD4990A_IO)
#define UPD4990A_OUTPUT_CLK(clk) UPD4990A_OUTPUT_DATA_CLK(0, (clk))
#define UPD4990A_OUTPUT_STROBE(stb) \
outb(((stb) << 3) | UPD4990A_PAR_SERIAL_MODE, UPD4990A_IO)
/*
* Note: udelay() is *not* usable for UPD4990A_DELAY because
* the Linux kernel reads uPD4990A to set up system clock
* before calibrating delay...
*/
#define UPD4990A_DELAY(usec) \
do { \
if (__builtin_constant_p((usec)) && (usec) < 5) \
__asm__ (".rept %c1\n\toutb %%al,%0\n\t.endr" \
: : "N" (0x5F), \
"i" (((usec) * 10 + 5) / 6)); \
else { \
int _count = ((usec) * 10 + 5) / 6; \
__asm__ volatile ("1: outb %%al,%1\n\tloop 1b" \
: "=c" (_count) \
: "N" (0x5F), "0" (_count)); \
} \
} while (0)
/* Caller should ignore all bits except bit0 */
#define UPD4990A_READ_DATA() inb(UPD4990A_IO_DATAOUT)
#endif
......@@ -2,6 +2,7 @@
#define _ASMPPC64_SIGNAL_H
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/siginfo.h>
/* Avoid too many header ordering problems. */
......@@ -114,7 +115,12 @@ typedef struct {
#define SIG_SETMASK 2 /* for setting the signal mask */
/* Type of a signal handler. */
typedef void (*__sighandler_t)(int);
typedef void __sigfunction(int);
typedef __sigfunction __user * __sighandler_t;
/* Type of the restorer function */
typedef void __sigrestorer(void);
typedef __sigrestorer __user * __sigrestorer_t;
#define SIG_DFL ((__sighandler_t)0) /* default signal handling */
#define SIG_IGN ((__sighandler_t)1) /* ignore signal */
......@@ -124,13 +130,13 @@ struct old_sigaction {
__sighandler_t sa_handler;
old_sigset_t sa_mask;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestorer_t sa_restorer;
};
struct sigaction {
__sighandler_t sa_handler;
unsigned long sa_flags;
void (*sa_restorer)(void);
__sigrestorer_t sa_restorer;
sigset_t sa_mask; /* mask last for extensibility */
};
......@@ -139,7 +145,7 @@ struct k_sigaction {
};
typedef struct sigaltstack {
void *ss_sp;
void __user *ss_sp;
int ss_flags;
size_t ss_size;
} stack_t;
......
......@@ -319,6 +319,16 @@ static inline void disabled_wait(unsigned long code)
#endif /* __s390x__ */
}
/*
* CPU idle notifier chain.
*/
#define CPU_IDLE 0
#define CPU_NOT_IDLE 1
struct notifier_block;
int register_idle_notifier(struct notifier_block *nb);
int unregister_idle_notifier(struct notifier_block *nb);
#endif
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -5,6 +5,7 @@
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*
* sigp.h by D.J. Barrow (c) IBM 1999
* contains routines / structures for signalling other S/390 processors in an
......@@ -72,17 +73,10 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" sr 1,1\n" /* parameter=0 in gpr 1 */
" sigp 1,%1,0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" sgr 1,1\n" /* parameter=0 in gpr 1 */
" sigp 1,%1,0(%2)\n"
" ipm %0\n"
" srl %0,28"
#endif /* __s390x__ */
: "=d" (ccode)
: "d" (__cpu_logical_map[cpu_addr]), "a" (order_code)
: "cc" , "memory", "1" );
......@@ -93,23 +87,16 @@ signal_processor(__u16 cpu_addr, sigp_order_code order_code)
* Signal processor with parameter
*/
extern __inline__ sigp_ccode
signal_processor_p(unsigned long parameter,__u16 cpu_addr,
signal_processor_p(__u32 parameter, __u16 cpu_addr,
sigp_order_code order_code)
{
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" lr 1,%1\n" /* parameter in gpr 1 */
" sigp 1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" lgr 1,%1\n" /* parameter in gpr 1 */
" sigp 1,%2,0(%3)\n"
" ipm %0\n"
" srl %0,28\n"
#endif /* __s390x__ */
: "=d" (ccode)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
"a" (order_code)
......@@ -121,27 +108,18 @@ signal_processor_p(unsigned long parameter,__u16 cpu_addr,
* Signal processor with parameter and return status
*/
extern __inline__ sigp_ccode
signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
signal_processor_ps(__u32 *statusptr, __u32 parameter,
__u16 cpu_addr, sigp_order_code order_code)
{
sigp_ccode ccode;
__asm__ __volatile__(
#ifndef __s390x__
" sr 2,2\n" /* clear status so it doesn't contain rubbish if not saved. */
" sr 2,2\n" /* clear status */
" lr 3,%2\n" /* parameter in gpr 3 */
" sigp 2,%3,0(%4)\n"
" st 2,%1\n"
" ipm %0\n"
" srl %0,28\n"
#else /* __s390x__ */
" sgr 2,2\n" /* clear status so it doesn't contain rubbish if not saved. */
" lgr 3,%2\n" /* parameter in gpr 3 */
" sigp 2,%3,0(%4)\n"
" stg 2,%1\n"
" ipm %0\n"
" srl %0,28\n"
#endif /* __s390x__ */
: "=d" (ccode), "=m" (*statusptr)
: "d" (parameter), "d" (__cpu_logical_map[cpu_addr]),
"a" (order_code)
......@@ -151,5 +129,3 @@ signal_processor_ps(unsigned long *statusptr, unsigned long parameter,
}
#endif /* __SIGP__ */
......@@ -5,6 +5,7 @@
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
* Martin Schwidefsky (schwidefsky@de.ibm.com)
* Heiko Carstens (heiko.carstens@de.ibm.com)
*/
#ifndef __ASM_SMP_H
#define __ASM_SMP_H
......@@ -47,6 +48,9 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
extern int smp_get_cpu(cpumask_t cpu_map);
extern void smp_put_cpu(int cpu);
extern __inline__ __u16 hard_smp_processor_id(void)
{
__u16 cpu_address;
......@@ -57,10 +61,17 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
extern int __cpu_disable (void);
extern void __cpu_die (unsigned int cpu);
extern void cpu_die (void) __attribute__ ((noreturn));
extern int __cpu_up (unsigned int cpu);
#endif
#ifndef CONFIG_SMP
#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
#define smp_get_cpu(cpu) ({ 0; })
#define smp_put_cpu(cpu) ({ 0; })
#endif
#endif
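One detail worth showing: the uniprocessor stubs above are statement expressions, so call sites keep compiling and can still test a return value. A hypothetical caller (example_call() is invented):
static int example_call(void (*fn)(void *), void *info)
{
	/* With CONFIG_SMP unset this expands to ({ 0; }): no cross-cpu
	 * call happens and the "call" trivially reports success. */
	return smp_call_function_on(fn, info, 0, 1, 0);
}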
......@@ -533,6 +533,7 @@ struct fb_ops {
#define FBINFO_MISC_MODECHANGEUSER 0x10000 /* mode change request
from userspace */
#define FBINFO_MISC_MODESWITCH 0x20000 /* mode switch */
#define FBINFO_MISC_MODESWITCHLATE 0x40000 /* init hardware later */
struct fb_info {
int node;
......
/*
* Constant and architecture independent procedures
* for NEC uPD4990A serial I/O real-time clock.
*
* Copyright 2001 TAKAI Kousuke <tak@kmc.kyoto-u.ac.jp>
* Kyoto University Microcomputer Club (KMC).
*
* References:
* uPD4990A serial I/O real-time clock users' manual (Japanese)
* No. S12828JJ4V0UM00 (4th revision), NEC Corporation, 1999.
*/
#ifndef _LINUX_uPD4990A_H
#define _LINUX_uPD4990A_H
#include <asm/byteorder.h>
#include <asm/upd4990a.h>
/* Serial commands (4 bits) */
#define UPD4990A_REGISTER_HOLD (0x0)
#define UPD4990A_REGISTER_SHIFT (0x1)
#define UPD4990A_TIME_SET_AND_COUNTER_HOLD (0x2)
#define UPD4990A_TIME_READ (0x3)
#define UPD4990A_TP_64HZ (0x4)
#define UPD4990A_TP_256HZ (0x5)
#define UPD4990A_TP_2048HZ (0x6)
#define UPD4990A_TP_4096HZ (0x7)
#define UPD4990A_TP_1S (0x8)
#define UPD4990A_TP_10S (0x9)
#define UPD4990A_TP_30S (0xA)
#define UPD4990A_TP_60S (0xB)
#define UPD4990A_INTERRUPT_RESET (0xC)
#define UPD4990A_INTERRUPT_TIMER_START (0xD)
#define UPD4990A_INTERRUPT_TIMER_STOP (0xE)
#define UPD4990A_TEST_MODE_SET (0xF)
/* Parallel commands (3 bits)
0-6 are same with serial commands. */
#define UPD4990A_PAR_SERIAL_MODE 7
#ifndef UPD4990A_DELAY
# include <linux/delay.h>
# define UPD4990A_DELAY(usec) udelay((usec))
#endif
#ifndef UPD4990A_OUTPUT_DATA
# define UPD4990A_OUTPUT_DATA(bit) \
do { \
UPD4990A_OUTPUT_DATA_CLK((bit), 0); \
UPD4990A_DELAY(1); /* t-DSU */ \
UPD4990A_OUTPUT_DATA_CLK((bit), 1); \
UPD4990A_DELAY(1); /* t-DHLD */ \
} while (0)
#endif
static __inline__ void upd4990a_serial_command(int command)
{
UPD4990A_OUTPUT_DATA(command >> 0);
UPD4990A_OUTPUT_DATA(command >> 1);
UPD4990A_OUTPUT_DATA(command >> 2);
UPD4990A_OUTPUT_DATA(command >> 3);
UPD4990A_DELAY(1); /* t-HLD */
UPD4990A_OUTPUT_STROBE(1);
UPD4990A_DELAY(1); /* t-STB & t-d1 */
UPD4990A_OUTPUT_STROBE(0);
/* 19 microseconds extra delay is needed
iff previous mode is TIME READ command */
}
struct upd4990a_raw_data {
u8 sec; /* BCD */
u8 min; /* BCD */
u8 hour; /* BCD */
u8 mday; /* BCD */
#if defined __LITTLE_ENDIAN_BITFIELD
unsigned wday :4; /* 0-6 */
unsigned mon :4; /* 1-based */
#elif defined __BIG_ENDIAN_BITFIELD
unsigned mon :4; /* 1-based */
unsigned wday :4; /* 0-6 */
#else
# error Unknown bitfield endian!
#endif
u8 year; /* BCD */
};
static __inline__ void upd4990a_get_time(struct upd4990a_raw_data *buf,
int leave_register_hold)
{
int byte;
upd4990a_serial_command(UPD4990A_TIME_READ);
upd4990a_serial_command(UPD4990A_REGISTER_SHIFT);
UPD4990A_DELAY(19); /* t-d2 - t-d1 */
for (byte = 0; byte < 6; byte++) {
u8 tmp;
int bit;
for (tmp = 0, bit = 0; bit < 8; bit++) {
tmp = (tmp | (UPD4990A_READ_DATA() << 8)) >> 1;
UPD4990A_OUTPUT_CLK(1);
UPD4990A_DELAY(1);
UPD4990A_OUTPUT_CLK(0);
UPD4990A_DELAY(1);
}
((u8 *) buf)[byte] = tmp;
}
/* The uPD4990A users' manual says that we should issue `Register
Hold' command after each data retrieval, or next `Time Read'
command may not work correctly. */
if (!leave_register_hold)
upd4990a_serial_command(UPD4990A_REGISTER_HOLD);
}
static __inline__ void upd4990a_set_time(const struct upd4990a_raw_data *data,
int time_set_only)
{
int byte;
if (!time_set_only)
upd4990a_serial_command(UPD4990A_REGISTER_SHIFT);
for (byte = 0; byte < 6; byte++) {
int bit;
u8 tmp = ((const u8 *) data)[byte];
for (bit = 0; bit < 8; bit++, tmp >>= 1)
UPD4990A_OUTPUT_DATA(tmp);
}
upd4990a_serial_command(UPD4990A_TIME_SET_AND_COUNTER_HOLD);
/* Release counter hold and start the clock. */
if (!time_set_only)
upd4990a_serial_command(UPD4990A_REGISTER_HOLD);
}
#endif /* _LINUX_uPD4990A_H */
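To close the loop on the header above, a hypothetical reader of the clock could convert the BCD fields like this (bcd2bin_byte() and example_read_rtc() are invented and assume the usual kernel includes; struct upd4990a_raw_data and upd4990a_get_time() are from the header):
static inline int bcd2bin_byte(u8 v)
{
	return (v >> 4) * 10 + (v & 0x0f);   /* e.g. 0x59 -> 59 */
}
static void example_read_rtc(void)
{
	struct upd4990a_raw_data raw;
	upd4990a_get_time(&raw, 0);  /* 0: issue Register Hold afterwards */
	printk(KERN_INFO "%02d:%02d:%02d (wday %d)\n",
	       bcd2bin_byte(raw.hour), bcd2bin_byte(raw.min),
	       bcd2bin_byte(raw.sec), raw.wday);
}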
......@@ -828,9 +828,6 @@ asmlinkage NORET_TYPE void do_exit(long code)
__exit_fs(tsk);
exit_namespace(tsk);
exit_thread();
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
#endif
if (tsk->signal->leader)
disassociate_ctty(1);
......@@ -841,6 +838,10 @@ asmlinkage NORET_TYPE void do_exit(long code)
tsk->exit_code = code;
exit_notify(tsk);
#ifdef CONFIG_NUMA
mpol_free(tsk->mempolicy);
tsk->mempolicy = NULL;
#endif
schedule();
BUG();
/* Avoid "noreturn function does return". */
......
......@@ -157,7 +157,7 @@ static kmem_cache_t *sigqueue_cachep;
static int sig_ignored(struct task_struct *t, int sig)
{
void * handler;
void __user * handler;
/*
* Tracers always want to know about signals..
......@@ -2362,13 +2362,13 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
int error;
if (uoss) {
oss.ss_sp = (void *) current->sas_ss_sp;
oss.ss_sp = (void __user *) current->sas_ss_sp;
oss.ss_size = current->sas_ss_size;
oss.ss_flags = sas_ss_flags(sp);
}
if (uss) {
void *ss_sp;
void __user *ss_sp;
size_t ss_size;
int ss_flags;
......
......@@ -1116,7 +1116,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count
wait_queue_t wait;
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
goto out;
goto __end;
}
init_waitqueue_entry(&wait, current);
add_wait_queue(&ctl->change_sleep, &wait);
......@@ -1137,7 +1137,7 @@ static ssize_t snd_ctl_read(struct file *file, char __user *buffer, size_t count
kfree(kev);
if (copy_to_user(buffer, &ev, sizeof(snd_ctl_event_t))) {
err = -EFAULT;
goto __end;
goto out;
}
spin_lock_irq(&ctl->read_lock);
buffer += sizeof(snd_ctl_event_t);
......