Commit 429383c2 authored by Linus Torvalds

Import 2.1.114

parent 32cf753f
VERSION = 2
PATCHLEVEL = 1
-SUBLEVEL = 113
+SUBLEVEL = 114
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
/*
* bios32.c - Low-Level PCI Access
*
- * $Id: bios32.c,v 1.42 1998/07/26 09:33:07 mj Exp $
+ * $Id: bios32.c,v 1.43 1998/08/03 15:59:20 mj Exp $
*
* Copyright 1993, 1994 Drew Eckhardt
* Visionary Computing
......@@ -920,6 +920,13 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
struct pci_bus *b = &pci_root;
int i;
+	/*
+	 * Don't search for peer host bridges if we use config type 2
+	 * since it reads bogus values for non-existent busses and
+	 * chipsets supporting multiple primary busses use conf1 anyway.
+	 */
+	if (access_pci == &pci_direct_conf2)
+		return;
do {
int n = b->subordinate+1;
u16 l;
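
The guard added above skips the peer-bridge scan whenever configuration mechanism #2 is in use: conf2 maps devices into I/O space through a forwarding register, reaches at most 16 devices per bus, and returns junk for buses that are not there. For contrast, a minimal userspace sketch of a mechanism #1 ("conf1") read on i386; the helper name is hypothetical, the 0xCF8/0xCFC ports are the standard pair (actually running it needs iopl(3) and root):

/* Hypothetical sketch of a PCI mechanism #1 ("conf1") config read.
 * Port 0xCF8 selects bus/device/function/register; 0xCFC returns the
 * data.  Unlike conf2 there is no 16-device limit, which is why the
 * peer-bridge probe above is only safe under conf1. */
#include <sys/io.h>		/* iopl(), outl(), inl() -- x86 Linux */

static unsigned int conf1_read32(unsigned char bus, unsigned char dev,
				 unsigned char fn, unsigned char reg)
{
	unsigned int addr = 0x80000000u
			  | ((unsigned int)bus << 16)
			  | ((unsigned int)dev << 11)
			  | ((unsigned int)fn  << 8)
			  | (reg & 0xFC);
	outl(addr, 0xCF8);	/* select the configuration register */
	return inl(0xCFC);	/* read the selected dword */
}
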
......@@ -972,8 +979,13 @@ __initfunc(void pcibios_fixup_devices(void))
/*
* Don't enable VGA-compatible cards since they have
* fixed I/O and memory space.
+	 *
+	 * Don't enable disabled IDE interfaces either, because
+	 * some BIOSes may reallocate the same address when they
+	 * find that no devices are attached.
*/
-	if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
+	if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) &&
+	    ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (has_io && !(cmd & PCI_COMMAND_IO)) {
printk("PCI: Enabling I/O for device %02x:%02x\n",
......
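
With the new test, the fixup flips the command-register bits for every device except VGA (fixed legacy resources) and IDE (the BIOS may have parked it deliberately). A condensed sketch of the pattern, kernel context assumed; the helper is hypothetical, but pci_read_config_word/pci_write_config_word and the PCI_* constants are the interfaces the hunk itself uses:

/* Hypothetical condensation of the fixup above: enable I/O and/or
 * memory decoding unless the device class makes that unsafe. */
static void maybe_enable(struct pci_dev *dev, int has_io, int has_mem)
{
	u16 cmd, cls = dev->class >> 8;

	if (cls == PCI_CLASS_DISPLAY_VGA || cls == PCI_CLASS_STORAGE_IDE)
		return;			/* fixed or BIOS-managed resources */
	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	if (has_io && !(cmd & PCI_COMMAND_IO))
		cmd |= PCI_COMMAND_IO;
	if (has_mem && !(cmd & PCI_COMMAND_MEMORY))
		cmd |= PCI_COMMAND_MEMORY;
	pci_write_config_word(dev, PCI_COMMAND, cmd);
}
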
......@@ -56,17 +56,11 @@ extern int handle_IRQ_event(unsigned int, struct pt_regs *);
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
void enable_IO_APIC_irq (unsigned int irq);
void disable_IO_APIC_irq (unsigned int irq);
void unmask_IO_APIC_irq (unsigned int irq);
void mask_IO_APIC_irq (unsigned int irq);
void set_8259A_irq_mask (unsigned int irq);
int i8259A_irq_pending (unsigned int irq);
void ack_APIC_irq (void);
void setup_IO_APIC (void);
void init_IO_APIC_traps(void);
int IO_APIC_get_PCI_irq_vector (int bus, int slot, int fn);
int IO_APIC_irq_trigger (int irq);
void make_8259A_irq (unsigned int irq);
void send_IPI (int dest, int vector);
void init_pic_mode (void);
......
......@@ -441,13 +441,26 @@ void show_regs(struct pt_regs * regs)
*
* This extra buffer essentially acts to make for less
* "jitter" in the allocations..
+	 *
+	 * On SMP we don't do this right now because:
+	 *  - we aren't holding any locks when called, and we might
+	 *    as well just depend on the generic memory management
+	 *    to do proper locking for us instead of complicating it
+	 *    here.
+	 *  - if you use SMP you have a beefy enough machine that
+	 *    this shouldn't matter..
*/
+#ifndef __SMP__
#define EXTRA_TASK_STRUCT 16
static struct task_struct * task_struct_stack[EXTRA_TASK_STRUCT];
static int task_struct_stack_ptr = -1;
+#endif
struct task_struct * alloc_task_struct(void)
{
+#ifndef EXTRA_TASK_STRUCT
+	return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
+#else
int index;
struct task_struct *ret;
......@@ -464,16 +477,19 @@ struct task_struct * alloc_task_struct(void)
}
}
return ret;
+#endif
}
void free_task_struct(struct task_struct *p)
{
+#ifdef EXTRA_TASK_STRUCT
int index = task_struct_stack_ptr+1;
if (index < EXTRA_TASK_STRUCT) {
task_struct_stack[index] = p;
task_struct_stack_ptr = index;
} else
+#endif
free_pages((unsigned long) p, 1);
}
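
The EXTRA_TASK_STRUCT code above keeps up to 16 freed two-page task structs on a small stack so uniprocessor kernels can recycle them without calling back into the page allocator. A userspace sketch of the same fixed-size free-stack cache, with hypothetical names:

/* Userspace sketch of the free-stack cache: freed blocks are pushed
 * onto a small array and handed straight back on the next allocation;
 * only when the stack is empty or full do we touch the real allocator.
 * Assumes all blocks are the same size, as task structs are, and is
 * deliberately not thread-safe -- which is exactly why the kernel
 * version is compiled out on SMP. */
#include <stdlib.h>

#define CACHE_SLOTS 16
static void *cache[CACHE_SLOTS];
static int cache_top = -1;		/* -1 = empty, mirrors the kernel */

void *cached_alloc(size_t size)
{
	if (cache_top >= 0)
		return cache[cache_top--];	/* reuse a freed block */
	return malloc(size);
}

void cached_free(void *p)
{
	if (cache_top + 1 < CACHE_SLOTS)
		cache[++cache_top] = p;		/* park it for next caller */
	else
		free(p);
}
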
......
......@@ -108,7 +108,7 @@ static void show_registers(struct pt_regs *regs)
unsigned long *stack, addr, module_start, module_end;
extern char _stext, _etext;
-	esp = (unsigned long) &regs->esp;
+	esp = (unsigned long) (1+regs);
ss = __KERNEL_DS;
if (regs->xcs & 3) {
in_kernel = 0;
......@@ -169,8 +169,8 @@ static void show_registers(struct pt_regs *regs)
printk("\nCode: ");
for(i=0;i<20;i++)
printk("%02x ", ((unsigned char *)regs->eip)[i]);
printk("\n");
}
printk("\n");
}
spinlock_t die_lock;
......
......@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
+#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/zorro.h>
......
......@@ -12,7 +12,6 @@
** Created: 12/10/97 by Alain Malek
*/
-#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/amigayle.h>
......
......@@ -110,8 +110,8 @@ int nbd_xmit(int send, struct socket *sock, char *buf, int size)
if (result <= 0) {
#ifdef PARANOIA
-		printk(KERN_ERR "NBD: %s - sock=%d at buf=%d, size=%d returned %d.\n",
-			send ? "send" : "receive", (int) sock, (int) buf, size, result);
+		printk(KERN_ERR "NBD: %s - sock=%ld at buf=%ld, size=%d returned %d.\n",
+			send ? "send" : "receive", (long) sock, (long) buf, size, result);
#endif
break;
}
......@@ -371,8 +371,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
return 0;
#ifdef PARANOIA
case NBD_PRINT_DEBUG:
-		printk(KERN_INFO "NBD device %d: head = %x, tail = %x. Global: in %d, out %d\n",
-			dev, (int) lo->head, (int) lo->tail, requests_in, requests_out);
+		printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
+			dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
return 0;
#endif
}
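
Both NBD printk fixes replace (int) casts on pointers with (long): int stays 32 bits on the 64-bit Alpha and UltraSPARC ports, so the old casts truncated the printed addresses. A tiny standalone illustration:

/* Why the casts changed: on LP64 targets a pointer is 64 bits but int
 * is 32, so (int)ptr throws away the top half.  long has pointer width
 * on every port this kernel supported. */
#include <stdio.h>

int main(void)
{
	char buf[1];
	char *p = buf;

	printf("as int : %x\n",  (unsigned int)(unsigned long)p); /* may truncate */
	printf("as long: %lx\n", (unsigned long)p);               /* full value  */
	return 0;
}
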
......
......@@ -29,7 +29,6 @@
#include <linux/module.h>
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
......
......@@ -8,7 +8,6 @@
#include <linux/blk.h>
#include <linux/sched.h>
#include <linux/version.h>
-#include <linux/config.h>
#include <linux/zorro.h>
#include <asm/setup.h>
......
......@@ -186,7 +186,8 @@ lockd(struct svc_rqst *rqstp)
nlm_shutdown_hosts();
nlmsvc_pid = 0;
} else
printk("lockd: new process, skipping host shutdown\n");
printk(KERN_DEBUG
"lockd: new process, skipping host shutdown\n");
wake_up(&lockd_exit);
/* Exit the RPC thread */
......@@ -205,6 +206,7 @@ lockd(struct svc_rqst *rqstp)
int
lockd_up(void)
{
+	static int warned = 0;
struct svc_serv * serv;
int error = 0;
......@@ -225,27 +227,32 @@ lockd_up(void)
* we should be the first user ...
*/
if (nlmsvc_users > 1)
printk("lockd_up: no pid, %d users??\n", nlmsvc_users);
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
error = -ENOMEM;
serv = svc_create(&nlmsvc_program, 0, NLMSVC_XDRSIZE);
if (!serv) {
printk("lockd_up: create service failed\n");
printk(KERN_WARNING "lockd_up: create service failed\n");
goto out;
}
if ((error = svc_makesock(serv, IPPROTO_UDP, 0)) < 0
|| (error = svc_makesock(serv, IPPROTO_TCP, 0)) < 0) {
printk("lockd_up: makesock failed, error=%d\n", error);
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", error);
goto destroy_and_out;
}
}
+	warned = 0;
/*
* Create the kernel thread and wait for it to start.
*/
error = svc_create_thread(lockd, serv);
if (error) {
printk("lockd_up: create thread failed, error=%d\n", error);
printk(KERN_WARNING
"lockd_up: create thread failed, error=%d\n", error);
goto destroy_and_out;
}
sleep_on(&lockd_start);
......@@ -267,17 +274,21 @@ lockd_up(void)
void
lockd_down(void)
{
+	static int warned = 0;
down(&nlmsvc_sema);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
} else
printk("lockd_down: no users! pid=%d\n", nlmsvc_pid);
printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
if (!nlmsvc_pid) {
printk("lockd_down: nothing to do!\n");
if (warned++ == 0)
printk(KERN_WARNING "lockd_down: no lockd running.\n");
goto out;
}
+	warned = 0;
kill_proc(nlmsvc_pid, SIGKILL, 1);
/*
......@@ -289,7 +300,8 @@ lockd_down(void)
interruptible_sleep_on(&lockd_exit);
current->timeout = 0;
if (nlmsvc_pid) {
printk("lockd_down: lockd failed to exit, clearing pid\n");
printk(KERN_WARNING
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sigmask_lock);
......
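
The lockd changes all follow one pattern: tag every message with a printk level, and use a static "warned" counter so a repeating failure is reported once and re-armed on the next success. A minimal sketch of that warn-once idiom, with a hypothetical function:

/* Warn-once pattern from lockd_up()/lockd_down() above: the static
 * counter suppresses repeats, and the success path resets it so the
 * warning fires again if the condition recurs later. */
#include <stdio.h>

static int try_start(int ok)
{
	static int warned = 0;

	if (!ok) {
		if (warned++ == 0)
			fprintf(stderr, "start failed (further reports suppressed)\n");
		return -1;
	}
	warned = 0;		/* success: re-arm the warning */
	return 0;
}
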
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
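
This generic <asm/smplock.h> relies on lock_depth starting at -1 (see the p->lock_depth = -1 change in fork.c below), so the pre-increment reaches 0 exactly on the outermost lock_kernel() call: only that call takes the spinlock, and nesting merely bumps the counter. A userspace sketch of the counting scheme, with pthreads standing in for the kernel spinlock:

/* Sketch of the BKL depth counting above, in userspace terms.
 * depth == -1 means "not held"; only the -1 -> 0 transition locks,
 * only the 0 -> -1 transition unlocks, so nested calls are cheap. */
#include <pthread.h>

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;
static __thread int lock_depth = -1;	/* per-task in the kernel */

void lock_big(void)
{
	if (!++lock_depth)		/* -1 -> 0: outermost caller */
		pthread_mutex_lock(&big_lock);
}

void unlock_big(void)
{
	if (--lock_depth < 0)		/* 0 -> -1: last unlock */
		pthread_mutex_unlock(&big_lock);
}
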
/*
* <asm/smplock.h>
*
* i386 SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
__asm__ __volatile__(
"incl %1\n\t"
"jne 9f"
spin_lock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
}
extern __inline__ void unlock_kernel(void)
{
__asm__ __volatile__(
"decl %1\n\t"
"jns 9f\n"
spin_unlock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
}
......@@ -128,8 +128,7 @@ typedef struct {
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
-#define spin_lock(lock) \
-__asm__ __volatile__( \
+#define spin_lock_string \
"\n1:\t" \
"lock ; btsl $0,%0\n\t" \
"jc 2f\n" \
......@@ -138,12 +137,19 @@ __asm__ __volatile__( \
"testb $1,%0\n\t" \
"jne 2b\n\t" \
"jmp 1b\n" \
".previous" \
".previous"
#define spin_unlock_string \
"lock ; btrl $0,%0"
#define spin_lock(lock) \
__asm__ __volatile__( \
spin_lock_string \
:"=m" (__dummy_lock(lock)))
#define spin_unlock(lock) \
__asm__ __volatile__( \
"lock ; btrl $0,%0" \
spin_unlock_string \
:"=m" (__dummy_lock(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
......
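
Factoring the asm bodies into spin_lock_string/spin_unlock_string lets the i386 lock_kernel() above splice the exact locking sequence into its own __asm__ statement, right after the incl/decl depth test, instead of duplicating the btsl/btrl loops. A toy sketch of that string-splicing technique, with a hypothetical macro and payload:

/* The same asm fragment spliced into two different asm statements by
 * C string concatenation -- the definition lives in one place only.
 * x86 only; "=m" mirrors the file's __dummy_lock idiom ("+m" is the
 * modern spelling for a read-modify-write memory operand). */
#define INC_STRING \
	"lock ; incl %0"

static int counter;

#define inc_once() \
	__asm__ __volatile__(INC_STRING : "=m" (counter))

#define inc_twice() \
	__asm__ __volatile__(INC_STRING "\n\t" INC_STRING : "=m" (counter))
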
......@@ -213,9 +213,16 @@ struct task_struct {
/* various fields */
long counter;
long priority;
-	struct linux_binfmt *binfmt;
+/* SMP and runqueue state */
+	int has_cpu;
+	int processor;
+	int last_processor;
+	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
struct task_struct *next_task, *prev_task;
struct task_struct *next_run, *prev_run;
+/* task state */
+	struct linux_binfmt *binfmt;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
......@@ -282,18 +289,12 @@ struct task_struct {
/* memory management info */
struct mm_struct *mm;
/* signal handlers */
+	spinlock_t sigmask_lock;	/* Protects signal and blocked */
struct signal_struct *sig;
sigset_t signal, blocked;
struct signal_queue *sigqueue, **sigqueue_tail;
unsigned long sas_ss_sp;
size_t sas_ss_size;
-/* SMP state */
-	int has_cpu;
-	int processor;
-	int last_processor;
-	int lock_depth;		/* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
-/* Spinlocks for various pieces or per-task state. */
-	spinlock_t sigmask_lock;	/* Protects signal and blocked */
};
/*
......@@ -338,8 +339,9 @@ struct task_struct {
#define INIT_TASK \
/* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0, \
/* counter */ DEF_PRIORITY,DEF_PRIORITY, \
-/* binfmt */	NULL, \
+/* SMP */	0,0,0,-1, \
/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
+/* binfmt */	NULL, \
/* ec,brk... */ 0,0,0,0,0,0, \
/* pid etc.. */ 0,0,0,0,0, \
/* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
......@@ -365,10 +367,7 @@ struct task_struct {
/* fs */ &init_fs, \
/* files */ &init_files, \
/* mm */ &init_mm, \
-/* signals */	&init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, \
-		0, 0, \
-/* SMP */	0,0,0,0, \
-/* locks */	INIT_LOCKS \
+/* signals */	INIT_LOCKS, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
}
union task_union {
......
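
Because INIT_TASK is a positional aggregate initializer, the task_struct reshuffle above forces the matching INIT_TASK hunks: each commented group of values must track the fields in declaration order. A toy illustration of the constraint:

/* Positional initializers must follow field order, which is why the
 * INIT_TASK hunks move the NULL for binfmt and the SMP zeros in
 * lockstep with the struct declaration.  Hypothetical miniature: */
struct task_like {
	int counter, priority;
	int has_cpu, processor, last_processor, lock_depth; /* moved up */
	void *binfmt;                                       /* moved down */
};

static struct task_like init_like = {
	/* counter */ 0, 0,
	/* SMP     */ 0, 0, 0, -1,	/* lock_depth starts at -1 */
	/* binfmt  */ 0,
};
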
......@@ -10,60 +10,7 @@
#else
-#include <linux/interrupt.h>
-#include <asm/spinlock.h>
-
-extern spinlock_t kernel_flag;
-
-/*
- * Release global kernel lock and global interrupt lock
- */
-#define release_kernel_lock(task, cpu) \
-do { \
-	if (task->lock_depth) \
-		spin_unlock(&kernel_flag); \
-	release_irqlock(cpu); \
-	__sti(); \
-} while (0)
-
-/*
- * Re-acquire the kernel lock
- */
-#define reacquire_kernel_lock(task) \
-do { \
-	if (task->lock_depth) \
-		spin_lock(&kernel_flag); \
-} while (0)
-
-/*
- * Getting the big kernel lock.
- *
- * This cannot happen asynchronously,
- * so we only need to worry about other
- * CPU's.
- */
-extern __inline__ void lock_kernel(void)
-{
-	struct task_struct *tsk = current;
-	int lock_depth;
-
-	lock_depth = tsk->lock_depth;
-	tsk->lock_depth = lock_depth+1;
-	if (!lock_depth)
-		spin_lock(&kernel_flag);
-}
-
-extern __inline__ void unlock_kernel(void)
-{
-	struct task_struct *tsk = current;
-	int lock_depth;
-
-	lock_depth = tsk->lock_depth-1;
-	tsk->lock_depth = lock_depth;
-	if (!lock_depth)
-		spin_unlock(&kernel_flag);
-}
+#include <asm/smplock.h>
#endif /* __SMP__ */
......
......@@ -36,21 +36,31 @@ static void release(struct task_struct * p)
{
if (p != current) {
#ifdef __SMP__
-		/* FIXME! Cheesy, but kills the window... -DaveM */
-		do {
-			barrier();
-		} while (p->has_cpu);
-		spin_unlock_wait(&scheduler_lock);
+		/*
+		 * Wait to make sure the process isn't active on any
+		 * other CPU
+		 */
+		for (;;) {
+			int has_cpu;
+			spin_lock(&scheduler_lock);
+			has_cpu = p->has_cpu;
+			spin_unlock(&scheduler_lock);
+			if (!has_cpu)
+				break;
+			do {
+				barrier();
+			} while (p->has_cpu);
+		}
#endif
charge_uid(p, -1);
nr_tasks--;
add_free_taskslot(p->tarray_ptr);
-	{
-		write_lock_irq(&tasklist_lock);
-		unhash_pid(p);
-		REMOVE_LINKS(p);
-		write_unlock_irq(&tasklist_lock);
-	}
+	write_lock_irq(&tasklist_lock);
+	unhash_pid(p);
+	REMOVE_LINKS(p);
+	write_unlock_irq(&tasklist_lock);
release_thread(p);
current->cmin_flt += p->min_flt + p->cmin_flt;
current->cmaj_flt += p->maj_flt + p->cmaj_flt;
......@@ -340,35 +350,39 @@ static void exit_notify(void)
NORET_TYPE void do_exit(long code)
{
+	struct task_struct *tsk = current;
if (in_interrupt())
printk("Aiee, killing interrupt handler\n");
-	if (current == task[0])
+	if (!tsk->pid)
panic("Attempted to kill the idle task!");
+	tsk->flags |= PF_EXITING;
+	del_timer(&tsk->real_timer);
lock_kernel();
fake_volatile:
-	current->flags |= PF_EXITING;
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
-	del_timer(&current->real_timer);
sem_exit();
-	__exit_mm(current);
+	__exit_mm(tsk);
#if CONFIG_AP1000
-	exit_msc(current);
+	exit_msc(tsk);
#endif
-	__exit_files(current);
-	__exit_fs(current);
-	__exit_sighand(current);
+	__exit_files(tsk);
+	__exit_fs(tsk);
+	__exit_sighand(tsk);
exit_thread();
-	current->state = TASK_ZOMBIE;
-	current->exit_code = code;
+	tsk->state = TASK_ZOMBIE;
+	tsk->exit_code = code;
exit_notify();
#ifdef DEBUG_PROC_TREE
audit_ptree();
#endif
-	if (current->exec_domain && current->exec_domain->module)
-		__MOD_DEC_USE_COUNT(current->exec_domain->module);
-	if (current->binfmt && current->binfmt->module)
-		__MOD_DEC_USE_COUNT(current->binfmt->module);
+	if (tsk->exec_domain && tsk->exec_domain->module)
+		__MOD_DEC_USE_COUNT(tsk->exec_domain->module);
+	if (tsk->binfmt && tsk->binfmt->module)
+		__MOD_DEC_USE_COUNT(tsk->binfmt->module);
schedule();
/*
* In order to get rid of the "volatile function does return" message
......@@ -388,9 +402,7 @@ NORET_TYPE void do_exit(long code)
asmlinkage int sys_exit(int error_code)
{
-	lock_kernel();
do_exit((error_code&0xff)<<8);
-	unlock_kernel();
}
asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
......
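
The deleted unlock_kernel() in sys_exit() was unreachable: do_exit() is declared NORET_TYPE, ends by scheduling away from a zombie task, and now takes the kernel lock itself. A sketch of that no-return contract in GCC terms, with hypothetical names:

/* NORET_TYPE marks a function that never returns; with GCC that is
 * __attribute__((noreturn)).  Anything placed after a call to such a
 * function is dead code, which is why unlock_kernel() could simply be
 * deleted. */
#include <stdlib.h>

static void die(int code) __attribute__((noreturn));
static void die(int code)
{
	exit(code);		/* never returns to the caller */
}

void wrapper(int code)
{
	die((code & 0xff) << 8);
	/* anything here would never run */
}
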
......@@ -476,7 +476,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
int nr;
-	int error = -ENOMEM;
+	int retval = -ENOMEM;
struct task_struct *p;
down(&current->mm->mmap_sem);
......@@ -485,7 +485,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
if (!p)
goto bad_fork;
-	error = -EAGAIN;
+	retval = -EAGAIN;
nr = find_empty_process();
if (nr < 0)
goto bad_fork_free;
......@@ -504,8 +504,16 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
-	p->next_run = NULL;
-	p->prev_run = NULL;
+	/*
+	 * This is a "shadow run" state. The process
+	 * is marked runnable, but isn't actually on
+	 * any run queue yet.. (that happens at the
+	 * very end).
+	 */
+	p->state = TASK_RUNNING;
+	p->next_run = p;
+	p->prev_run = p;
p->p_pptr = p->p_opptr = current;
p->p_cptr = NULL;
init_waitqueue(&p->wait_chldexit);
......@@ -535,12 +543,13 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
spin_lock_init(&p->sigmask_lock);
}
#endif
-	p->lock_depth = 0;
+	p->lock_depth = -1;		/* -1 = no lock */
p->start_time = jiffies;
p->tarray_ptr = &task[nr];
*p->tarray_ptr = p;
{
+		/* This makes it visible to the rest of the system */
unsigned long flags;
write_lock_irqsave(&tasklist_lock, flags);
SET_LINKS(p);
......@@ -550,7 +559,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
nr_tasks++;
-	error = -ENOMEM;
+	retval = -ENOMEM;
/* copy all the process information */
if (copy_files(clone_flags, p))
goto bad_fork_cleanup;
......@@ -560,8 +569,8 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
goto bad_fork_cleanup_fs;
if (copy_mm(nr, clone_flags, p))
goto bad_fork_cleanup_sighand;
-	error = copy_thread(nr, clone_flags, usp, p, regs);
-	if (error)
+	retval = copy_thread(nr, clone_flags, usp, p, regs);
+	if (retval)
goto bad_fork_cleanup_sighand;
p->semundo = NULL;
......@@ -579,18 +588,18 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
current->counter >>= 1;
p->counter = current->counter;
-	if(p->pid) {
-		wake_up_process(p);		/* do this last, just in case */
-	} else {
-		p->state = TASK_RUNNING;
-		p->next_run = p->prev_run = p;
+	/* Ok, add it to the run-queues, let it rip! */
+	retval = p->pid;
+	if (retval) {
+		p->next_run = NULL;
+		p->prev_run = NULL;
+		wake_up_process(p);		/* do this last */
}
++total_forks;
-	error = p->pid;
bad_fork:
up(&current->mm->mmap_sem);
unlock_kernel();
-	return error;
+	return retval;
bad_fork_cleanup_sighand:
exit_sighand(p);
......
......@@ -146,14 +146,21 @@ static inline void reschedule_idle(struct task_struct * p)
current->need_resched = 1;
}
+/*
+ * Careful!
+ *
+ * This has to add the process to the _beginning_ of the
+ * run-queue, not the end. See the comment about "This is
+ * subtle" in the scheduler proper..
+ */
static inline void add_to_runqueue(struct task_struct * p)
{
-	nr_running++;
-	reschedule_idle(p);
-	(p->prev_run = init_task.prev_run)->next_run = p;
-	p->next_run = &init_task;
-	init_task.prev_run = p;
+	struct task_struct *next = init_task.next_run;
+
+	p->prev_run = &init_task;
+	init_task.next_run = p;
+	p->next_run = next;
+	next->prev_run = p;
}
static inline void del_from_runqueue(struct task_struct * p)
......@@ -229,8 +236,11 @@ inline void wake_up_process(struct task_struct * p)
spin_lock_irqsave(&runqueue_lock, flags);
p->state = TASK_RUNNING;
-	if (!p->next_run)
+	if (!p->next_run) {
add_to_runqueue(p);
+		reschedule_idle(p);
+		nr_running++;
+	}
spin_unlock_irqrestore(&runqueue_lock, flags);
}
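
The run queue is a circular doubly linked list with init_task as its sentinel; the rewritten add_to_runqueue links a task in right behind the sentinel (the head) where the old code appended it at the tail, and wake_up_process now owns the nr_running accounting. A standalone sketch of the two insertions on such a list, with a hypothetical node type:

/* Head- vs tail-insertion on a circular doubly linked list with a
 * sentinel, matching the new and old add_to_runqueue bodies above. */
struct node { struct node *next, *prev; };

static struct node sentinel = { &sentinel, &sentinel };

static void insert_head(struct node *p)	/* new behaviour */
{
	struct node *next = sentinel.next;

	p->prev = &sentinel;
	sentinel.next = p;
	p->next = next;
	next->prev = p;
}

static void insert_tail(struct node *p)	/* old behaviour */
{
	(p->prev = sentinel.prev)->next = p;
	p->next = &sentinel;
	sentinel.prev = p;
}
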
......@@ -420,6 +430,9 @@ int del_timer(struct timer_list * timer)
ret = detach_timer(timer);
timer->next = timer->prev = 0;
spin_unlock_irqrestore(&timerlist_lock, flags);
+	/* Make sure the timer isn't running in parallel.. */
+	synchronize_bh();
return ret;
}
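
del_timer() detaches the timer, but on SMP its handler may still be executing in bottom-half context on another CPU; the added synchronize_bh() waits that out so callers can safely free the timer's data afterwards. A userspace sketch of the same cancel-then-drain discipline using pthreads (hypothetical types; the real code clears the pending state under timerlist_lock):

#include <pthread.h>

struct timer_like {
	pthread_mutex_t run_lock;	/* held by the handler while it runs */
	int pending;			/* cleared to cancel future runs */
};

void cancel_timer(struct timer_like *t)
{
	t->pending = 0;				/* like detach_timer() */
	pthread_mutex_lock(&t->run_lock);	/* like synchronize_bh(): */
	pthread_mutex_unlock(&t->run_lock);	/* any in-flight run is over */
}
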
......@@ -1351,8 +1364,8 @@ static int setscheduler(pid_t pid, int policy,
/*
* We play safe to avoid deadlocks.
*/
-	spin_lock_irq(&scheduler_lock);
-	spin_lock(&runqueue_lock);
+	spin_lock(&scheduler_lock);
+	spin_lock_irq(&runqueue_lock);
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
......@@ -1398,8 +1411,8 @@ static int setscheduler(pid_t pid, int policy,
out_unlock:
read_unlock(&tasklist_lock);
-	spin_unlock(&runqueue_lock);
-	spin_unlock_irq(&scheduler_lock);
+	spin_unlock_irq(&runqueue_lock);
+	spin_unlock(&scheduler_lock);
out_nounlock:
return retval;
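
The reordered lock calls keep acquire and release properly paired; the property that actually prevents deadlock is that every path takes scheduler_lock before runqueue_lock. A two-mutex toy showing the ordering rule:

/* Lock-ordering sketch: all call sites must agree on the order
 * (outer before inner), otherwise two CPUs can each hold one lock
 * and wait forever for the other. */
#include <pthread.h>

static pthread_mutex_t outer = PTHREAD_MUTEX_INITIALIZER; /* scheduler_lock */
static pthread_mutex_t inner = PTHREAD_MUTEX_INITIALIZER; /* runqueue_lock  */

void update(void)
{
	pthread_mutex_lock(&outer);	/* always first  */
	pthread_mutex_lock(&inner);	/* always second */
	/* ... modify scheduling state ... */
	pthread_mutex_unlock(&inner);
	pthread_mutex_unlock(&outer);
}
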
......@@ -1590,13 +1603,13 @@ static void show_task(int nr,struct task_struct * p)
else
printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
-#if 0
-	for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
-		if (((unsigned long *)p->kernel_stack_page)[free])
-			break;
+	{
+		unsigned long * n = (unsigned long *) (p+1);
+		while (!*n)
+			n++;
+		free = (unsigned long) n - (unsigned long)(p+1);
+	}
-#endif
-	printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
+	printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
if (p->p_cptr)
printk("%5d ", p->p_cptr->pid);
else
......