Commit 429383c2 authored by Linus Torvalds

Import 2.1.114

parent 32cf753f
VERSION = 2
PATCHLEVEL = 1
SUBLEVEL = 113
SUBLEVEL = 114
ARCH := $(shell uname -m | sed -e s/i.86/i386/ -e s/sun4u/sparc64/ -e s/arm.*/arm/ -e s/sa110/arm/)
......
/*
* bios32.c - Low-Level PCI Access
*
* $Id: bios32.c,v 1.42 1998/07/26 09:33:07 mj Exp $
* $Id: bios32.c,v 1.43 1998/08/03 15:59:20 mj Exp $
*
* Copyright 1993, 1994 Drew Eckhardt
* Visionary Computing
......@@ -920,6 +920,13 @@ __initfunc(void pcibios_fixup_peer_bridges(void))
struct pci_bus *b = &pci_root;
int i;
/*
* Don't search for peer host bridges if we use config type 2
* since it reads bogus values for non-existent busses and
* chipsets supporting multiple primary busses use conf1 anyway.
*/
if (access_pci == &pci_direct_conf2)
return;
do {
int n = b->subordinate+1;
u16 l;
......@@ -972,8 +979,13 @@ __initfunc(void pcibios_fixup_devices(void))
/*
* Don't enable VGA-compatible cards since they have
* fixed I/O and memory space.
*
* Don't enable disabled IDE interfaces either because
* some BIOSes may reallocate the same address when they
* find that no devices are attached.
*/
if ((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
if (((dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) &&
((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
if (has_io && !(cmd & PCI_COMMAND_IO)) {
printk("PCI: Enabling I/O for device %02x:%02x\n",
......
......@@ -7,49 +7,24 @@
* patches and reporting/debugging problems patiently!
*/
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/delay.h>
#include <linux/mc146818rtc.h>
#include <asm/i82489.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <asm/pgtable.h>
#include <asm/bitops.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "irq.h"
/*
* volatile is justified in this case, it might change
* spontaneously, GCC should not cache it
* volatile is justified in this case, IO-APIC register contents
* might change spontaneously, GCC should not cache it
*/
#define IO_APIC_BASE ((volatile int *)fix_to_virt(FIX_IO_APIC_BASE))
enum mp_irq_source_types {
mp_INT = 0,
mp_NMI = 1,
mp_SMI = 2,
mp_ExtINT = 3
};
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_ExtINT = 7
};
/*
* The structure of the IO-APIC:
*/
struct IO_APIC_reg_00 {
__u32 __reserved_2 : 24,
ID : 4,
......@@ -69,6 +44,17 @@ struct IO_APIC_reg_02 {
__reserved_1 : 4;
} __attribute__ ((packed));
/*
* # of IRQ routing registers
*/
int nr_ioapic_registers = 0;
enum ioapic_irq_destination_types {
dest_Fixed = 0,
dest_LowestPrio = 1,
dest_ExtINT = 7
};
struct IO_APIC_route_entry {
__u32 vector : 8,
delivery_mode : 3, /* 000: FIXED
......@@ -97,13 +83,17 @@ struct IO_APIC_route_entry {
} __attribute__ ((packed));
#define UNEXPECTED_IO_APIC() \
{ \
printk(" WARNING: unexpected IO-APIC, please mail\n"); \
printk(" to linux-smp@vger.rutgers.edu\n"); \
}
/*
* MP-BIOS irq configuration table structures:
*/
enum mp_irq_source_types {
mp_INT = 0,
mp_NMI = 1,
mp_SMI = 2,
mp_ExtINT = 3
};
int nr_ioapic_registers = 0; /* # of IRQ routing registers */
int mp_irq_entries = 0; /* # of MP IRQ source entries */
struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
/* MP IRQ source entries */
......@@ -116,13 +106,13 @@ int mpc_default_type = 0; /* non-0 if default (table-less)
*/
static int irq_2_pin[NR_IRQS];
unsigned int io_apic_read (unsigned int reg)
static inline unsigned int io_apic_read(unsigned int reg)
{
*IO_APIC_BASE = reg;
return *(IO_APIC_BASE+4);
}
void io_apic_write (unsigned int reg, unsigned int value)
static inline void io_apic_write(unsigned int reg, unsigned int value)
{
*IO_APIC_BASE = reg;
*(IO_APIC_BASE+4) = value;
......@@ -141,57 +131,57 @@ static inline void io_apic_sync(void)
* We disable IO-APIC IRQs by setting their 'destination CPU mask' to
* zero. Trick, trick.
*/
void disable_IO_APIC_irq(unsigned int irq)
static inline void disable_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
*(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
*(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
entry.dest.logical.logical_dest = 0x0;
io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
io_apic_sync();
}
}
void enable_IO_APIC_irq(unsigned int irq)
static inline void enable_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
*(((int *)&entry)+1) = io_apic_read(0x11+pin*2);
*(((int *)&entry) + 1) = io_apic_read(0x11 + pin * 2);
entry.dest.logical.logical_dest = 0xff;
io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
}
}
void mask_IO_APIC_irq(unsigned int irq)
static inline void mask_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
*(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
*(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
entry.mask = 1;
io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
io_apic_sync();
}
}
void unmask_IO_APIC_irq(unsigned int irq)
static inline void unmask_IO_APIC_irq(unsigned int irq)
{
int pin = irq_2_pin[irq];
struct IO_APIC_route_entry entry;
if (pin != -1) {
*(((int *)&entry)+0) = io_apic_read(0x10+pin*2);
*(((int *)&entry) + 0) = io_apic_read(0x10 + pin * 2);
entry.mask = 0;
io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
}
}
void clear_IO_APIC_pin (unsigned int pin)
static void __init clear_IO_APIC_pin(unsigned int pin)
{
struct IO_APIC_route_entry entry;
......@@ -200,8 +190,8 @@ void clear_IO_APIC_pin (unsigned int pin)
*/
memset(&entry, 0, sizeof(entry));
entry.mask = 1;
io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
io_apic_write(0x10 + 2 * pin, *(((int *)&entry) + 0));
io_apic_write(0x11 + 2 * pin, *(((int *)&entry) + 1));
}
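The *(((int *)&entry) + 0) and *(((int *)&entry) + 1) casts above access the packed 64-bit route entry as the two 32-bit words the IO-APIC register window exposes. A minimal standalone sketch of the same trick using a union, with a hypothetical simplified field layout (the real struct IO_APIC_route_entry has more fields):

#include <stdio.h>

/*
 * Hypothetical, simplified layout: a 64-bit route entry overlaid
 * with the two 32-bit words written through the register window.
 */
union toy_route_entry {
	struct {
		unsigned int vector    :  8;
		unsigned int delivery  :  3;
		unsigned int dest_mode :  1;
		unsigned int flags     : 20;
		unsigned int reserved  : 24;
		unsigned int dest      :  8;
	} f;
	unsigned int word[2];	/* word[0] <-> reg 0x10+2*pin, word[1] <-> reg 0x11+2*pin */
};

int main(void)
{
	union toy_route_entry e = { .f = { .vector = 0x31, .dest = 0xff } };

	/* The two values an io_apic_write()-style helper would be handed. */
	printf("low %08x  high %08x\n", e.word[0], e.word[1]);
	return 0;
}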
......@@ -214,30 +204,30 @@ void clear_IO_APIC_pin (unsigned int pin)
int pirq_entries [MAX_PIRQS];
int pirqs_enabled;
__initfunc(void ioapic_pirq_setup(char *str, int *ints))
void __init ioapic_pirq_setup(char *str, int *ints)
{
int i, max;
for (i=0; i<MAX_PIRQS; i++)
pirq_entries[i]=-1;
for (i = 0; i < MAX_PIRQS; i++)
pirq_entries[i] = -1;
if (!ints) {
pirqs_enabled=0;
printk("PIRQ redirection SETUP, trusting MP-BIOS.\n");
pirqs_enabled = 0;
printk("PIRQ redirection, trusting MP-BIOS.\n");
} else {
pirqs_enabled=1;
printk("PIRQ redirection SETUP, working around broken MP-BIOS.\n");
pirqs_enabled = 1;
printk("PIRQ redirection, working around broken MP-BIOS.\n");
max = MAX_PIRQS;
if (ints[0] < MAX_PIRQS)
max = ints[0];
for (i=0; i < max; i++) {
for (i = 0; i < max; i++) {
printk("... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
/*
* PIRQs are mapped upside down, usually.
*/
pirq_entries[MAX_PIRQS-i-1]=ints[i+1];
pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
}
}
}
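A worked example of the upside-down PIRQ mapping done by the loop above, assuming MAX_PIRQS is 8 and a hypothetical boot option that supplies two values (the real code also clamps the count to MAX_PIRQS):

#include <stdio.h>

#define MAX_PIRQS 8

int main(void)
{
	/* Hypothetical parse result of a "pirq=5,11"-style option:
	 * ints[0] is the count, ints[1..] are the values. */
	int ints[] = { 2, 5, 11 };
	int pirq_entries[MAX_PIRQS];
	int i;

	for (i = 0; i < MAX_PIRQS; i++)
		pirq_entries[i] = -1;

	/* PIRQs are mapped upside down: PIRQ0 lands at the top slot. */
	for (i = 0; i < ints[0]; i++)
		pirq_entries[MAX_PIRQS - i - 1] = ints[i + 1];

	for (i = 0; i < MAX_PIRQS; i++)
		printf("pirq_entries[%d] = %d\n", i, pirq_entries[i]);
	return 0;	/* prints -1 ... -1, 11, 5 */
}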
......@@ -245,11 +235,11 @@ __initfunc(void ioapic_pirq_setup(char *str, int *ints))
/*
* Find the IRQ entry number of a certain pin.
*/
__initfunc(static int find_irq_entry(int pin, int type))
static int __init find_irq_entry(int pin, int type)
{
int i;
for (i=0; i<mp_irq_entries; i++)
for (i = 0; i < mp_irq_entries; i++)
if ( (mp_irqs[i].mpc_irqtype == type) &&
(mp_irqs[i].mpc_dstirq == pin))
......@@ -261,11 +251,11 @@ __initfunc(static int find_irq_entry(int pin, int type))
/*
* Find the pin to which IRQ0 (ISA) is connected
*/
__initfunc(int find_timer_pin (int type))
static int __init find_timer_pin(int type)
{
int i;
for (i=0; i<mp_irq_entries; i++) {
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA) &&
......@@ -281,11 +271,11 @@ __initfunc(int find_timer_pin (int type))
* Find a specific PCI IRQ entry.
* Not an initfunc, possibly needed by modules
*/
int IO_APIC_get_PCI_irq_vector (int bus, int slot, int pci_pin)
int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pci_pin)
{
int i;
for (i=0; i<mp_irq_entries; i++) {
for (i = 0; i < mp_irq_entries; i++) {
int lbus = mp_irqs[i].mpc_srcbus;
if (IO_APIC_IRQ(mp_irqs[i].mpc_dstirq) &&
......@@ -308,7 +298,7 @@ int IO_APIC_get_PCI_irq_vector (int bus, int slot, int pci_pin)
* to be accepted. Yes, ugh.
*/
static int MPBIOS_polarity(int idx)
static int __init MPBIOS_polarity(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity;
......@@ -367,8 +357,7 @@ static int MPBIOS_polarity(int idx)
return polarity;
}
static int MPBIOS_trigger(int idx)
static int __init MPBIOS_trigger(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int trigger;
......@@ -427,7 +416,7 @@ static int MPBIOS_trigger(int idx)
return trigger;
}
static int trigger_flag_broken (int idx)
static int __init trigger_flag_broken(int idx)
{
int bus = mp_irqs[idx].mpc_srcbus;
int polarity = MPBIOS_polarity(idx);
......@@ -442,7 +431,7 @@ static int trigger_flag_broken (int idx)
return 0;
}
static int irq_polarity (int idx)
static inline int irq_polarity(int idx)
{
/*
* There are no known BIOS bugs wrt polarity. yet.
......@@ -450,16 +439,16 @@ static int irq_polarity (int idx)
return MPBIOS_polarity(idx);
}
static int irq_trigger (int idx)
static inline int irq_trigger(int idx)
{
int trigger = MPBIOS_trigger(idx);
if (trigger_flag_broken (idx))
if (trigger_flag_broken(idx))
trigger = 0;
return trigger;
}
static int pin_2_irq (int idx, int pin)
static int __init pin_2_irq(int idx, int pin)
{
int irq;
int bus = mp_irqs[idx].mpc_srcbus;
......@@ -496,7 +485,7 @@ static int pin_2_irq (int idx, int pin)
/*
* PCI IRQ command line redirection. Yes, limits are hardcoded.
*/
if ((pin>=16) && (pin<=23)) {
if ((pin >= 16) && (pin <= 23)) {
if (pirq_entries[pin-16] != -1) {
if (!pirq_entries[pin-16]) {
printk("disabling PIRQ%d\n", pin-16);
......@@ -510,14 +499,14 @@ static int pin_2_irq (int idx, int pin)
return irq;
}
int IO_APIC_irq_trigger (int irq)
static inline int IO_APIC_irq_trigger(int irq)
{
int idx, pin;
for (pin=0; pin<nr_ioapic_registers; pin++) {
for (pin = 0; pin < nr_ioapic_registers; pin++) {
idx = find_irq_entry(pin,mp_INT);
if ((idx != -1) && (irq == pin_2_irq(idx,pin)))
return (irq_trigger(idx));
return irq_trigger(idx);
}
/*
* nonexistent IRQs are edge default
......@@ -525,7 +514,7 @@ int IO_APIC_irq_trigger (int irq)
return 0;
}
__initfunc(static int assign_irq_vector(int irq))
static int __init assign_irq_vector(int irq)
{
static int current_vector = IRQ0_TRAP_VECTOR, offset = 0;
if (IO_APIC_VECTOR(irq) > 0)
......@@ -541,14 +530,14 @@ __initfunc(static int assign_irq_vector(int irq))
return current_vector;
}
__initfunc(void setup_IO_APIC_irqs (void))
void __init setup_IO_APIC_irqs(void)
{
struct IO_APIC_route_entry entry;
int pin, idx, bus, irq, first_notcon=1;
int pin, idx, bus, irq, first_notcon = 1;
printk("init IO_APIC IRQs\n");
for (pin=0; pin<nr_ioapic_registers; pin++) {
for (pin = 0; pin < nr_ioapic_registers; pin++) {
/*
* add it to the IO-APIC irq-routing table:
......@@ -564,7 +553,7 @@ __initfunc(void setup_IO_APIC_irqs (void))
if (idx == -1) {
if (first_notcon) {
printk(" IO-APIC pin %d", pin);
first_notcon=0;
first_notcon = 0;
} else
printk(", %d", pin);
continue;
......@@ -594,7 +583,7 @@ __initfunc(void setup_IO_APIC_irqs (void))
printk(" not connected.\n");
}
__initfunc(void setup_IO_APIC_irq_ISA_default (unsigned int irq))
void __init setup_IO_APIC_irq_ISA_default(unsigned int irq)
{
struct IO_APIC_route_entry entry;
......@@ -610,8 +599,8 @@ __initfunc(void setup_IO_APIC_irq_ISA_default (unsigned int irq))
entry.vector = assign_irq_vector(irq);
entry.polarity=0;
entry.trigger=0;
entry.polarity = 0;
entry.trigger = 0;
io_apic_write(0x10+2*irq, *(((int *)&entry)+0));
io_apic_write(0x11+2*irq, *(((int *)&entry)+1));
......@@ -620,7 +609,7 @@ __initfunc(void setup_IO_APIC_irq_ISA_default (unsigned int irq))
/*
* Set up a certain pin as ExtINT delivered interrupt
*/
__initfunc(void setup_ExtINT_pin (unsigned int pin))
void __init setup_ExtINT_pin(unsigned int pin)
{
struct IO_APIC_route_entry entry;
......@@ -636,14 +625,20 @@ __initfunc(void setup_ExtINT_pin (unsigned int pin))
entry.vector = 0; /* it's ignored */
entry.polarity=0;
entry.trigger=0;
entry.polarity = 0;
entry.trigger = 0;
io_apic_write(0x10+2*pin, *(((int *)&entry)+0));
io_apic_write(0x11+2*pin, *(((int *)&entry)+1));
}
void print_IO_APIC (void)
void __init UNEXPECTED_IO_APIC(void)
{
printk(" WARNING: unexpected IO-APIC, please mail\n");
printk(" to linux-smp@vger.rutgers.edu\n");
}
void __init print_IO_APIC(void)
{
int i;
struct IO_APIC_reg_00 reg_00;
......@@ -695,7 +690,7 @@ void print_IO_APIC (void)
printk(" NR Log Phy ");
printk("Mask Trig IRR Pol Stat Dest Deli Vect: \n");
for (i=0; i<=reg_01.entries; i++) {
for (i = 0; i <= reg_01.entries; i++) {
struct IO_APIC_route_entry entry;
*(((int *)&entry)+0) = io_apic_read(0x10+i*2);
......@@ -720,7 +715,7 @@ void print_IO_APIC (void)
}
printk("IRQ to pin mappings:\n");
for (i=0; i<NR_IRQS; i++)
for (i = 0; i < NR_IRQS; i++)
printk("%d->%d ", i, irq_2_pin[i]);
printk("\n");
......@@ -729,15 +724,15 @@ void print_IO_APIC (void)
return;
}
__initfunc(static void init_sym_mode (void))
static void __init init_sym_mode(void)
{
int i, pin;
for (i=0; i<NR_IRQS; i++)
for (i = 0; i < NR_IRQS; i++)
irq_2_pin[i] = -1;
if (!pirqs_enabled)
for (i=0; i<MAX_PIRQS; i++)
pirq_entries[i]=-1;
for (i = 0; i < MAX_PIRQS; i++)
pirq_entries[i] = -1;
printk("enabling symmetric IO mode... ");
......@@ -759,18 +754,18 @@ __initfunc(static void init_sym_mode (void))
/*
* Do not trust the IO-APIC being empty at bootup
*/
for (pin=0; pin<nr_ioapic_registers; pin++)
clear_IO_APIC_pin (pin);
for (pin = 0; pin < nr_ioapic_registers; pin++)
clear_IO_APIC_pin(pin);
}
/*
* Not an initfunc, needed by the reboot code
*/
void init_pic_mode (void)
void init_pic_mode(void)
{
printk("disabling symmetric IO mode... ");
outb_p (0x70, 0x22);
outb_p (0x00, 0x23);
outb_p(0x70, 0x22);
outb_p(0x00, 0x23);
printk("...done.\n");
}
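For reference, the outb_p() pair programs the IMCR (Interrupt Mode Configuration Register) described in the Intel MP specification: writing 0x70 to port 0x22 selects the IMCR, and the value written through port 0x23 selects the routing, 0x00 for PIC mode. A hedged sketch of the opposite switch, valid in the same file context (asm/io.h is already included above); the real version lives in the elided body of init_sym_mode():

	outb_p(0x70, 0x22);	/* select the IMCR */
	outb_p(0x01, 0x23);	/* 0x01 = route interrupts to the local APIC */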
......@@ -782,7 +777,7 @@ struct ioapic_list_entry {
char * product_id;
};
struct ioapic_list_entry ioapic_whitelist [] = {
struct ioapic_list_entry __initdata ioapic_whitelist [] = {
{ "INTEL " , "PR440FX " },
{ "INTEL " , "82440FX " },
......@@ -790,22 +785,22 @@ struct ioapic_list_entry ioapic_whitelist [] = {
{ 0 , 0 }
};
struct ioapic_list_entry ioapic_blacklist [] = {
struct ioapic_list_entry __initdata ioapic_blacklist [] = {
{ "OEM00000" , "PROD00000000" },
{ 0 , 0 }
};
__initfunc(static int in_ioapic_list (struct ioapic_list_entry * table))
static int __init in_ioapic_list(struct ioapic_list_entry * table)
{
for (;table->oem_id; table++)
for ( ; table->oem_id ; table++)
if ((!strcmp(table->oem_id,ioapic_OEM_ID)) &&
(!strcmp(table->product_id,ioapic_Product_ID)))
return 1;
return 0;
}
__initfunc(static int ioapic_whitelisted (void))
static int __init ioapic_whitelisted(void)
{
/*
* Right now, whitelist everything to see whether the new parsing
......@@ -818,12 +813,12 @@ __initfunc(static int ioapic_whitelisted (void))
#endif
}
__initfunc(static int ioapic_blacklisted (void))
static int __init ioapic_blacklisted(void)
{
return in_ioapic_list(ioapic_blacklist);
}
__initfunc(static void setup_ioapic_id (void))
static void __init setup_ioapic_id(void)
{
struct IO_APIC_reg_00 reg_00;
......@@ -857,11 +852,11 @@ __initfunc(static void setup_ioapic_id (void))
panic("could not set ID");
}
__initfunc(static void construct_default_ISA_mptable (void))
static void __init construct_default_ISA_mptable(void)
{
int i, pos=0;
int i, pos = 0;
for (i=0; i<16; i++) {
for (i = 0; i < 16; i++) {
if (!IO_APIC_IRQ(i))
continue;
......@@ -903,14 +898,11 @@ __initfunc(static void construct_default_ISA_mptable (void))
* - if this function detects that timer IRQs are defunct, then we fall
* back to ISA timer IRQs
*/
__initfunc(static int timer_irq_works (void))
static int __init timer_irq_works(void)
{
unsigned int t1=jiffies;
unsigned long flags;
unsigned int t1 = jiffies;
save_flags(flags);
sti();
udelay(10*10000);
if (jiffies-t1>1)
......@@ -919,8 +911,6 @@ __initfunc(static int timer_irq_works (void))
return 0;
}
#ifdef __SMP__
/*
* In the SMP+IOAPIC case it might happen that there are an unspecified
* number of pending IRQ events unhandled. These cases are very rare,
......@@ -928,7 +918,7 @@ __initfunc(static int timer_irq_works (void))
* better to do it this way as thus we do not have to be aware of
* 'pending' interrupts in the IRQ path, except at this point.
*/
static inline void self_IPI (unsigned int irq)
static inline void self_IPI(unsigned int irq)
{
irq_desc_t *desc = irq_desc + irq;
......@@ -1023,8 +1013,8 @@ static void do_edge_ioapic_IRQ(unsigned int irq, int cpu, struct pt_regs * regs)
irq_exit(cpu, irq);
}
static void do_level_ioapic_IRQ (unsigned int irq, int cpu,
struct pt_regs * regs)
static void do_level_ioapic_IRQ(unsigned int irq, int cpu,
struct pt_regs * regs)
{
irq_desc_t *desc = irq_desc + irq;
struct irqaction * action;
......@@ -1095,7 +1085,7 @@ static struct hw_interrupt_type ioapic_level_irq_type = {
disable_level_ioapic_irq
};
void init_IO_APIC_traps(void)
static inline void init_IO_APIC_traps(void)
{
int i;
/*
......@@ -1124,7 +1114,6 @@ void init_IO_APIC_traps(void)
}
}
}
#endif
/*
* This code may look a bit paranoid, but it's supposed to cooperate with
......@@ -1132,38 +1121,38 @@ void init_IO_APIC_traps(void)
* is so screwy. Thanks to Brian Perkins for testing/hacking this beast
* fanatically on his truly buggy board.
*/
__initfunc(static void check_timer (void))
static inline void check_timer(void)
{
int pin1, pin2;
pin1 = find_timer_pin (mp_INT);
pin2 = find_timer_pin (mp_ExtINT);
pin1 = find_timer_pin(mp_INT);
pin2 = find_timer_pin(mp_ExtINT);
if (!timer_irq_works ()) {
if (!timer_irq_works()) {
if (pin1 != -1)
printk("..MP-BIOS bug: 8254 timer not connected to IO-APIC\n");
printk("...trying to set up timer as ExtINT... ");
if (pin2 != -1) {
printk(".. (found pin %d) ...", pin2);
setup_ExtINT_pin (pin2);
setup_ExtINT_pin(pin2);
make_8259A_irq(0);
}
if (!timer_irq_works ()) {
if (!timer_irq_works()) {
printk(" failed.\n");
printk("...trying to set up timer as BP IRQ...");
/*
* Just in case ...
*/
if (pin1 != -1)
clear_IO_APIC_pin (pin1);
clear_IO_APIC_pin(pin1);
if (pin2 != -1)
clear_IO_APIC_pin (pin2);
clear_IO_APIC_pin(pin2);
make_8259A_irq(0);
if (!timer_irq_works ()) {
if (!timer_irq_works()) {
printk(" failed.\n");
panic("IO-APIC + timer doesn't work!");
}
......@@ -1172,7 +1161,7 @@ __initfunc(static void check_timer (void))
}
}
__initfunc(void setup_IO_APIC (void))
void __init setup_IO_APIC(void)
{
init_sym_mode();
......@@ -1216,7 +1205,7 @@ __initfunc(void setup_IO_APIC (void))
* Set up the IO-APIC IRQ routing table by parsing the MP-BIOS
* mptable:
*/
setup_IO_APIC_irqs ();
setup_IO_APIC_irqs();
init_IRQ_SMP();
check_timer();
......
......@@ -56,17 +56,11 @@ extern int handle_IRQ_event(unsigned int, struct pt_regs *);
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
void enable_IO_APIC_irq (unsigned int irq);
void disable_IO_APIC_irq (unsigned int irq);
void unmask_IO_APIC_irq (unsigned int irq);
void mask_IO_APIC_irq (unsigned int irq);
void set_8259A_irq_mask (unsigned int irq);
int i8259A_irq_pending (unsigned int irq);
void ack_APIC_irq (void);
void setup_IO_APIC (void);
void init_IO_APIC_traps(void);
int IO_APIC_get_PCI_irq_vector (int bus, int slot, int fn);
int IO_APIC_irq_trigger (int irq);
void make_8259A_irq (unsigned int irq);
void send_IPI (int dest, int vector);
void init_pic_mode (void);
......
......@@ -441,13 +441,26 @@ void show_regs(struct pt_regs * regs)
*
* This extra buffer essentially acts to make for less
* "jitter" in the allocations..
*
* On SMP we don't do this right now because:
* - we aren't holding any locks when called, and we might
* as well just depend on the generic memory management
* to do proper locking for us instead of complicating it
* here.
* - if you use SMP you have a beefy enough machine that
* this shouldn't matter..
*/
#ifndef __SMP__
#define EXTRA_TASK_STRUCT 16
static struct task_struct * task_struct_stack[EXTRA_TASK_STRUCT];
static int task_struct_stack_ptr = -1;
#endif
struct task_struct * alloc_task_struct(void)
{
#ifndef EXTRA_TASK_STRUCT
return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
#else
int index;
struct task_struct *ret;
......@@ -464,16 +477,19 @@ struct task_struct * alloc_task_struct(void)
}
}
return ret;
#endif
}
void free_task_struct(struct task_struct *p)
{
#ifdef EXTRA_TASK_STRUCT
int index = task_struct_stack_ptr+1;
if (index < EXTRA_TASK_STRUCT) {
task_struct_stack[index] = p;
task_struct_stack_ptr = index;
} else
#endif
free_pages((unsigned long) p, 1);
}
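alloc_task_struct()/free_task_struct() above keep a small LIFO of recently freed two-page blocks in front of the page allocator; like the original, this is unlocked and hence UP-only. A self-contained model of the pattern with hypothetical names, malloc() standing in for __get_free_pages():

#include <stdlib.h>

#define EXTRA_BLOBS 16

struct blob { char payload[8192]; };	/* stands in for the two-page task_struct */

static struct blob *blob_stack[EXTRA_BLOBS];
static int blob_stack_ptr = -1;		/* -1 means the cache is empty */

struct blob *alloc_blob(void)
{
	int index = blob_stack_ptr;

	/* Reuse the most recently freed blob if one is cached. */
	if (index >= 0) {
		blob_stack_ptr = index - 1;
		return blob_stack[index];
	}
	return malloc(sizeof(struct blob));
}

void free_blob(struct blob *p)
{
	int index = blob_stack_ptr + 1;

	/* Push onto the cache if there is room, otherwise really free it. */
	if (index < EXTRA_BLOBS) {
		blob_stack[index] = p;
		blob_stack_ptr = index;
		return;
	}
	free(p);
}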
......
......@@ -108,7 +108,7 @@ static void show_registers(struct pt_regs *regs)
unsigned long *stack, addr, module_start, module_end;
extern char _stext, _etext;
esp = (unsigned long) &regs->esp;
esp = (unsigned long) (1+regs);
ss = __KERNEL_DS;
if (regs->xcs & 3) {
in_kernel = 0;
......@@ -169,8 +169,8 @@ static void show_registers(struct pt_regs *regs)
printk("\nCode: ");
for(i=0;i<20;i++)
printk("%02x ", ((unsigned char *)regs->eip)[i]);
printk("\n");
}
printk("\n");
}
spinlock_t die_lock;
......
......@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <asm/system.h>
#include <asm/uaccess.h>
......
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/zorro.h>
......
......@@ -12,7 +12,6 @@
** Created: 12/10/97 by Alain Malek
*/
#include <linux/config.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <asm/amigayle.h>
......
......@@ -110,8 +110,8 @@ int nbd_xmit(int send, struct socket *sock, char *buf, int size)
if (result <= 0) {
#ifdef PARANOIA
printk(KERN_ERR "NBD: %s - sock=%d at buf=%d, size=%d returned %d.\n",
send ? "send" : "receive", (int) sock, (int) buf, size, result);
printk(KERN_ERR "NBD: %s - sock=%ld at buf=%ld, size=%d returned %d.\n",
send ? "send" : "receive", (long) sock, (long) buf, size, result);
#endif
break;
}
......@@ -371,8 +371,8 @@ static int nbd_ioctl(struct inode *inode, struct file *file,
return 0;
#ifdef PARANOIA
case NBD_PRINT_DEBUG:
printk(KERN_INFO "NBD device %d: head = %x, tail = %x. Global: in %d, out %d\n",
dev, (int) lo->head, (int) lo->tail, requests_in, requests_out);
printk(KERN_INFO "NBD device %d: head = %lx, tail = %lx. Global: in %d, out %d\n",
dev, (long) lo->head, (long) lo->tail, requests_in, requests_out);
return 0;
#endif
}
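These format fixes matter on 64-bit targets, where casting a pointer to int drops the upper half of the address; widening to long and printing with %lx keeps the full value. A hedged illustration:

#include <stdio.h>

int main(void)
{
	char buf[16];
	char *p = buf;

	/* Wrong on LP64: a 32-bit cast keeps only the low half. */
	printf("truncated: %x\n", (unsigned int)(unsigned long)p);
	/* What the patch does: widen to long and print with %lx. */
	printf("full:      %lx\n", (unsigned long)p);
	return 0;
}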
......
......@@ -29,7 +29,6 @@
#include <linux/module.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
......
......@@ -8,7 +8,6 @@
#include <linux/blk.h>
#include <linux/sched.h>
#include <linux/version.h>
#include <linux/config.h>
#include <linux/zorro.h>
#include <asm/setup.h>
......
......@@ -186,7 +186,8 @@ lockd(struct svc_rqst *rqstp)
nlm_shutdown_hosts();
nlmsvc_pid = 0;
} else
printk("lockd: new process, skipping host shutdown\n");
printk(KERN_DEBUG
"lockd: new process, skipping host shutdown\n");
wake_up(&lockd_exit);
/* Exit the RPC thread */
......@@ -205,6 +206,7 @@ lockd(struct svc_rqst *rqstp)
int
lockd_up(void)
{
static int warned = 0;
struct svc_serv * serv;
int error = 0;
......@@ -225,27 +227,32 @@ lockd_up(void)
* we should be the first user ...
*/
if (nlmsvc_users > 1)
printk("lockd_up: no pid, %d users??\n", nlmsvc_users);
printk(KERN_WARNING
"lockd_up: no pid, %d users??\n", nlmsvc_users);
error = -ENOMEM;
serv = svc_create(&nlmsvc_program, 0, NLMSVC_XDRSIZE);
if (!serv) {
printk("lockd_up: create service failed\n");
printk(KERN_WARNING "lockd_up: create service failed\n");
goto out;
}
if ((error = svc_makesock(serv, IPPROTO_UDP, 0)) < 0
|| (error = svc_makesock(serv, IPPROTO_TCP, 0)) < 0) {
printk("lockd_up: makesock failed, error=%d\n", error);
if (warned++ == 0)
printk(KERN_WARNING
"lockd_up: makesock failed, error=%d\n", error);
goto destroy_and_out;
}
}
warned = 0;
/*
* Create the kernel thread and wait for it to start.
*/
error = svc_create_thread(lockd, serv);
if (error) {
printk("lockd_up: create thread failed, error=%d\n", error);
printk(KERN_WARNING
"lockd_up: create thread failed, error=%d\n", error);
goto destroy_and_out;
}
sleep_on(&lockd_start);
......@@ -267,17 +274,21 @@ lockd_up(void)
void
lockd_down(void)
{
static int warned = 0;
down(&nlmsvc_sema);
if (nlmsvc_users) {
if (--nlmsvc_users)
goto out;
} else
printk("lockd_down: no users! pid=%d\n", nlmsvc_pid);
printk(KERN_WARNING "lockd_down: no users! pid=%d\n", nlmsvc_pid);
if (!nlmsvc_pid) {
printk("lockd_down: nothing to do!\n");
if (warned++ == 0)
printk(KERN_WARNING "lockd_down: no lockd running.\n");
goto out;
}
warned = 0;
kill_proc(nlmsvc_pid, SIGKILL, 1);
/*
......@@ -289,7 +300,8 @@ lockd_down(void)
interruptible_sleep_on(&lockd_exit);
current->timeout = 0;
if (nlmsvc_pid) {
printk("lockd_down: lockd failed to exit, clearing pid\n");
printk(KERN_WARNING
"lockd_down: lockd failed to exit, clearing pid\n");
nlmsvc_pid = 0;
}
spin_lock_irq(&current->sigmask_lock);
......
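lockd_up() and lockd_down() now use a warn-once pattern: a static counter suppresses repeats of the same message, and success resets it so the warning re-arms for the next failure. A standalone sketch of the pattern with hypothetical names:

#include <stdio.h>

static int try_make_socket(int attempt)
{
	return attempt < 3 ? -1 : 0;	/* hypothetical: fails twice, then works */
}

int bring_up(int attempt)
{
	static int warned = 0;
	int error = try_make_socket(attempt);

	if (error < 0) {
		if (warned++ == 0)	/* only the first consecutive failure logs */
			fprintf(stderr, "bring_up: makesock failed, error=%d\n", error);
		return error;
	}
	warned = 0;	/* success re-arms the warning */
	return 0;
}

int main(void)
{
	int attempt;

	for (attempt = 1; attempt <= 4; attempt++)
		bring_up(attempt);	/* logs once, for attempt 1 only */
	return 0;
}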
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
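The lock_depth counter makes the big kernel lock recursive per task: only the -1 to 0 transition takes the spinlock, and only the 0 to -1 transition drops it (do_fork() below initializes lock_depth to -1). A hedged user-space model of those semantics, with a pthread mutex standing in for the spinlock and a plain struct standing in for current:

#include <assert.h>
#include <pthread.h>

static pthread_mutex_t kernel_flag = PTHREAD_MUTEX_INITIALIZER;
static struct { int lock_depth; } task = { -1 };	/* -1 = no lock */

static void lock_kernel(void)
{
	if (!++task.lock_depth)			/* -1 -> 0: first acquisition */
		pthread_mutex_lock(&kernel_flag);
}

static void unlock_kernel(void)
{
	if (--task.lock_depth < 0)		/* 0 -> -1: last release */
		pthread_mutex_unlock(&kernel_flag);
}

int main(void)
{
	lock_kernel();
	lock_kernel();		/* nested: depth 0 -> 1, mutex not taken again */
	unlock_kernel();	/* depth 1 -> 0, mutex stays held */
	unlock_kernel();	/* depth 0 -> -1, mutex released */
	assert(task.lock_depth == -1);
	return 0;
}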
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* i386 SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
__asm__ __volatile__(
"incl %1\n\t"
"jne 9f"
spin_lock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
}
extern __inline__ void unlock_kernel(void)
{
__asm__ __volatile__(
"decl %1\n\t"
"jns 9f\n"
spin_unlock_string
"\n9:"
:"=m" (__dummy_lock(&kernel_flag)),
"=m" (current->lock_depth));
}
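The i386 assembly variant encodes the same depth test in the flags: incl sets ZF only on the -1 to 0 transition, so "jne 9f" skips the spin-lock body on nested acquisitions, and decl/jns mirrors that on release. A hedged C rendering of the control flow with toy names:

struct toy_task { int lock_depth; };

static void toy_spin_lock(void) { /* would spin on bit 0 of the lock */ }
static void toy_spin_unlock(void) { /* would clear bit 0 of the lock */ }

void toy_lock_kernel(struct toy_task *t)
{
	if (++t->lock_depth == 0)	/* incl %1 ; jne 9f */
		toy_spin_lock();	/* spin_lock_string */
}					/* 9: */

void toy_unlock_kernel(struct toy_task *t)
{
	if (--t->lock_depth < 0)	/* decl %1 ; jns 9f */
		toy_spin_unlock();	/* spin_unlock_string */
}					/* 9: */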
......@@ -128,8 +128,7 @@ typedef struct {
typedef struct { unsigned long a[100]; } __dummy_lock_t;
#define __dummy_lock(lock) (*(__dummy_lock_t *)(lock))
#define spin_lock(lock) \
__asm__ __volatile__( \
#define spin_lock_string \
"\n1:\t" \
"lock ; btsl $0,%0\n\t" \
"jc 2f\n" \
......@@ -138,12 +137,19 @@ __asm__ __volatile__( \
"testb $1,%0\n\t" \
"jne 2b\n\t" \
"jmp 1b\n" \
".previous" \
".previous"
#define spin_unlock_string \
"lock ; btrl $0,%0"
#define spin_lock(lock) \
__asm__ __volatile__( \
spin_lock_string \
:"=m" (__dummy_lock(lock)))
#define spin_unlock(lock) \
__asm__ __volatile__( \
"lock ; btrl $0,%0" \
spin_unlock_string \
:"=m" (__dummy_lock(lock)))
#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
......
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
/*
* <asm/smplock.h>
*
* Default SMP lock implementation
*/
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth >= 0) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth >= 0) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
if (!++current->lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
if (--current->lock_depth < 0)
spin_unlock(&kernel_flag);
}
......@@ -213,9 +213,16 @@ struct task_struct {
/* various fields */
long counter;
long priority;
struct linux_binfmt *binfmt;
/* SMP and runqueue state */
int has_cpu;
int processor;
int last_processor;
int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
struct task_struct *next_task, *prev_task;
struct task_struct *next_run, *prev_run;
/* task state */
struct linux_binfmt *binfmt;
int exit_code, exit_signal;
int pdeath_signal; /* The signal sent when the parent dies */
/* ??? */
......@@ -282,18 +289,12 @@ struct task_struct {
/* memory management info */
struct mm_struct *mm;
/* signal handlers */
spinlock_t sigmask_lock; /* Protects signal and blocked */
struct signal_struct *sig;
sigset_t signal, blocked;
struct signal_queue *sigqueue, **sigqueue_tail;
unsigned long sas_ss_sp;
size_t sas_ss_size;
/* SMP state */
int has_cpu;
int processor;
int last_processor;
int lock_depth; /* Lock depth. We can context switch in and out of holding a syscall kernel lock... */
/* Spinlocks for various pieces or per-task state. */
spinlock_t sigmask_lock; /* Protects signal and blocked */
};
/*
......@@ -338,8 +339,9 @@ struct task_struct {
#define INIT_TASK \
/* state etc */ { 0,0,0,KERNEL_DS,&default_exec_domain,0, \
/* counter */ DEF_PRIORITY,DEF_PRIORITY, \
/* binfmt */ NULL, \
/* SMP */ 0,0,0,-1, \
/* schedlink */ &init_task,&init_task, &init_task, &init_task, \
/* binfmt */ NULL, \
/* ec,brk... */ 0,0,0,0,0,0, \
/* pid etc.. */ 0,0,0,0,0, \
/* proc links*/ &init_task,&init_task,NULL,NULL,NULL, \
......@@ -365,10 +367,7 @@ struct task_struct {
/* fs */ &init_fs, \
/* files */ &init_files, \
/* mm */ &init_mm, \
/* signals */ &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, \
0, 0, \
/* SMP */ 0,0,0,0, \
/* locks */ INIT_LOCKS \
/* signals */ INIT_LOCKS, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
}
union task_union {
......
......@@ -10,60 +10,7 @@
#else
#include <linux/interrupt.h>
#include <asm/spinlock.h>
extern spinlock_t kernel_flag;
/*
* Release global kernel lock and global interrupt lock
*/
#define release_kernel_lock(task, cpu) \
do { \
if (task->lock_depth) \
spin_unlock(&kernel_flag); \
release_irqlock(cpu); \
__sti(); \
} while (0)
/*
* Re-acquire the kernel lock
*/
#define reacquire_kernel_lock(task) \
do { \
if (task->lock_depth) \
spin_lock(&kernel_flag); \
} while (0)
/*
* Getting the big kernel lock.
*
* This cannot happen asynchronously,
* so we only need to worry about other
* CPU's.
*/
extern __inline__ void lock_kernel(void)
{
struct task_struct *tsk = current;
int lock_depth;
lock_depth = tsk->lock_depth;
tsk->lock_depth = lock_depth+1;
if (!lock_depth)
spin_lock(&kernel_flag);
}
extern __inline__ void unlock_kernel(void)
{
struct task_struct *tsk = current;
int lock_depth;
lock_depth = tsk->lock_depth-1;
tsk->lock_depth = lock_depth;
if (!lock_depth)
spin_unlock(&kernel_flag);
}
#include <asm/smplock.h>
#endif /* __SMP__ */
......
......@@ -36,21 +36,31 @@ static void release(struct task_struct * p)
{
if (p != current) {
#ifdef __SMP__
/* FIXME! Cheesy, but kills the window... -DaveM */
do {
barrier();
} while (p->has_cpu);
spin_unlock_wait(&scheduler_lock);
/*
* Wait to make sure the process isn't active on any
* other CPU
*/
for (;;) {
int has_cpu;
spin_lock(&scheduler_lock);
has_cpu = p->has_cpu;
spin_unlock(&scheduler_lock);
if (!has_cpu)
break;
do {
barrier();
} while (p->has_cpu);
}
#endif
charge_uid(p, -1);
nr_tasks--;
add_free_taskslot(p->tarray_ptr);
{
write_lock_irq(&tasklist_lock);
unhash_pid(p);
REMOVE_LINKS(p);
write_unlock_irq(&tasklist_lock);
}
write_lock_irq(&tasklist_lock);
unhash_pid(p);
REMOVE_LINKS(p);
write_unlock_irq(&tasklist_lock);
release_thread(p);
current->cmin_flt += p->min_flt + p->cmin_flt;
current->cmaj_flt += p->maj_flt + p->cmaj_flt;
......@@ -340,35 +350,39 @@ static void exit_notify(void)
NORET_TYPE void do_exit(long code)
{
struct task_struct *tsk = current;
if (in_interrupt())
printk("Aiee, killing interrupt handler\n");
if (current == task[0])
if (!tsk->pid)
panic("Attempted to kill the idle task!");
tsk->flags |= PF_EXITING;
del_timer(&tsk->real_timer);
lock_kernel();
fake_volatile:
current->flags |= PF_EXITING;
#ifdef CONFIG_BSD_PROCESS_ACCT
acct_process(code);
#endif
del_timer(&current->real_timer);
sem_exit();
__exit_mm(current);
__exit_mm(tsk);
#if CONFIG_AP1000
exit_msc(current);
exit_msc(tsk);
#endif
__exit_files(current);
__exit_fs(current);
__exit_sighand(current);
__exit_files(tsk);
__exit_fs(tsk);
__exit_sighand(tsk);
exit_thread();
current->state = TASK_ZOMBIE;
current->exit_code = code;
tsk->state = TASK_ZOMBIE;
tsk->exit_code = code;
exit_notify();
#ifdef DEBUG_PROC_TREE
audit_ptree();
#endif
if (current->exec_domain && current->exec_domain->module)
__MOD_DEC_USE_COUNT(current->exec_domain->module);
if (current->binfmt && current->binfmt->module)
__MOD_DEC_USE_COUNT(current->binfmt->module);
if (tsk->exec_domain && tsk->exec_domain->module)
__MOD_DEC_USE_COUNT(tsk->exec_domain->module);
if (tsk->binfmt && tsk->binfmt->module)
__MOD_DEC_USE_COUNT(tsk->binfmt->module);
schedule();
/*
* In order to get rid of the "volatile function does return" message
......@@ -388,9 +402,7 @@ NORET_TYPE void do_exit(long code)
asmlinkage int sys_exit(int error_code)
{
lock_kernel();
do_exit((error_code&0xff)<<8);
unlock_kernel();
}
asmlinkage int sys_wait4(pid_t pid,unsigned int * stat_addr, int options, struct rusage * ru)
......
......@@ -476,7 +476,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
{
int nr;
int error = -ENOMEM;
int retval = -ENOMEM;
struct task_struct *p;
down(&current->mm->mmap_sem);
......@@ -485,7 +485,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
if (!p)
goto bad_fork;
error = -EAGAIN;
retval = -EAGAIN;
nr = find_empty_process();
if (nr < 0)
goto bad_fork_free;
......@@ -504,8 +504,16 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
copy_flags(clone_flags, p);
p->pid = get_pid(clone_flags);
p->next_run = NULL;
p->prev_run = NULL;
/*
* This is a "shadow run" state. The process
* is marked runnable, but isn't actually on
* any run queue yet.. (that happens at the
* very end).
*/
p->state = TASK_RUNNING;
p->next_run = p;
p->prev_run = p;
p->p_pptr = p->p_opptr = current;
p->p_cptr = NULL;
init_waitqueue(&p->wait_chldexit);
......@@ -535,12 +543,13 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
spin_lock_init(&p->sigmask_lock);
}
#endif
p->lock_depth = 0;
p->lock_depth = -1; /* -1 = no lock */
p->start_time = jiffies;
p->tarray_ptr = &task[nr];
*p->tarray_ptr = p;
{
/* This makes it visible to the rest of the system */
unsigned long flags;
write_lock_irqsave(&tasklist_lock, flags);
SET_LINKS(p);
......@@ -550,7 +559,7 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
nr_tasks++;
error = -ENOMEM;
retval = -ENOMEM;
/* copy all the process information */
if (copy_files(clone_flags, p))
goto bad_fork_cleanup;
......@@ -560,8 +569,8 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
goto bad_fork_cleanup_fs;
if (copy_mm(nr, clone_flags, p))
goto bad_fork_cleanup_sighand;
error = copy_thread(nr, clone_flags, usp, p, regs);
if (error)
retval = copy_thread(nr, clone_flags, usp, p, regs);
if (retval)
goto bad_fork_cleanup_sighand;
p->semundo = NULL;
......@@ -579,18 +588,18 @@ int do_fork(unsigned long clone_flags, unsigned long usp, struct pt_regs *regs)
current->counter >>= 1;
p->counter = current->counter;
if(p->pid) {
wake_up_process(p); /* do this last, just in case */
} else {
p->state = TASK_RUNNING;
p->next_run = p->prev_run = p;
/* Ok, add it to the run-queues, let it rip! */
retval = p->pid;
if (retval) {
p->next_run = NULL;
p->prev_run = NULL;
wake_up_process(p); /* do this last */
}
++total_forks;
error = p->pid;
bad_fork:
up(&current->mm->mmap_sem);
unlock_kernel();
return error;
return retval;
bad_fork_cleanup_sighand:
exit_sighand(p);
......
......@@ -146,14 +146,21 @@ static inline void reschedule_idle(struct task_struct * p)
current->need_resched = 1;
}
/*
* Careful!
*
* This has to add the process to the _beginning_ of the
* run-queue, not the end. See the comment about "This is
* subtle" in the scheduler proper..
*/
static inline void add_to_runqueue(struct task_struct * p)
{
nr_running++;
reschedule_idle(p);
(p->prev_run = init_task.prev_run)->next_run = p;
p->next_run = &init_task;
init_task.prev_run = p;
struct task_struct *next = init_task.next_run;
p->prev_run = &init_task;
init_task.next_run = p;
p->next_run = next;
next->prev_run = p;
}
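add_to_runqueue() now links the task in at the head of the circular run-queue, with init_task as the sentinel, so the scheduler's scan encounters a freshly woken task before longer-waiting ones (per the "This is subtle" comment it references). A minimal standalone model of the sentinel-based head insertion:

#include <assert.h>
#include <stddef.h>

struct toy_task {
	struct toy_task *next_run, *prev_run;
	int pid;
};

/* Sentinel: the idle task anchors the circular doubly-linked run-queue. */
static struct toy_task init_task = { &init_task, &init_task, 0 };

static void add_to_runqueue_head(struct toy_task *p)
{
	struct toy_task *next = init_task.next_run;

	p->prev_run = &init_task;
	init_task.next_run = p;
	p->next_run = next;
	next->prev_run = p;
}

int main(void)
{
	struct toy_task a = { NULL, NULL, 1 }, b = { NULL, NULL, 2 };

	add_to_runqueue_head(&a);
	add_to_runqueue_head(&b);	/* b now precedes a: woken last, seen first */
	assert(init_task.next_run == &b && b.next_run == &a);
	return 0;
}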
static inline void del_from_runqueue(struct task_struct * p)
......@@ -229,8 +236,11 @@ inline void wake_up_process(struct task_struct * p)
spin_lock_irqsave(&runqueue_lock, flags);
p->state = TASK_RUNNING;
if (!p->next_run)
if (!p->next_run) {
add_to_runqueue(p);
reschedule_idle(p);
nr_running++;
}
spin_unlock_irqrestore(&runqueue_lock, flags);
}
......@@ -420,6 +430,9 @@ int del_timer(struct timer_list * timer)
ret = detach_timer(timer);
timer->next = timer->prev = 0;
spin_unlock_irqrestore(&timerlist_lock, flags);
/* Make sure the timer isn't running in parallel.. */
synchronize_bh();
return ret;
}
......@@ -1351,8 +1364,8 @@ static int setscheduler(pid_t pid, int policy,
/*
* We play safe to avoid deadlocks.
*/
spin_lock_irq(&scheduler_lock);
spin_lock(&runqueue_lock);
spin_lock(&scheduler_lock);
spin_lock_irq(&runqueue_lock);
read_lock(&tasklist_lock);
p = find_process_by_pid(pid);
......@@ -1398,8 +1411,8 @@ static int setscheduler(pid_t pid, int policy,
out_unlock:
read_unlock(&tasklist_lock);
spin_unlock(&runqueue_lock);
spin_unlock_irq(&scheduler_lock);
spin_unlock_irq(&runqueue_lock);
spin_unlock(&scheduler_lock);
out_nounlock:
return retval;
......@@ -1590,13 +1603,13 @@ static void show_task(int nr,struct task_struct * p)
else
printk(" %016lx ", thread_saved_pc(&p->tss));
#endif
#if 0
for (free = 1; free < PAGE_SIZE/sizeof(long) ; free++) {
if (((unsigned long *)p->kernel_stack_page)[free])
break;
{
unsigned long * n = (unsigned long *) (p+1);
while (!*n)
n++;
free = (unsigned long) n - (unsigned long)(p+1);
}
#endif
printk("%5lu %5d %6d ", free*sizeof(long), p->pid, p->p_pptr->pid);
printk("%5lu %5d %6d ", free, p->pid, p->p_pptr->pid);
if (p->p_cptr)
printk("%5d ", p->p_cptr->pid);
else
......