Commit 892692fb authored by Linus Torvalds

Import 1.3.84

parent 4ff1c5b1
The Linux Digiboard Driver
--------------------------
The Digiboard Driver for Linux supports the following boards:
DigiBoard PC/Xe, PC/Xi, PC/Xeve
Limitations:
------------
Currently the driver does not do autoprobing. You have to configure
the driver with the correct I/O address in drivers/char/pcxxconfig.h.
The preconfigured I/O address is 0200h and the default memory address is
0D0000h. Use these defaults and you will not have to configure anything.
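If you are not sure whether the default I/O range is still free on your
machine, the kernel's port registry gives a quick (if incomplete) check,
since only ports claimed by already-loaded drivers show up:

    cat /proc/ioports    # an entry covering 0200 means a conflict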
Supporting Tools:
-----------------
Some tools and more detailed, up-to-date information can be found at
ftp://ftp.fuller.edu/Linux/digi
The "ditty" tool described in the Digiboard Manuals for other Unixes
is also available.
Currently the Linux MAKEDEV command does not support generating the
Digiboard devices. Use the following script to generate them:
------------------ mkdigidev begin
#!/bin/sh
#
# Script to create Digiboard Devices
# Christoph Lameter, April 4, 1996
#
# Usage:
#	mkdigidev [<number of boards>]
#
# Each board provides 16 ports. The majors are assigned here so that
# the ttyD (ttyS-like) devices use DIGIMAJOR and the ttyd (callout,
# cua-like) devices use DIGICUMAJOR, matching the description below.
DIGIMAJOR=30
DIGICUMAJOR=31

BOARDS=$1
if [ "$BOARDS" = "" ]; then
	BOARDS=1
fi

boardnum=0
while [ $boardnum -lt $BOARDS ]
do
	for c in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15
	do
		name=`expr $boardnum \* 16 + $c`
		mknod /dev/ttyD$name c $DIGIMAJOR $name
		mknod /dev/ttyd$name c $DIGICUMAJOR $name
	done
	boardnum=`expr $boardnum + 1`
done
------------------ mkdigidev end
The ttyd devices behave like the /dev/cua?? devices, and the ttyD
devices are like the /dev/ttyS?? devices.
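For example, to create and sanity-check the devices for two boards (an
illustrative session; the device names are those made by the script above):

    sh mkdigidev 2          # creates ttyd0-ttyd31 and ttyD0-ttyD31
    stty -a < /dev/ttyD0    # query the first port on the first board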
Sources of Information
----------------------
Webpage: http://private.fuller.edu/clameter/digi.html
Mailing List: digiboard@list.fuller.edu
(Write e-mail to that address to subscribe. Common ListServ commands work.
An archive of messages is available.)
Christoph Lameter (clameter@fuller.edu), April 4, 1996.
VERSION = 1
PATCHLEVEL = 3
-SUBLEVEL = 83
+SUBLEVEL = 84
ARCH = i386
...
@@ -112,6 +112,8 @@ CONFIG_SCSI_CONSTANTS=y
# CONFIG_SCSI_FUTURE_DOMAIN is not set
# CONFIG_SCSI_GENERIC_NCR5380 is not set
CONFIG_SCSI_NCR53C7xx=y
+CONFIG_SCSI_NCR53C7xx_sync=y
+CONFIG_SCSI_NCR53C7xx_FAST=y
# CONFIG_SCSI_IN2000 is not set
# CONFIG_SCSI_PAS16 is not set
# CONFIG_SCSI_QLOGIC is not set
...
@@ -178,6 +180,7 @@ CONFIG_ISO9660_FS=y
# Character devices
#
CONFIG_SERIAL=y
+# CONFIG_DIGI is not set
# CONFIG_CYCLADES is not set
# CONFIG_STALDRV is not set
# CONFIG_PRINTER is not set
...
@@ -81,7 +81,11 @@ extern struct hwrpb_struct *hwrpb;
#if PCI_MODIFY
+#if 0
static unsigned int io_base = 64*KB;	/* <64KB are (E)ISA ports */
+#else
+static unsigned int io_base = 0xb000;
+#endif
#if defined(CONFIG_ALPHA_XL)
/*
...
@@ -318,15 +322,18 @@ static void layout_bus(struct pci_bus *bus)
    if (bus->self) {
        struct pci_dev *bridge = bus->self;
        /*
-        * Set up the top and bottom of the I/O memory segment
+        * Set up the top and bottom of the PCI I/O segment
         * for this bus.
         */
        pcibios_read_config_dword(bridge->bus->number, bridge->devfn,
                                  0x1c, &l);
-       l = l | (bio >> 8) | ((tio - 1) & 0xf000);
+       l = (l & 0xffff0000) | (bio >> 8) | ((tio - 1) & 0xf000);
        pcibios_write_config_dword(bridge->bus->number, bridge->devfn,
                                   0x1c, l);
+       /*
+        * Set up the top and bottom of the PCI Memory segment
+        * for this bus.
+        */
        l = ((bmem & 0xfff00000) >> 16) | ((tmem - 1) & 0xfff00000);
        pcibios_write_config_dword(bridge->bus->number, bridge->devfn,
                                   0x20, l);
...
@@ -445,6 +452,47 @@ static inline void enable_ide(long ide_base)
    outb(data | 0x40, ide_base+1); /* turn on IDE, really! */
}
/*
* A small note about bridges and interrupts. The DECchip 21050 (and later chips)
* adheres to the PCI-PCI bridge specification. This says that the interrupts on
* the other side of a bridge are swizzled in the following manner:
*
* Dev	Interrupt	Interrupt
*	Pin on		Pin on
*	Device		Connector
*
* 4 A A
* B B
* C C
* D D
*
* 5 A B
* B C
* C D
* D A
*
* 6 A C
* B D
* C A
* D B
*
* 7 A D
* B A
* C B
* D C
*
* Where A = pin 1, B = pin 2 and so on, and pin=0 (the default) means A.
* Thus, each swizzle is ((pin-1) + (device#-4)) % 4
*
* The following code is somewhat simplistic as it assumes only one bridge.
* I will fix it later (david.rusling@reo.mts.dec.com).
*/
static inline unsigned char bridge_swizzle(unsigned char pin, unsigned int slot)
{
/* swizzle */
return (((pin-1) + slot) % 4) + 1 ;
}
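As a quick sanity check of the table above (a standalone sketch using the
same formula as bridge_swizzle(), with 1-based pins; not part of the commit):

    #include <stdio.h>

    static unsigned char swizzle(unsigned char pin, unsigned int slot)
    {
        return (((pin - 1) + slot) % 4) + 1;
    }

    int main(void)
    {
        /* device at IDSEL slot 6 asserting INTB (pin 2): the table
         * says the interrupt arrives on connector pin D (4) */
        printf("%d\n", swizzle(2, 6));  /* prints 4 */
        return 0;
    }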
/*
 * Most evaluation boards share most of the fixup code, which is isolated here.
 * This function is declared "inline" as only one platform will ever be selected
...
@@ -457,40 +505,64 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
{
    struct pci_dev *dev;
    unsigned char pin;
+   unsigned char slot ;
    /*
     * Go through all devices, fixing up irqs as we see fit:
     */
    for (dev = pci_devices; dev; dev = dev->next) {
-       dev->irq = 0;
-       /*
-        * Ignore things not on the primary bus - I'll figure
-        * this out one day - Dave Rusling
-        */
-       if (dev->bus->number != 0)
-           continue;
-       /* read the pin */
-       pcibios_read_config_byte(dev->bus->number, dev->devfn,
-                                PCI_INTERRUPT_PIN, &pin);
-       if (irq_tab[PCI_SLOT(dev->devfn) - min_idsel][pin] != -1)
-           dev->irq = irq_tab[PCI_SLOT(dev->devfn) - min_idsel][pin];
+       if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) {
+           dev->irq = 0;
+           /*
+            * This device is not on the primary bus, we need to figure out which
+            * interrupt pin it will come in on. We know which slot it will come
+            * in on 'cos that slot is where the bridge is. Each time the interrupt
+            * line passes through a PCI-PCI bridge we must apply the swizzle function
+            * (see the inline static routine above).
+            */
+           if (dev->bus->number != 0) {
+               struct pci_dev *curr = dev ;
+               /* read the pin and do the PCI-PCI bridge interrupt pin swizzle */
+               pcibios_read_config_byte(dev->bus->number, dev->devfn,
+                                        PCI_INTERRUPT_PIN, &pin);
+               /* cope with 0 */
+               if (pin == 0) pin = 1 ;
+               /* follow the chain of bridges, swizzling as we go */
+               do {
+                   /* swizzle */
+                   pin = bridge_swizzle(pin, PCI_SLOT(curr->devfn)) ;
+                   /* move up the chain of bridges */
+                   curr = curr->bus->self ;
+               } while (curr->bus->self) ;
+               /* The slot is the slot of the last bridge. */
+               slot = PCI_SLOT(curr->devfn) ;
+           } else {
+               /* work out the slot */
+               slot = PCI_SLOT(dev->devfn) ;
+               /* read the pin */
+               pcibios_read_config_byte(dev->bus->number, dev->devfn,
+                                        PCI_INTERRUPT_PIN, &pin);
+           }
+           if (irq_tab[slot - min_idsel][pin] != -1)
+               dev->irq = irq_tab[slot - min_idsel][pin];
#if PCI_MODIFY
        /* tell the device: */
        pcibios_write_config_byte(dev->bus->number, dev->devfn,
                                  PCI_INTERRUPT_LINE, dev->irq);
#endif
        /*
         * if its a VGA, enable its BIOS ROM at C0000
         */
        if ((dev->class >> 8) == PCI_CLASS_DISPLAY_VGA) {
            pcibios_write_config_dword(dev->bus->number, dev->devfn,
                                       PCI_ROM_ADDRESS,
                                       0x000c0000 | PCI_ROM_ADDRESS_ENABLE);
+           }
        }
    }
    if (ide_base) {
        enable_ide(ide_base);
    }
}
...
@@ -12,10 +12,15 @@
 * This driver does NOT support DigiBoard's fastcook FEP option and
 * does not support the transparent print (i.e. digiprint) option.
 *
-* Please email any suggestions or bug reports to troyd@skypoint.com
+* This Driver is currently maintained by Christoph Lameter (clameter@fuller.edu)
+* Please contact the mailing list for problems first.
 *
+* Sources of Information:
+* 1. The Linux Digiboard Page at http://private.fuller.edu/clameter/digi.html
+* 2. The Linux Digiboard Mailing list at digiboard@list.fuller.edu
+*    (Simply write a message to introduce yourself to subscribe)
 *
-* January 1996 Bug fixes by an unknown author and released as 1.5.2
+* 1.5.2 Fall 1995 Bug fixes by David Nugent
 * 1.5.3 March 9, 1996 Christoph Lameter: Fixed 115.2K Support. Memory
 *       allocation harmonized with 1.3.X Series.
 * 1.5.4 March 30, 1996 Christoph Lameter: Fixup for 1.3.81. Use init_bh
...
@@ -30,6 +35,7 @@
 The driver supports the native 57.6K and 115K Baudrates under Linux, but
 some distributions like Slackware 3.0 dont like these high baudrates.
 */
#include <linux/mm.h>
#include <linux/ioport.h>
#include <linux/errno.h>
...
@@ -765,9 +765,17 @@ static struct device dev_3c509 = {
    0, 0,
    0, 0, 0, NULL, el3_probe };

+static int io = 0;
+static int irq = 0;
+
int
init_module(void)
{
+   dev_3c509.base_addr = io;
+   dev_3c509.irq = irq;
+   if (!EISA_bus && !io) {
+       printk("3c509: WARNING! Module load-time probing works reliably only for EISA bus!!\n");
+   }
    if (register_netdev(&dev_3c509) != 0)
        return -EIO;
    return 0;
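With these module parameters in place, a non-EISA card can be loaded with
its resources given explicitly rather than probed (an illustrative
invocation; substitute your card's actual settings):

    insmod 3c509.o io=0x300 irq=10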
...
@@ -22,11 +22,11 @@
 * process. Since locks still depend on the process id, locks are inherited
 * after an exec() but not after a fork(). This agrees with POSIX, and both
 * BSD and SVR4 practice.
-* Andy Walker (andy@keo.kvaerner.no), February 14, 1995
+* Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
 *
 * Scrapped free list which is redundant now that we allocate locks
 * dynamically with kmalloc()/kfree().
-* Andy Walker (andy@keo.kvaerner.no), February 21, 1995
+* Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
 *
 * Implemented two lock personalities - F_FLOCK and F_POSIX.
 *
...
@@ -47,18 +47,21 @@
 * upgrading from shared to exclusive (or vice versa). When this happens
 * any processes blocked by the current lock are woken up and allowed to
 * run before the new lock is applied.
+* Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
 *
-* NOTE:
-* I do not intend to implement mandatory locks unless demand is *HUGE*.
-* They are not in BSD, and POSIX.1 does not require them. I have never
-* seen any public code that relied on them. As Kelly Carmichael suggests
-* above, mandatory locks requires lots of changes elsewhere and I am
-* reluctant to start something so drastic for so little gain.
-* Andy Walker (andy@keo.kvaerner.no), June 09, 1995
-*
 * Removed some race conditions in flock_lock_file(), marked other possible
 * races. Just grep for FIXME to see them.
 * Dmitry Gorodchanin (begemot@bgm.rosprint.net), Feb 09, 1996.
+*
+* Addressed Dmitry's concerns. Deadlock checking no longer recursive.
+* Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
+* once we've checked for blocking and deadlocking.
+* Andy Walker (andy@lysaker.kvaerner.no), Apr 03, 1996.
+*
+* NOTE:
+* Starting to look at mandatory locks - using SunOS as a model.
+* Probably a configuration option because mandatory locking can cause
+* all sorts of chaos with runaway processes.
 */
#include <asm/segment.h>
...
@@ -147,7 +150,7 @@ static inline void locks_delete_block(struct file_lock **block,
    }
}

-/* flock() system call entry point. Apply a FLOCK style locks to
+/* flock() system call entry point. Apply a FLOCK style lock to
 * an open file descriptor.
 */
asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
...
@@ -167,8 +170,8 @@ asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
    return (flock_lock_file(filp, &file_lock, cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
}

-/* Report the first existing locks that would conflict with l. This implements
-* the F_GETLK command of fcntl().
+/* Report the first existing lock that would conflict with l.
+* This implements the F_GETLK command of fcntl().
 */
int fcntl_getlk(unsigned int fd, struct flock *l)
{
...
@@ -209,9 +212,10 @@ int fcntl_getlk(unsigned int fd, struct flock *l)
    return (0);
}

-/* Apply the lock described by l to an open file descriptor. This implements
-* both the F_SETLK and F_SETLKW commands of fcntl(). It also emulates flock()
-* in a pretty broken way for older C libraries.
+/* Apply the lock described by l to an open file descriptor.
+* This implements both the F_SETLK and F_SETLKW commands of fcntl().
+* It also emulates flock() in a pretty broken way for older C
+* libraries.
 */
int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
{
...
@@ -335,8 +339,8 @@ static int posix_make_lock(struct file *filp, struct file_lock *fl,
    return (1);
}

-/* Verify a call to flock() and fill in a file_lock structure with an appropriate
-* FLOCK lock.
+/* Verify a call to flock() and fill in a file_lock structure with
+* an appropriate FLOCK lock.
 */
static int flock_make_lock(struct file *filp, struct file_lock *fl,
    unsigned int cmd)
...
@@ -368,8 +372,8 @@ static int flock_make_lock(struct file *filp, struct file_lock *fl,
    return (1);
}

-/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific checking
-* before calling the locks_conflict().
+/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
+* checking before calling the locks_conflict().
 */
static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
...
@@ -383,8 +387,8 @@ static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
    return (locks_conflict(caller_fl, sys_fl));
}

-/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific checking
-* before calling the locks_conflict().
+/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
+* checking before calling the locks_conflict().
 */
static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
{
...
@@ -429,15 +433,15 @@ static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
     (fl2->fl_end >= fl1->fl_start));
}
-/* This function tests for deadlock condition before putting a process to sleep.
-* The detection scheme is recursive... we may need a test to make it exit if the
-* function gets stuck due to bad lock data. 4.4 BSD uses a maximum depth of 50
-* for this.
+/* This function tests for deadlock condition before putting a process to
+* sleep. The detection scheme is no longer recursive. Recursive was neat,
+* but dangerous - we risked stack corruption if the lock data was bad, or
+* if the recursion was too deep for any other reason.
 *
-* FIXME:
-* IMHO this function is dangerous, deep recursion may result in kernel stack
-* corruption. Perhaps we need to limit depth here.
-* Dmitry Gorodchanin 09/02/96
+* We rely on the fact that a task can only be on one lock's wait queue
+* at a time. When we find blocked_task on a wait queue we can re-search
+* with blocked_task equal to that queue's owner, until either blocked_task
+* isn't found, or blocked_task is found on a queue owned by my_task.
 */
static int posix_locks_deadlock(struct task_struct *my_task,
                                struct task_struct *blocked_task)
...
@@ -445,20 +449,18 @@ static int posix_locks_deadlock(struct task_struct *my_task,
    struct wait_queue *dlock_wait;
    struct file_lock *fl;

+next_task:
    for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
-       if (fl->fl_owner == NULL)
-           continue;   /* Should never happen! */
-       if (fl->fl_owner != my_task)
-           continue;
-       if (fl->fl_wait == NULL)
-           continue;   /* no queues */
+       if (fl->fl_owner == NULL || fl->fl_wait == NULL)
+           continue;
        dlock_wait = fl->fl_wait;
        do {
-           if (dlock_wait->task != NULL) {
-               if (dlock_wait->task == blocked_task)
-                   return (-EDEADLOCK);
-               if (posix_locks_deadlock(dlock_wait->task, blocked_task))
-                   return (-EDEADLOCK);
+           if (dlock_wait->task == blocked_task) {
+               if (fl->fl_owner == my_task) {
+                   return(-EDEADLOCK);
+               }
+               blocked_task = fl->fl_owner;
+               goto next_task;
            }
            dlock_wait = dlock_wait->next;
        } while (dlock_wait != fl->fl_wait);
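The iterative scheme is easier to see on a toy model (a sketch only;
wait_for[] is a hypothetical map from each task to the owner of the lock
it is blocked on, with 0 meaning not blocked):

    int would_deadlock(int my_task, int blocked_task, const int wait_for[])
    {
        /* follow the chain of "waits on a lock owned by" edges */
        while (blocked_task != 0) {
            if (blocked_task == my_task)
                return 1;   /* chain leads back to us: deadlock */
            blocked_task = wait_for[blocked_task];
        }
        return 0;   /* chain ends: safe to sleep */
    }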
...
@@ -466,7 +468,7 @@ static int posix_locks_deadlock(struct task_struct *my_task,
    return (0);
}

-/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorting
+/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorted
 * first in an inode's lock list, and always insert new locks at the head
 * of the list.
 */
...
@@ -628,43 +630,46 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
            }
            caller = fl;
            added = 1;
-           goto next_lock;
        }
-       /* Processing for different lock types is a bit more complex.
-        */
-       if (fl->fl_end < caller->fl_start)
-           goto next_lock;
-       if (fl->fl_start > caller->fl_end)
-           break;
-       if (caller->fl_type == F_UNLCK)
-           added = 1;
-       if (fl->fl_start < caller->fl_start)
-           left = fl;
-       /* If the next lock in the list has a higher end address than
-        * the new one, insert the new one here.
-        */
-       if (fl->fl_end > caller->fl_end) {
-           right = fl;
-           break;
-       }
-       if (fl->fl_start >= caller->fl_start) {
-           /* The new lock completely replaces an old one (This may
-            * happen several times).
-            */
-           if (added) {
-               locks_delete_lock(before, 0);
-               continue;
-           }
-           /* Replace the old lock with the new one. Wake up
-            * anybody waiting for the old one, as the change in
-            * lock type might satisfy his needs.
-            */
-           wake_up(&fl->fl_wait);
-           fl->fl_start = caller->fl_start;
-           fl->fl_end = caller->fl_end;
-           fl->fl_type = caller->fl_type;
-           caller = fl;
-           added = 1;
-       }
+       else {
+           /* Processing for different lock types is a bit
+            * more complex.
+            */
+           if (fl->fl_end < caller->fl_start)
+               goto next_lock;
+           if (fl->fl_start > caller->fl_end)
+               break;
+           if (caller->fl_type == F_UNLCK)
+               added = 1;
+           if (fl->fl_start < caller->fl_start)
+               left = fl;
+           /* If the next lock in the list has a higher end
+            * address than the new one, insert the new one here.
+            */
+           if (fl->fl_end > caller->fl_end) {
+               right = fl;
+               break;
+           }
+           if (fl->fl_start >= caller->fl_start) {
+               /* The new lock completely replaces an old
+                * one (This may happen several times).
+                */
+               if (added) {
+                   locks_delete_lock(before, 0);
+                   continue;
+               }
+               /* Replace the old lock with the new one.
+                * Wake up anybody waiting for the old one,
+                * as the change in lock type might satisfy
+                * their needs.
+                */
+               wake_up(&fl->fl_wait);
+               fl->fl_start = caller->fl_start;
+               fl->fl_end = caller->fl_end;
+               fl->fl_type = caller->fl_type;
+               caller = fl;
+               added = 1;
+           }
        }
        /* Go on to next lock.
         */
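A concrete instance of the left/right bookkeeping above (a generic
POSIX-locking scenario, not text from this file): suppose one lock covers
bytes 0-99 and an unlock request arrives for 40-59. The loop records the
existing lock as both left and right, and the remainder of the function
(outside this hunk) shrinks left to end at 39 and right to start at 60,
splitting the original lock around the unlocked range. Had the request
been a lock of a different type, the overlapping span would instead change
type in place through the wake_up() path shown here.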
...
@@ -672,18 +677,6 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
        before = &(*before)->fl_next;
    }

-   /* FIXME:
-    * Note: We may sleep in locks_alloc_lock(), so
-    * the 'before' pointer may be not valid any more.
-    * This can cause random kernel memory corruption.
-    * It seems the right way is to alloc two locks
-    * at the begining of this func, and then free them
-    * if they were not needed.
-    * Another way is to change GFP_KERNEL to GFP_ATOMIC
-    * in locks_alloc_lock() for this case.
-    *
-    * Dmitry Gorodchanin 09/02/96.
-    */
    if (!added) {
        if (caller->fl_type == F_UNLCK)
            return (0);
...
@@ -723,7 +716,7 @@ static struct file_lock *locks_alloc_lock(struct file_lock *fl)
    /* Okay, let's make a new file_lock structure... */
    if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
-                                          GFP_KERNEL)) == NULL)
+                                          GFP_ATOMIC)) == NULL)
        return (tmp);

    tmp->fl_nextlink = NULL;
...
@@ -759,11 +752,12 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
}

/* Delete a lock and free it.
-* First remove our lock from the lock lists. Then remove all the blocked locks
-* from our blocked list, waking up the processes that own them. If told to wait,
-* then sleep on each of these lock's wait queues. Each blocked process will wake
-* up and immediately wake up its own wait queue allowing us to be scheduled again.
-* Lastly, wake up our own wait queue before freeing the file_lock structure.
+* First remove our lock from the lock lists. Then remove all the blocked
+* locks from our blocked list, waking up the processes that own them. If
+* told to wait, then sleep on each of these lock's wait queues. Each
+* blocked process will wake up and immediately wake up its own wait queue
+* allowing us to be scheduled again. Lastly, wake up our own wait queue
+* before freeing the file_lock structure.
 */
static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
...
@@ -50,7 +50,30 @@ extern __inline__ void atomic_sub(atomic_t i, atomic_t * v)
    "m" (__atomic_fool_gcc(v)));
}
/*
* Same as above, but return true if we counted down to zero
*/
extern __inline__ int atomic_sub_and_test(atomic_t i, atomic_t * v)
{
unsigned long temp, result;
__asm__ __volatile__(
"\n1:\t"
"ldl_l %0,%1\n\t"
"subl %0,%3,%0\n\t"
"bis %0,%0,%2\n\t"
"stl_c %0,%1\n\t"
"beq %0,1b\n"
"2:"
:"=&r" (temp),
"=m" (__atomic_fool_gcc(v)),
"=&r" (result)
:"Ir" (i),
"m" (__atomic_fool_gcc(v)));
return result==0;
}
#define atomic_inc(v) atomic_add(1,(v))
#define atomic_dec(v) atomic_sub(1,(v))
+#define atomic_dec_and_test(v) atomic_sub_and_test(1,(v))

#endif
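In C-level terms, the LL/SC loop in atomic_sub_and_test above behaves like
the following self-contained sketch (store_conditional() is a stand-in for
the stl_c instruction, which fails if *v was written since the linked load):

    /* stand-in for stl_c: in this single-threaded sketch it always
     * succeeds; real hardware fails it on interference, forcing a retry */
    static int store_conditional(volatile long *v, long val)
    {
        *v = val;
        return 1;
    }

    static int atomic_sub_and_test_sketch(long i, volatile long *v)
    {
        long result;
        do {
            long old = *v;      /* ldl_l: load-linked */
            result = old - i;
        } while (!store_conditional(v, result));    /* stl_c */
        return result == 0;     /* true if we counted down to zero */
    }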
...
@@ -13,6 +13,8 @@
#define LCA4_CPU 4 /* LCA4 (21066/21068) */
#define EV5_CPU 5 /* EV5 (21164) */
#define EV45_CPU 6 /* EV4.5 (21064/xxx) */
+#define EV56_CPU 7 /* EV5.6 (21164) */
+#define EV6_CPU 8 /* EV6 (21164) */

/*
 * DEC system types for Alpha systems. Found in HWRPB.
...
@@ -10,7 +10,7 @@
typedef unsigned int __kernel_dev_t;
typedef unsigned int __kernel_ino_t;
typedef unsigned int __kernel_mode_t;
-typedef unsigned short __kernel_nlink_t;
+typedef unsigned int __kernel_nlink_t;
typedef long __kernel_off_t;
typedef int __kernel_pid_t;
typedef unsigned int __kernel_uid_t;
...
@@ -53,4 +53,15 @@ static __inline__ void atomic_dec(atomic_t *v)
    :"m" (__atomic_fool_gcc(v)));
}
static __inline__ int atomic_dec_and_test(atomic_t *v)
{
unsigned char c;
__asm__ __volatile__(
LOCK "decl %0; sete %1"
:"=m" (__atomic_fool_gcc(v)), "=qm" (c)
:"m" (__atomic_fool_gcc(v)));
return c != 0;
}
#endif
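Both the Alpha and i386 versions exist to support the same pattern:
dropping a reference and letting exactly one caller, the one that hits
zero, do the teardown, with no cli()/sti() around the test. A sketch
(struct object and release_object() are hypothetical, not from this commit):

    struct object {
        atomic_t count;
        /* ... payload ... */
    };

    extern void release_object(struct object *obj); /* hypothetical destructor */

    void put_object(struct object *obj)
    {
        /* the decrement and the zero test are one atomic step, so two
         * concurrent droppers can never both see zero */
        if (atomic_dec_and_test(&obj->count))
            release_object(obj);
    }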
...
@@ -221,10 +221,10 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
#define cli() __asm__ __volatile__ ("cli": : :"memory")
#define save_flags(x) \
-__asm__ __volatile__("pushfl ; popl %0":"=r" (x): /* no input */ :"memory")
+__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")
#define restore_flags(x) \
-__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"r" (x):"memory")
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")
#define iret() __asm__ __volatile__ ("iret": : :"memory")
...
@@ -18,6 +18,8 @@
#include <linux/time.h>
#include <linux/config.h>

+#include <asm/atomic.h>

#define CONFIG_SKB_CHECK 0
#define HAVE_ALLOC_SKB /* For the drivers to know */
...
@@ -101,7 +103,7 @@
    unsigned short protocol;    /* Packet protocol from driver. */
    unsigned short truesize;    /* Buffer size */
-   int count;                  /* reference count */
+   atomic_t count;             /* reference count */
    struct sk_buff *data_skb;   /* Link to the actual data skb */
    unsigned char *head;        /* Head of buffer */
    unsigned char *data;        /* Data head pointer */
...
@@ -300,14 +302,13 @@ extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
}

/*
-* Insert a packet before another one in a list.
+* Insert a packet on a list.
 */
-extern __inline__ void __skb_insert(struct sk_buff *next, struct sk_buff *newsk,
-                                    struct sk_buff_head * list)
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+                                    struct sk_buff * prev, struct sk_buff *next,
+                                    struct sk_buff_head * list)
{
-   struct sk_buff * prev = next->prev;
    newsk->next = next;
    newsk->prev = prev;
    next->prev = newsk;
...
@@ -316,13 +317,16 @@ extern __inline__ void __skb_insert(struct sk_buff *newsk,
    list->qlen++;
}

+/*
+* Place a packet before a given packet in a list
+*/
extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
{
    unsigned long flags;

    save_flags(flags);
    cli();
-   __skb_insert(old, newsk, old->list);
+   __skb_insert(newsk, old->prev, old, old->list);
    restore_flags(flags);
}
...
@@ -330,26 +334,13 @@ extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 * Place a packet after a given packet in a list.
 */
-extern __inline__ void __skb_append(struct sk_buff *prev, struct sk_buff *newsk,
-                                    struct sk_buff_head * list)
-{
-   struct sk_buff * next = prev->next;
-
-   newsk->next = next;
-   newsk->prev = prev;
-   next->prev = newsk;
-   prev->next = newsk;
-   newsk->list = list;
-   list->qlen++;
-}
-
extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
{
    unsigned long flags;

    save_flags(flags);
    cli();
-   __skb_append(old, newsk, old->list);
+   __skb_insert(newsk, old, old->next, old->list);
    restore_flags(flags);
}
...
@@ -179,11 +179,15 @@ static inline void move_last_runqueue(struct task_struct * p)
    struct task_struct *next = p->next_run;
    struct task_struct *prev = p->prev_run;

+   /* remove from list */
    next->prev_run = prev;
    prev->next_run = next;
-   (p->prev_run = init_task.prev_run)->next_run = p;
+   /* add back to list */
    p->next_run = &init_task;
+   prev = init_task.prev_run;
    init_task.prev_run = p;
+   p->prev_run = prev;
+   prev->next_run = p;
}
/*
...
@@ -78,7 +78,6 @@ struct page_descriptor {
struct size_descriptor {
    struct page_descriptor *firstfree;
    struct page_descriptor *dmafree; /* DMA-able memory */
-   int size;
    int nblocks;

    int nmallocs;
...
@@ -91,49 +90,85 @@
/*
 * For now it is unsafe to allocate bucket sizes between n and
 * n-sizeof(page_descriptor) where n is PAGE_SIZE * any power of two
+*
+* The blocksize and sizes arrays _must_ match!
 */
#if PAGE_SIZE == 4096
-struct size_descriptor sizes[] =
+static const unsigned int blocksize[] = {
32,
64,
128,
252,
508,
1020,
2040,
4096 - 16,
8192 - 16,
16384 - 16,
32768 - 16,
65536 - 16,
131072 - 16,
0
};
static struct size_descriptor sizes[] =
{
-   {NULL, NULL, 32, 127, 0, 0, 0, 0, 0},
-   {NULL, NULL, 64, 63, 0, 0, 0, 0, 0},
-   {NULL, NULL, 128, 31, 0, 0, 0, 0, 0},
-   {NULL, NULL, 252, 16, 0, 0, 0, 0, 0},
-   {NULL, NULL, 508, 8, 0, 0, 0, 0, 0},
-   {NULL, NULL, 1020, 4, 0, 0, 0, 0, 0},
-   {NULL, NULL, 2040, 2, 0, 0, 0, 0, 0},
-   {NULL, NULL, 4096 - 16, 1, 0, 0, 0, 0, 0},
-   {NULL, NULL, 8192 - 16, 1, 0, 0, 0, 0, 1},
-   {NULL, NULL, 16384 - 16, 1, 0, 0, 0, 0, 2},
-   {NULL, NULL, 32768 - 16, 1, 0, 0, 0, 0, 3},
-   {NULL, NULL, 65536 - 16, 1, 0, 0, 0, 0, 4},
-   {NULL, NULL, 131072 - 16, 1, 0, 0, 0, 0, 5},
-   {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}
+   {NULL, NULL, 127, 0, 0, 0, 0, 0},
+   {NULL, NULL, 63, 0, 0, 0, 0, 0},
+   {NULL, NULL, 31, 0, 0, 0, 0, 0},
+   {NULL, NULL, 16, 0, 0, 0, 0, 0},
+   {NULL, NULL, 8, 0, 0, 0, 0, 0},
+   {NULL, NULL, 4, 0, 0, 0, 0, 0},
+   {NULL, NULL, 2, 0, 0, 0, 0, 0},
+   {NULL, NULL, 1, 0, 0, 0, 0, 0},
+   {NULL, NULL, 1, 0, 0, 0, 0, 1},
+   {NULL, NULL, 1, 0, 0, 0, 0, 2},
+   {NULL, NULL, 1, 0, 0, 0, 0, 3},
+   {NULL, NULL, 1, 0, 0, 0, 0, 4},
+   {NULL, NULL, 1, 0, 0, 0, 0, 5},
+   {NULL, NULL, 0, 0, 0, 0, 0, 0}
};
#elif PAGE_SIZE == 8192
static const unsigned int blocksize[] = {
64,
128,
248,
504,
1016,
2040,
4080,
8192 - 32,
16384 - 32,
32768 - 32,
65536 - 32,
131072 - 32,
262144 - 32,
0
};
struct size_descriptor sizes[] =
{
-   {NULL, NULL, 64, 127, 0, 0, 0, 0, 0},
-   {NULL, NULL, 128, 63, 0, 0, 0, 0, 0},
-   {NULL, NULL, 248, 31, 0, 0, 0, 0, 0},
-   {NULL, NULL, 504, 16, 0, 0, 0, 0, 0},
-   {NULL, NULL, 1016, 8, 0, 0, 0, 0, 0},
-   {NULL, NULL, 2040, 4, 0, 0, 0, 0, 0},
-   {NULL, NULL, 4080, 2, 0, 0, 0, 0, 0},
-   {NULL, NULL, 8192 - 32, 1, 0, 0, 0, 0, 0},
-   {NULL, NULL, 16384 - 32, 1, 0, 0, 0, 0, 1},
-   {NULL, NULL, 32768 - 32, 1, 0, 0, 0, 0, 2},
-   {NULL, NULL, 65536 - 32, 1, 0, 0, 0, 0, 3},
-   {NULL, NULL, 131072 - 32, 1, 0, 0, 0, 0, 4},
-   {NULL, NULL, 262144 - 32, 1, 0, 0, 0, 0, 5},
-   {NULL, NULL, 0, 0, 0, 0, 0, 0, 0}
+   {NULL, NULL, 127, 0, 0, 0, 0, 0},
+   {NULL, NULL, 63, 0, 0, 0, 0, 0},
+   {NULL, NULL, 31, 0, 0, 0, 0, 0},
+   {NULL, NULL, 16, 0, 0, 0, 0, 0},
+   {NULL, NULL, 8, 0, 0, 0, 0, 0},
+   {NULL, NULL, 4, 0, 0, 0, 0, 0},
+   {NULL, NULL, 2, 0, 0, 0, 0, 0},
+   {NULL, NULL, 1, 0, 0, 0, 0, 0},
+   {NULL, NULL, 1, 0, 0, 0, 0, 1},
+   {NULL, NULL, 1, 0, 0, 0, 0, 2},
+   {NULL, NULL, 1, 0, 0, 0, 0, 3},
+   {NULL, NULL, 1, 0, 0, 0, 0, 4},
+   {NULL, NULL, 1, 0, 0, 0, 0, 5},
+   {NULL, NULL, 0, 0, 0, 0, 0, 0}
};
#else
#error you need to make a version for your pagesize
#endif

#define NBLOCKS(order) (sizes[order].nblocks)
-#define BLOCKSIZE(order) (sizes[order].size)
+#define BLOCKSIZE(order) (blocksize[order])
#define AREASIZE(order) (PAGE_SIZE<<(sizes[order].gfporder))
...
@@ -160,31 +195,28 @@ long kmalloc_init(long start_mem, long end_mem)
}

-int get_order(int size)
-{
-   int order;
-
-   /* Add the size of the header */
-   size += sizeof(struct block_header);
-   for (order = 0; BLOCKSIZE(order); order++)
-       if (size <= BLOCKSIZE(order))
-           return order;
-   return -1;
-}

void *kmalloc(size_t size, int priority)
{
    unsigned long flags;
    unsigned long type;
-   int order, i, sz, dma;
+   int order, dma;
    struct block_header *p;
    struct page_descriptor *page, **pg;

-   order = get_order(size);
-   if (order < 0) {
-       printk("kmalloc of too large a block (%d bytes).\n", (int) size);
-       return (NULL);
+   /* Get order */
+   order = 0;
+   {
+       unsigned int realsize = size + sizeof(struct block_header);
+       for (;;) {
+           int ordersize = BLOCKSIZE(order);
+           if (realsize <= ordersize)
+               break;
+           order++;
+           if (ordersize)
+               continue;
+           printk("kmalloc of too large a block (%d bytes).\n", (int) size);
+           return NULL;
+       }
    }

    dma = 0;
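A worked instance of the search above on a 4096-byte-page kernel: with
(say) an 8-byte struct block_header, kmalloc(100) computes realsize = 108;
the block sizes run 32, 64, 128, ..., so the loop exits at order 2 (the
128-byte bucket). A request larger than the last non-zero blocksize walks
off the end of the table and takes the "too large" printk path instead.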
...
@@ -213,11 +245,8 @@ void *kmalloc(size_t size, int priority)
    page = *pg;
    if (page) {
        p = page->firstfree;
-       if (p->bh_flags != MF_FREE) {
-           restore_flags(flags);
-           printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
-           return NULL;
-       }
+       if (p->bh_flags != MF_FREE)
+           goto not_free_on_freelist;
        goto found_it;
    }
...
@@ -225,34 +254,32 @@ void *kmalloc(size_t size, int priority)
    /* This can be done with ints on: This is private to this invocation */
    restore_flags(flags);

-   /* sz is the size of the blocks we're dealing with */
-   sz = BLOCKSIZE(order);
-   page = (struct page_descriptor *) __get_free_pages(priority,
-           sizes[order].gfporder, dma);
-   if (!page) {
-       static unsigned long last = 0;
-       if (priority != GFP_BUFFER && (last + 10 * HZ < jiffies)) {
-           last = jiffies;
-           printk("Couldn't get a free page.....\n");
-       }
-       return NULL;
-   }
-   sizes[order].npages++;
-
-   /* Loop for all but last block: */
-   for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
-       p->bh_flags = MF_FREE;
-       p->bh_next = BH(((long) p) + sz);
-   }
-   /* Last block: */
-   p->bh_flags = MF_FREE;
-   p->bh_next = NULL;
+   {
+       int i, sz;
+
+       /* sz is the size of the blocks we're dealing with */
+       sz = BLOCKSIZE(order);
+       page = (struct page_descriptor *) __get_free_pages(priority,
+               sizes[order].gfporder, dma);
+       if (!page)
+           goto no_free_page;
+       sizes[order].npages++;
+
+       /* Loop for all but last block: */
+       for (i = NBLOCKS(order), p = BH(page + 1); i > 1; i--, p = p->bh_next) {
+           p->bh_flags = MF_FREE;
+           p->bh_next = BH(((long) p) + sz);
+       }
+       /* Last block: */
+       p->bh_flags = MF_FREE;
+       p->bh_next = NULL;

        page->order = order;
        page->nfree = NBLOCKS(order);
+       p = BH(page+1);
+   }
/*
 * Now we're going to muck with the "global" freelist
...
@@ -276,6 +303,21 @@ void *kmalloc(size_t size, int priority)
    memset(p+1, 0xf0, size);
#endif
    return p + 1; /* Pointer arithmetic: increments past header */
no_free_page:
{
static unsigned long last = 0;
if (priority != GFP_BUFFER && (last + 10 * HZ < jiffies)) {
last = jiffies;
printk("Couldn't get a free page.....\n");
}
return NULL;
}
not_free_on_freelist:
restore_flags(flags);
printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
return NULL;
}

void kfree(void *ptr)
...
@@ -755,7 +755,7 @@ static inline void get_empty_page(struct task_struct * tsk, struct vm_area_struc
    pte = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
    if (write_access) {
        unsigned long page = get_free_page(GFP_KERNEL);
-       pte = pte_mkwrite(mk_pte(page, vma->vm_page_prot));
+       pte = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        vma->vm_mm->rss++;
        tsk->min_flt++;
        if (!page) {
...
@@ -390,22 +390,27 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 * Insert a packet before another one in a list.
 */
-void __skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void __skb_insert(struct sk_buff *newsk,
+                  struct sk_buff * prev, struct sk_buff *next,
+                  struct sk_buff_head * list)
{
-   IS_SKB(old);
+   IS_SKB(prev);
    IS_SKB(newsk);
+   IS_SKB(next);

-   if(!old->next || !old->prev)
+   if(!prev->next || !prev->prev)
+       printk("insert after unlisted item!\n");
+   if(!next->next || !next->prev)
        printk("insert before unlisted item!\n");
    if(newsk->next || newsk->prev)
        printk("inserted item is already on a list.\n");

-   newsk->next = old;
-   newsk->prev = old->prev;
-   old->prev = newsk;
-   newsk->prev->next = newsk;
-   newsk->list = old->list;
-   newsk->list->qlen++;
+   newsk->next = next;
+   newsk->prev = prev;
+   next->prev = newsk;
+   prev->next = newsk;
+   newsk->list = list;
+   list->qlen++;
}
...
@@ -437,25 +442,6 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
    restore_flags(flags);
}

-void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
-   IS_SKB(old);
-   IS_SKB(newsk);
-
-   if(!old->next || !old->prev)
-       printk("append before unlisted item!\n");
-   if(newsk->next || newsk->prev)
-       printk("append item is already on a list.\n");
-
-   newsk->prev = old;
-   newsk->next = old->next;
-   newsk->next->prev = newsk;
-   old->next = newsk;
-   newsk->list = old->list;
-   newsk->list->qlen++;
-}
-
/*
 * Remove an sk_buff from its list. Works even without knowing the list it
 * is sitting on, which can be handy at times. It also means that THE LIST
...
@@ -724,30 +710,26 @@ struct sk_buff *alloc_skb(unsigned int size,int priority)
static inline void __kfree_skbmem(struct sk_buff *skb)
{
    /* don't do anything if somebody still uses us */
-   if (--skb->count <= 0) {
+   if (atomic_dec_and_test(&skb->count)) {
        kfree(skb->head);
-       net_skbcount--;
+       atomic_dec(&net_skbcount);
    }
}

void kfree_skbmem(struct sk_buff *skb)
{
-   unsigned long flags;
    void * addr = skb->head;

-   save_flags(flags);
-   cli();
    /* don't do anything if somebody still uses us */
-   if (--skb->count <= 0) {
+   if (atomic_dec_and_test(&skb->count)) {
        /* free the skb that contains the actual data if we've clone()'d */
        if (skb->data_skb) {
            addr = skb;
            __kfree_skbmem(skb->data_skb);
        }
        kfree(addr);
-       net_skbcount--;
+       atomic_dec(&net_skbcount);
    }
-   restore_flags(flags);
}

/*
...
@@ -1183,6 +1183,31 @@ static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
    return(0);
}
/*
* Add a sk_buff to the TCP receive queue, calculating
* the ACK sequence as we go..
*/
static inline void tcp_insert_skb(struct sk_buff * skb, struct sk_buff_head * list)
{
struct sk_buff * prev, * next;
u32 seq;
/*
* Find where the new skb goes.. (This goes backwards,
* on the assumption that we get the packets in order)
*/
seq = skb->seq;
prev = list->prev;
next = (struct sk_buff *) list;
for (;;) {
if (prev == (struct sk_buff *) list || !after(prev->seq, seq))
break;
next = prev;
prev = prev->prev;
}
__skb_insert(skb, prev, next, list);
}
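The backwards scan in tcp_insert_skb is worth a toy illustration (a
standalone sketch with a plain array standing in for the queue, not kernel
code): with in-order arrivals the very first comparison succeeds, so the
insert is effectively O(1).

    #include <stdio.h>

    /* find the insert position for seq, scanning back from the tail */
    static int insert_pos(const unsigned int q[], int n, unsigned int seq)
    {
        int i = n;
        while (i > 0 && q[i - 1] > seq) /* "after(prev->seq, seq)" */
            i--;
        return i;
    }

    int main(void)
    {
        unsigned int q[] = { 100, 200, 300 };
        /* in-order packet: one comparison, goes to the end */
        printf("%d\n", insert_pos(q, 3, 400));  /* prints 3 */
        /* out-of-order packet: lands between 200 and 300 */
        printf("%d\n", insert_pos(q, 3, 250));  /* prints 2 */
        return 0;
    }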
/*
 * Called for each packet when we find a new ACK endpoint sequence in it
 */
...
@@ -1196,48 +1221,28 @@ static inline u32 tcp_queue_ack(struct sk_buff * skb, struct sock * sk)
    if (skb->h.th->fin)
        tcp_fin(skb,sk,skb->h.th);
    return skb->end_seq;
}

-/*
-* Add a sk_buff to the TCP receive queue, calculating
-* the ACK sequence as we go..
-*/
static void tcp_queue(struct sk_buff * skb, struct sock * sk,
                      struct tcphdr *th, unsigned long saddr)
{
-   struct sk_buff_head * list = &sk->receive_queue;
-   struct sk_buff * next;
    u32 ack_seq;

-   /*
-    * Find where the new skb goes.. (This goes backwards,
-    * on the assumption that we get the packets in order)
-    */
-   next = list->prev;
-   while (next != (struct sk_buff *) list) {
-       if (!after(next->seq, skb->seq))
-           break;
-       next = next->prev;
-   }
-   /*
-    * put it after the packet we found (which
-    * may be the list-head, but that's fine).
-    */
-   __skb_append(next, skb, list);
-   next = skb->next;
+   tcp_insert_skb(skb, &sk->receive_queue);

    /*
     * Did we get anything new to ack?
     */
    ack_seq = sk->acked_seq;
    if (!after(skb->seq, ack_seq) && after(skb->end_seq, ack_seq)) {
+       struct sk_buff_head * list = &sk->receive_queue;
+       struct sk_buff * next;
+
        ack_seq = tcp_queue_ack(skb, sk);

        /*
         * Do we have any old packets to ack that the above
         * made visible? (Go forward from skb)
         */
+       next = skb->next;
        while (next != (struct sk_buff *) list) {
            if (after(next->seq, ack_seq))
                break;
...
@@ -1471,67 +1476,60 @@ static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len
    }
}
-/*
-* Throw out all unnecessary packets: we've gone over the
-* receive queue limit. This shouldn't happen in a normal
-* TCP connection, but we might have gotten duplicates etc.
-*/
-static inline void tcp_forget_unacked(struct sk_buff_head * list)
-{
-   for (;;) {
-       struct sk_buff * skb = list->prev;
-
-       /* gone through it all? */
-       if (skb == (struct sk_buff *) list)
-           break;
-       if (skb->acked)
-           break;
-       __skb_unlink(skb, list);
-   }
-}
/*
 * This should be a bit smarter and remove partially
 * overlapping stuff too, but this should be good
 * enough for any even remotely normal case (and the
 * worst that can happen is that we have a few
 * unnecessary packets in the receive queue).
+*
+* This function is never called with an empty list..
 */
static inline void tcp_remove_dups(struct sk_buff_head * list)
{
-   struct sk_buff * skb = list->next;
+   struct sk_buff * next = list->next;

    for (;;) {
-       struct sk_buff * next;
-
-       if (skb == (struct sk_buff *) list)
-           break;
-       next = skb->next;
-       if (next->seq == skb->seq) {
-           if (before(next->end_seq, skb->end_seq)) {
-               __skb_unlink(next, list);
-               continue;
-           }
-           __skb_unlink(skb, list);
-       }
-       skb = next;
+       struct sk_buff * skb = next;
+
+       next = next->next;
+       if (next == (struct sk_buff *) list)
+           break;
+       if (before(next->end_seq, skb->end_seq)) {
+           __skb_unlink(next, list);
+           kfree_skb(next, FREE_READ);
+           next = skb;
+           continue;
+       }
+       if (next->seq != skb->seq)
+           continue;
+       __skb_unlink(skb, list);
+       kfree_skb(skb, FREE_READ);
    }
}
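For instance, if the queue holds two segments starting at seq 100, one
ending at 200 and a shorter duplicate ending at 150, the pass above frees
the shorter one whichever order they sit in: a contained segment is
dropped via the before() branch, and the equal-seq case via the final
unlink of skb.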
/*
* Throw out all unnecessary packets: we've gone over the
* receive queue limit. This shouldn't happen in a normal
* TCP connection, but we might have gotten duplicates etc.
*/
static void prune_queue(struct sk_buff_head * list)
{
-   /*
-    * Throw out things we haven't acked.
-    */
-   tcp_forget_unacked(list);
-
-   /*
-    * Throw out duplicates
-    */
-   tcp_remove_dups(list);
+   for (;;) {
+       struct sk_buff * skb = list->prev;
+
+       /* gone through it all? */
+       if (skb == (struct sk_buff *) list)
+           break;
+       if (!skb->acked) {
+           __skb_unlink(skb, list);
+           kfree_skb(skb, FREE_READ);
+           continue;
+       }
+       tcp_remove_dups(list);
+       break;
+   }
}

/*
 * A TCP packet has arrived.
 * skb->h.raw is the TCP header.
...
@@ -64,6 +64,8 @@ static struct symbol_table net_syms = {
    X(memcpy_fromiovec),
    X(sock_setsockopt),
    X(sock_getsockopt),
+   X(sk_alloc),
+   X(sk_free),
    X(sock_wake_async),
    X(sock_alloc_send_skb),
    X(skb_recv_datagram),
...