Kirill Smelkov / linux · commit 892692fb
Authored Nov 23, 2007 by Linus Torvalds
Parent: 4ff1c5b1

    Import 1.3.84

19 changed files with 539 additions and 329 deletions
Documentation/digiboard.txt         +72   -0
Makefile                            +1    -1
arch/alpha/defconfig                +3    -0
arch/alpha/kernel/bios32.c          +101  -29
drivers/char/pcxx.c                 +8    -2
drivers/net/3c509.c                 +8    -0
fs/locks.c                          +88   -94
include/asm-alpha/atomic.h          +23   -0
include/asm-alpha/hwrpb.h           +2    -0
include/asm-alpha/posix_types.h     +1    -1
include/asm-i386/atomic.h           +11   -0
include/asm-i386/system.h           +2    -2
include/linux/skbuff.h              +11   -20
kernel/sched.c                      +5    -1
mm/kmalloc.c                        +119  -77
mm/memory.c                         +1    -1
net/core/skbuff.c                   +18   -36
net/ipv4/tcp_input.c                +63   -65
net/netsyms.c                       +2    -0
Documentation/digiboard.txt  (new file, mode 100644)
The Linux Digiboard Driver
--------------------------
The Digiboard Driver for Linux supports the following boards:
DigiBoard PC/Xe, PC/Xi, PC/Xeve
Limitations:
------------
Currently the Driver does not do autoprobing. You have to configure
the driver with the correct I/O address in drivers/char/pcxxconfig.h.
The preconfigured I/O address is 0200h and the default memory address 0D0000h.
Use them and you will not have to worry about configuring anything.
Supporting Tools:
-----------------
Some tools and more detailed, up-to-date information can be found at
ftp://ftp.fuller.edu/Linux/digi
The "ditty" tool described in the Digiboard Manuals for other Unixes
is also available.
Currently the Linux MAKEDEV command does not support generating the Digiboard
Devices. Use the following script to generate the devices:
------------------ mkdigidev begin
#!/bin/sh
#
# Script to create Digiboard Devices
# Christoph Lameter, April 4, 1996
#
# Usage:
# mkdigidev [<number of devices>]
#
DIGIMAJOR=30
DIGICUMAJOR=31
BOARDS=$1
if [ "$BOARDS" = "" ]; then
BOARDS=1
fi
boardnum=0
while [ $boardnum -lt $BOARDS ];
do
for c in 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15;
do
name=`expr $boardnum \* 16 + $c`
mknod /dev/ttyd$name c $DIGIMAJOR $name
mknod /dev/ttyD$name c $DIGICUMAJOR $name
done
boardnum=`expr $boardnum + 1`
done
------------------ mkdigidev end
The ttyd devices behave like the /dev/cua?? devices
and the ttyD devices are like the /dev/ttyS?? devices.
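For example, running "sh mkdigidev 2" creates /dev/ttyd0 through /dev/ttyd31
and /dev/ttyD0 through /dev/ttyD31, i.e. 16 channels per board, with minor
number board*16 + channel.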
Sources of Information
----------------------
Webpage: http://private.fuller.edu/clameter/digi.html
Mailing List: digiboard@list.fuller.edu
(Write e-mail to that address to subscribe; common ListServ commands work.
An archive of messages is available.)
Christoph Lameter (clameter@fuller.edu), April 4, 1996.
Makefile

 VERSION = 1
 PATCHLEVEL = 3
-SUBLEVEL = 83
+SUBLEVEL = 84

 ARCH = i386
...
arch/alpha/defconfig

@@ -112,6 +112,8 @@ CONFIG_SCSI_CONSTANTS=y
 # CONFIG_SCSI_FUTURE_DOMAIN is not set
 # CONFIG_SCSI_GENERIC_NCR5380 is not set
 CONFIG_SCSI_NCR53C7xx=y
+CONFIG_SCSI_NCR53C7xx_sync=y
+CONFIG_SCSI_NCR53C7xx_FAST=y
 # CONFIG_SCSI_IN2000 is not set
 # CONFIG_SCSI_PAS16 is not set
 # CONFIG_SCSI_QLOGIC is not set
@@ -178,6 +180,7 @@ CONFIG_ISO9660_FS=y
 #
 # Character devices
 #
 CONFIG_SERIAL=y
+# CONFIG_DIGI is not set
 # CONFIG_CYCLADES is not set
 # CONFIG_STALDRV is not set
 # CONFIG_PRINTER is not set
...
arch/alpha/kernel/bios32.c

@@ -81,7 +81,11 @@ extern struct hwrpb_struct *hwrpb;
 #if PCI_MODIFY
+#if 0
 static unsigned int io_base = 64*KB;    /* <64KB are (E)ISA ports */
+#else
+static unsigned int io_base = 0xb000;
+#endif
 #if defined(CONFIG_ALPHA_XL)
 /*
...
@@ -318,15 +322,18 @@ static void layout_bus(struct pci_bus *bus)
     if (bus->self) {
         struct pci_dev *bridge = bus->self;
         /*
-         * Set up the top and bottom of the I/O memory segment
+         * Set up the top and bottom of the PCI I/O segment
          * for this bus.
          */
         pcibios_read_config_dword(bridge->bus->number, bridge->devfn,
                                   0x1c, &l);
-        l = l | (bio >> 8) | ((tio - 1) & 0xf000);
+        l = (l & 0xffff0000) | (bio >> 8) | ((tio - 1) & 0xf000);
         pcibios_write_config_dword(bridge->bus->number, bridge->devfn,
                                    0x1c, l);
         /*
          * Set up the top and bottom of the PCI Memory segment
          * for this bus.
          */
         l = ((bmem & 0xfff00000) >> 16) | ((tmem - 1) & 0xfff00000);
         pcibios_write_config_dword(bridge->bus->number, bridge->devfn,
                                    0x20, l);
...
@@ -445,6 +452,47 @@ static inline void enable_ide(long ide_base)
     outb(data | 0x40, ide_base + 1);    /* turn on IDE, really! */
 }

+/*
+ * A small note about bridges and interrupts. The DECchip 21050 (and later
+ * chips) adheres to the PCI-PCI bridge specification. This says that the
+ * interrupts on the other side of a bridge are swizzled in the following
+ * manner:
+ *
+ * Dev    Interrupt    Interrupt
+ *        Pin on       Pin on
+ *        Device       Connector
+ *
+ *  4     A            A
+ *        B            B
+ *        C            C
+ *        D            D
+ *
+ *  5     A            B
+ *        B            C
+ *        C            D
+ *        D            A
+ *
+ *  6     A            C
+ *        B            D
+ *        C            A
+ *        D            B
+ *
+ *  7     A            D
+ *        B            A
+ *        C            B
+ *        D            C
+ *
+ * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
+ * Thus, each swizzle is ((pin-1) + (device#-4)) % 4
+ *
+ * The following code is somewhat simplistic as it assumes only one bridge.
+ * I will fix it later (david.rusling@reo.mts.dec.com).
+ */
+static inline unsigned char bridge_swizzle(unsigned char pin, unsigned int slot)
+{
+    /* swizzle */
+    return (((pin - 1) + slot) % 4) + 1;
+}
+
 /*
  * Most evaluation boards share most of the fixup code, which is isolated here.
  * This function is declared "inline" as only one platform will ever be selected
...
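As a sanity check on the swizzle formula above, here is a minimal user-space
sketch (hypothetical, not part of this commit) that evaluates the same
arithmetic for a device behind one bridge:

    #include <stdio.h>

    /* Same arithmetic as bridge_swizzle() above: pins are 1..4 (A..D). */
    static unsigned char bridge_swizzle(unsigned char pin, unsigned int slot)
    {
        return (((pin - 1) + slot) % 4) + 1;
    }

    int main(void)
    {
        /* A device in slot 5 raising INTA (pin 1) behind one bridge:
         * ((1-1) + 5) % 4 + 1 = 2, i.e. pin B on the connector,
         * matching the "5: A -> B" row in the table above. */
        printf("%u\n", bridge_swizzle(1, 5));   /* prints 2 */
        return 0;
    }

Since the result is taken modulo 4, using the raw slot number instead of
(device# - 4) gives the same answer, which is why the kernel routine can pass
PCI_SLOT(curr->devfn) directly.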
@@ -457,24 +505,46 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
 {
     struct pci_dev *dev;
     unsigned char pin;
+    unsigned char slot;

     /*
      * Go through all devices, fixing up irqs as we see fit:
      */
     for (dev = pci_devices; dev; dev = dev->next) {
         if (dev->class >> 16 != PCI_BASE_CLASS_BRIDGE) {
             dev->irq = 0;
             /*
-             * Ignore things not on the primary bus - I'll figure
-             * this out one day - Dave Rusling
+             * This device is not on the primary bus, we need to figure
+             * out which interrupt pin it will come in on. We know which
+             * slot it will come in on 'cos that slot is where the bridge
+             * is. Each time the interrupt line passes through a PCI-PCI
+             * bridge we must apply the swizzle function (see the inline
+             * static routine above).
              */
-            if (dev->bus->number != 0)
-                continue;
-            pcibios_read_config_byte(dev->bus->number, dev->devfn,
-                                     PCI_INTERRUPT_PIN, &pin);
-            if (irq_tab[PCI_SLOT(dev->devfn) - min_idsel][pin] != -1)
-                dev->irq = irq_tab[PCI_SLOT(dev->devfn) - min_idsel][pin];
+            if (dev->bus->number != 0) {
+                struct pci_dev *curr = dev;
+                /* read the pin and do the PCI-PCI bridge
+                   interrupt pin swizzle */
+                pcibios_read_config_byte(dev->bus->number, dev->devfn,
+                                         PCI_INTERRUPT_PIN, &pin);
+                /* cope with 0 */
+                if (pin == 0)
+                    pin = 1;
+                /* follow the chain of bridges, swizzling as we go */
+                do {
+                    /* swizzle */
+                    pin = bridge_swizzle(pin, PCI_SLOT(curr->devfn));
+                    /* move up the chain of bridges */
+                    curr = curr->bus->self;
+                } while (curr->bus->self);
+                /* The slot is the slot of the last bridge. */
+                slot = PCI_SLOT(curr->devfn);
+            } else {
+                /* work out the slot */
+                slot = PCI_SLOT(dev->devfn);
+                /* read the pin */
+                pcibios_read_config_byte(dev->bus->number, dev->devfn,
+                                         PCI_INTERRUPT_PIN, &pin);
+            }
+            if (irq_tab[slot - min_idsel][pin] != -1)
+                dev->irq = irq_tab[slot - min_idsel][pin];
 #if PCI_MODIFY
             /* tell the device: */
             pcibios_write_config_byte(dev->bus->number, dev->devfn,
...
@@ -489,9 +559,11 @@ static inline void common_fixup(long min_idsel, long max_idsel, long irqs_per_sl
                                       0x000c0000 | PCI_ROM_ADDRESS_ENABLE);
         }
     }
     if (ide_base) {
         enable_ide(ide_base);
     }
 }

 /*
...
drivers/char/pcxx.c

@@ -12,10 +12,15 @@
  * This driver does NOT support DigiBoard's fastcook FEP option and
  * does not support the transparent print (i.e. digiprint) option.
  *
- * Please email any suggestions or bug reports to troyd@skypoint.com
+ * This Driver is currently maintained by Christoph Lameter (clameter@fuller.edu)
+ * Please contact the mailing list for problems first.
+ *
+ * Sources of Information:
+ * 1. The Linux Digiboard Page at http://private.fuller.edu/clameter/digi.html
+ * 2. The Linux Digiboard Mailing list at digiboard@list.fuller.edu
+ *    (Simply write a message to introduce yourself to subscribe)
  *
- * January 1996 Bug fixes by an unknown author and released as 1.5.2
+ * 1.5.2 Fall 1995 Bug fixes by David Nugent
  * 1.5.3 March 9, 1996 Christoph Lameter: Fixed 115.2K Support. Memory
  *       allocation harmonized with 1.3.X Series.
  * 1.5.4 March 30, 1996 Christoph Lameter: Fixup for 1.3.81. Use init_bh
...
@@ -30,6 +35,7 @@
 The driver supports the native 57.6K and 115K Baudrates under Linux, but
 some distributions like Slackware 3.0 dont like these high baudrates.
 */
+#include <linux/mm.h>
 #include <linux/ioport.h>
 #include <linux/errno.h>
...
drivers/net/3c509.c

@@ -765,9 +765,17 @@ static struct device dev_3c509 = {
     0, 0, 0, 0,
     0, NULL, el3_probe };

+static int io = 0;
+static int irq = 0;
+
 int init_module(void)
 {
+    dev_3c509.base_addr = io;
+    dev_3c509.irq = irq;
+    if (!EISA_bus && !io) {
+        printk("3c509: WARNING! Module load-time probing works reliably only for EISA bus!!\n");
+    }
     if (register_netdev(&dev_3c509) != 0)
         return -EIO;
     return 0;
...
fs/locks.c

@@ -22,11 +22,11 @@
  *  process. Since locks still depend on the process id, locks are inherited
  *  after an exec() but not after a fork(). This agrees with POSIX, and both
  *  BSD and SVR4 practice.
- *  Andy Walker (andy@keo.kvaerner.no), February 14, 1995
+ *  Andy Walker (andy@lysaker.kvaerner.no), February 14, 1995
  *
  *  Scrapped free list which is redundant now that we allocate locks
  *  dynamically with kmalloc()/kfree().
- *  Andy Walker (andy@keo.kvaerner.no), February 21, 1995
+ *  Andy Walker (andy@lysaker.kvaerner.no), February 21, 1995
  *
  *  Implemented two lock personalities - F_FLOCK and F_POSIX.
  *
...
@@ -47,18 +47,21 @@
  *  upgrading from shared to exclusive (or vice versa). When this happens
  *  any processes blocked by the current lock are woken up and allowed to
  *  run before the new lock is applied.
  *
  *  NOTE:
  *  I do not intend to implement mandatory locks unless demand is *HUGE*.
  *  They are not in BSD, and POSIX.1 does not require them. I have never
  *  seen any public code that relied on them. As Kelly Carmichael suggests
  *  above, mandatory locks requires lots of changes elsewhere and I am
  *  reluctant to start something so drastic for so little gain.
- *  Andy Walker (andy@keo.kvaerner.no), June 09, 1995
+ *  Andy Walker (andy@lysaker.kvaerner.no), June 09, 1995
  *
+ *  Removed some race conditions in flock_lock_file(), marked other possible
+ *  races. Just grep for FIXME to see them.
+ *  Dmitry Gorodchanin (begemot@bgm.rosprint.net), Feb 09, 1996.
+ *
+ *  Addressed Dmitry's concerns. Deadlock checking no longer recursive.
+ *  Lock allocation changed to GFP_ATOMIC as we can't afford to sleep
+ *  once we've checked for blocking and deadlocking.
+ *  Andy Walker (andy@lysaker.kvaerner.no), Apr 03, 1996.
+ *
+ *  NOTE:
+ *  Starting to look at mandatory locks - using SunOS as a model.
+ *  Probably a configuration option because mandatory locking can cause
+ *  all sorts of chaos with runaway processes.
  */

 #include <asm/segment.h>
...
@@ -147,7 +150,7 @@ static inline void locks_delete_block(struct file_lock **block,
     }
 }

-/* flock() system call entry point. Apply a FLOCK style locks to
+/* flock() system call entry point. Apply a FLOCK style lock to
  * an open file descriptor.
  */
 asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
...
@@ -167,8 +170,8 @@ asmlinkage int sys_flock(unsigned int fd, unsigned int cmd)
     return (flock_lock_file(filp, &file_lock,
                             cmd & LOCK_UN ? 0 : cmd & LOCK_NB ? 0 : 1));
 }

-/* Report the first existing locks that would conflict with l. This implements
- * the F_GETLK command of fcntl().
+/* Report the first existing lock that would conflict with l.
+ * This implements the F_GETLK command of fcntl().
  */
 int fcntl_getlk(unsigned int fd, struct flock *l)
 {
...
@@ -209,9 +212,10 @@ int fcntl_getlk(unsigned int fd, struct flock *l)
     return (0);
 }

-/* Apply the lock described by l to an open file descriptor. This implements
- * both the F_SETLK and F_SETLKW commands of fcntl(). It also emulates flock()
- * in a pretty broken way for older C libraries.
+/* Apply the lock described by l to an open file descriptor.
+ * This implements both the F_SETLK and F_SETLKW commands of fcntl().
+ * It also emulates flock() in a pretty broken way for older C
+ * libraries.
  */
 int fcntl_setlk(unsigned int fd, unsigned int cmd, struct flock *l)
 {
...
@@ -335,8 +339,8 @@ static int posix_make_lock(struct file *filp, struct file_lock *fl,
     return (1);
 }

-/* Verify a call to flock() and fill in a file_lock structure with an appropriate
- * FLOCK lock.
+/* Verify a call to flock() and fill in a file_lock structure with
+ * an appropriate FLOCK lock.
  */
 static int flock_make_lock(struct file *filp, struct file_lock *fl,
                            unsigned int cmd)
...
@@ -368,8 +372,8 @@ static int flock_make_lock(struct file *filp, struct file_lock *fl,
     return (1);
 }

-/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific checking
- * before calling the locks_conflict().
+/* Determine if lock sys_fl blocks lock caller_fl. POSIX specific
+ * checking before calling the locks_conflict().
  */
 static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 {
...
@@ -383,8 +387,8 @@ static int posix_locks_conflict(struct file_lock *caller_fl, struct file_lock *s
     return (locks_conflict(caller_fl, sys_fl));
 }

-/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific checking
- * before calling the locks_conflict().
+/* Determine if lock sys_fl blocks lock caller_fl. FLOCK specific
+ * checking before calling the locks_conflict().
  */
 static int flock_locks_conflict(struct file_lock *caller_fl, struct file_lock *sys_fl)
 {
...
@@ -429,15 +433,15 @@ static int locks_overlap(struct file_lock *fl1, struct file_lock *fl2)
             (fl2->fl_end >= fl1->fl_start));
 }

-/* This function tests for deadlock condition before putting a process to sleep.
- * The detection scheme is recursive... we may need a test to make it exit if the
- * function gets stuck due to bad lock data. 4.4 BSD uses a maximum depth of 50
- * for this.
- *
- * FIXME:
- * IMHO this function is dangerous, deep recursion may result in kernel stack
- * corruption. Perhaps we need to limit depth here.
- *        Dmitry Gorodchanin 09/02/96
+/* This function tests for deadlock condition before putting a process to
+ * sleep. The detection scheme is no longer recursive. Recursive was neat,
+ * but dangerous - we risked stack corruption if the lock data was bad, or
+ * if the recursion was too deep for any other reason.
+ *
+ * We rely on the fact that a task can only be on one lock's wait queue
+ * at a time. When we find blocked_task on a wait queue we can re-search
+ * with blocked_task equal to that queue's owner, until either blocked_task
+ * isn't found, or blocked_task is found on a queue owned by my_task.
  */
 static int posix_locks_deadlock(struct task_struct *my_task,
                                 struct task_struct *blocked_task)
...
@@ -445,20 +449,18 @@ static int posix_locks_deadlock(struct task_struct *my_task,
     struct wait_queue *dlock_wait;
     struct file_lock *fl;

+next_task:
     for (fl = file_lock_table; fl != NULL; fl = fl->fl_nextlink) {
-        if (fl->fl_owner == NULL)
-            continue;    /* Should never happen! */
-        if (fl->fl_owner != my_task)
-            continue;
-        if (fl->fl_wait == NULL)
-            continue;    /* no queues */
+        if (fl->fl_owner == NULL || fl->fl_wait == NULL)
+            continue;
         dlock_wait = fl->fl_wait;
         do {
-            if (dlock_wait->task != NULL) {
-                if (dlock_wait->task == blocked_task)
-                    return (-EDEADLOCK);
-                if (posix_locks_deadlock(dlock_wait->task, blocked_task))
-                    return (-EDEADLOCK);
+            if (dlock_wait->task == blocked_task) {
+                if (fl->fl_owner == my_task) {
+                    return (-EDEADLOCK);
+                }
+                blocked_task = fl->fl_owner;
+                goto next_task;
             }
             dlock_wait = dlock_wait->next;
         } while (dlock_wait != fl->fl_wait);
...
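Concretely: if my_task is about to sleep on a lock owned by task T1, and T1 is
itself found sleeping on a wait queue belonging to a lock owned by my_task,
the re-search terminates with -EDEADLOCK; because a task can only wait on one
lock at a time, the walk never needs recursion or an explicit depth limit.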
@@ -466,7 +468,7 @@ static int posix_locks_deadlock(struct task_struct *my_task,
     return (0);
 }

-/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorting
+/* Try to create a FLOCK lock on filp. We rely on FLOCK locks being sorted
  * first in an inode's lock list, and always insert new locks at the head
  * of the list.
  */
...
@@ -628,9 +630,10 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
             }
             caller = fl;
             added = 1;
             goto next_lock;
         }
-        /* Processing for different lock types is a bit more complex.
-         */
-        if (fl->fl_end < caller->fl_start)
-            goto next_lock;
+        else {
+            /* Processing for different lock types is a bit
+             * more complex.
+             */
+            if (fl->fl_end < caller->fl_start)
+                goto next_lock;
...
@@ -640,24 +643,25 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
             added = 1;
             if (fl->fl_start < caller->fl_start)
                 left = fl;
-            /* If the next lock in the list has a higher end address than
-             * the new one, insert the new one here.
+            /* If the next lock in the list has a higher end
+             * address than the new one, insert the new one here.
              */
             if (fl->fl_end > caller->fl_end) {
                 right = fl;
                 break;
             }
             if (fl->fl_start >= caller->fl_start) {
-                /* The new lock completely replaces an old one (This may
-                 * happen several times).
+                /* The new lock completely replaces an old
+                 * one (This may happen several times).
                  */
                 if (added) {
                     locks_delete_lock(before, 0);
                     continue;
                 }
-                /* Replace the old lock with the new one. Wake up
-                 * anybody waiting for the old one, as the change in
-                 * lock type might satisfy his needs.
+                /* Replace the old lock with the new one.
+                 * Wake up anybody waiting for the old one,
+                 * as the change in lock type might satisfy
+                 * their needs.
                  */
                 wake_up(&fl->fl_wait);
                 fl->fl_start = caller->fl_start;
...
@@ -666,24 +670,13 @@ static int posix_lock_file(struct file *filp, struct file_lock *caller,
                 caller = fl;
                 added = 1;
             }
         }
         /* Go on to next lock.
          */
     next_lock:
         before = &(*before)->fl_next;
     }

-    /* FIXME:
-     * Note: We may sleep in locks_alloc_lock(), so
-     * the 'before' pointer may be not valid any more.
-     * This can cause random kernel memory corruption.
-     * It seems the right way is to alloc two locks
-     * at the begining of this func, and then free them
-     * if they were not needed.
-     * Another way is to change GFP_KERNEL to GFP_ATOMIC
-     * in locks_alloc_lock() for this case.
-     *
-     * Dmitry Gorodchanin 09/02/96.
-     */
     if (!added) {
         if (caller->fl_type == F_UNLCK)
             return (0);
...
@@ -723,7 +716,7 @@ static struct file_lock *locks_alloc_lock(struct file_lock *fl)
     /* Okay, let's make a new file_lock structure... */
     if ((tmp = (struct file_lock *)kmalloc(sizeof(struct file_lock),
-                                           GFP_KERNEL)) == NULL)
+                                           GFP_ATOMIC)) == NULL)
         return (tmp);

     tmp->fl_nextlink = NULL;
...
@@ -759,11 +752,12 @@ static void locks_insert_lock(struct file_lock **pos, struct file_lock *fl)
 }

 /* Delete a lock and free it.
- * First remove our lock from the lock lists. Then remove all the blocked locks
- * from our blocked list, waking up the processes that own them. If told to wait,
- * then sleep on each of these lock's wait queues. Each blocked process will wake
- * up and immediately wake up its own wait queue allowing us to be scheduled again.
- * Lastly, wake up our own wait queue before freeing the file_lock structure.
+ * First remove our lock from the lock lists. Then remove all the blocked
+ * locks from our blocked list, waking up the processes that own them. If
+ * told to wait, then sleep on each of these lock's wait queues. Each
+ * blocked process will wake up and immediately wake up its own wait queue
+ * allowing us to be scheduled again. Lastly, wake up our own wait queue
+ * before freeing the file_lock structure.
  */
 static void locks_delete_lock(struct file_lock **fl_p, unsigned int wait)
...
include/asm-alpha/atomic.h

@@ -50,7 +50,30 @@ extern __inline__ void atomic_sub(atomic_t i, atomic_t * v)
         :"m" (__atomic_fool_gcc(v)));
 }

+/*
+ * Same as above, but return true if we counted down to zero
+ */
+extern __inline__ int atomic_sub_and_test(atomic_t i, atomic_t * v)
+{
+    unsigned long temp, result;
+    __asm__ __volatile__(
+        "\n1:\t"
+        "ldl_l %0,%1\n\t"
+        "subl %0,%3,%0\n\t"
+        "bis %0,%0,%2\n\t"
+        "stl_c %0,%1\n\t"
+        "beq %0,1b\n"
+        "2:"
+        :"=&r" (temp), "=m" (__atomic_fool_gcc(v)), "=&r" (result)
+        :"Ir" (i), "m" (__atomic_fool_gcc(v)));
+    return result == 0;
+}
+
 #define atomic_inc(v) atomic_add(1,(v))
 #define atomic_dec(v) atomic_sub(1,(v))
+#define atomic_dec_and_test(v) atomic_sub_and_test(1,(v))

 #endif
include/asm-alpha/hwrpb.h

@@ -13,6 +13,8 @@
 #define LCA4_CPU    4    /* LCA4 (21066/21068) */
 #define EV5_CPU     5    /* EV5 (21164) */
 #define EV45_CPU    6    /* EV4.5 (21064/xxx) */
+#define EV56_CPU    7    /* EV5.6 (21164) */
+#define EV6_CPU     8    /* EV6 (21164) */

 /*
  * DEC system types for Alpha systems. Found in HWRPB.
...
include/asm-alpha/posix_types.h

@@ -10,7 +10,7 @@
 typedef unsigned int    __kernel_dev_t;
 typedef unsigned int    __kernel_ino_t;
 typedef unsigned int    __kernel_mode_t;
-typedef unsigned short  __kernel_nlink_t;
+typedef unsigned int    __kernel_nlink_t;
 typedef long            __kernel_off_t;
 typedef int             __kernel_pid_t;
 typedef unsigned int    __kernel_uid_t;
...
include/asm-i386/atomic.h

@@ -53,4 +53,15 @@ static __inline__ void atomic_dec(atomic_t *v)
         :"m" (__atomic_fool_gcc(v)));
 }

+static __inline__ int atomic_dec_and_test(atomic_t *v)
+{
+    unsigned char c;
+    __asm__ __volatile__(
+        LOCK "decl %0; sete %1"
+        :"=m" (__atomic_fool_gcc(v)), "=qm" (c)
+        :"m" (__atomic_fool_gcc(v)));
+    return c != 0;
+}
+
 #endif
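The pattern these helpers enable, dropping a reference and acting only when
the count hits zero in one step, is what __kfree_skbmem() in net/core/skbuff.c
below switches to. A minimal user-space sketch of the idiom (hypothetical
names; a plain int stands in for atomic_t, which in this era is itself a plain
int typedef, so the only thing missing here is the atomicity of the
decrement-and-test that the kernel helpers provide):

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
        int count;          /* reference count, initialised to the number of holders */
        char payload[16];
    };

    static int dec_and_test(int *v)
    {
        return --(*v) == 0; /* NOT atomic; atomic_dec_and_test() is */
    }

    static void obj_put(struct obj *o)
    {
        /* Exactly one caller sees the count reach zero and frees. */
        if (dec_and_test(&o->count))
            free(o);
    }

    int main(void)
    {
        struct obj *o = malloc(sizeof(*o));
        o->count = 2;   /* two references outstanding */
        obj_put(o);     /* count 2 -> 1, no free */
        obj_put(o);     /* count 1 -> 0, frees */
        printf("done\n");
        return 0;
    }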
include/asm-i386/system.h

@@ -221,10 +221,10 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
 #define cli() __asm__ __volatile__ ("cli": : :"memory")

 #define save_flags(x) \
-__asm__ __volatile__("pushfl ; popl %0":"=r" (x): /* no input */ :"memory")
+__asm__ __volatile__("pushfl ; popl %0":"=g" (x): /* no input */ :"memory")

 #define restore_flags(x) \
-__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"r" (x):"memory")
+__asm__ __volatile__("pushl %0 ; popfl": /* no output */ :"g" (x):"memory")

 #define iret() __asm__ __volatile__ ("iret": : :"memory")
...
include/linux/skbuff.h

@@ -18,6 +18,8 @@
 #include <linux/time.h>
 #include <linux/config.h>
 #include <asm/atomic.h>

+#define CONFIG_SKB_CHECK 0
+
 #define HAVE_ALLOC_SKB        /* For the drivers to know */
...
@@ -101,7 +103,7 @@ struct sk_buff
     unsigned short    protocol;    /* Packet protocol from driver. */
     unsigned short    truesize;    /* Buffer size */
-    int               count;       /* reference count */
+    atomic_t          count;       /* reference count */
     struct sk_buff    *data_skb;   /* Link to the actual data skb */
     unsigned char     *head;       /* Head of buffer */
     unsigned char     *data;       /* Data head pointer */
...
@@ -300,14 +302,13 @@ extern __inline__ struct sk_buff *skb_dequeue(struct sk_buff_head *list)
 }

 /*
- *    Insert a packet before another one in a list.
+ *    Insert a packet on a list.
  */
-extern __inline__ void __skb_insert(struct sk_buff *next, struct sk_buff *newsk,
+extern __inline__ void __skb_insert(struct sk_buff *newsk,
+    struct sk_buff *prev, struct sk_buff *next,
     struct sk_buff_head *list)
 {
-    struct sk_buff *prev = next->prev;
-
     newsk->next = next;
     newsk->prev = prev;
     next->prev = newsk;
...
@@ -316,13 +317,16 @@ extern __inline__ void __skb_insert(struct sk_buff *next, struct sk_buff *newsk,
     list->qlen++;
 }

+/*
+ *    Place a packet before a given packet in a list
+ */
 extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 {
     unsigned long flags;

     save_flags(flags);
     cli();
-    __skb_insert(old, newsk, old->list);
+    __skb_insert(newsk, old->prev, old, old->list);
     restore_flags(flags);
 }
...
@@ -330,26 +334,13 @@ extern __inline__ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 /*
  *    Place a packet after a given packet in a list.
  */
-extern __inline__ void __skb_append(struct sk_buff *prev, struct sk_buff *newsk,
-    struct sk_buff_head *list)
-{
-    struct sk_buff *next = prev->next;
-
-    newsk->next = next;
-    newsk->prev = prev;
-    next->prev = newsk;
-    prev->next = newsk;
-    newsk->list = list;
-    list->qlen++;
-}
-
 extern __inline__ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
 {
     unsigned long flags;

     save_flags(flags);
     cli();
-    __skb_append(old, newsk, old->list);
+    __skb_insert(newsk, old, old->next, old->list);
     restore_flags(flags);
 }
...
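The net effect is that the single four-argument __skb_insert() now covers
both cases: skb_insert() passes (newsk, old->prev, old, old->list) to place
before, skb_append() passes (newsk, old, old->next, old->list) to place
after, and tcp_insert_skb() in net/ipv4/tcp_input.c below uses it to splice a
segment between two arbitrary neighbours found by its own search.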
kernel/sched.c

@@ -179,11 +179,15 @@ static inline void move_last_runqueue(struct task_struct * p)
     struct task_struct *next = p->next_run;
     struct task_struct *prev = p->prev_run;

     /* remove from list */
     next->prev_run = prev;
     prev->next_run = next;
     /* add back to list */
     p->next_run = &init_task;
-    (p->prev_run = init_task.prev_run)->next_run = p;
+    prev = init_task.prev_run;
     init_task.prev_run = p;
+    p->prev_run = prev;
+    prev->next_run = p;
 }

 /*
...
mm/kmalloc.c

@@ -78,7 +78,6 @@ struct page_descriptor {
 struct size_descriptor {
     struct page_descriptor *firstfree;
     struct page_descriptor *dmafree; /* DMA-able memory */
-    int size;
     int nblocks;

     int nmallocs;
...
@@ -91,49 +90,85 @@ struct size_descriptor {
 /*
  * For now it is unsafe to allocate bucket sizes between n and
  * n-sizeof(page_descriptor) where n is PAGE_SIZE * any power of two
+ *
+ * The blocksize and sizes arrays _must_ match!
  */
 #if PAGE_SIZE == 4096
-struct size_descriptor sizes[] =
+static const unsigned int blocksize[] = {
+    32, 64, 128, 252, 508, 1020, 2040,
+    4096 - 16, 8192 - 16, 16384 - 16, 32768 - 16, 65536 - 16, 131072 - 16,
+    0
+};
+
+static struct size_descriptor sizes[] =
 {
-    { NULL, NULL,     32,      127, 0, 0, 0, 0, 0 },
-    { NULL, NULL,     64,       63, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    128,       31, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    252,       16, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    508,        8, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   1020,        4, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   2040,        2, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   4096 - 16,   1, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   8192 - 16,   1, 0, 0, 0, 0, 1 },
-    { NULL, NULL,  16384 - 16,   1, 0, 0, 0, 0, 2 },
-    { NULL, NULL,  32768 - 16,   1, 0, 0, 0, 0, 3 },
-    { NULL, NULL,  65536 - 16,   1, 0, 0, 0, 0, 4 },
-    { NULL, NULL, 131072 - 16,   1, 0, 0, 0, 0, 5 },
-    { NULL, NULL,          0,    0, 0, 0, 0, 0, 0 }
+    { NULL, NULL, 127, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  63, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  31, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  16, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   8, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   4, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   2, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 1 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 2 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 3 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 4 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 5 },
+    { NULL, NULL,   0, 0, 0, 0, 0, 0 }
 };
 #elif PAGE_SIZE == 8192
+static const unsigned int blocksize[] = {
+    64, 128, 248, 504, 1016, 2040, 4080,
+    8192 - 32, 16384 - 32, 32768 - 32, 65536 - 32, 131072 - 32, 262144 - 32,
+    0
+};
+
 struct size_descriptor sizes[] =
 {
-    { NULL, NULL,     64,      127, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    128,       63, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    248,       31, 0, 0, 0, 0, 0 },
-    { NULL, NULL,    504,       16, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   1016,        8, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   2040,        4, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   4080,        2, 0, 0, 0, 0, 0 },
-    { NULL, NULL,   8192 - 32,   1, 0, 0, 0, 0, 0 },
-    { NULL, NULL,  16384 - 32,   1, 0, 0, 0, 0, 1 },
-    { NULL, NULL,  32768 - 32,   1, 0, 0, 0, 0, 2 },
-    { NULL, NULL,  65536 - 32,   1, 0, 0, 0, 0, 3 },
-    { NULL, NULL, 131072 - 32,   1, 0, 0, 0, 0, 4 },
-    { NULL, NULL, 262144 - 32,   1, 0, 0, 0, 0, 5 },
-    { NULL, NULL,          0,    0, 0, 0, 0, 0, 0 }
+    { NULL, NULL, 127, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  63, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  31, 0, 0, 0, 0, 0 },
+    { NULL, NULL,  16, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   8, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   4, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   2, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 0 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 1 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 2 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 3 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 4 },
+    { NULL, NULL,   1, 0, 0, 0, 0, 5 },
+    { NULL, NULL,   0, 0, 0, 0, 0, 0 }
 };
 #else
 #error you need to make a version for your pagesize
 #endif

 #define NBLOCKS(order)          (sizes[order].nblocks)
-#define BLOCKSIZE(order)        (sizes[order].size)
+#define BLOCKSIZE(order)        (blocksize[order])
 #define AREASIZE(order)         (PAGE_SIZE<<(sizes[order].gfporder))
...
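To make the bucket arithmetic concrete, here is a small user-space sketch
(hypothetical, mirroring the loop that kmalloc() now uses inline instead of
calling get_order(); the 8-byte header size is an assumption standing in for
sizeof(struct block_header)):

    #include <stdio.h>

    /* Bucket sizes for PAGE_SIZE == 4096, as in the blocksize[] table above;
     * a zero entry terminates the list. */
    static const unsigned int blocksize[] = {
        32, 64, 128, 252, 508, 1020, 2040,
        4096 - 16, 8192 - 16, 16384 - 16, 32768 - 16, 65536 - 16, 131072 - 16,
        0
    };

    #define HEADER_SIZE 8   /* assumed size of the per-block header */

    int main(void)
    {
        unsigned int realsize = 100 + HEADER_SIZE;  /* as for kmalloc(100) */
        int order = 0;

        /* Same walk as the new inline loop in kmalloc(): stop at the first
         * bucket that fits; hitting the zero entry means "too large". */
        while (blocksize[order] && realsize > blocksize[order])
            order++;
        printf("order %d, bucket %u bytes\n", order, blocksize[order]);
        /* prints: order 2, bucket 128 bytes */
        return 0;
    }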
@@ -160,31 +195,28 @@ long kmalloc_init(long start_mem, long end_mem)
 }

-int get_order(int size)
-{
-    int order;
-
-    /* Add the size of the header */
-    size += sizeof(struct block_header);
-    for (order = 0; BLOCKSIZE(order); order++)
-        if (size <= BLOCKSIZE(order))
-            return order;
-    return -1;
-}
-
 void *kmalloc(size_t size, int priority)
 {
     unsigned long flags;
     unsigned long type;
-    int order, i, sz, dma;
+    int order, dma;
     struct block_header *p;
     struct page_descriptor *page, **pg;

-    order = get_order(size);
-    if (order < 0) {
-        printk("kmalloc of too large a block (%d bytes).\n", (int) size);
-        return (NULL);
+    /* Get order */
+    order = 0;
+    {
+        unsigned int realsize = size + sizeof(struct block_header);
+        for (;;) {
+            int ordersize = BLOCKSIZE(order);
+            if (realsize <= ordersize)
+                break;
+            order++;
+            if (ordersize)
+                continue;
+            printk("kmalloc of too large a block (%d bytes).\n", (int) size);
+            return NULL;
+        }
     }

     dma = 0;
...
@@ -213,11 +245,8 @@ void *kmalloc(size_t size, int priority)
     page = *pg;
     if (page) {
         p = page->firstfree;
-        if (p->bh_flags != MF_FREE) {
-            restore_flags(flags);
-            printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
-            return NULL;
-        }
+        if (p->bh_flags != MF_FREE)
+            goto not_free_on_freelist;
         goto found_it;
     }
...
@@ -225,20 +254,17 @@ void *kmalloc(size_t size, int priority)
     /* This can be done with ints on: This is private to this invocation */
     restore_flags(flags);

+    {
+        int i, sz;
+
         /* sz is the size of the blocks we're dealing with */
         sz = BLOCKSIZE(order);

         page = (struct page_descriptor *) __get_free_pages(priority,
                        sizes[order].gfporder, dma);
-    if (!page) {
-        static unsigned long last = 0;
-        if (priority != GFP_BUFFER && (last + 10*HZ < jiffies)) {
-            last = jiffies;
-            printk("Couldn't get a free page.....\n");
-        }
-        return NULL;
-    }
+        if (!page)
+            goto no_free_page;
         sizes[order].npages++;

         /* Loop for all but last block: */
...
@@ -253,6 +279,7 @@ void *kmalloc(size_t size, int priority)
         page->order = order;
         page->nfree = NBLOCKS(order);
         p = BH(page + 1);
+    }

 /*
  * Now we're going to muck with the "global" freelist
...
@@ -276,6 +303,21 @@ void *kmalloc(size_t size, int priority)
     memset(p + 1, 0xf0, size);
 #endif
     return p + 1;        /* Pointer arithmetic: increments past header */
+
+no_free_page:
+    {
+        static unsigned long last = 0;
+        if (priority != GFP_BUFFER && (last + 10*HZ < jiffies)) {
+            last = jiffies;
+            printk("Couldn't get a free page.....\n");
+        }
+        return NULL;
+    }
+
+not_free_on_freelist:
+    restore_flags(flags);
+    printk("Problem: block on freelist at %08lx isn't free.\n", (long) p);
+    return NULL;
 }

 void kfree(void *ptr)
...
mm/memory.c

@@ -755,7 +755,7 @@ static inline void get_empty_page(struct task_struct * tsk, struct vm_area_struc
     pte = pte_wrprotect(mk_pte(ZERO_PAGE, vma->vm_page_prot));
     if (write_access) {
         unsigned long page = get_free_page(GFP_KERNEL);
-        pte = pte_mkwrite(mk_pte(page, vma->vm_page_prot));
+        pte = pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
         vma->vm_mm->rss++;
         tsk->min_flt++;
         if (!page) {
...
net/core/skbuff.c

@@ -390,22 +390,27 @@ void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
 /*
  *    Insert a packet before another one in a list.
  */
-void __skb_insert(struct sk_buff *old, struct sk_buff *newsk)
+void __skb_insert(struct sk_buff *newsk,
+    struct sk_buff *prev, struct sk_buff *next,
+    struct sk_buff_head *list)
 {
-    IS_SKB(old);
+    IS_SKB(prev);
     IS_SKB(newsk);
+    IS_SKB(next);

-    if (!old->next || !old->prev)
-        printk("insert before unlisted item!\n");
+    if (!prev->next || !prev->prev)
+        printk("insert after unlisted item!\n");
+    if (!next->next || !next->prev)
+        printk("insert before unlisted item!\n");
     if (newsk->next || newsk->prev)
         printk("inserted item is already on a list.\n");

-    newsk->next = old;
-    newsk->prev = old->prev;
-    old->prev = newsk;
-    newsk->prev->next = newsk;
-    newsk->list = old->list;
-    newsk->list->qlen++;
+    newsk->next = next;
+    newsk->prev = prev;
+    next->prev = newsk;
+    prev->next = newsk;
+    newsk->list = list;
+    list->qlen++;
 }
...
@@ -437,25 +442,6 @@ void skb_append(struct sk_buff *old, struct sk_buff *newsk)
     restore_flags(flags);
 }

-void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
-{
-    IS_SKB(old);
-    IS_SKB(newsk);
-
-    if (!old->next || !old->prev)
-        printk("append before unlisted item!\n");
-    if (newsk->next || newsk->prev)
-        printk("append item is already on a list.\n");
-
-    newsk->prev = old;
-    newsk->next = old->next;
-    newsk->next->prev = newsk;
-    old->next = newsk;
-    newsk->list = old->list;
-    newsk->list->qlen++;
-}
-
 /*
  *    Remove an sk_buff from its list. Works even without knowing the list it
  *    is sitting on, which can be handy at times. It also means that THE LIST
...
@@ -724,30 +710,26 @@ struct sk_buff *alloc_skb(unsigned int size,int priority)
 static inline void __kfree_skbmem(struct sk_buff *skb)
 {
     /* don't do anything if somebody still uses us */
-    if (--skb->count <= 0) {
+    if (atomic_dec_and_test(&skb->count)) {
         kfree(skb->head);
-        net_skbcount--;
+        atomic_dec(&net_skbcount);
     }
 }

 void kfree_skbmem(struct sk_buff *skb)
 {
     unsigned long flags;
     void * addr = skb->head;

     save_flags(flags);
     cli();
     /* don't do anything if somebody still uses us */
-    if (--skb->count <= 0) {
+    if (atomic_dec_and_test(&skb->count)) {
         /* free the skb that contains the actual data if we've clone()'d */
         if (skb->data_skb) {
             addr = skb;
             __kfree_skbmem(skb->data_skb);
         }
         kfree(addr);
-        net_skbcount--;
+        atomic_dec(&net_skbcount);
     }
     restore_flags(flags);
 }

 /*
...
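Note how these changes dovetail with the atomic_dec_and_test() helpers added
to asm-alpha/atomic.h and asm-i386/atomic.h above: the decrement and the zero
test become a single atomic operation, so there is no window between "--count"
and "<= 0" in which an interrupt handler could also drop a reference and both
paths decide to free.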
net/ipv4/tcp_input.c

@@ -1183,6 +1183,31 @@ static int tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
     return (0);
 }

+/*
+ * Add a sk_buff to the TCP receive queue, calculating
+ * the ACK sequence as we go..
+ */
+static inline void tcp_insert_skb(struct sk_buff *skb, struct sk_buff_head *list)
+{
+    struct sk_buff *prev, *next;
+    u32 seq;
+
+    /*
+     * Find where the new skb goes.. (This goes backwards,
+     * on the assumption that we get the packets in order)
+     */
+    seq = skb->seq;
+    prev = list->prev;
+    next = (struct sk_buff *) list;
+    for (;;) {
+        if (prev == (struct sk_buff *) list || !after(prev->seq, seq))
+            break;
+        next = prev;
+        prev = prev->prev;
+    }
+    __skb_insert(skb, prev, next, list);
+}
+
 /*
  * Called for each packet when we find a new ACK endpoint sequence in it
  */
...
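Scanning backwards from list->prev means an in-order segment terminates the
loop on the first comparison, since the current tail then has a lower sequence
number, so the common case costs O(1); only out-of-order arrivals walk further
up the queue.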
@@ -1198,46 +1223,26 @@ static inline u32 tcp_queue_ack(struct sk_buff * skb, struct sock * sk)
     return skb->end_seq;
 }

-/*
- * Add a sk_buff to the TCP receive queue, calculating
- * the ACK sequence as we go..
- */
 static void tcp_queue(struct sk_buff * skb, struct sock * sk,
     struct tcphdr *th, unsigned long saddr)
 {
-    struct sk_buff_head * list = &sk->receive_queue;
-    struct sk_buff * next;
     u32 ack_seq;

-    /*
-     * Find where the new skb goes.. (This goes backwards,
-     * on the assumption that we get the packets in order)
-     */
-    next = list->prev;
-    while (next != (struct sk_buff *) list) {
-        if (!after(next->seq, skb->seq))
-            break;
-        next = next->prev;
-    }
-    /*
-     * put it after the packet we found (which
-     * may be the list-head, but that's fine).
-     */
-    __skb_append(next, skb, list);
-    next = skb->next;
+    tcp_insert_skb(skb, &sk->receive_queue);

     /*
      * Did we get anything new to ack?
      */
     ack_seq = sk->acked_seq;
     if (!after(skb->seq, ack_seq) && after(skb->end_seq, ack_seq)) {
+        struct sk_buff_head * list = &sk->receive_queue;
+        struct sk_buff * next;
         ack_seq = tcp_queue_ack(skb, sk);

         /*
          * Do we have any old packets to ack that the above
          * made visible? (Go forward from skb)
          */
+        next = skb->next;
         while (next != (struct sk_buff *) list) {
             if (after(next->seq, ack_seq))
                 break;
...
@@ -1471,67 +1476,60 @@ static inline void tcp_urg(struct sock *sk, struct tcphdr *th, unsigned long len
     }
 }

-/*
- * Throw out all unnecessary packets: we've gone over the
- * receive queue limit. This shouldn't happen in a normal
- * TCP connection, but we might have gotten duplicates etc.
- */
-static inline void tcp_forget_unacked(struct sk_buff_head * list)
-{
-    for (;;) {
-        struct sk_buff * skb = list->prev;
-
-        /* gone through it all? */
-        if (skb == (struct sk_buff *) list)
-            break;
-        if (skb->acked)
-            break;
-        __skb_unlink(skb, list);
-    }
-}
-
 /*
  * This should be a bit smarter and remove partially
  * overlapping stuff too, but this should be good
  * enough for any even remotely normal case (and the
  * worst that can happen is that we have a few
  * unnecessary packets in the receive queue).
+ *
+ * This function is never called with an empty list..
  */
 static inline void tcp_remove_dups(struct sk_buff_head * list)
 {
-    struct sk_buff * skb = list->next;
+    struct sk_buff * next = list->next;

     for (;;) {
-        struct sk_buff * next;
-
-        if (skb == (struct sk_buff *) list)
+        struct sk_buff * skb = next;
+        next = next->next;
+        if (next == (struct sk_buff *) list)
             break;
-        next = skb->next;
-        if (next->seq == skb->seq) {
-            if (before(next->end_seq, skb->end_seq)) {
-                __skb_unlink(next, list);
-                kfree_skb(next, FREE_READ);
-                next = skb;
-                continue;
-            }
-            __skb_unlink(skb, list);
-            kfree_skb(skb, FREE_READ);
-        }
-        skb = next;
+        if (next->seq != skb->seq)
+            continue;
+        __skb_unlink(skb, list);
+        kfree_skb(skb, FREE_READ);
     }
 }

 /*
  * Throw out all unnecessary packets: we've gone over the
  * receive queue limit. This shouldn't happen in a normal
  * TCP connection, but we might have gotten duplicates etc.
  */
 static void prune_queue(struct sk_buff_head * list)
 {
-    /*
-     * Throw out things we haven't acked.
-     */
-    tcp_forget_unacked(list);
+    for (;;) {
+        struct sk_buff * skb = list->prev;

-    /*
-     * Throw out duplicates
-     */
-    tcp_remove_dups(list);
+        /* gone through it all? */
+        if (skb == (struct sk_buff *) list)
+            break;
+        if (!skb->acked) {
+            __skb_unlink(skb, list);
+            kfree_skb(skb, FREE_READ);
+            continue;
+        }
+        tcp_remove_dups(list);
+        break;
+    }
 }

 /*
  * A TCP packet has arrived.
  * skb->h.raw is the TCP header.
...
net/netsyms.c

@@ -64,6 +64,8 @@ static struct symbol_table net_syms = {
     X(memcpy_fromiovec),
     X(sock_setsockopt),
     X(sock_getsockopt),
+    X(sk_alloc),
+    X(sk_free),
     X(sock_wake_async),
     X(sock_alloc_send_skb),
     X(skb_recv_datagram),
...