Commit 472bbf0a authored Nov 23, 2007 by Linus Torvalds

    Import 2.1.80pre3

parent 8772d71c

Showing 43 changed files with 271 additions and 292 deletions (+271 -292):
Documentation/Configure.help    +23   -0
drivers/block/ide-dma.c          +1   -1
drivers/block/md.c               +1   -1
drivers/char/bttv.c             +14   -1
drivers/char/epca.c              +2   -0
drivers/net/Config.in            +1   -1
drivers/net/hamradio/dmascc.c    +0   -1
drivers/net/hp.c                 +1   -0
drivers/net/ipddp.c              +0   -1
drivers/net/sdla_fr.c            +3   -4
drivers/net/sdla_ppp.c           +2   -3
drivers/net/sdla_x25.c           +2   -3
drivers/net/shaper.c             +6   -7
drivers/net/strip.c              +0   -1
drivers/net/tulip.c             +12   -8
drivers/scsi/scsi.c              +3   -4
fs/buffer.c                      +1   -1
fs/nfs/dir.c                    +18  -38
include/asm-alpha/processor.h    +1   -1
include/asm-i386/processor.h     +1   -1
include/asm-i386/uaccess.h       +9   -9
include/linux/dcache.h           +1   -1
include/linux/fs.h               +1   -1
include/linux/mm.h              +22  -14
include/linux/slab.h             +9   -9
include/linux/swap.h             +1   -1
mm/filemap.c                     +3   -3
mm/mmap.c                        +7   -0
mm/page_alloc.c                  +8  -11
mm/simp.c                        +2   -2
mm/slab.c                       +11  -37
mm/vmscan.c                     +29  -25
net/bridge/br.c                 +17  -25
net/core/dev.c                   +2   -2
net/core/neighbour.c             +6   -6
net/core/skbuff.c                +8   -8
net/core/sock.c                 +28  -56
net/ipv4/Config.in               +3   -0
net/ipv4/ip_forward.c            +2   -1
net/ipv4/rarp.c                  +3   -0
net/ipv4/sysctl_net_ipv4.c       +4   -2
net/ipv6/ndisc.c                 +2   -0
net/netsyms.c                    +1   -2
Documentation/Configure.help
@@ -725,6 +725,29 @@ CONFIG_NET
   see http://www.inka.de/sites/lina/linux/NetTools/index_en.html for
   details.

+Fast switching (read help!)
+CONFIG_NET_FASTROUTE
+  Enable direct NIC-to-NIC data transfers.
+  *** This option is NOT COMPATIBLE with several important ***
+  *** networking options: especially CONFIG*FIREWALL.      ***
+  However, it will work with all options in the CONFIG_IP_ADVANCED_ROUTER
+  section (except for CONFIG_IP_ROUTE_TOS). At the moment only a few
+  devices support it (tulip is one of them; a modified 8390 driver can
+  be found at ftp://ftp.inr.ac.ru/ip-routing/fastroute-8390.tar.gz).
+  Remember, short cuts make long delays :-), so say N.
+
+Forwarding between high speed interfaces
+CONFIG_NET_HW_FLOWCONTROL
+  This option enables NIC hardware throttling during periods of
+  extreme congestion. At the moment only a couple of device drivers
+  support it (really only one --- tulip; a modified 8390 driver can
+  be found at ftp://ftp.inr.ac.ru/ip-routing/fastroute-8390.tar.gz).
+  Really, this option is applicable to any machine attached to a
+  sufficiently fast network, and even a 10Mb NIC is able to kill a
+  not-very-slow box, such as a 120MHz Pentium.
+  However, do not enable this option unless you have experienced
+  serious problems.
+
 Network aliasing
 CONFIG_NET_ALIAS
   This will allow you to set multiple network addresses on the same
drivers/block/ide-dma.c
@@ -306,7 +306,7 @@ void ide_setup_dma (ide_hwif_t *hwif, unsigned long dma_base, unsigned int num_p
 		 * safely use __get_free_page() here instead
 		 * of __get_dma_pages() -- no ISA limitations.
 		 */
-		dmatable = __get_free_pages(GFP_KERNEL, 1, 0);
+		dmatable = __get_free_pages(GFP_KERNEL, 1);
 		leftover = dmatable ? PAGE_SIZE : 0;
 	}
 	if (!dmatable) {
drivers/block/md.c
@@ -493,7 +493,7 @@ static int do_md_stop (int minor, struct inode *inode)
 		/*
 		 * ioctl : one open channel
 		 */
-		printk("STOP_MD md%x failed : i_count=%d, busy=%d\n",
+		printk("STOP_MD md%x failed : i_count=%ld, busy=%d\n",
 			minor, inode->i_count, md_dev[minor].busy);
 		return -EBUSY;
 	}
drivers/char/bttv.c
@@ -1478,7 +1478,20 @@ static void handle_chipset(void)
 	unsigned char bus, devfn;
 	unsigned char b, bo;

+	/* nothing wrong with this one, just checking buffer control config */
+	/* Beware the SiS 85C496 my friend - rev 49 don't work with a bttv */
+	if (!pcibios_find_device(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496,
+				 index, &bus, &devfn)) {
+		printk(KERN_WARNING "BT848 and SIS 85C496 chipset don't always work together.\n");
+	}
+
+	if (!pcibios_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441,
+				 index, &bus, &devfn)) {
+		pcibios_read_config_byte(bus, devfn, 0x53, &b);
+		DEBUG(printk(KERN_INFO "bttv: Host bridge: 82441FX Natoma, "));
+		DEBUG(printk("bufcon=0x%02x\n", b));
+	}
+
 	if (!pcibios_find_device(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82441,
 				 index, &bus, &devfn))
drivers/char/epca.c
@@ -29,8 +29,10 @@
 #ifdef MODVERSIONS
+#ifndef MODULE
 #define MODULE
+#endif
 #endif

 /* -----------------------------------------------------------------------
    This way modules should work regardless if they defined MODULE or
drivers/net/Config.in
@@ -183,7 +183,7 @@ if [ "$CONFIG_WAN_ROUTER" != "n" ]; then
   bool 'WAN drivers' CONFIG_WAN_DRIVERS
   if [ "$CONFIG_WAN_DRIVERS" = "y" ]; then
     dep_tristate 'Sangoma WANPIPE(tm) multiprotocol cards' CONFIG_VENDOR_SANGOMA $CONFIG_WAN_DRIVERS
-    if [ "$CONFIG_VENDOR_SANGOMA" = "y" ]; then
+    if [ "$CONFIG_VENDOR_SANGOMA" != "n" ]; then
       int '   Maximum number of cards' CONFIG_WANPIPE_CARDS 1
       bool '   WANPIPE X.25 support' CONFIG_WANPIPE_X25
       bool '   WANPIPE Frame Relay support' CONFIG_WANPIPE_FR
drivers/net/hamradio/dmascc.c
@@ -41,7 +41,6 @@
 #include <asm/irq.h>
 #include <asm/segment.h>
 #include <net/ax25.h>
-#include <stdio.h>
 #include "z8530.h"
drivers/net/hp.c
@@ -31,6 +31,7 @@ static const char *version =
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/init.h>
+#include <linux/delay.h>
 #include <asm/system.h>
 #include <asm/io.h>
drivers/net/ipddp.c
@@ -256,7 +256,6 @@ static int ipddp_rebuild_header(struct sk_buff *skb)
 	memcpy(skb->data, (void *)&at, sizeof(at));
 	skb->dev = rt->dev;	/* set skb->dev to appropriate device */
-	skb->arp = 1;		/* so the actual device doesn't try to arp it... */
 	skb->protocol = htons(ETH_P_ATALK);	/* Protocol has changed */
 	return 0;
drivers/net/sdla_fr.c
/*****************************************************************************
* sdla_fr.c	WANPIPE(tm) Multiprotocol WAN Link Driver. Frame relay module.
*
* Author(s):	Gene Kozin
...
@@ -673,9 +673,8 @@ static int if_init(struct device *dev)
 	/* Set transmit buffer queue length */
 	dev->tx_queue_len = 30;

 	/* Initialize socket buffers */
-	for (i = 0; i < DEV_NUMBUFFS; ++i)
-		skb_queue_head_init(&dev->buffs[i]);
+	dev_init_buffers(dev);
 	set_chan_state(dev, WAN_DISCONNECTED);
 	return 0;
 }
drivers/net/sdla_ppp.c
@@ -478,9 +478,8 @@ static int if_init (struct device* dev)
 	dev->tx_queue_len = 100;

 	/* Initialize socket buffers */
-	for (i = 0; i < DEV_NUMBUFFS; ++i)
-		skb_queue_head_init(&dev->buffs[i]);
+	dev_init_buffers(dev);
 	return 0;
 }
drivers/net/sdla_x25.c
@@ -572,9 +572,8 @@ static int if_init (struct device* dev)
 	dev->tx_queue_len = 10;

 	/* Initialize socket buffers */
-	for (i = 0; i < DEV_NUMBUFFS; ++i)
-		skb_queue_head_init(&dev->buffs[i]);
+	dev_init_buffers(dev);
 	set_chan_state(dev, WAN_DISCONNECTED);
 	return 0;
 }
drivers/net/shaper.c
@@ -254,7 +254,6 @@ static void shaper_queue_xmit(struct shaper *shaper, struct sk_buff *skb)
 	if (newskb) {
 		newskb->dev = shaper->dev;
-		newskb->arp = 1;
 		newskb->priority = 2;
 		if (sh_debug)
 			printk("Kick new frame to %s, %d\n",
@@ -448,17 +447,17 @@ static int shaper_rebuild_header(struct sk_buff *skb)
 	return v;
 }

-static int shaper_cache(struct dst_entry *dst, struct neighbour *neigh, struct hh_cache *hh)
+static int shaper_cache(struct neighbour *neigh, struct hh_cache *hh)
 {
-	struct shaper *sh = dst->dev->priv;
+	struct shaper *sh = neigh->dev->priv;
 	struct device *tmp;
 	int ret;
 	if (sh_debug)
 		printk("Shaper header cache bind\n");
-	tmp = dst->dev;
-	dst->dev = sh->dev;
-	ret = sh->hard_header_cache(dst, neigh, hh);
-	dst->dev = tmp;
+	tmp = neigh->dev;
+	neigh->dev = sh->dev;
+	ret = sh->hard_header_cache(neigh, hh);
+	neigh->dev = tmp;
 	return ret;
 }
drivers/net/strip.c
@@ -82,7 +82,6 @@ static const char StripVersion[] = "1.2-STUART.CHESHIRE";
 #include <linux/version.h>
 #endif
-#include <stdlib.h>
 #include <asm/system.h>
 #include <asm/uaccess.h>
 #include <asm/bitops.h>
drivers/net/tulip.c
@@ -1100,11 +1100,7 @@ tulip_open(struct device *dev)
 	outl(0x00200000 | 0x4800, ioaddr + CSR0);
 #else
 #ifndef ORIGINAL_TEXT
-#ifndef __SMP__
-#define x86 ((struct cpuinfo_x86*)cpu_data)->x86
-#else
-#error What should we make here?
-#endif
+#define x86 (boot_cpu_data.x86)
 #endif
 	outl(0x00200000 | (x86 <= 4 ? 0x4800 : 0x8000), ioaddr + CSR0);
 	if (x86 <= 4)
@@ -1753,7 +1749,15 @@ tulip_start_xmit(struct sk_buff *skb, struct device *dev)
 #ifdef CONFIG_NET_FASTROUTE
 	cli();
-	dev->tx_semaphore = 0;
+	if (xchg(&dev->tx_semaphore, 0) == 0) {
+		sti();
+		/* With new queueing algorithm returning 1 when dev->tbusy == 0
+		   should not result in lockups, but I am still not sure. --ANK
+		 */
+		if (net_ratelimit())
+			printk(KERN_CRIT "Please check: are you still alive?\n");
+		return 1;
+	}
 #endif
 	/* Block a timer-based transmit from overlapping.  This could better be
 	   done with atomic_swap(1, dev->tbusy), but set_bit() works as well. */
@@ -1764,7 +1768,7 @@ tulip_start_xmit(struct sk_buff *skb, struct device *dev)
 		if (jiffies - dev->trans_start >= TX_TIMEOUT)
 			tulip_tx_timeout(dev);
 #ifdef CONFIG_NET_FASTROUTE
-		dev->tx_semaphore = 0;
+		dev->tx_semaphore = 1;
 #endif
 		return 1;
 	}
@@ -1803,7 +1807,7 @@ tulip_start_xmit(struct sk_buff *skb, struct device *dev)
 	dev->trans_start = jiffies;
 #ifdef CONFIG_NET_FASTROUTE
-	dev->tx_semaphore = 0;
+	dev->tx_semaphore = 1;
 	sti();
 #endif
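Aside: the tulip change replaces a plain store to dev->tx_semaphore with an xchg(), because reading the old value and clearing the flag in one indivisible step is what makes the "is the transmit slot already taken?" test race-free. A minimal user-space sketch of the same idiom, using C11 atomics rather than the kernel's xchg() (all names here are hypothetical stand-ins):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tx_semaphore = 1;   /* 1 = transmit slot free */

static int start_xmit(void)
{
    /* Atomically fetch the old value and clear the flag. If it was
     * already 0, another path holds the slot and we back off -- the
     * same check the new tulip code makes with xchg(&..., 0) == 0. */
    if (atomic_exchange(&tx_semaphore, 0) == 0) {
        puts("slot busy, dropping back");
        return 1;
    }
    puts("slot claimed, transmitting");
    atomic_store(&tx_semaphore, 1);   /* release when done */
    return 0;
}

int main(void)
{
    start_xmit();
    return 0;
}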
drivers/scsi/scsi.c
@@ -1910,7 +1910,7 @@ int scsi_free(void *obj, unsigned int len)
 int scsi_loadable_module_flag; /* Set after we scan builtin drivers */

-void * scsi_init_malloc(unsigned int size, int priority)
+void * scsi_init_malloc(unsigned int size, int gfp_mask)
 {
 	void * retval;
@@ -1923,10 +1923,9 @@ void * scsi_init_malloc(unsigned int size, int priority)
 		for (order = 0, a_size = PAGE_SIZE;
 		     a_size < size; order++, a_size <<= 1)
 			;
-		retval = (void *) __get_dma_pages(priority & GFP_LEVEL_MASK,
-						  order);
+		retval = (void *) __get_free_pages(gfp_mask | GFP_DMA, order);
 	} else
-		retval = kmalloc(size, priority);
+		retval = kmalloc(size, gfp_mask);

 	if (retval)
 		memset(retval, 0, size);
fs/buffer.c
@@ -1706,7 +1706,7 @@ void show_buffers(void)
 void buffer_init(void)
 {
 	hash_table = (struct buffer_head **)
-		__get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER, 0);
+		__get_free_pages(GFP_ATOMIC, HASH_PAGES_ORDER);
 	if (!hash_table)
 		panic("Failed to allocate buffer hash table\n");
 	memset(hash_table, 0, NR_HASH * sizeof(struct buffer_head *));
fs/nfs/dir.c
@@ -494,49 +494,29 @@ struct dentry_operations nfs_dentry_operations = {
 	NULL,			/* d_hash */
 	NULL,			/* d_compare */
 	nfs_dentry_delete,	/* d_delete(struct dentry *) */
-	nfs_dentry_iput,	/* d_iput(struct dentry *, struct inode *) */
-	nfs_dentry_release	/* d_release(struct dentry *) */
+	nfs_dentry_release,	/* d_release(struct dentry *) */
+	nfs_dentry_iput		/* d_iput(struct dentry *, struct inode *) */
 };

 #ifdef NFS_PARANOIA
 /*
  * Display all dentries holding the specified inode.
  */
-static void show_dentry(struct inode * inode)
+static void show_dentry(struct list_head * dlist)
 {
-	struct dentry *parent = inode->i_sb->s_root;
-	struct dentry *this_parent = parent;
-	struct list_head *next;
-
-repeat:
-	next = this_parent->d_subdirs.next;
-resume:
-	while (next != &this_parent->d_subdirs) {
-		struct list_head *tmp = next;
-		struct dentry *dentry = list_entry(tmp, struct dentry, d_child);
-		next = tmp->next;
-		if (dentry->d_inode == inode) {
-			int unhashed = list_empty(&dentry->d_hash);
-			printk("show_dentry: %s/%s, d_count=%d%s\n",
-				dentry->d_parent->d_name.name,
-				dentry->d_name.name, dentry->d_count,
-				unhashed ? "(unhashed)" : "");
-		}
-		/*
-		 * Descend a level if the d_subdirs list is non-empty.
-		 */
-		if (!list_empty(&dentry->d_subdirs)) {
-			this_parent = dentry;
-			goto repeat;
-		}
-	}
-	/*
-	 * All done at this level ... ascend and resume the search.
-	 */
-	if (this_parent != parent) {
-		next = this_parent->d_child.next;
-		this_parent = this_parent->d_parent;
-		goto resume;
+	struct list_head *tmp = dlist;
+
+	while ((tmp = tmp->next) != dlist) {
+		struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
+		const char *unhashed = "";
+
+		if (list_empty(&dentry->d_hash))
+			unhashed = "(unhashed)";
+
+		printk("show_dentry: %s/%s, d_count=%d%s\n",
+			dentry->d_parent->d_name.name,
+			dentry->d_name.name, dentry->d_count,
+			unhashed);
 	}
 }
 #endif
@@ -602,7 +582,7 @@ if (inode->i_count > (S_ISDIR(inode->i_mode) ? 1 : inode->i_nlink)) {
 		printk("nfs_lookup: %s/%s ino=%ld in use, count=%d, nlink=%d\n",
 			dentry->d_parent->d_name.name, dentry->d_name.name,
 			inode->i_ino, inode->i_count, inode->i_nlink);
-		show_dentry(inode);
+		show_dentry(&inode->i_dentry);
 	}
 #endif
 no_entry:
@@ -637,7 +617,7 @@ if (inode->i_count > (S_ISDIR(inode->i_mode) ? 1 : inode->i_nlink)) {
 		printk("nfs_instantiate: %s/%s ino=%ld in use, count=%d, nlink=%d\n",
 			dentry->d_parent->d_name.name, dentry->d_name.name,
 			inode->i_ino, inode->i_count, inode->i_nlink);
-		show_dentry(inode);
+		show_dentry(&inode->i_dentry);
 	}
 #endif
 	d_instantiate(dentry, inode);
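Aside: the rewritten show_dentry() no longer crawls the whole dentry tree looking for matches; it walks the inode's own i_dentry alias list, a circular list whose head node is not itself an entry. A self-contained sketch of that traversal pattern with simplified stand-in types (not the kernel's):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct dentry {
    const char *name;
    struct list_head d_alias;   /* links all dentries aliasing one inode */
};

/* recover the containing structure from its embedded list node */
#define list_entry(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

static void show_aliases(struct list_head *dlist)
{
    struct list_head *tmp = dlist;

    /* The head is a sentinel, not an entry; stop when we return to it. */
    while ((tmp = tmp->next) != dlist) {
        struct dentry *dentry = list_entry(tmp, struct dentry, d_alias);
        printf("alias: %s\n", dentry->name);
    }
}

int main(void)
{
    struct list_head head = { &head, &head };
    struct dentry a = { "a", { NULL, NULL } }, b = { "b", { NULL, NULL } };

    /* hand-rolled tail insertions */
    a.d_alias.next = &head;       a.d_alias.prev = head.prev;
    head.prev->next = &a.d_alias; head.prev = &a.d_alias;
    b.d_alias.next = &head;       b.d_alias.prev = head.prev;
    head.prev->next = &b.d_alias; head.prev = &b.d_alias;

    show_aliases(&head);
    return 0;
}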
include/asm-alpha/processor.h
@@ -93,7 +93,7 @@ extern void release_thread(struct task_struct *);
 /* NOTE: The task struct and the stack go together! */
 #define alloc_task_struct() \
-	((struct task_struct *) __get_free_pages(GFP_KERNEL,1,0))
+	((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)	free_pages((unsigned long)(p),1)

 #define init_task (init_task_union.task)
include/asm-i386/processor.h
@@ -205,7 +205,7 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
  * NOTE! The task struct and the stack go together
  */
 #define alloc_task_struct() \
-	((struct task_struct *) __get_free_pages(GFP_KERNEL,1,0))
+	((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
 #define free_task_struct(p)	free_pages((unsigned long)(p),1)

 #define init_task (init_task_union.task)
include/asm-i386/uaccess.h
@@ -132,15 +132,15 @@ extern void __put_user_bad(void);
 	:"0" (ptr),"d" (x) \
 	:"cx")

-#define put_user(x,ptr) \
-({ int __ret_pu; \
-   switch(sizeof (*(ptr))) { \
-     case 1: __put_user_x(1,__ret_pu,(char)(x),ptr); break; \
-     case 2: __put_user_x(2,__ret_pu,(short)(x),ptr); break; \
-     case 4: __put_user_x(4,__ret_pu,(int)(x),ptr); break; \
-     default: __put_user_x(X,__ret_pu,x,ptr); break; \
-   } \
-   __ret_pu; \
+#define put_user(x,ptr)							\
+({	int __ret_pu;							\
+	switch(sizeof (*(ptr))) {					\
+	case 1: __put_user_x(1,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+	case 2: __put_user_x(2,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+	case 4: __put_user_x(4,__ret_pu,(__typeof__(*(ptr)))(x),ptr); break; \
+	default: __put_user_x(X,__ret_pu,x,ptr); break;			\
+	}								\
+	__ret_pu;							\
 })

 #define __get_user(x,ptr) \
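Aside: the gain from the __typeof__ cast is that the value is converted according to the destination's declared type, rather than through an integer type picked purely by size. The user-space model below exaggerates the difference with a float destination (kernel code never put_user()s floats, and the int* punning deliberately mimics the old size-based cast; this is only a model of the cast semantics):

#include <stdio.h>

/* old style: choose the cast by sizeof alone (4 bytes -> int) */
#define STORE_OLD(x, ptr) (*(int *)(ptr) = (int)(x))
/* new style: cast to the type actually pointed to */
#define STORE_NEW(x, ptr) (*(ptr) = (__typeof__(*(ptr)))(x))

int main(void)
{
    float f1 = 0.0f, f2 = 0.0f;

    STORE_OLD(1.5f, &f1);  /* (int)1.5f == 1, stored as raw integer bits */
    STORE_NEW(1.5f, &f2);  /* converted and stored as a float */
    printf("old-style store: %g, new-style store: %g\n", f1, f2);
    return 0;
}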
include/linux/dcache.h
@@ -75,8 +75,8 @@ struct dentry_operations {
 	int (*d_hash) (struct dentry *, struct qstr *);
 	int (*d_compare) (struct dentry *, struct qstr *, struct qstr *);
 	void (*d_delete)(struct dentry *);
-	void (*d_iput)(struct dentry *, struct inode *);
 	void (*d_release)(struct dentry *);
+	void (*d_iput)(struct dentry *, struct inode *);
 };

 /* the dentry parameter passed to d_hash and d_compare is the parent
include/linux/fs.h
@@ -321,7 +321,7 @@ struct inode {
 	struct list_head	i_dentry;

 	unsigned long		i_ino;
-	unsigned long		i_count;
+	unsigned int		i_count;
 	kdev_t			i_dev;
 	umode_t			i_mode;
 	nlink_t			i_nlink;
include/linux/mm.h
@@ -237,15 +237,15 @@ extern mem_map_t * mem_map;
  * goes to clearing the page. If you want a page without the clearing
  * overhead, just use __get_free_page() directly..
  */
-#define __get_free_page(priority) __get_free_pages((priority),0,0)
-#define __get_dma_pages(priority, order) __get_free_pages((priority),(order),1)
-extern unsigned long FASTCALL(__get_free_pages(int priority, unsigned long gfporder, int dma));
+#define __get_free_page(gfp_mask) __get_free_pages((gfp_mask),0)
+#define __get_dma_pages(gfp_mask, order) __get_free_pages((gfp_mask) | GFP_DMA,(order))
+extern unsigned long FASTCALL(__get_free_pages(int gfp_mask, unsigned long gfp_order));

-extern inline unsigned long get_free_page(int priority)
+extern inline unsigned long get_free_page(int gfp_mask)
 {
 	unsigned long page;

-	page = __get_free_page(priority);
+	page = __get_free_page(gfp_mask);
 	if (page)
 		clear_page(page);
 	return page;
@@ -297,19 +297,27 @@ extern void truncate_inode_pages(struct inode *, unsigned long);
 extern unsigned long get_cached_page(struct inode *, unsigned long, int);
 extern void put_cached_page(unsigned long);

-#define GFP_BUFFER	0x00
-#define GFP_ATOMIC	0x01
-#define GFP_USER	0x02
-#define GFP_KERNEL	0x03
-#define GFP_NOBUFFER	0x04
-#define GFP_NFS		0x05
+/*
+ * GFP bitmasks..
+ */
+#define __GFP_WAIT	0x01
+#define __GFP_IO	0x02
+#define __GFP_LOW	0x00
+#define __GFP_MED	0x04
+#define __GFP_HIGH	0x08
+
+#define __GFP_DMA	0x80
+
+#define GFP_BUFFER	(__GFP_LOW | __GFP_WAIT)
+#define GFP_ATOMIC	(__GFP_HIGH)
+#define GFP_USER	(__GFP_LOW | __GFP_WAIT | __GFP_IO)
+#define GFP_KERNEL	(__GFP_LOW | __GFP_WAIT | __GFP_IO)
+#define GFP_NFS		(__GFP_MED | __GFP_WAIT | __GFP_IO)

 /* Flag - indicates that the buffer will be suitable for DMA.  Ignored on some
    platforms, used as appropriate on others */

-#define GFP_DMA		0x80
-
-#define GFP_LEVEL_MASK	0xf
+#define GFP_DMA		__GFP_DMA

 /* vma is the first one with  address < vma->vm_end,
  * and even  address < vma->vm_start. Have to extend vma. */
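Aside: this hunk is the heart of the commit. GFP_* stops being a small enumeration of "priority levels" and becomes a set of OR-able capability bits, which is why so many files in this diff drop their separate dma/wait parameters in favour of a single gfp_mask. A minimal user-space model of the new scheme (the constants mirror the defines above; describe() is a toy stand-in for allocator decisions):

#include <stdio.h>

#define __GFP_WAIT 0x01
#define __GFP_IO   0x02
#define __GFP_LOW  0x00
#define __GFP_MED  0x04
#define __GFP_HIGH 0x08
#define __GFP_DMA  0x80

#define GFP_ATOMIC (__GFP_HIGH)
#define GFP_KERNEL (__GFP_LOW | __GFP_WAIT | __GFP_IO)

static void describe(int gfp_mask)
{
    /* callees can now test each capability independently */
    printf("may sleep: %s, may start IO: %s, DMA memory: %s\n",
           (gfp_mask & __GFP_WAIT) ? "yes" : "no",
           (gfp_mask & __GFP_IO)   ? "yes" : "no",
           (gfp_mask & __GFP_DMA)  ? "yes" : "no");
}

int main(void)
{
    describe(GFP_KERNEL);             /* can sleep and start IO */
    describe(GFP_ATOMIC);             /* neither */
    describe(GFP_KERNEL | __GFP_DMA); /* what __get_dma_pages() now passes */
    return 0;
}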
include/linux/slab.h
@@ -15,14 +15,14 @@ typedef struct kmem_cache_s kmem_cache_t;
 #include <asm/cache.h>

 /* flags for kmem_cache_alloc() */
-#define	SLAB_BUFFER	GFP_BUFFER	/* 0x00 */
-#define	SLAB_ATOMIC	GFP_ATOMIC	/* 0x01 */
-#define	SLAB_USER	GFP_USER	/* 0x02 */
-#define	SLAB_KERNEL	GFP_KERNEL	/* 0x03 */
-#define	SLAB_NOBUFFER	GFP_NOBUFFER	/* 0x04 */
-#define	SLAB_NFS	GFP_NFS		/* 0x05 */
-#define	SLAB_DMA	GFP_DMA		/* 0x08 */
-#define	SLAB_LEVEL_MASK	GFP_LEVEL_MASK	/* 0x0f */
+#define	SLAB_BUFFER	GFP_BUFFER
+#define	SLAB_ATOMIC	GFP_ATOMIC
+#define	SLAB_USER	GFP_USER
+#define	SLAB_KERNEL	GFP_KERNEL
+#define	SLAB_NFS	GFP_NFS
+#define	SLAB_DMA	GFP_DMA
+#define	SLAB_LEVEL_MASK	0x0000007fUL
 #define	SLAB_NO_GROW	0x00001000UL	/* don't grow a cache */

 /* flags to pass to kmem_cache_create().
@@ -59,7 +59,7 @@ extern void *kmalloc(size_t, int);
 extern void kfree(const void *);
 extern void kfree_s(const void *, size_t);

-extern int kmem_cache_reap(int, int, int);
+extern void kmem_cache_reap(int);
 extern int get_slabinfo(char *);

 /* System wide caches */
include/linux/swap.h
@@ -49,7 +49,7 @@ struct sysinfo;
 extern int shm_swap(int, int);

 /* linux/mm/vmscan.c */
-extern int try_to_free_page(int, int, int);
+extern int try_to_free_page(int);

 /* linux/mm/page_io.c */
 extern void rw_swap_page(int, unsigned long, char *, int);
mm/filemap.c
@@ -115,7 +115,7 @@ void truncate_inode_pages(struct inode * inode, unsigned long start)
 	}
 }

-int shrink_mmap(int priority, int dma)
+int shrink_mmap(int priority, int gfp_mask)
 {
 	static unsigned long clock = 0;
 	struct page * page;
@@ -134,7 +134,7 @@ int shrink_mmap(int priority, int dma)
 		if (PageLocked(page))
 			goto next;
-		if (dma && !PageDMA(page))
+		if ((gfp_mask & __GFP_DMA) && !PageDMA(page))
 			goto next;
 		/* First of all, regenerate the page's referenced bit
 		   from any buffers in the page */
@@ -173,7 +173,7 @@ int shrink_mmap(int priority, int dma)
 		}

 		/* is it a buffer cache page? */
-		if (bh && try_to_free_buffer(bh, &bh, 6))
+		if ((gfp_mask & __GFP_IO) && bh && try_to_free_buffer(bh, &bh, 6))
 			return 1;
 		break;
mm/mmap.c
@@ -502,6 +502,13 @@ int do_munmap(unsigned long addr, size_t len)
 		mpnt = next;
 	}

+	if (free && (free->vm_start < addr) && (free->vm_end > addr+len)) {
+		if (mm->map_count > MAX_MAP_COUNT) {
+			kmem_cache_free(vm_area_cachep, extra);
+			return -ENOMEM;
+		}
+	}
+
 	/* Ok - we have the memory areas we should free on the 'free' list,
 	 * so release them, and unmap the page range..
 	 * If the one of the segments is only being partially unmapped,
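Aside, for context on the new check: unmapping a range that lies strictly inside a single VMA splits that VMA in two, so the process gains a map entry, and do_munmap() must refuse with -ENOMEM once map_count would exceed MAX_MAP_COUNT. A small user-space program that provokes exactly such a split (on Linux, the result is visible in /proc/self/maps):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    char *p = mmap(NULL, 3 * pg, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED)
        return 1;

    /* Unmap only the middle page: one mapping becomes two. */
    if (munmap(p + pg, pg) != 0)
        return 1;

    printf("hole punched at %p; the mapping is now split in two\n",
           (void *)(p + pg));
    return 0;
}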
mm/page_alloc.c
@@ -204,7 +204,7 @@ do { unsigned long size = 1 << high; \
 	map->age = PAGE_INITIAL_AGE; \
 } while (0)

-unsigned long __get_free_pages(int priority, unsigned long order, int dma)
+unsigned long __get_free_pages(int gfp_mask, unsigned long order)
 {
 	unsigned long flags, maxorder;
@@ -216,28 +216,25 @@ unsigned long __get_free_pages(int priority, unsigned long order, int dma)
 	 * to empty in order to find a free page..
 	 */
 	maxorder = order + NR_MEM_LISTS/3;
-	switch (priority) {
-		case GFP_ATOMIC:
-			maxorder = NR_MEM_LISTS;
-			/* fallthrough - no need to jump around */
-		case GFP_NFS:
-			maxorder += NR_MEM_LISTS/3;
-	}
+	if (gfp_mask & __GFP_MED)
+		maxorder += NR_MEM_LISTS/3;
+	if ((gfp_mask & __GFP_HIGH) || maxorder > NR_MEM_LISTS)
+		maxorder = NR_MEM_LISTS;

-	if (in_interrupt() && priority != GFP_ATOMIC) {
+	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
 		static int count = 0;
 		if (++count < 5) {
 			printk("gfp called nonatomically from interrupt %p\n",
 				__builtin_return_address(0));
-			priority = GFP_ATOMIC;
+			gfp_mask &= ~__GFP_WAIT;
 		}
 	}

 repeat:
 	spin_lock_irqsave(&page_alloc_lock, flags);
-	RMQUEUE(order, maxorder, dma);
+	RMQUEUE(order, maxorder, (gfp_mask & GFP_DMA));
 	spin_unlock_irqrestore(&page_alloc_lock, flags);
-	if (priority != GFP_BUFFER && priority != GFP_ATOMIC && try_to_free_page(priority, dma, 1))
+	if ((gfp_mask & __GFP_WAIT) && try_to_free_page(gfp_mask))
 		goto repeat;
 nopage:
 	return 0;
mm/simp.c
@@ -115,7 +115,7 @@ struct simp * simp_create(char * name, long size,
 	if (!global) {
 #ifdef __SMP__
-		global = (struct global_data *) __get_free_pages(GFP_KERNEL, ORDER, 0);
+		global = (struct global_data *) __get_free_pages(GFP_KERNEL, ORDER);
 		memset(global, 0, CHUNK_SIZE);
 #else
 		global = (struct global_data *) get_free_page(GFP_KERNEL);
@@ -167,7 +167,7 @@ static void alloc_header(struct simp * simp)
 	spin_unlock(&simp->lock);
 	for (;;) {
-		hdr = (struct header *) __get_free_pages(GFP_KERNEL, ORDER, 0);
+		hdr = (struct header *) __get_free_pages(GFP_KERNEL, ORDER);
 		if (hdr)
 			break;
 		if (!simp_garbage())
mm/slab.c
@@ -502,8 +502,7 @@ kmem_getpages(kmem_cache_t *cachep, unsigned long flags, unsigned int *dma)
 	void	*addr;

 	*dma = flags & SLAB_DMA;
-	addr = (void*) __get_free_pages(flags & SLAB_LEVEL_MASK,
-					cachep->c_gfporder, *dma);
+	addr = (void*) __get_free_pages(flags, cachep->c_gfporder);
 	/* Assume that now we have the pages no one else can legally
 	 * messes with the 'struct page's.
 	 * However vm_scan() might try to test the structure to see if
@@ -1716,19 +1715,18 @@ kmem_find_general_cachep(size_t size)
  * This function _cannot_ be called within a int, but it
  * can be interrupted.
  */
-int
-kmem_cache_reap(int pri, int dma, int wait)
+void
+kmem_cache_reap(int gfp_mask)
 {
 	kmem_slab_t	*slabp;
 	kmem_cache_t	*searchp;
 	kmem_cache_t	*best_cachep;
 	unsigned int	 scan;
 	unsigned int	 reap_level;
-	static unsigned long	call_count = 0;

 	if (in_interrupt()) {
 		printk("kmem_cache_reap() called within int!\n");
-		return 0;
+		return;
 	}
@@ -1736,28 +1734,8 @@ kmem_cache_reap(int pri, int dma, int wait)
 	 */
 	down(&cache_chain_sem);

-	scan = 10-pri;
-	if (pri == 6 && !dma) {
-		if (++call_count == 199) {
-			/* Hack Alert!
-			 * Occassionally we try hard to reap a slab.
-			 */
-			call_count = 0UL;
-			reap_level = 0;
-			scan += 2;
-		} else
-			reap_level = 3;
-	} else {
-		if (pri >= 5) {
-			/* We also come here for dma==1 at pri==6, just
-			 * to try that bit harder (assumes that there are
-			 * less DMAable pages in a system - not always true,
-			 * but this doesn't hurt).
-			 */
-			reap_level = 2;
-		} else
-			reap_level = 0;
-	}
+	scan = 10;
+	reap_level = 0;

 	best_cachep = NULL;
 	searchp = clock_searchp;
@@ -1796,7 +1774,7 @@ kmem_cache_reap(int pri, int dma, int wait)
 		}
 		spin_unlock_irq(&searchp->c_spinlock);

-		if (dma && !dma_flag)
+		if ((gfp_mask & GFP_DMA) && !dma_flag)
 			goto next;

 		if (full_free) {
@@ -1809,10 +1787,6 @@ kmem_cache_reap(int pri, int dma, int wait)
 			 * more than one page per slab (as it can be difficult
 			 * to get high orders from gfp()).
 			 */
-			if (pri == 6) {	/* magic '6' from try_to_free_page() */
-				if (searchp->c_gfporder || searchp->c_ctor)
-					full_free--;
-			}
 			if (full_free >= reap_level) {
 				reap_level = full_free;
 				best_cachep = searchp;
@@ -1830,12 +1804,12 @@ kmem_cache_reap(int pri, int dma, int wait)
 	if (!best_cachep) {
 		/* couldn't find anthying to reap */
-		return 0;
+		return;
 	}

 	spin_lock_irq(&best_cachep->c_spinlock);
 	if (!best_cachep->c_growing &&
 	    !(slabp = best_cachep->c_lastp)->s_inuse &&
 	    slabp != kmem_slab_end(best_cachep)) {
-		if (dma) {
+		if (gfp_mask & GFP_DMA) {
 			do {
 				if (slabp->s_dma)
 					goto good_dma;
@@ -1858,11 +1832,11 @@ kmem_cache_reap(int pri, int dma, int wait)
 	 */
 	spin_unlock_irq(&best_cachep->c_spinlock);
 	kmem_slab_destroy(best_cachep, slabp);
-	return 1;
+	return;
 }
 dma_fail:
 	spin_unlock_irq(&best_cachep->c_spinlock);
-	return 0;
+	return;
 }

 #if	SLAB_SELFTEST
mm/vmscan.c
@@ -61,7 +61,7 @@ static void init_swap_timer(void);
  * have died while we slept).
  */
 static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struct* vma,
-	unsigned long address, pte_t * page_table, int dma, int wait)
+	unsigned long address, pte_t * page_table, int gfp_mask)
 {
 	pte_t pte;
 	unsigned long entry;
@@ -78,7 +78,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
 	page_map = mem_map + MAP_NR(page);
 	if (PageReserved(page_map)
 	    || PageLocked(page_map)
-	    || (dma && !PageDMA(page_map)))
+	    || ((gfp_mask & __GFP_DMA) && !PageDMA(page_map)))
 		return 0;
 	/* Deal with page aging.  Pages age from being unused; they
 	 * rejuvenate on being accessed.  Only swap old pages (age==0
@@ -112,7 +112,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
 		set_pte(page_table, __pte(entry));
 		flush_tlb_page(vma, address);
 		tsk->nswap++;
-		rw_swap_page(WRITE, entry, (char *) page, wait);
+		rw_swap_page(WRITE, entry, (char *) page, (gfp_mask & __GFP_WAIT));
 	}
 	/*
 	 * For now, this is safe, because the test above makes
@@ -166,7 +166,7 @@ static inline int try_to_swap_out(struct task_struct * tsk, struct vm_area_struc
  */
 static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct * vma,
-	pmd_t *dir, unsigned long address, unsigned long end, int dma, int wait)
+	pmd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
 {
 	pte_t * pte;
 	unsigned long pmd_end;
@@ -188,7 +188,7 @@ static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct *
 	do {
 		int result;
 		tsk->swap_address = address + PAGE_SIZE;
-		result = try_to_swap_out(tsk, vma, address, pte, dma, wait);
+		result = try_to_swap_out(tsk, vma, address, pte, gfp_mask);
 		if (result)
 			return result;
 		address += PAGE_SIZE;
@@ -198,7 +198,7 @@ static inline int swap_out_pmd(struct task_struct * tsk, struct vm_area_struct *
 }

 static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct * vma,
-	pgd_t *dir, unsigned long address, unsigned long end, int dma, int wait)
+	pgd_t *dir, unsigned long address, unsigned long end, int gfp_mask)
 {
 	pmd_t * pmd;
 	unsigned long pgd_end;
@@ -218,7 +218,7 @@ static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct *
 		end = pgd_end;

 	do {
-		int result = swap_out_pmd(tsk, vma, pmd, address, end, dma, wait);
+		int result = swap_out_pmd(tsk, vma, pmd, address, end, gfp_mask);
 		if (result)
 			return result;
 		address = (address + PMD_SIZE) & PMD_MASK;
@@ -228,7 +228,7 @@ static inline int swap_out_pgd(struct task_struct * tsk, struct vm_area_struct *
 }

 static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
-	pgd_t *pgdir, unsigned long start, int dma, int wait)
+	pgd_t *pgdir, unsigned long start, int gfp_mask)
 {
 	unsigned long end;
@@ -239,7 +239,7 @@ static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
 	end = vma->vm_end;
 	while (start < end) {
-		int result = swap_out_pgd(tsk, vma, pgdir, start, end, dma, wait);
+		int result = swap_out_pgd(tsk, vma, pgdir, start, end, gfp_mask);
 		if (result)
 			return result;
 		start = (start + PGDIR_SIZE) & PGDIR_MASK;
@@ -248,7 +248,7 @@ static int swap_out_vma(struct task_struct * tsk, struct vm_area_struct * vma,
 	return 0;
 }

-static int swap_out_process(struct task_struct * p, int dma, int wait)
+static int swap_out_process(struct task_struct * p, int gfp_mask)
 {
 	unsigned long address;
 	struct vm_area_struct* vma;
@@ -269,7 +269,7 @@ static int swap_out_process(struct task_struct * p, int dma, int wait)
 	address = vma->vm_start;

 	for (;;) {
-		int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, dma, wait);
+		int result = swap_out_vma(p, vma, pgd_offset(p->mm, address), address, gfp_mask);
 		if (result)
 			return result;
 		vma = vma->vm_next;
@@ -286,7 +286,7 @@ static int swap_out_process(struct task_struct * p, int dma, int wait)
  * N.B. This function returns only 0 or 1.  Return values != 1 from
  * the lower level routines result in continued processing.
  */
-static int swap_out(unsigned int priority, int dma, int wait)
+static int swap_out(unsigned int priority, int gfp_mask)
 {
 	struct task_struct * p, * pbest;
 	int counter, assign, max_cnt;
@@ -337,7 +337,7 @@ static int swap_out(unsigned int priority, int dma, int wait)
 		}
 		pbest->swap_cnt--;

-		switch (swap_out_process(pbest, dma, wait)) {
+		switch (swap_out_process(pbest, gfp_mask)) {
 		case 0:
 			/*
 			 * Clear swap_cnt so we don't look at this task
@@ -361,7 +361,7 @@ static int swap_out(unsigned int priority, int dma, int wait)
  * to be.  This works out OK, because we now do proper aging on page
  * contents.
  */
-static inline int do_try_to_free_page(int priority, int dma, int wait)
+static inline int do_try_to_free_page(int gfp_mask)
 {
 	static int state = 0;
 	int i=6;
@@ -369,25 +369,27 @@ static inline int do_try_to_free_page(int priority, int dma, int wait)
 	/* Let the dcache know we're looking for memory ... */
 	shrink_dcache_memory();

 	/* Always trim SLAB caches when memory gets low. */
-	(void) kmem_cache_reap(0, dma, wait);
+	kmem_cache_reap(gfp_mask);

-	/* we don't try as hard if we're not waiting.. */
+	/* We try harder if we are waiting .. */
 	stop = 3;
-	if (wait)
+	if (gfp_mask & __GFP_WAIT)
 		stop = 0;
 	switch (state) {
 		do {
 		case 0:
-			if (shrink_mmap(i, dma))
+			if (shrink_mmap(i, gfp_mask))
 				return 1;
 			state = 1;
 		case 1:
-			if (shm_swap(i, dma))
+			if ((gfp_mask & __GFP_IO) && shm_swap(i, gfp_mask))
 				return 1;
 			state = 2;
 		default:
-			if (swap_out(i, dma, wait))
+			if (swap_out(i, gfp_mask))
 				return 1;
 			state = 0;
 		i--;
@@ -403,12 +405,12 @@ static inline int do_try_to_free_page(int priority, int dma, int wait)
  * now we need this so that we can do page allocations
  * without holding the kernel lock etc.
  */
-int try_to_free_page(int priority, int dma, int wait)
+int try_to_free_page(int gfp_mask)
 {
 	int retval;

 	lock_kernel();
-	retval = do_try_to_free_page(priority, dma, wait);
+	retval = do_try_to_free_page(gfp_mask);
 	unlock_kernel();
 	return retval;
 }
@@ -476,15 +478,17 @@ int kswapd(void *unused)
 	 * go back to sleep to let other tasks run.
 	 */
 	for (fail = 0; fail++ < MAX_SWAP_FAIL;) {
-		int pages, wait;
+		int pages, gfp_mask;

 		pages = nr_free_pages;
 		if (nr_free_pages >= min_free_pages)
 			pages += atomic_read(&nr_async_pages);
 		if (pages >= free_pages_high)
 			break;
-		wait = (pages < free_pages_low);
-		if (try_to_free_page(GFP_KERNEL, 0, wait))
+		gfp_mask = __GFP_IO;
+		if (pages < free_pages_low)
+			gfp_mask |= __GFP_WAIT;
+		if (try_to_free_page(gfp_mask))
 			fail = 0;
 	}
 	/*
net/bridge/br.c
@@ -1000,16 +1000,16 @@ static int hold_timer_expired(int port_no)
 static int send_config_bpdu(int port_no, Config_bpdu *config_bpdu)
 {
-struct sk_buff *skb;
-struct device *dev = port_info[port_no].dev;
-int size;
-unsigned long flags;
-struct ethhdr *eth;
+	struct sk_buff *skb;
+	struct device *dev = port_info[port_no].dev;
+	int size;
+	struct ethhdr *eth;

 	if (port_info[port_no].state == Disabled) {
 		printk(KERN_DEBUG "send_config_bpdu: port %i not valid\n", port_no);
 		return(-1);
 		}
 	if (br_stats.flags & BR_DEBUG)
 		printk("send_config_bpdu: ");
 	/*
@@ -1017,10 +1017,11 @@ struct ethhdr *eth;
 	 */
 	size = dev->hard_header_len + sizeof(Config_bpdu);
 	skb = alloc_skb(size, GFP_ATOMIC);
 	if (skb == NULL) {
 		printk(KERN_DEBUG "send_config_bpdu: no skb available\n");
 		return(-1);
 		}
+	skb->dev = dev;
 	skb->mac.raw = skb->h.raw = skb_put(skb, size);
 	eth = skb->mac.ethernet;
@@ -1049,21 +1050,17 @@ struct ethhdr *eth;
 	/* won't get bridged again... */
 	skb->pkt_bridged = IS_BRIDGED;
-	skb->arp = 1;	/* do not resolve... */
-	save_flags(flags);
-	cli();
-	skb_queue_tail(dev->buffs, skb);
-	restore_flags(flags);
 	skb->dev = dev;
+	dev_queue_xmit(skb);
 	return(0);
 }

 static int send_tcn_bpdu(int port_no, Tcn_bpdu *bpdu)
 {
-struct sk_buff *skb;
-struct device *dev = port_info[port_no].dev;
-int size;
-unsigned long flags;
-struct ethhdr *eth;
+	struct sk_buff *skb;
+	struct device *dev = port_info[port_no].dev;
+	int size;
+	struct ethhdr *eth;

 	if (port_info[port_no].state == Disabled) {
 		printk(KERN_DEBUG "send_tcn_bpdu: port %i not valid\n", port_no);
@@ -1105,11 +1102,8 @@ struct ethhdr *eth;
 	/* mark that we've been here... */
 	skb->pkt_bridged = IS_BRIDGED;
-	skb->arp = 1;	/* do not resolve... */
-	save_flags(flags);
-	cli();
-	skb_queue_tail(dev->buffs, skb);
-	restore_flags(flags);
 	skb->dev = dev;
+	dev_queue_xmit(skb);
 	return(0);
 }
@@ -1199,7 +1193,6 @@ int br_receive_frame(struct sk_buff *skb) /* 3.5 */
 	port = find_port(skb->dev);
-	skb->arp = 1;	/* Received frame so it is resolved */
 	skb->h.raw = skb->mac.raw;
 	eth = skb->mac.ethernet;
 	if (br_stats.flags & BR_DEBUG)
@@ -1519,7 +1512,6 @@ static int br_flood(struct sk_buff *skb, int port)
 			nskb->dev = port_info[i].dev;
 			/* To get here we must have done ARP already,
 			   or have a received valid MAC header */
-			nskb->arp = 1;

 /*			printk("Flood to port %d\n",i);*/
 			nskb->h.raw = nskb->data + ETH_HLEN;
net/core/dev.c
@@ -724,7 +724,7 @@ void netif_rx(struct sk_buff *skb)
 }

 #ifdef CONFIG_BRIDGE
-static inline void handle_bridge(struct skbuff *skb, unsigned short type)
+static inline void handle_bridge(struct sk_buff *skb, unsigned short type)
 {
 	if (br_stats.flags & BR_UP && br_protocol_ok(ntohs(type))) {
@@ -739,7 +739,7 @@ static inline void handle_bridge(struct skbuff *skb, unsigned short type)
 		if (br_receive_frame(skb)) {
 			sti();
-			continue;
+			return;
 		}
 		/*
 		 *	Pull the MAC header off for the copy going to
net/core/neighbour.c
@@ -432,7 +432,7 @@ static void neigh_periodic_timer(unsigned long arg)
 		if (state & (NUD_PERMANENT | NUD_IN_TIMER))
 			goto next_elt;

-		if (n->used - n->confirmed < 0)
+		if ((long)(n->used - n->confirmed) < 0)
 			n->used = n->confirmed;

 		if (atomic_read(&n->refcnt) == 0 &&
@@ -795,17 +795,17 @@ static void neigh_proxy_process(unsigned long arg)
 	while (skb != (struct sk_buff *)&tbl->proxy_queue) {
 		struct sk_buff *back = skb;
+		long tdif = back->stamp.tv_usec - now;
 		skb = skb->next;
-		if (back->stamp.tv_usec - now <= 0) {
+		if (tdif <= 0) {
 			__skb_unlink(back, &tbl->proxy_queue);
 			if (tbl->proxy_redo)
 				tbl->proxy_redo(back);
 			else
 				kfree_skb(back, FREE_WRITE);
-		} else {
-			if (!sched_next || back->stamp.tv_usec - now < sched_next)
-				sched_next = back->stamp.tv_usec - now;
-		}
+		} else if (!sched_next || tdif < sched_next)
+			sched_next = tdif;
 	}
 	del_timer(&tbl->proxy_timer);
 	if (sched_next) {
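Aside: the (long)(n->used - n->confirmed) cast is the standard wrap-safe way to order two jiffies-style timestamps: subtract in unsigned arithmetic, then interpret the difference as signed. It stays correct as long as the two stamps are less than half the counter range apart. A self-contained sketch:

#include <stdio.h>

static int before(unsigned long a, unsigned long b)
{
    return (long)(a - b) < 0;   /* is a earlier than b? */
}

int main(void)
{
    unsigned long b = 5;                   /* counter just wrapped past 0 */
    unsigned long a = (unsigned long)-10;  /* stamped shortly before the wrap */

    printf("naive a < b:  %d\n", a < b);        /* 0: wrong, a looks huge */
    printf("wrap-safe:    %d\n", before(a, b)); /* 1: a really is earlier */
    return 0;
}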
net/core/skbuff.c
@@ -113,18 +113,18 @@ void __kfree_skb(struct sk_buff *skb)
  * to be a good idea.
  */
-struct sk_buff *alloc_skb(unsigned int size,int priority)
+struct sk_buff *alloc_skb(unsigned int size,int gfp_mask)
 {
 	struct sk_buff *skb;
 	unsigned char *bptr;
 	int len;

-	if (in_interrupt() && priority!=GFP_ATOMIC) {
+	if (in_interrupt() && (gfp_mask & __GFP_WAIT)) {
 		static int count = 0;
 		if (++count < 5) {
 			printk(KERN_ERR "alloc_skb called nonatomically "
 			       "from interrupt %p\n", __builtin_return_address(0));
-			priority = GFP_ATOMIC;
+			gfp_mask &= ~__GFP_WAIT;
 		}
 	}
@@ -144,7 +144,7 @@ struct sk_buff *alloc_skb(unsigned int size,int priority)
 	 *	Allocate some space
 	 */
-	bptr = kmalloc(size, priority);
+	bptr = kmalloc(size, gfp_mask);
 	if (bptr == NULL) {
 		atomic_inc(&net_fails);
 		return NULL;
@@ -226,7 +226,7 @@ void kfree_skbmem(struct sk_buff *skb)
  *	Duplicate an sk_buff. The new one is not owned by a socket.
 */
-struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
+struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
 {
 	struct sk_buff *n;
 	int inbuff = 0;
@@ -237,7 +237,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
 		skb->inclone = SKB_CLONE_ORIG;
 		inbuff = SKB_CLONE_INLINE;
 	} else {
-		n = kmalloc(sizeof(*n), priority);
+		n = kmalloc(sizeof(*n), gfp_mask);
 		if (!n)
 			return NULL;
 	}
@@ -263,7 +263,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, int priority)
  *	This is slower, and copies the whole data area
 */
-struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
+struct sk_buff *skb_copy(struct sk_buff *skb, int gfp_mask)
 {
 	struct sk_buff *n;
 	unsigned long offset;
@@ -272,7 +272,7 @@ struct sk_buff *skb_copy(struct sk_buff *skb, int priority)
 	 *	Allocate the copy buffer
 	 */
-	n = alloc_skb(skb->end - skb->head, priority);
+	n = alloc_skb(skb->end - skb->head, gfp_mask);
 	if (n == NULL)
 		return NULL;
net/core/sock.c
@@ -76,6 +76,7 @@
 *		Steve Whitehouse:	Added various other default routines
 *					common to several socket families.
 *		Chris Evans	:	Call suser() check last on F_SETOWN
+*		Jay Schulist	:	Added SO_ATTACH_FILTER and SO_DETACH_FILTER.
 *		Andi Kleen	:	Add sock_kmalloc()/sock_kfree_s()
 *
 * To Fix:
@@ -287,48 +288,6 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			break;

-#ifdef CONFIG_NET_SECURITY
-		/*
-		 * FIXME: make these error things that are not
-		 * available!
-		 */
-
-		case SO_SECURITY_AUTHENTICATION:
-			if (val <= IPSEC_LEVEL_DEFAULT) {
-				sk->authentication = val;
-				return 0;
-			}
-			if (net_families[sock->ops->family]->authentication)
-				sk->authentication = val;
-			else
-				return -EINVAL;
-			break;
-
-		case SO_SECURITY_ENCRYPTION_TRANSPORT:
-			if (val <= IPSEC_LEVEL_DEFAULT) {
-				sk->encryption = val;
-				return 0;
-			}
-			if (net_families[sock->ops->family]->encryption)
-				sk->encryption = val;
-			else
-				return -EINVAL;
-			break;
-
-		case SO_SECURITY_ENCRYPTION_NETWORK:
-			if (val <= IPSEC_LEVEL_DEFAULT) {
-				sk->encrypt_net = val;
-				return 0;
-			}
-			if (net_families[sock->ops->family]->encrypt_net)
-				sk->encrypt_net = val;
-			else
-				return -EINVAL;
-			break;
-#endif
-
 		case SO_BINDTODEVICE:
 			/* Bind this socket to a particular device like "eth0",
 			 * as specified in an ifreq structure. If the device
@@ -369,6 +328,33 @@ int sock_setsockopt(struct socket *sock, int level, int optname,
 			return 0;

+#ifdef CONFIG_FILTER
+		case SO_ATTACH_FILTER:
+			if (optlen < sizeof(struct sock_fprog))
+				return -EINVAL;
+
+			if (copy_from_user(&fprog, optval, sizeof(fprog))) {
+				ret = -EFAULT;
+				break;
+			}
+
+			ret = sk_attach_filter(&fprog, sk);
+			break;
+
+		case SO_DETACH_FILTER:
+			if (sk->filter) {
+				fprog.filter = sk->filter_data;
+				kfree_s(fprog.filter, (sizeof(fprog.filter) * sk->filter));
+				sk->filter_data = NULL;
+				sk->filter = 0;
+				return 0;
+			} else
+				return -EINVAL;
+			break;
+#endif
+
 		/* We implement the SO_SNDLOWAT etc to
 		   not be settable (1003.1g 5.3) */
 		default:
@@ -479,20 +465,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
 				return -EFAULT;
 			goto lenout;

-#ifdef CONFIG_NET_SECURITY
-		case SO_SECURITY_AUTHENTICATION:
-			v.val = sk->authentication;
-			break;
-
-		case SO_SECURITY_ENCRYPTION_TRANSPORT:
-			v.val = sk->encryption;
-			break;
-
-		case SO_SECURITY_ENCRYPTION_NETWORK:
-			v.val = sk->encrypt_net;
-			break;
-#endif
-
 		default:
 			return(-ENOPROTOOPT);
 	}
net/ipv4/Config.in
@@ -27,6 +27,9 @@ if [ "$CONFIG_FIREWALL" = "y" ]; then
   if [ "$CONFIG_IP_FIREWALL" = "y" ]; then
     if [ "$CONFIG_NETLINK" = "y" ]; then
       bool 'IP: firewall packet netlink device' CONFIG_IP_FIREWALL_NETLINK
+      if [ "$CONFIG_IP_FIREWALL_NETLINK" = "y" ]; then
+        define_bool CONFIG_NETLINK_DEV y
+      fi
     fi
     bool 'IP: firewall packet logging' CONFIG_IP_FIREWALL_VERBOSE
     bool 'IP: transparent proxy support' CONFIG_IP_TRANSPARENT_PROXY
net/ipv4/ip_forward.c
@@ -178,7 +178,8 @@ int ip_forward(struct sk_buff *skb)
 		{
 #endif
 			maddr = inet_select_addr(dev2, rt->rt_gateway, RT_SCOPE_UNIVERSE);
-			if (fw_res = ip_fw_masq_icmp(&skb, maddr) < 0) {
+			fw_res = ip_fw_masq_icmp(&skb, maddr);
+			if (fw_res < 0) {
 				kfree_skb(skb, FREE_READ);
 				return -1;
 			}
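Aside: the ip_forward.c fix is a classic precedence bug. In C, < binds tighter than =, so the old line assigned the boolean result of the comparison (0 or 1) to fw_res, losing the real return value. A minimal reproduction, with fake_masq_icmp() as a hypothetical stand-in:

#include <stdio.h>

static int fake_masq_icmp(void) { return -7; } /* stand-in error return */

int main(void)
{
    int fw_res;

    if (fw_res = fake_masq_icmp() < 0)  /* parses as fw_res = (ret < 0) */
        printf("old code: fw_res = %d (real error code lost)\n", fw_res);

    fw_res = fake_masq_icmp();          /* fixed: assign first... */
    if (fw_res < 0)                     /* ...then test */
        printf("new code: fw_res = %d\n", fw_res);
    return 0;
}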
net/ipv4/rarp.c
@@ -190,6 +190,8 @@ static void rarp_init_pkt (void)
 	rarp_pkt_inited = 1;
 }

+#ifdef MODULE
+
 static void rarp_end_pkt(void)
 {
 	if (!rarp_pkt_inited)
@@ -199,6 +201,7 @@ static void rarp_end_pkt(void)
 	rarp_pkt_inited = 0;
 }
+#endif

 /*
  *	Receive an arp request by the device layer.  Maybe it should be
net/ipv4/sysctl_net_ipv4.c
@@ -104,9 +104,11 @@ static
 int ipv4_sysctl_rtcache_flush(ctl_table *ctl, int write, struct file * filp,
 			      void *buffer, size_t *lenp)
 {
-	if (write)
+	if (write) {
 		rt_cache_flush(0);
-	return 0;
+		return 0;
+	} else
+		return -EINVAL;
 }

 ctl_table ipv4_table[] = {
net/ipv6/ndisc.c
@@ -1199,7 +1199,9 @@ __initfunc(void ndisc_init(struct net_proto_family *ops))
 void ndisc_cleanup(void)
 {
 #ifdef CONFIG_PROC_FS
+#ifndef CONFIG_RTNETLINK
 	proc_net_unregister(ndisc_proc_entry.low_ino);
 #endif
+#endif
 	neigh_table_clear(&nd_tbl);
 }
net/netsyms.c
@@ -287,13 +287,11 @@ EXPORT_SYMBOL(ipv4_specific);
 EXPORT_SYMBOL(tcp_simple_retransmit);
 EXPORT_SYMBOL(xrlim_allow);
-EXPORT_SYMBOL(dev_mc_delete);
 #endif

 #ifdef CONFIG_PACKET_MODULE
 EXPORT_SYMBOL(dev_set_allmulti);
 EXPORT_SYMBOL(dev_set_promiscuity);
-EXPORT_SYMBOL(dev_mc_delete);
 EXPORT_SYMBOL(sklist_remove_socket);
 EXPORT_SYMBOL(rtnl_wait);
 EXPORT_SYMBOL(rtnl_rlockct);
@@ -383,6 +381,7 @@ EXPORT_SYMBOL(tty_register_ldisc);
 EXPORT_SYMBOL(kill_fasync);

 EXPORT_SYMBOL(ip_rcv);
 EXPORT_SYMBOL(arp_rcv);
+EXPORT_SYMBOL(dev_mc_delete);
 EXPORT_SYMBOL(rtnl_lock);
 EXPORT_SYMBOL(rtnl_unlock);