linux / commit d4c6fff7

Authored Jun 23, 2004 by David S. Miller

    Merge nuts.davemloft.net:/disk1/BK/acme-2.6
    into nuts.davemloft.net:/disk1/BK/net-2.6

Parents: bc61f4ff, 334cdf67

Showing 12 changed files with 210 additions and 203 deletions.
include/net/sock.h           +33   -1
include/net/tcp.h             +4  -28
net/core/stream.c            +97   -0
net/ipv4/proc.c               +2   -2
net/ipv4/sysctl_net_ipv4.c    +6   -6
net/ipv4/tcp.c               +15 -116
net/ipv4/tcp_input.c         +17  -17
net/ipv4/tcp_ipv4.c          +24  -21
net/ipv4/tcp_minisocks.c      +1   -1
net/ipv4/tcp_output.c         +1   -1
net/ipv4/tcp_timer.c          +6   -6
net/ipv6/tcp_ipv6.c           +4   -4
include/net/sock.h

@@ -491,10 +491,11 @@ extern int sk_stream_wait_connect(struct sock *sk, long *timeo_p);
 extern int sk_stream_wait_memory(struct sock *sk, long *timeo_p);
 extern void sk_stream_wait_close(struct sock *sk, long timeo_p);
 extern int sk_stream_error(struct sock *sk, int flags, int err);
+extern void sk_stream_kill_queues(struct sock *sk);
 
 extern int sk_wait_data(struct sock *sk, long *timeo);
 
-/* IP protocol blocks we attach to sockets.
+/* Networking protocol blocks we attach to sockets.
  * socket layer -> transport layer interface
  * transport -> network interface is defined by struct inet_proto
  */

@@ -538,6 +539,21 @@ struct proto {
 	void			(*unhash)(struct sock *sk);
 	int			(*get_port)(struct sock *sk, unsigned short snum);
 
+	/* Memory pressure */
+	void			(*enter_memory_pressure)(void);
+	atomic_t		memory_allocated;	/* Current allocated memory. */
+	atomic_t		sockets_allocated;	/* Current number of sockets. */
+	/*
+	 * Pressure flag: try to collapse.
+	 * Technical note: it is used by multiple contexts non atomically.
+	 * All the sk_stream_mem_schedule() is of this nature: accounting
+	 * is strict, actions are advisory and have some latency.
+	 */
+	int			memory_pressure;
+	int			sysctl_mem[3];
+	int			sysctl_wmem[3];
+	int			sysctl_rmem[3];
+
 	char			name[32];
 
 	struct {

@@ -628,6 +644,22 @@ static inline struct inode *SOCK_INODE(struct socket *socket)
 	return &container_of(socket, struct socket_alloc, socket)->vfs_inode;
 }
 
+extern void __sk_stream_mem_reclaim(struct sock *sk);
+extern int sk_stream_mem_schedule(struct sock *sk, int size, int kind);
+
+#define SK_STREAM_MEM_QUANTUM ((int)PAGE_SIZE)
+
+static inline int sk_stream_pages(int amt)
+{
+	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
+}
+
+static inline void sk_stream_mem_reclaim(struct sock *sk)
+{
+	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM)
+		__sk_stream_mem_reclaim(sk);
+}
+
 /* Used by processes to "lock" a socket state, so that
  * interrupts and bottom half handlers won't change it
  * from under us. It essentially blocks any incoming
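The new sk_stream_pages() helper rounds a byte count up to whole accounting quanta, and the `&= SK_STREAM_MEM_QUANTUM - 1` mask in __sk_stream_mem_reclaim() keeps only the sub-quantum remainder prepaid on the socket. A minimal userspace sketch of that rounding, assuming a 4096-byte PAGE_SIZE (the quantum is redefined locally; nothing else below is kernel code):

#include <assert.h>
#include <stdio.h>

/* Local stand-in for the kernel's ((int)PAGE_SIZE); 4096 is an assumption. */
#define SK_STREAM_MEM_QUANTUM 4096

/* Same arithmetic as the new sk_stream_pages(): round up to whole quanta. */
static int sk_stream_pages(int amt)
{
	return (amt + SK_STREAM_MEM_QUANTUM - 1) / SK_STREAM_MEM_QUANTUM;
}

int main(void)
{
	assert(sk_stream_pages(1) == 1);	/* a partial quantum counts as one */
	assert(sk_stream_pages(4096) == 1);	/* an exact multiple stays put */
	assert(sk_stream_pages(4097) == 2);	/* one byte over spills into a second */
	assert((4097 & (SK_STREAM_MEM_QUANTUM - 1)) == 1);	/* the mask keeps the remainder */
	printf("quantum rounding ok\n");
	return 0;
}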
include/net/tcp.h

@@ -594,9 +594,6 @@ extern int sysctl_tcp_fack;
 extern int sysctl_tcp_reordering;
 extern int sysctl_tcp_ecn;
 extern int sysctl_tcp_dsack;
-extern int sysctl_tcp_mem[3];
-extern int sysctl_tcp_wmem[3];
-extern int sysctl_tcp_rmem[3];
 extern int sysctl_tcp_app_win;
 extern int sysctl_tcp_adv_win_scale;
 extern int sysctl_tcp_tw_reuse;

@@ -614,10 +611,6 @@ extern int sysctl_tcp_bic_low_window;
 extern int sysctl_tcp_default_win_scale;
 extern int sysctl_tcp_moderate_rcvbuf;
 
-extern atomic_t tcp_memory_allocated;
-extern atomic_t tcp_sockets_allocated;
-extern int tcp_memory_pressure;
-
 struct open_request;
 
 struct or_calltable {

@@ -1867,24 +1860,7 @@ static __inline__ void tcp_openreq_init(struct open_request *req,
 	req->rmt_port = skb->h.th->source;
 }
 
-#define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
-
-extern void __tcp_mem_reclaim(struct sock *sk);
-extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
-
-static inline void tcp_mem_reclaim(struct sock *sk)
-{
-	if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM)
-		__tcp_mem_reclaim(sk);
-}
-
-static inline void tcp_enter_memory_pressure(void)
-{
-	if (!tcp_memory_pressure) {
-		NET_INC_STATS(TCPMemoryPressures);
-		tcp_memory_pressure = 1;
-	}
-}
+extern void tcp_enter_memory_pressure(void);
 
 static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
 {

@@ -1893,7 +1869,7 @@ static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem,
 	if (skb) {
 		skb->truesize += mem;
 		if (sk->sk_forward_alloc >= (int)skb->truesize ||
-		    tcp_mem_schedule(sk, skb->truesize, 0)) {
+		    sk_stream_mem_schedule(sk, skb->truesize, 0)) {
 			skb_reserve(skb, MAX_TCP_HEADER);
 			return skb;
 		}

@@ -1913,7 +1889,7 @@ static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
 static inline struct page *tcp_alloc_page(struct sock *sk)
 {
 	if (sk->sk_forward_alloc >= (int)PAGE_SIZE ||
-	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
+	    sk_stream_mem_schedule(sk, PAGE_SIZE, 0)) {
 		struct page *page = alloc_pages(sk->sk_allocation, 0);
 		if (page)
 			return page;

@@ -1929,7 +1905,7 @@ static inline void tcp_writequeue_purge(struct sock *sk)
 	while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
 		sk_stream_free_skb(sk, skb);
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
 }
 
 extern void tcp_listen_wlock(void);
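Both tcp_alloc_pskb() and tcp_alloc_page() keep the same fast path after the rename: spend prepaid sk_forward_alloc when it already covers the request, and only fall back to the scheduler otherwise. A toy model of that check-then-schedule pattern (all names here and the always-grant scheduler are invented for illustration; the kernel charges skb->truesize against forward_alloc through separate helpers):

#include <stdio.h>

#define QUANTUM 4096			/* stands in for SK_STREAM_MEM_QUANTUM */

static int forward_alloc;		/* models sk->sk_forward_alloc (prepaid bytes) */
static int pool_pages;			/* models the protocol-wide memory_allocated */

/* Toy stand-in for sk_stream_mem_schedule(): always grants here, in whole
 * quanta; the real function can refuse under memory pressure. */
static int mem_schedule(int size)
{
	int pages = (size + QUANTUM - 1) / QUANTUM;

	forward_alloc += pages * QUANTUM;
	pool_pages += pages;
	return 1;
}

/* The fast path shared by tcp_alloc_pskb() and tcp_alloc_page(): prepaid
 * forward_alloc short-circuits the scheduler call entirely. */
static int charge(int size)
{
	if (forward_alloc >= size || mem_schedule(size)) {
		forward_alloc -= size;
		return 1;
	}
	return 0;
}

int main(void)
{
	charge(1000);	/* schedules one quantum, leaves 3096 bytes prepaid */
	charge(1000);	/* satisfied from forward_alloc; no scheduler call */
	printf("forward_alloc=%d pool_pages=%d\n", forward_alloc, pool_pages);
	return 0;
}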
net/core/stream.c

@@ -188,3 +188,100 @@ int sk_stream_error(struct sock *sk, int flags, int err)
 }
 
 EXPORT_SYMBOL(sk_stream_error);
+
+void __sk_stream_mem_reclaim(struct sock *sk)
+{
+	if (sk->sk_forward_alloc >= SK_STREAM_MEM_QUANTUM) {
+		atomic_sub(sk->sk_forward_alloc / SK_STREAM_MEM_QUANTUM,
+			   &sk->sk_prot->memory_allocated);
+		sk->sk_forward_alloc &= SK_STREAM_MEM_QUANTUM - 1;
+		if (sk->sk_prot->memory_pressure &&
+		    (atomic_read(&sk->sk_prot->memory_allocated) <
+		     sk->sk_prot->sysctl_mem[0]))
+			sk->sk_prot->memory_pressure = 0;
+	}
+}
+
+EXPORT_SYMBOL(__sk_stream_mem_reclaim);
+
+int sk_stream_mem_schedule(struct sock *sk, int size, int kind)
+{
+	int amt = sk_stream_pages(size);
+
+	sk->sk_forward_alloc += amt * SK_STREAM_MEM_QUANTUM;
+	atomic_add(amt, &sk->sk_prot->memory_allocated);
+
+	/* Under limit. */
+	if (atomic_read(&sk->sk_prot->memory_allocated) < sk->sk_prot->sysctl_mem[0]) {
+		if (sk->sk_prot->memory_pressure)
+			sk->sk_prot->memory_pressure = 0;
+		return 1;
+	}
+
+	/* Over hard limit. */
+	if (atomic_read(&sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[2]) {
+		sk->sk_prot->enter_memory_pressure();
+		goto suppress_allocation;
+	}
+
+	/* Under pressure. */
+	if (atomic_read(&sk->sk_prot->memory_allocated) > sk->sk_prot->sysctl_mem[1])
+		sk->sk_prot->enter_memory_pressure();
+
+	if (kind) {
+		if (atomic_read(&sk->sk_rmem_alloc) < sk->sk_prot->sysctl_rmem[0])
+			return 1;
+	} else if (sk->sk_wmem_queued < sk->sk_prot->sysctl_wmem[0])
+		return 1;
+
+	if (!sk->sk_prot->memory_pressure ||
+	    sk->sk_prot->sysctl_mem[2] > atomic_read(&sk->sk_prot->sockets_allocated) *
+				sk_stream_pages(sk->sk_wmem_queued +
+						atomic_read(&sk->sk_rmem_alloc) +
+						sk->sk_forward_alloc))
+		return 1;
+
+suppress_allocation:
+
+	if (!kind) {
+		sk_stream_moderate_sndbuf(sk);
+
+		/* Fail only if socket is _under_ its sndbuf.
+		 * In this case we cannot block, so that we have to fail.
+		 */
+		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
+			return 1;
+	}
+
+	/* Alas. Undo changes. */
+	sk->sk_forward_alloc -= amt * SK_STREAM_MEM_QUANTUM;
+	atomic_sub(amt, &sk->sk_prot->memory_allocated);
+	return 0;
+}
+
+EXPORT_SYMBOL(sk_stream_mem_schedule);
+
+void sk_stream_kill_queues(struct sock *sk)
+{
+	/* First the read buffer. */
+	__skb_queue_purge(&sk->sk_receive_queue);
+
+	/* Next, the error queue. */
+	__skb_queue_purge(&sk->sk_error_queue);
+
+	/* Next, the write queue. */
+	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
+
+	/* Account for returned memory. */
+	sk_stream_mem_reclaim(sk);
+
+	BUG_TRAP(!sk->sk_wmem_queued);
+	BUG_TRAP(!sk->sk_forward_alloc);
+
+	/* It is _impossible_ for the backlog to contain anything
+	 * when we get here.  All user references to this socket
+	 * have gone away, only the net layer knows can touch it.
+	 */
+}
+
+EXPORT_SYMBOL(sk_stream_kill_queues);
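The three-zone logic of sk_stream_mem_schedule() above is easiest to follow stripped of the per-socket details. A minimal userspace model, with field names mirroring the new struct proto members (the thresholds and the name tcp_model are invented for the example, and the rmem/wmem escape hatches and the under-sndbuf fallback on the suppress path are omitted):

#include <stdio.h>

/* Userspace model of the accounting that stream.c now owns generically. */
struct proto_model {
	void	(*enter_memory_pressure)(void);
	int	memory_allocated;	/* atomic_t in the kernel */
	int	memory_pressure;
	int	sysctl_mem[3];		/* under limit / pressure / hard limit */
};

static void model_enter_pressure(void);

static struct proto_model tcp_model = {
	.enter_memory_pressure	= model_enter_pressure,
	.sysctl_mem		= { 4, 6, 8 },
};

/* Plays the role of tcp_enter_memory_pressure(): the callback flips the
 * flag on its own proto, so generic code never names TCP. */
static void model_enter_pressure(void)
{
	if (!tcp_model.memory_pressure) {
		printf("-> entering memory pressure\n");
		tcp_model.memory_pressure = 1;
	}
}

/* Zone logic of sk_stream_mem_schedule(), reduced to the pool checks. */
static int schedule_quanta(struct proto_model *p, int amt)
{
	p->memory_allocated += amt;

	if (p->memory_allocated < p->sysctl_mem[0]) {	/* under limit */
		p->memory_pressure = 0;
		return 1;
	}
	if (p->memory_allocated > p->sysctl_mem[2]) {	/* over hard limit */
		p->enter_memory_pressure();
		p->memory_allocated -= amt;		/* undo, as the real code does */
		return 0;
	}
	if (p->memory_allocated > p->sysctl_mem[1])	/* under pressure */
		p->enter_memory_pressure();
	return 1;
}

int main(void)
{
	for (int i = 0; i < 5; i++) {
		int granted = schedule_quanta(&tcp_model, 2);
		printf("schedule(2) = %d, allocated = %d\n",
		       granted, tcp_model.memory_allocated);
	}
	return 0;
}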
net/ipv4/proc.c

@@ -65,8 +65,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
 	socket_seq_show(seq);
 	seq_printf(seq, "TCP: inuse %d orphan %d tw %d alloc %d mem %d\n",
 		   fold_prot_inuse(&tcp_prot), atomic_read(&tcp_orphan_count),
-		   tcp_tw_count, atomic_read(&tcp_sockets_allocated),
-		   atomic_read(&tcp_memory_allocated));
+		   tcp_tw_count, atomic_read(&tcp_prot.sockets_allocated),
+		   atomic_read(&tcp_prot.memory_allocated));
 	seq_printf(seq, "UDP: inuse %d\n", fold_prot_inuse(&udp_prot));
 	seq_printf(seq, "RAW: inuse %d\n", fold_prot_inuse(&raw_prot));
 	seq_printf(seq, "FRAG: inuse %d memory %d\n", ip_frag_nqueues,
net/ipv4/sysctl_net_ipv4.c

@@ -508,24 +508,24 @@ ctl_table ipv4_table[] = {
 	{
 		.ctl_name	= NET_TCP_MEM,
 		.procname	= "tcp_mem",
-		.data		= &sysctl_tcp_mem,
-		.maxlen		= sizeof(sysctl_tcp_mem),
+		.data		= &tcp_prot.sysctl_mem,
+		.maxlen		= sizeof(tcp_prot.sysctl_mem),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
 	{
 		.ctl_name	= NET_TCP_WMEM,
 		.procname	= "tcp_wmem",
-		.data		= &sysctl_tcp_wmem,
-		.maxlen		= sizeof(sysctl_tcp_wmem),
+		.data		= &tcp_prot.sysctl_wmem,
+		.maxlen		= sizeof(tcp_prot.sysctl_wmem),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
 	{
 		.ctl_name	= NET_TCP_RMEM,
 		.procname	= "tcp_rmem",
-		.data		= &sysctl_tcp_rmem,
-		.maxlen		= sizeof(sysctl_tcp_rmem),
+		.data		= &tcp_prot.sysctl_rmem,
+		.maxlen		= sizeof(tcp_prot.sysctl_rmem),
 		.mode		= 0644,
 		.proc_handler	= &proc_dointvec
 	},
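Note the user-visible tunables keep their names and proc paths; only the backing storage moves from the sysctl_tcp_* arrays into tcp_prot. A quick check from userspace, assuming a Linux host exposing /proc/sys/net/ipv4/tcp_mem:

#include <stdio.h>

/* Read the three tcp_mem thresholds (in pages): min, pressure, max. */
int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_mem", "r");
	long min, pressure, max;

	if (!f) {
		perror("tcp_mem");
		return 1;
	}
	if (fscanf(f, "%ld %ld %ld", &min, &pressure, &max) != 3) {
		fprintf(stderr, "unexpected format\n");
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("tcp_mem: min %ld, pressure %ld, max %ld\n", min, pressure, max);
	return 0;
}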
net/ipv4/tcp.c

@@ -278,85 +278,11 @@ atomic_t tcp_orphan_count = ATOMIC_INIT(0);
 int sysctl_tcp_default_win_scale = 7;
 
-int sysctl_tcp_mem[3];
-int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
-int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
-
-atomic_t tcp_memory_allocated;	/* Current allocated memory. */
-atomic_t tcp_sockets_allocated;	/* Current number of TCP sockets. */
-
-/* Pressure flag: try to collapse.
- * Technical note: it is used by multiple contexts non atomically.
- * All the tcp_mem_schedule() is of this nature: accounting
- * is strict, actions are advisory and have some latency. */
-int tcp_memory_pressure;
-
-#define TCP_PAGES(amt) (((amt) + TCP_MEM_QUANTUM - 1) / TCP_MEM_QUANTUM)
-
-int tcp_mem_schedule(struct sock *sk, int size, int kind)
+void tcp_enter_memory_pressure(void)
 {
-	int amt = TCP_PAGES(size);
-
-	sk->sk_forward_alloc += amt * TCP_MEM_QUANTUM;
-	atomic_add(amt, &tcp_memory_allocated);
-
-	/* Under limit. */
-	if (atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
-		if (tcp_memory_pressure)
-			tcp_memory_pressure = 0;
-		return 1;
-	}
-
-	/* Over hard limit. */
-	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]) {
-		tcp_enter_memory_pressure();
-		goto suppress_allocation;
-	}
-
-	/* Under pressure. */
-	if (atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[1])
-		tcp_enter_memory_pressure();
-
-	if (kind) {
-		if (atomic_read(&sk->sk_rmem_alloc) < sysctl_tcp_rmem[0])
-			return 1;
-	} else if (sk->sk_wmem_queued < sysctl_tcp_wmem[0])
-		return 1;
-
-	if (!tcp_memory_pressure ||
-	    sysctl_tcp_mem[2] > atomic_read(&tcp_sockets_allocated) *
-				TCP_PAGES(sk->sk_wmem_queued +
-					  atomic_read(&sk->sk_rmem_alloc) +
-					  sk->sk_forward_alloc))
-		return 1;
-
-suppress_allocation:
-
-	if (!kind) {
-		sk_stream_moderate_sndbuf(sk);
-
-		/* Fail only if socket is _under_ its sndbuf.
-		 * In this case we cannot block, so that we have to fail.
-		 */
-		if (sk->sk_wmem_queued + size >= sk->sk_sndbuf)
-			return 1;
-	}
-
-	/* Alas. Undo changes. */
-	sk->sk_forward_alloc -= amt * TCP_MEM_QUANTUM;
-	atomic_sub(amt, &tcp_memory_allocated);
-	return 0;
-}
-
-void __tcp_mem_reclaim(struct sock *sk)
-{
-	if (sk->sk_forward_alloc >= TCP_MEM_QUANTUM) {
-		atomic_sub(sk->sk_forward_alloc / TCP_MEM_QUANTUM,
-			   &tcp_memory_allocated);
-		sk->sk_forward_alloc &= TCP_MEM_QUANTUM - 1;
-		if (tcp_memory_pressure &&
-		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
-			tcp_memory_pressure = 0;
+	if (!tcp_prot.memory_pressure) {
+		NET_INC_STATS(TCPMemoryPressures);
+		tcp_prot.memory_pressure = 1;
 	}
 }

@@ -1624,29 +1550,6 @@ void tcp_shutdown(struct sock *sk, int how)
 	}
 }
 
-static __inline__ void tcp_kill_sk_queues(struct sock *sk)
-{
-	/* First the read buffer. */
-	__skb_queue_purge(&sk->sk_receive_queue);
-
-	/* Next, the error queue. */
-	__skb_queue_purge(&sk->sk_error_queue);
-
-	/* Next, the write queue. */
-	BUG_TRAP(skb_queue_empty(&sk->sk_write_queue));
-
-	/* Account for returned memory. */
-	tcp_mem_reclaim(sk);
-
-	BUG_TRAP(!sk->sk_wmem_queued);
-	BUG_TRAP(!sk->sk_forward_alloc);
-
-	/* It is _impossible_ for the backlog to contain anything
-	 * when we get here.  All user references to this socket
-	 * have gone away, only the net layer knows can touch it.
-	 */
-}
-
 /*
  * At this point, there should be no process reference to this
  * socket, and thus no user references at all.  Therefore we

@@ -1674,7 +1577,7 @@ void tcp_destroy_sock(struct sock *sk)
 	sk->sk_prot->destroy(sk);
 
-	tcp_kill_sk_queues(sk);
+	sk_stream_kill_queues(sk);
 
 	xfrm_sk_free_policy(sk);

@@ -1717,7 +1620,7 @@ void tcp_close(struct sock *sk, long timeout)
 		__kfree_skb(skb);
 	}
 
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
 
 	/* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
 	 * 3.10, we send a RST here because data was lost.  To

@@ -1816,10 +1719,10 @@ void tcp_close(struct sock *sk, long timeout)
 		}
 	}
 	if (sk->sk_state != TCP_CLOSE) {
-		tcp_mem_reclaim(sk);
+		sk_stream_mem_reclaim(sk);
 		if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
 		    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-		     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
+		     atomic_read(&tcp_prot.memory_allocated) > tcp_prot.sysctl_mem[2])) {
 			if (net_ratelimit())
 				printk(KERN_INFO "TCP: too many of orphaned "
 				       "sockets\n");

@@ -2366,15 +2269,15 @@ void __init tcp_init(void)
 	}
 	tcp_port_rover = sysctl_local_port_range[0] - 1;
 
-	sysctl_tcp_mem[0] =  768 << order;
-	sysctl_tcp_mem[1] = 1024 << order;
-	sysctl_tcp_mem[2] = 1536 << order;
+	tcp_prot.sysctl_mem[0] =  768 << order;
+	tcp_prot.sysctl_mem[1] = 1024 << order;
+	tcp_prot.sysctl_mem[2] = 1536 << order;
 
 	if (order < 3) {
-		sysctl_tcp_wmem[2] = 64 * 1024;
-		sysctl_tcp_rmem[0] = PAGE_SIZE;
-		sysctl_tcp_rmem[1] = 43689;
-		sysctl_tcp_rmem[2] = 2 * 43689;
+		tcp_prot.sysctl_wmem[2] = 64 * 1024;
+		tcp_prot.sysctl_rmem[0] = PAGE_SIZE;
+		tcp_prot.sysctl_rmem[1] = 43689;
+		tcp_prot.sysctl_rmem[2] = 2 * 43689;
 	}
 
 	printk(KERN_INFO "TCP: Hash tables configured "

@@ -2384,9 +2287,6 @@ void __init tcp_init(void)
 	tcpdiag_init();
 }
 
-EXPORT_SYMBOL(__tcp_mem_reclaim);
-EXPORT_SYMBOL(sysctl_tcp_rmem);
-EXPORT_SYMBOL(sysctl_tcp_wmem);
 EXPORT_SYMBOL(tcp_accept);
 EXPORT_SYMBOL(tcp_close);
 EXPORT_SYMBOL(tcp_close_state);

@@ -2402,6 +2302,5 @@ EXPORT_SYMBOL(tcp_sendmsg);
 EXPORT_SYMBOL(tcp_sendpage);
 EXPORT_SYMBOL(tcp_setsockopt);
 EXPORT_SYMBOL(tcp_shutdown);
-EXPORT_SYMBOL(tcp_sockets_allocated);
 EXPORT_SYMBOL(tcp_statistics);
 EXPORT_SYMBOL(tcp_timewait_cachep);
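tcp_init() still derives the tcp_mem defaults from the hash-allocation order; only the destination of the values changes. For illustration, the shifts with order = 4 (a made-up value; the kernel computes it from the ehash table size):

#include <stdio.h>

int main(void)
{
	int order = 4;	/* example only; tcp_init() derives this at boot */
	int mem[3];

	mem[0] =  768 << order;	/* below: no accounting pressure */
	mem[1] = 1024 << order;	/* above: enter memory pressure */
	mem[2] = 1536 << order;	/* hard limit */
	printf("tcp_mem defaults for order %d: %d %d %d\n",
	       order, mem[0], mem[1], mem[2]);
	return 0;
}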
net/ipv4/tcp_input.c

@@ -207,7 +207,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
 		     sizeof(struct sk_buff);
 
 	if (sk->sk_sndbuf < 3 * sndmem)
-		sk->sk_sndbuf = min(3 * sndmem, sysctl_tcp_wmem[2]);
+		sk->sk_sndbuf = min(3 * sndmem, tcp_prot.sysctl_wmem[2]);
 }
 
 /* 2. Tuning advertised window (window_clamp, rcv_ssthresh)

@@ -259,7 +259,7 @@ tcp_grow_window(struct sock *sk, struct tcp_opt *tp, struct sk_buff *skb)
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !tcp_memory_pressure) {
+	    !tcp_prot.memory_pressure) {
 		int incr;
 
 		/* Check #2. Increase window, if skb with such overhead

@@ -291,7 +291,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 	while (tcp_win_from_space(rcvmem) < tp->advmss)
 		rcvmem += 128;
 	if (sk->sk_rcvbuf < 4 * rcvmem)
-		sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
+		sk->sk_rcvbuf = min(4 * rcvmem, tcp_prot.sysctl_rmem[2]);
 }
 
 /* 4. Try to fixup all. It is made iimediately after connection enters

@@ -347,12 +347,12 @@ static void tcp_clamp_window(struct sock *sk, struct tcp_opt *tp)
 	 * do not clamp window. Try to expand rcvbuf instead.
 	 */
 	if (ofo_win) {
-		if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
+		if (sk->sk_rcvbuf < tcp_prot.sysctl_rmem[2] &&
 		    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-		    !tcp_memory_pressure &&
-		    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
+		    !tcp_prot.memory_pressure &&
+		    atomic_read(&tcp_prot.memory_allocated) < tcp_prot.sysctl_mem[0])
 			sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
-					    sysctl_tcp_rmem[2]);
+					    tcp_prot.sysctl_rmem[2]);
 	}
 	if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
 		app_win += ofo_win;

@@ -477,7 +477,7 @@ void tcp_rcv_space_adjust(struct sock *sk)
 		while (tcp_win_from_space(rcvmem) < tp->advmss)
 			rcvmem += 128;
 		space *= rcvmem;
-		space = min(space, sysctl_tcp_rmem[2]);
+		space = min(space, tcp_prot.sysctl_rmem[2]);
 		if (space > sk->sk_rcvbuf) {
 			sk->sk_rcvbuf = space;

@@ -535,7 +535,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_opt *tp, struct sk_b
 			 * restart window, so that we send ACKs quickly.
 			 */
 			tcp_incr_quickack(tp);
-			tcp_mem_reclaim(sk);
+			sk_stream_mem_reclaim(sk);
 		}
 	}
 	tp->ack.lrcvtime = now;

@@ -3166,7 +3166,7 @@ static void tcp_fin(struct sk_buff *skb, struct sock *sk, struct tcphdr *th)
 		__skb_queue_purge(&tp->out_of_order_queue);
 		if (tp->sack_ok)
 			tcp_sack_reset(tp);
-		tcp_mem_reclaim(sk);
+		sk_stream_mem_reclaim(sk);
 	}
 
 	if (!sock_flag(sk, SOCK_DEAD)) {
 		sk->sk_state_change(sk);

@@ -3401,7 +3401,7 @@ static void tcp_ofo_queue(struct sock *sk)
 static inline int tcp_rmem_schedule(struct sock *sk, struct sk_buff *skb)
 {
 	return (int)skb->truesize <= sk->sk_forward_alloc ||
-		tcp_mem_schedule(sk, skb->truesize, 1);
+		sk_stream_mem_schedule(sk, skb->truesize, 1);
 }
 
 static int tcp_prune_queue(struct sock *sk);

@@ -3768,14 +3768,14 @@ static int tcp_prune_queue(struct sock *sk)
 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk, tp);
-	else if (tcp_memory_pressure)
+	else if (tcp_prot.memory_pressure)
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);
 
 	tcp_collapse_ofo_queue(sk);
 	tcp_collapse(sk, sk->sk_receive_queue.next,
 		     (struct sk_buff *)&sk->sk_receive_queue,
 		     tp->copied_seq, tp->rcv_nxt);
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)
 		return 0;

@@ -3796,7 +3796,7 @@ static int tcp_prune_queue(struct sock *sk)
 		 */
 		if (tp->sack_ok)
 			tcp_sack_reset(tp);
-		tcp_mem_reclaim(sk);
+		sk_stream_mem_reclaim(sk);
 	}
 
 	if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf)

@@ -3848,15 +3848,15 @@ static void tcp_new_space(struct sock *sk)
 	if (tp->packets_out < tp->snd_cwnd &&
 	    !(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
-	    !tcp_memory_pressure &&
-	    atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+	    !tcp_prot.memory_pressure &&
+	    atomic_read(&tcp_prot.memory_allocated) < tcp_prot.sysctl_mem[0]) {
 		int sndmem = max_t(u32, tp->mss_clamp, tp->mss_cache) +
 			     MAX_TCP_HEADER + 16 + sizeof(struct sk_buff),
 		    demanded = max_t(unsigned int, tp->snd_cwnd,
 				     tp->reordering + 1);
 		sndmem *= 2 * demanded;
 		if (sndmem > sk->sk_sndbuf)
-			sk->sk_sndbuf = min(sndmem, sysctl_tcp_wmem[2]);
+			sk->sk_sndbuf = min(sndmem, tcp_prot.sysctl_wmem[2]);
 		tp->snd_cwnd_stamp = tcp_time_stamp;
 	}
net/ipv4/tcp_ipv4.c

@@ -2086,10 +2086,10 @@ static int tcp_v4_init_sock(struct sock *sk)
 	tp->af_specific = &ipv4_specific;
 
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+	sk->sk_sndbuf = tcp_prot.sysctl_wmem[1];
+	sk->sk_rcvbuf = tcp_prot.sysctl_rmem[1];
 
-	atomic_inc(&tcp_sockets_allocated);
+	atomic_inc(&tcp_prot.sockets_allocated);
 
 	return 0;
 }

@@ -2113,7 +2113,7 @@ static int tcp_v4_destroy_sock(struct sock *sk)
 	if (tp->bind_hash)
 		tcp_put_port(sk);
 
-	atomic_dec(&tcp_sockets_allocated);
+	atomic_dec(&tcp_prot.sockets_allocated);
 
 	return 0;
 }

@@ -2599,6 +2599,9 @@ struct proto tcp_prot = {
 	.hash			= tcp_v4_hash,
 	.unhash			= tcp_unhash,
 	.get_port		= tcp_v4_get_port,
+	.enter_memory_pressure	= tcp_enter_memory_pressure,
+	.sysctl_wmem		= { 4 * 1024, 16 * 1024, 128 * 1024 },
+	.sysctl_rmem		= { 4 * 1024, 87380, 87380 * 2 },
 };
net/ipv4/tcp_minisocks.c

@@ -801,7 +801,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
 #ifdef INET_REFCNT_DEBUG
 	atomic_inc(&inet_sock_nr);
 #endif
-	atomic_inc(&tcp_sockets_allocated);
+	atomic_inc(&tcp_prot.sockets_allocated);
 
 	if (sock_flag(newsk, SOCK_KEEPOPEN))
 		tcp_reset_keepalive_timer(newsk,
net/ipv4/tcp_output.c

@@ -672,7 +672,7 @@ u32 __tcp_select_window(struct sock *sk)
 	if (free_space < full_space/2) {
 		tp->ack.quick = 0;
 
-		if (tcp_memory_pressure)
+		if (tcp_prot.memory_pressure)
 			tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
 
 		if (free_space < mss)
net/ipv4/tcp_timer.c

@@ -113,7 +113,7 @@ static int tcp_out_of_resources(struct sock *sk, int do_reset)
 	if (orphans >= sysctl_tcp_max_orphans ||
 	    (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-	     atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
+	     atomic_read(&tcp_prot.memory_allocated) > tcp_prot.sysctl_mem[2])) {
 		if (net_ratelimit())
 			printk(KERN_INFO "Out of socket memory\n");

@@ -217,7 +217,7 @@ static void tcp_delack_timer(unsigned long data)
 		goto out_unlock;
 	}
 
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
 
 	if (sk->sk_state == TCP_CLOSE || !(tp->ack.pending & TCP_ACK_TIMER))
 		goto out;

@@ -257,8 +257,8 @@ static void tcp_delack_timer(unsigned long data)
 	TCP_CHECK_TIMER(sk);
 out:
-	if (tcp_memory_pressure)
-		tcp_mem_reclaim(sk);
+	if (tcp_prot.memory_pressure)
+		sk_stream_mem_reclaim(sk);
 out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);

@@ -448,7 +448,7 @@ static void tcp_write_timer(unsigned long data)
 	TCP_CHECK_TIMER(sk);
 out:
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
out_unlock:
 	bh_unlock_sock(sk);
 	sock_put(sk);

@@ -633,7 +633,7 @@ static void tcp_keepalive_timer (unsigned long data)
 	}
 
 	TCP_CHECK_TIMER(sk);
-	tcp_mem_reclaim(sk);
+	sk_stream_mem_reclaim(sk);
 
 resched:
 	tcp_reset_keepalive_timer(sk, elapsed);
net/ipv6/tcp_ipv6.c

@@ -1882,10 +1882,10 @@ static int tcp_v6_init_sock(struct sock *sk)
 	sk->sk_write_space = sk_stream_write_space;
 	sk->sk_use_write_queue = 1;
 
-	sk->sk_sndbuf = sysctl_tcp_wmem[1];
-	sk->sk_rcvbuf = sysctl_tcp_rmem[1];
+	sk->sk_sndbuf = tcp_prot.sysctl_wmem[1];
+	sk->sk_rcvbuf = tcp_prot.sysctl_rmem[1];
 
-	atomic_inc(&tcp_sockets_allocated);
+	atomic_inc(&tcp_prot.sockets_allocated);
 
 	return 0;
 }

@@ -1909,7 +1909,7 @@ static int tcp_v6_destroy_sock(struct sock *sk)
 	if (tcp_sk(sk)->bind_hash)
 		tcp_put_port(sk);
 
-	atomic_dec(&tcp_sockets_allocated);
+	atomic_dec(&tcp_prot.sockets_allocated);
 
 	return inet6_destroy_sock(sk);
 }