Commit 42e5b5c8 authored by David S. Miller

Merge bk://kernel.bkbits.net/acme/net-2.6

into nuts.davemloft.net:/disk1/BK/net-2.6
parents bfb1e4e5 5d18286c
@@ -413,6 +413,21 @@ static inline int sk_acceptq_is_full(struct sock *sk)
         return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
 }
 
+/*
+ * Compute minimal free write space needed to queue new packets.
+ */
+static inline int sk_stream_min_wspace(struct sock *sk)
+{
+        return sk->sk_wmem_queued / 2;
+}
+
+static inline int sk_stream_wspace(struct sock *sk)
+{
+        return sk->sk_sndbuf - sk->sk_wmem_queued;
+}
+
+extern void sk_stream_write_space(struct sock *sk);
+
 /* The per-socket spinlock must be held here. */
 #define sk_add_backlog(__sk, __skb)                             \
 do {    if (!(__sk)->sk_backlog.tail) {                         \
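
The two new helpers above encode the write-space heuristic that the TCP-specific tcp_wspace()/tcp_min_write_space() (removed further down) previously hard-coded: a stream socket has usable write space once the free part of its send buffer (sk_sndbuf minus sk_wmem_queued) covers at least half of the memory already queued. As a minimal illustration (not part of this diff; example_writer_may_proceed is a hypothetical name), a caller would combine them like this:

        /* Illustrative sketch only, not part of this commit. */
        static int example_writer_may_proceed(struct sock *sk)
        {
                /* Writable once free space covers half of what is queued,
                 * so writers are not woken for every freed byte.
                 */
                return sk_stream_wspace(sk) >= sk_stream_min_wspace(sk);
        }

With illustrative numbers: sk_sndbuf = 16384 and sk_wmem_queued = 12000 give sk_stream_wspace() = 4384 and sk_stream_min_wspace() = 6000, so the socket is not yet reported writable; once queued memory drains to 10000, wspace (6384) exceeds min_wspace (5000) and writers can be woken.
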
@@ -902,6 +917,11 @@ sk_dst_check(struct sock *sk, u32 cookie)
         return dst;
 }
 
+static inline void sk_charge_skb(struct sock *sk, struct sk_buff *skb)
+{
+        sk->sk_wmem_queued += skb->truesize;
+        sk->sk_forward_alloc -= skb->truesize;
+}
+
 /*
  * Queue a received datagram if it will fit. Stream and sequenced
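
sk_charge_skb() is the generic form of tcp_charge_skb(), which this commit removes below: when an skb is placed on the write queue, its truesize is added to sk_wmem_queued and drawn from the socket's forward-allocation quota. The inverse bookkeeping happens when a charged skb leaves the queue, as in tcp_free_skb(); a minimal sketch of that inverse (illustrative only, the helper name is hypothetical):

        /* Illustrative sketch only, not part of this commit. */
        static inline void example_uncharge_skb(struct sock *sk, struct sk_buff *skb)
        {
                sk->sk_wmem_queued   -= skb->truesize;  /* no longer queued for transmit */
                sk->sk_forward_alloc += skb->truesize;  /* return the memory quota */
        }
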
@@ -870,7 +870,6 @@ extern void tcp_close(struct sock *sk,
                                         long timeout);
 extern struct sock *           tcp_accept(struct sock *sk, int flags, int *err);
 extern unsigned int            tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
-extern void                    tcp_write_space(struct sock *sk);
 extern int                     tcp_getsockopt(struct sock *sk, int level,
                                         int optname,
@@ -1195,21 +1194,6 @@ struct tcp_skb_cb {
 
 #include <net/tcp_ecn.h>
 
-/*
- * Compute minimal free write space needed to queue new packets.
- */
-static inline int tcp_min_write_space(struct sock *sk)
-{
-        return sk->sk_wmem_queued / 2;
-}
-
-static inline int tcp_wspace(struct sock *sk)
-{
-        return sk->sk_sndbuf - sk->sk_wmem_queued;
-}
-
 /* This determines how many packets are "in the network" to the best
  * of our knowledge.  In many cases it is conservative, but where
  * detailed information is available from the receiver (via SACK
@@ -1899,12 +1883,6 @@ static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
         __kfree_skb(skb);
 }
 
-static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
-{
-        sk->sk_wmem_queued += skb->truesize;
-        sk->sk_forward_alloc -= skb->truesize;
-}
-
 extern void __tcp_mem_reclaim(struct sock *sk);
 extern int tcp_mem_schedule(struct sock *sk, int size, int kind);
@@ -2,7 +2,7 @@
 # Makefile for the Linux networking core.
 #
 
-obj-y := sock.o skbuff.o iovec.o datagram.o scm.o
+obj-y := sock.o skbuff.o iovec.o datagram.o stream.o scm.o
 
 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
/*
 *     SUCS NET3:
 *
 *     Generic stream handling routines. These are generic for most
 *     protocols. Even IP. Tonight 8-).
 *     This is used because TCP, LLC (others too) layer all have mostly
 *     identical sendmsg() and recvmsg() code.
 *     So we (will) share it here.
 *
 *     Authors:        Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *                      (from old tcp.c code)
 *                     Alan Cox <alan@redhat.com> (Borrowed comments 8-))
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/signal.h>
#include <linux/wait.h>
#include <net/sock.h>

/**
 * sk_stream_write_space - stream socket write_space callback.
 * sk - socket
 *
 * FIXME: write proper description
 */
void sk_stream_write_space(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;

        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk) && sock) {
                clear_bit(SOCK_NOSPACE, &sock->flags);

                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
                        wake_up_interruptible(sk->sk_sleep);
                if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
                        sock_wake_async(sock, 2, POLL_OUT);
        }
}

EXPORT_SYMBOL(sk_stream_write_space);
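
The new callback only publishes writability; the waiting side stays in each protocol. A protocol installs it by pointing sk->sk_write_space at sk_stream_write_space (as the TCP hunks below do), and a blocked sender then sleeps on sk->sk_sleep with SOCK_NOSPACE set until the callback wakes it. A rough sketch of that pairing, assuming a simplified sender path (example_wait_for_wspace is hypothetical and omits locking, timeouts and error handling):

        /* Illustrative sketch only, not part of this commit. */
        static int example_wait_for_wspace(struct sock *sk)
        {
                set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

                /* sk_stream_write_space() clears SOCK_NOSPACE and wakes
                 * sk->sk_sleep once at least half of the queued memory
                 * has been freed, making this condition true.
                 */
                return wait_event_interruptible(*sk->sk_sleep,
                                sk_stream_wspace(sk) >= sk_stream_min_wspace(sk));
        }
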
@@ -447,7 +447,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                         mask |= POLLIN | POLLRDNORM;
 
                 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
-                        if (tcp_wspace(sk) >= tcp_min_write_space(sk)) {
+                        if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
                                 mask |= POLLOUT | POLLWRNORM;
                         } else {  /* send SIGIO later */
                                 set_bit(SOCK_ASYNC_NOSPACE,
@@ -458,7 +458,7 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
                                  * wspace test but before the flags are set,
                                  * IO signal will be lost.
                                  */
-                                if (tcp_wspace(sk) >= tcp_min_write_space(sk))
+                                if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
                                         mask |= POLLOUT | POLLWRNORM;
                         }
                 }
@@ -469,24 +469,6 @@ unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
         return mask;
 }
 
-/*
- *      TCP socket write_space callback.
- */
-void tcp_write_space(struct sock *sk)
-{
-        struct socket *sock = sk->sk_socket;
-
-        if (tcp_wspace(sk) >= tcp_min_write_space(sk) && sock) {
-                clear_bit(SOCK_NOSPACE, &sock->flags);
-
-                if (sk->sk_sleep && waitqueue_active(sk->sk_sleep))
-                        wake_up_interruptible(sk->sk_sleep);
-
-                if (sock->fasync_list && !(sk->sk_shutdown & SEND_SHUTDOWN))
-                        sock_wake_async(sock, 2, POLL_OUT);
-        }
-}
-
 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
 {
         struct tcp_opt *tp = tcp_sk(sk);
@@ -796,7 +778,7 @@ static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
         TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
         TCP_SKB_CB(skb)->sacked = 0;
         __skb_queue_tail(&sk->sk_write_queue, skb);
-        tcp_charge_skb(sk, skb);
+        sk_charge_skb(sk, skb);
         if (!tp->send_head)
                 tp->send_head = skb;
         else if (tp->nonagle&TCP_NAGLE_PUSH)
@@ -2635,4 +2617,3 @@ EXPORT_SYMBOL(tcp_shutdown);
 EXPORT_SYMBOL(tcp_sockets_allocated);
 EXPORT_SYMBOL(tcp_statistics);
 EXPORT_SYMBOL(tcp_timewait_cachep);
-EXPORT_SYMBOL(tcp_write_space);
@@ -2081,7 +2081,7 @@ static int tcp_v4_init_sock(struct sock *sk)
 
         sk->sk_state = TCP_CLOSE;
 
-        sk->sk_write_space = tcp_write_space;
+        sk->sk_write_space = sk_stream_write_space;
         sk->sk_use_write_queue = 1;
 
         tp->af_specific = &ipv4_specific;
@@ -720,7 +720,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct open_request *req,
                 newsk->sk_backlog.head = newsk->sk_backlog.tail = NULL;
                 newsk->sk_callback_lock = RW_LOCK_UNLOCKED;
                 skb_queue_head_init(&newsk->sk_error_queue);
-                newsk->sk_write_space = tcp_write_space;
+                newsk->sk_write_space = sk_stream_write_space;
 
                 if ((filter = newsk->sk_filter) != NULL)
                         sk_filter_charge(newsk, filter);
@@ -326,7 +326,7 @@ static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb)
         /* Advance write_seq and place onto the write_queue. */
         tp->write_seq = TCP_SKB_CB(skb)->end_seq;
         __skb_queue_tail(&sk->sk_write_queue, skb);
-        tcp_charge_skb(sk, skb);
+        sk_charge_skb(sk, skb);
 
         /* Queue it, remembering where we must start sending. */
         if (tp->send_head == NULL)
@@ -439,7 +439,7 @@ static int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len)
         buff = tcp_alloc_skb(sk, nsize, GFP_ATOMIC);
         if (buff == NULL)
                 return -ENOMEM; /* We'll just try again later. */
-        tcp_charge_skb(sk, buff);
+        sk_charge_skb(sk, buff);
 
         /* Correct the sequence numbers. */
         TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len;
@@ -1169,7 +1169,7 @@ int tcp_send_synack(struct sock *sk)
                         __skb_unlink(skb, &sk->sk_write_queue);
                         __skb_queue_head(&sk->sk_write_queue, nskb);
                         tcp_free_skb(sk, skb);
-                        tcp_charge_skb(sk, nskb);
+                        sk_charge_skb(sk, nskb);
                         skb = nskb;
                 }
@@ -1329,7 +1329,7 @@ int tcp_connect(struct sock *sk)
         TCP_SKB_CB(buff)->when = tcp_time_stamp;
         tp->retrans_stamp = TCP_SKB_CB(buff)->when;
         __skb_queue_tail(&sk->sk_write_queue, buff);
-        tcp_charge_skb(sk, buff);
+        sk_charge_skb(sk, buff);
         tp->packets_out++;
         tcp_transmit_skb(sk, skb_clone(buff, GFP_KERNEL));
         TCP_INC_STATS(TcpActiveOpens);
@@ -1878,7 +1878,7 @@ static int tcp_v6_init_sock(struct sock *sk)
 
         tp->af_specific = &ipv6_specific;
 
-        sk->sk_write_space = tcp_write_space;
+        sk->sk_write_space = sk_stream_write_space;
         sk->sk_use_write_queue = 1;
 
         sk->sk_sndbuf = sysctl_tcp_wmem[1];
@@ -125,7 +125,7 @@ svc_sock_wspace(struct svc_sock *svsk)
         int wspace;
 
         if (svsk->sk_sock->type == SOCK_STREAM)
-                wspace = tcp_wspace(svsk->sk_sk);
+                wspace = sk_stream_wspace(svsk->sk_sk);
         else
                 wspace = sock_wspace(svsk->sk_sk);
@@ -1086,8 +1086,8 @@ xprt_write_space(struct sock *sk)
 
         /* Wait until we have enough socket memory */
         if (xprt->stream) {
-                /* from net/ipv4/tcp.c:tcp_write_space */
-                if (tcp_wspace(sk) < tcp_min_write_space(sk))
+                /* from net/core/stream.c:sk_stream_write_space */
+                if (sk_stream_wspace(sk) < sk_stream_min_wspace(sk))
                         goto out;
         } else {
                 /* from net/core/sock.c:sock_def_write_space */