Commit 463c84b9 authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[NET]: Introduce inet_connection_sock

This creates struct inet_connection_sock, moving out of struct tcp_sock
the members that are shareable with other connection-oriented INET
protocols, such as DCCP, which in my private tree already uses most of
them.

The functions that operate on these members were renamed with an
inet_csk_ prefix but have not yet been moved to a new file, so as to
ease the review of these changes.
Signed-off-by: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 87d11ceb
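To make the layering concrete, here is a minimal userspace sketch of the
first-member embedding this patch relies on; the struct bodies below are
stand-ins for illustration, not the real kernel definitions. Because
struct inet_sock starts with struct sock, struct inet_connection_sock
starts with struct inet_sock, and struct tcp_sock now starts with struct
inet_connection_sock, a single struct sock pointer can be cast to
whichever layer a helper needs:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

struct sock { int sk_state; };                   /* stand-in */
struct inet_sock { struct sock sk; };            /* stand-in */
struct inet_connection_sock {
        struct inet_sock icsk_inet;              /* must stay first */
        unsigned int icsk_rto;                   /* a hoisted, shareable member */
};
struct tcp_sock {
        struct inet_connection_sock inet_conn;   /* must stay first */
        unsigned int snd_cwnd;                   /* TCP-specific state */
};

static struct inet_connection_sock *inet_csk(struct sock *sk)
{
        return (struct inet_connection_sock *)sk;  /* safe: offset 0 */
}

int main(void)
{
        struct tcp_sock tp = { .inet_conn.icsk_rto = 3, .snd_cwnd = 10 };
        struct sock *sk = (struct sock *)&tp;

        assert(offsetof(struct tcp_sock, inet_conn) == 0);
        printf("rto=%u cwnd=%u\n", inet_csk(sk)->icsk_rto, tp.snd_cwnd);
        return 0;
}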
......@@ -128,7 +128,6 @@ static inline struct inet_request_sock *inet_rsk(const struct request_sock *sk)
return (struct inet_request_sock *)sk;
}
struct inet_bind_bucket;
struct ipv6_pinfo;
struct inet_sock {
......@@ -158,7 +157,6 @@ struct inet_sock {
int mc_index; /* Multicast device index */
__u32 mc_addr;
struct ip_mc_socklist *mc_list; /* Group array */
struct inet_bind_bucket *bind_hash;
/*
* Following members are used to retain the information to build
* an ip header on each ip fragmentation while the socket is corked.
......
......@@ -333,15 +333,15 @@ static inline struct in6_addr *tcp_v6_rcv_saddr(const struct sock *sk)
return sk->sk_family == AF_INET6 ? __tcp_v6_rcv_saddr(sk) : NULL;
}
static inline int tcp_twsk_ipv6only(const struct sock *sk)
static inline int inet_twsk_ipv6only(const struct sock *sk)
{
return inet_twsk(sk)->tw_ipv6only;
}
static inline int tcp_v6_ipv6only(const struct sock *sk)
static inline int inet_v6_ipv6only(const struct sock *sk)
{
return likely(sk->sk_state != TCP_TIME_WAIT) ?
ipv6_only_sock(sk) : tcp_twsk_ipv6only(sk);
ipv6_only_sock(sk) : inet_twsk_ipv6only(sk);
}
#else
#define __ipv6_only_sock(sk) 0
......@@ -360,7 +360,7 @@ static inline struct raw6_sock *raw6_sk(const struct sock *sk)
#define __tcp_v6_rcv_saddr(__sk) NULL
#define tcp_v6_rcv_saddr(__sk) NULL
#define tcp_twsk_ipv6only(__sk) 0
#define tcp_v6_ipv6only(__sk) 0
#define inet_v6_ipv6only(__sk) 0
#endif /* defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) */
#define INET6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
......
......@@ -177,8 +177,8 @@ struct tcp_info
#include <linux/config.h>
#include <linux/skbuff.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
/* This defines a selective acknowledgement block. */
......@@ -219,8 +219,8 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
}
struct tcp_sock {
/* inet_sock has to be the first member of tcp_sock */
struct inet_sock inet;
/* inet_connection_sock has to be the first member of tcp_sock */
struct inet_connection_sock inet_conn;
int tcp_header_len; /* Bytes of tcp header to send */
/*
......@@ -241,18 +241,6 @@ struct tcp_sock {
__u32 snd_sml; /* Last byte of the most recently transmitted small packet */
__u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
__u32 lsndtime; /* timestamp of last sent data packet (for restart window) */
/* Delayed ACK control data */
struct {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
__u8 blocked; /* Delayed ACK was blocked by socket lock*/
__u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet*/
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
} ack;
/* Data for direct copy to user */
struct {
......@@ -271,8 +259,8 @@ struct tcp_sock {
__u16 xmit_size_goal; /* Goal for segmenting output packets */
__u16 ext_header_len; /* Network protocol overhead (IP/IPv6 options) */
__u8 ca_state; /* State of fast-retransmit machine */
__u8 retransmits; /* Number of unrecovered RTO timeouts. */
__u8 keepalive_probes; /* num of allowed keep alive probes */
__u16 advmss; /* Advertised MSS */
__u32 window_clamp; /* Maximal window to advertise */
__u32 rcv_ssthresh; /* Current window clamp */
......@@ -281,7 +269,7 @@ struct tcp_sock {
__u8 reordering; /* Packet reordering metric. */
__u8 frto_counter; /* Number of new acks after RTO */
__u8 unused;
__u8 nonagle; /* Disable Nagle algorithm? */
__u8 defer_accept; /* User waits for some data after accept() */
/* RTT measurement */
......@@ -290,19 +278,13 @@ struct tcp_sock {
__u32 mdev_max; /* maximal mdev for the last rtt period */
__u32 rttvar; /* smoothed mdev_max */
__u32 rtt_seq; /* sequence number to update rttvar */
__u32 rto; /* retransmit timeout */
__u32 packets_out; /* Packets which are "in flight" */
__u32 left_out; /* Packets which have left the network */
__u32 retrans_out; /* Retransmitted packets out */
__u8 backoff; /* backoff */
/*
* Options received (usually on last packet, some only on SYN packets).
*/
__u8 nonagle; /* Disable Nagle algorithm? */
__u8 keepalive_probes; /* num of allowed keep alive probes */
__u8 probes_out; /* unanswered 0 window probes */
struct tcp_options_received rx_opt;
/*
......@@ -315,11 +297,6 @@ struct tcp_sock {
__u32 snd_cwnd_used;
__u32 snd_cwnd_stamp;
/* Two commonly used timers in both sender and receiver paths. */
unsigned long timeout;
struct timer_list retransmit_timer; /* Resend (no ack) */
struct timer_list delack_timer; /* Ack delay */
struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
struct tcp_func *af_specific; /* Operations which are AF_INET{4,6} specific */
......@@ -334,7 +311,7 @@ struct tcp_sock {
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
__u8 syn_retries; /* num of allowed syn retries */
__u8 probes_out; /* unanswered 0 window probes */
__u8 ecn_flags; /* ECN status bits. */
__u16 prior_ssthresh; /* ssthresh saved at recovery start */
__u32 lost_out; /* Lost packets */
......@@ -349,14 +326,12 @@ struct tcp_sock {
int undo_retrans; /* number of undoable retransmissions. */
__u32 urg_seq; /* Seq of received urgent pointer */
__u16 urg_data; /* Saved octet of OOB data and control flags */
__u8 pending; /* Scheduled timer event */
__u8 urg_mode; /* In urgent mode */
/* ONE BYTE HOLE, TRY TO PACK! */
__u32 snd_up; /* Urgent pointer */
__u32 total_retrans; /* Total retransmits for entire connection */
struct request_sock_queue accept_queue; /* FIFO of established children */
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
int linger2;
......
/*
* NET Generic infrastructure for INET connection oriented protocols.
*
* Definitions for inet_connection_sock
*
* Authors: Many people, see the TCP sources
*
* From code originally in TCP
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H
#include <linux/ip.h>
#include <linux/timer.h>
#include <net/request_sock.h>
struct inet_bind_bucket;
struct inet_hashinfo;
/** inet_connection_sock - INET connection oriented sock
*
* @icsk_accept_queue: FIFO of established children
* @icsk_bind_hash: Bind node
* @icsk_timeout: Timeout
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
* @icsk_backoff: Backoff
* @icsk_syn_retries: Number of allowed SYN (or equivalent) retries
* @icsk_ack: Delayed ACK control data
*/
struct inet_connection_sock {
/* inet_sock has to be the first member! */
struct inet_sock icsk_inet;
struct request_sock_queue icsk_accept_queue;
struct inet_bind_bucket *icsk_bind_hash;
unsigned long icsk_timeout;
struct timer_list icsk_retransmit_timer;
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
__u8 icsk_retransmits;
__u8 icsk_pending;
__u8 icsk_backoff;
__u8 icsk_syn_retries;
struct {
__u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */
__u8 blocked; /* Delayed ACK was blocked by socket lock */
__u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */
__u16 last_seg_size; /* Size of last incoming segment */
__u16 rcv_mss; /* MSS used for delayed ACK decisions */
} icsk_ack;
};
static inline struct inet_connection_sock *inet_csk(const struct sock *sk)
{
return (struct inet_connection_sock *)sk;
}
extern void inet_csk_init_xmit_timers(struct sock *sk,
void (*retransmit_handler)(unsigned long),
void (*delack_handler)(unsigned long),
void (*keepalive_handler)(unsigned long));
extern void inet_csk_clear_xmit_timers(struct sock *sk);
extern struct request_sock *inet_csk_search_req(const struct sock *sk,
struct request_sock ***prevp,
const __u16 rport,
const __u32 raddr,
const __u32 laddr);
extern int inet_csk_get_port(struct inet_hashinfo *hashinfo,
struct sock *sk, unsigned short snum);
extern struct dst_entry* inet_csk_route_req(struct sock *sk,
const struct request_sock *req);
#endif /* _INET_CONNECTION_SOCK_H */
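Since these helpers now take a plain struct sock, another
connection-oriented protocol can reuse them unchanged; that is the point
of the inet_csk_ prefix. A hypothetical sketch of how a DCCP-like
protocol might wire up the shared timers (the my_proto_* names are
invented for illustration; only the inet_csk_* calls come from the
header above):

static void my_proto_retransmit_timer(unsigned long data)
{
        struct sock *sk = (struct sock *)data;
        /* protocol-specific retransmit handling for sk goes here */
        (void)sk;
}

static void my_proto_delack_timer(unsigned long data)
{
        /* protocol-specific delayed-ACK handling goes here */
}

static void my_proto_keepalive_timer(unsigned long data)
{
        /* protocol-specific keepalive handling goes here */
}

static int my_proto_init_sock(struct sock *sk)
{
        /* attach the three shared timers to this socket */
        inet_csk_init_xmit_timers(sk, my_proto_retransmit_timer,
                                  my_proto_delack_timer,
                                  my_proto_keepalive_timer);
        inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT; /* or a protocol default */
        return 0;
}

The matching teardown would call inet_csk_clear_xmit_timers(sk) from the
protocol's destroy hook.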
......@@ -17,7 +17,6 @@
#include <linux/config.h>
#include <linux/interrupt.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/list.h>
#include <linux/slab.h>
......@@ -26,6 +25,7 @@
#include <linux/types.h>
#include <linux/wait.h>
#include <net/inet_connection_sock.h>
#include <net/sock.h>
#include <net/tcp_states.h>
......@@ -185,9 +185,9 @@ static inline void __inet_inherit_port(struct inet_hashinfo *table,
struct inet_bind_bucket *tb;
spin_lock(&head->lock);
tb = inet_sk(sk)->bind_hash;
tb = inet_csk(sk)->icsk_bind_hash;
sk_add_bind_node(child, &tb->owners);
inet_sk(child)->bind_hash = tb;
inet_csk(child)->icsk_bind_hash = tb;
spin_unlock(&head->lock);
}
......
......@@ -224,17 +224,17 @@ static inline int reqsk_queue_added(struct request_sock_queue *queue)
return prev_qlen;
}
static inline int reqsk_queue_len(struct request_sock_queue *queue)
static inline int reqsk_queue_len(const struct request_sock_queue *queue)
{
return queue->listen_opt != NULL ? queue->listen_opt->qlen : 0;
}
static inline int reqsk_queue_len_young(struct request_sock_queue *queue)
static inline int reqsk_queue_len_young(const struct request_sock_queue *queue)
{
return queue->listen_opt->qlen_young;
}
static inline int reqsk_queue_is_full(struct request_sock_queue *queue)
static inline int reqsk_queue_is_full(const struct request_sock_queue *queue)
{
return queue->listen_opt->qlen >> queue->listen_opt->max_qlen_log;
}
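The shift in reqsk_queue_is_full() above is a branch-free capacity
check: for a SYN queue sized 2^max_qlen_log, qlen >> max_qlen_log is
nonzero exactly when qlen has reached capacity. A standalone
illustration, with an assumed queue size of 256:

#include <stdio.h>

int main(void)
{
        const unsigned int max_qlen_log = 8;    /* assumed: capacity 256 */
        unsigned int qlen;

        for (qlen = 254; qlen <= 257; qlen++)
                printf("qlen=%u full=%u\n", qlen, qlen >> max_qlen_log);
        /* prints full=0 for 254 and 255, full=1 for 256 and 257 */
        return 0;
}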
......
......@@ -493,9 +493,6 @@ extern int sk_wait_data(struct sock *sk, long *timeo);
struct request_sock_ops;
/* Here is the right place to enable sock refcounting debugging */
//#define SOCK_REFCNT_DEBUG
/* Networking protocol blocks we attach to sockets.
* socket layer -> transport layer interface
* transport -> network interface is defined by struct inet_proto
......
......@@ -88,7 +88,7 @@ static inline void TCP_ECN_check_ce(struct tcp_sock *tp, struct sk_buff *skb)
* it is surely a retransmit. It is not in the ECN RFC,
* but Linux follows this rule. */
else if (INET_ECN_is_not_ect((TCP_SKB_CB(skb)->flags)))
tcp_enter_quickack_mode(tp);
tcp_enter_quickack_mode((struct sock *)tp);
}
}
......
......@@ -19,6 +19,7 @@
#include <linux/slab.h>
#include <linux/wait.h>
#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
/*
......@@ -56,10 +57,9 @@ void inet_bind_bucket_destroy(kmem_cache_t *cachep, struct inet_bind_bucket *tb)
void inet_bind_hash(struct sock *sk, struct inet_bind_bucket *tb,
const unsigned short snum)
{
struct inet_sock *inet = inet_sk(sk);
inet->num = snum;
inet_sk(sk)->num = snum;
sk_add_bind_node(sk, &tb->owners);
inet->bind_hash = tb;
inet_csk(sk)->icsk_bind_hash = tb;
}
EXPORT_SYMBOL(inet_bind_hash);
......@@ -69,16 +69,15 @@ EXPORT_SYMBOL(inet_bind_hash);
*/
static void __inet_put_port(struct inet_hashinfo *hashinfo, struct sock *sk)
{
struct inet_sock *inet = inet_sk(sk);
const int bhash = inet_bhashfn(inet->num, hashinfo->bhash_size);
const int bhash = inet_bhashfn(inet_sk(sk)->num, hashinfo->bhash_size);
struct inet_bind_hashbucket *head = &hashinfo->bhash[bhash];
struct inet_bind_bucket *tb;
spin_lock(&head->lock);
tb = inet->bind_hash;
tb = inet_csk(sk)->icsk_bind_hash;
__sk_del_bind_node(sk);
inet->bind_hash = NULL;
inet->num = 0;
inet_csk(sk)->icsk_bind_hash = NULL;
inet_sk(sk)->num = 0;
inet_bind_bucket_destroy(hashinfo->bind_bucket_cachep, tb);
spin_unlock(&head->lock);
}
......
......@@ -56,6 +56,7 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
struct inet_hashinfo *hashinfo)
{
const struct inet_sock *inet = inet_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct inet_ehash_bucket *ehead = &hashinfo->ehash[sk->sk_hashent];
struct inet_bind_hashbucket *bhead;
/* Step 1: Put TW into bind hash. Original socket stays there too.
......@@ -64,8 +65,8 @@ void __inet_twsk_hashdance(struct inet_timewait_sock *tw, struct sock *sk,
*/
bhead = &hashinfo->bhash[inet_bhashfn(inet->num, hashinfo->bhash_size)];
spin_lock(&bhead->lock);
tw->tw_tb = inet->bind_hash;
BUG_TRAP(inet->bind_hash);
tw->tw_tb = icsk->icsk_bind_hash;
BUG_TRAP(icsk->icsk_bind_hash);
inet_twsk_add_bind_node(tw, &tw->tw_tb->owners);
spin_unlock(&bhead->lock);
......
......@@ -180,7 +180,7 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
if (child)
tcp_acceptq_queue(sk, req, child);
inet_csk_reqsk_queue_add(sk, req, child);
else
reqsk_free(req);
......
......@@ -48,8 +48,9 @@ static struct sock *tcpnl;
static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
int ext, u32 pid, u32 seq, u16 nlmsg_flags)
{
struct inet_sock *inet = inet_sk(sk);
const struct inet_sock *inet = inet_sk(sk);
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcpdiagmsg *r;
struct nlmsghdr *nlh;
struct tcp_info *info = NULL;
......@@ -129,14 +130,14 @@ static int tcpdiag_fill(struct sk_buff *skb, struct sock *sk,
#define EXPIRES_IN_MS(tmo) ((((tmo) - jiffies) * 1000 + HZ - 1) / HZ)
if (tp->pending == TCP_TIME_RETRANS) {
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
r->tcpdiag_timer = 1;
r->tcpdiag_retrans = tp->retransmits;
r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
} else if (tp->pending == TCP_TIME_PROBE0) {
r->tcpdiag_retrans = icsk->icsk_retransmits;
r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
r->tcpdiag_timer = 4;
r->tcpdiag_retrans = tp->probes_out;
r->tcpdiag_expires = EXPIRES_IN_MS(tp->timeout);
r->tcpdiag_expires = EXPIRES_IN_MS(icsk->icsk_timeout);
} else if (timer_pending(&sk->sk_timer)) {
r->tcpdiag_timer = 2;
r->tcpdiag_retrans = tp->probes_out;
......@@ -497,7 +498,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
{
struct tcpdiag_entry entry;
struct tcpdiagreq *r = NLMSG_DATA(cb->nlh);
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt;
struct rtattr *bc = NULL;
struct inet_sock *inet = inet_sk(sk);
......@@ -513,9 +514,9 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
entry.family = sk->sk_family;
read_lock_bh(&tp->accept_queue.syn_wait_lock);
read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
lopt = tp->accept_queue.listen_opt;
lopt = icsk->icsk_accept_queue.listen_opt;
if (!lopt || !lopt->qlen)
goto out;
......@@ -572,7 +573,7 @@ static int tcpdiag_dump_reqs(struct sk_buff *skb, struct sock *sk,
}
out:
read_unlock_bh(&tp->accept_queue.syn_wait_lock);
read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
return err;
}
......
......@@ -271,7 +271,8 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const int rto = (tp->rto << 2) - (tp->rto >> 1);
const struct inet_connection_sock *icsk = inet_csk(sk);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
tcptw->tw_rcv_nxt = tp->rcv_nxt;
......@@ -605,10 +606,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
struct inet_request_sock *ireq = inet_rsk(req);
struct tcp_request_sock *treq = tcp_rsk(req);
struct inet_sock *newinet = inet_sk(newsk);
struct inet_connection_sock *newicsk = inet_csk(newsk);
struct tcp_sock *newtp;
newsk->sk_state = TCP_SYN_RECV;
newinet->bind_hash = NULL;
newicsk->icsk_bind_hash = NULL;
/* Clone the TCP header template */
newinet->dport = ireq->rmt_port;
......@@ -624,11 +626,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
tcp_init_wl(newtp, treq->snt_isn, treq->rcv_isn);
newtp->retransmits = 0;
newtp->backoff = 0;
newicsk->icsk_retransmits = 0;
newicsk->icsk_backoff = 0;
newtp->srtt = 0;
newtp->mdev = TCP_TIMEOUT_INIT;
newtp->rto = TCP_TIMEOUT_INIT;
newicsk->icsk_rto = TCP_TIMEOUT_INIT;
newtp->packets_out = 0;
newtp->left_out = 0;
......@@ -667,11 +669,11 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->rx_opt.num_sacks = 0;
newtp->urg_data = 0;
/* Deinitialize accept_queue to trap illegal accesses. */
memset(&newtp->accept_queue, 0, sizeof(newtp->accept_queue));
memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));
if (sock_flag(newsk, SOCK_KEEPOPEN))
tcp_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
inet_csk_reset_keepalive_timer(newsk,
keepalive_time_when(newtp));
newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
if((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
......@@ -701,7 +703,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
newtp->tcp_header_len = sizeof(struct tcphdr);
}
if (skb->len >= TCP_MIN_RCVMSS+newtp->tcp_header_len)
newtp->ack.last_seg_size = skb->len-newtp->tcp_header_len;
newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
newtp->rx_opt.mss_clamp = req->mss;
TCP_ECN_openreq_child(newtp, req);
if (newtp->ecn_flags&TCP_ECN_OK)
......@@ -881,10 +883,10 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
if (child == NULL)
goto listen_overflow;
tcp_synq_unlink(tp, req, prev);
tcp_synq_removed(sk, req);
inet_csk_reqsk_queue_unlink(sk, req, prev);
inet_csk_reqsk_queue_removed(sk, req);
tcp_acceptq_queue(sk, req, child);
inet_csk_reqsk_queue_add(sk, req, child);
return child;
listen_overflow:
......@@ -898,7 +900,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
if (!(flg & TCP_FLAG_RST))
req->rsk_ops->send_reset(skb);
tcp_synq_drop(sk, req, prev);
inet_csk_reqsk_queue_drop(sk, req, prev);
return NULL;
}
......
......@@ -105,8 +105,9 @@ static __u16 tcp_advertise_mss(struct sock *sk)
/* RFC 2861: reset CWND after an idle period longer than RTO to the "restart window".
* This is the first part of the cwnd validation mechanism. */
static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
static void tcp_cwnd_restart(struct sock *sk, struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
s32 delta = tcp_time_stamp - tp->lsndtime;
u32 restart_cwnd = tcp_init_cwnd(tp, dst);
u32 cwnd = tp->snd_cwnd;
......@@ -116,7 +117,7 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
tp->snd_ssthresh = tcp_current_ssthresh(tp);
restart_cwnd = min(restart_cwnd, cwnd);
while ((delta -= tp->rto) > 0 && cwnd > restart_cwnd)
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
cwnd >>= 1;
tp->snd_cwnd = max(cwnd, restart_cwnd);
tp->snd_cwnd_stamp = tcp_time_stamp;
......@@ -126,26 +127,25 @@ static void tcp_cwnd_restart(struct tcp_sock *tp, struct dst_entry *dst)
static inline void tcp_event_data_sent(struct tcp_sock *tp,
struct sk_buff *skb, struct sock *sk)
{
u32 now = tcp_time_stamp;
struct inet_connection_sock *icsk = inet_csk(sk);
const u32 now = tcp_time_stamp;
if (!tp->packets_out && (s32)(now - tp->lsndtime) > tp->rto)
tcp_cwnd_restart(tp, __sk_dst_get(sk));
if (!tp->packets_out && (s32)(now - tp->lsndtime) > icsk->icsk_rto)
tcp_cwnd_restart(sk, __sk_dst_get(sk));
tp->lsndtime = now;
/* If it is a reply for ato after last received
* packet, enter pingpong mode.
*/
if ((u32)(now - tp->ack.lrcvtime) < tp->ack.ato)
tp->ack.pingpong = 1;
if ((u32)(now - icsk->icsk_ack.lrcvtime) < icsk->icsk_ack.ato)
icsk->icsk_ack.pingpong = 1;
}
static __inline__ void tcp_event_ack_sent(struct sock *sk, unsigned int pkts)
{
struct tcp_sock *tp = tcp_sk(sk);
tcp_dec_quickack_mode(tp, pkts);
tcp_clear_xmit_timer(sk, TCP_TIME_DACK);
tcp_dec_quickack_mode(sk, pkts);
inet_csk_clear_xmit_timer(sk, ICSK_TIME_DACK);
}
/* Determine a window scaling and initial window to offer.
......@@ -696,7 +696,7 @@ static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp)
if (tp->packets_out > tp->snd_cwnd_used)
tp->snd_cwnd_used = tp->packets_out;
if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto)
tcp_cwnd_application_limited(sk);
}
}
......@@ -1147,6 +1147,7 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
*/
u32 __tcp_select_window(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
/* MSS for the peer's data. Previous versions used mss_clamp
* here. I don't know if the value based on our guesses
......@@ -1154,7 +1155,7 @@ u32 __tcp_select_window(struct sock *sk)
* but may be worse for the performance because of rcv_mss
* fluctuations. --SAW 1998/11/1
*/
int mss = tp->ack.rcv_mss;
int mss = icsk->icsk_ack.rcv_mss;
int free_space = tcp_space(sk);
int full_space = min_t(int, tp->window_clamp, tcp_full_space(sk));
int window;
......@@ -1163,7 +1164,7 @@ u32 __tcp_select_window(struct sock *sk)
mss = full_space;
if (free_space < full_space/2) {
tp->ack.quick = 0;
icsk->icsk_ack.quick = 0;
if (tcp_memory_pressure)
tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U*tp->advmss);
......@@ -1491,7 +1492,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (skb ==
skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
inet_csk(sk)->icsk_rto);
}
packet_cnt -= tcp_skb_pcount(skb);
......@@ -1544,7 +1546,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
break;
if (skb == skb_peek(&sk->sk_write_queue))
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
NET_INC_STATS_BH(LINUX_MIB_TCPFORWARDRETRANS);
}
......@@ -1780,8 +1782,8 @@ static inline void tcp_connect_init(struct sock *sk)
tp->rcv_wup = 0;
tp->copied_seq = 0;
tp->rto = TCP_TIMEOUT_INIT;
tp->retransmits = 0;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
inet_csk(sk)->icsk_retransmits = 0;
tcp_clear_retrans(tp);
}
......@@ -1824,7 +1826,7 @@ int tcp_connect(struct sock *sk)
TCP_INC_STATS(TCP_MIB_ACTIVEOPENS);
/* Timer for repeating the SYN until an answer. */
tcp_reset_xmit_timer(sk, TCP_TIME_RETRANS, tp->rto);
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS, inet_csk(sk)->icsk_rto);
return 0;
}
......@@ -1834,20 +1836,21 @@ int tcp_connect(struct sock *sk)
*/
void tcp_send_delayed_ack(struct sock *sk)
{
struct tcp_sock *tp = tcp_sk(sk);
int ato = tp->ack.ato;
struct inet_connection_sock *icsk = inet_csk(sk);
int ato = icsk->icsk_ack.ato;
unsigned long timeout;
if (ato > TCP_DELACK_MIN) {
const struct tcp_sock *tp = tcp_sk(sk);
int max_ato = HZ/2;
if (tp->ack.pingpong || (tp->ack.pending&TCP_ACK_PUSHED))
if (icsk->icsk_ack.pingpong || (icsk->icsk_ack.pending & ICSK_ACK_PUSHED))
max_ato = TCP_DELACK_MAX;
/* Slow path, intersegment interval is "high". */
/* If some rtt estimate is known, use it to bound delayed ack.
* Do not use tp->rto here, use results of rtt measurements
* Do not use inet_csk(sk)->icsk_rto here, use results of rtt measurements
* directly.
*/
if (tp->srtt) {
......@@ -1864,21 +1867,22 @@ void tcp_send_delayed_ack(struct sock *sk)
timeout = jiffies + ato;
/* Use new timeout only if there wasn't a older one earlier. */
if (tp->ack.pending&TCP_ACK_TIMER) {
if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
/* If delack timer was blocked or is about to expire,
* send ACK now.
*/
if (tp->ack.blocked || time_before_eq(tp->ack.timeout, jiffies+(ato>>2))) {
if (icsk->icsk_ack.blocked ||
time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
tcp_send_ack(sk);
return;
}
if (!time_before(timeout, tp->ack.timeout))
timeout = tp->ack.timeout;
if (!time_before(timeout, icsk->icsk_ack.timeout))
timeout = icsk->icsk_ack.timeout;
}
tp->ack.pending |= TCP_ACK_SCHED|TCP_ACK_TIMER;
tp->ack.timeout = timeout;
sk_reset_timer(sk, &tp->delack_timer, timeout);
icsk->icsk_ack.pending |= ICSK_ACK_SCHED | ICSK_ACK_TIMER;
icsk->icsk_ack.timeout = timeout;
sk_reset_timer(sk, &icsk->icsk_delack_timer, timeout);
}
/* This routine sends an ack and also updates the window. */
......@@ -1895,9 +1899,9 @@ void tcp_send_ack(struct sock *sk)
*/
buff = alloc_skb(MAX_TCP_HEADER, GFP_ATOMIC);
if (buff == NULL) {
tcp_schedule_ack(tp);
tp->ack.ato = TCP_ATO_MIN;
tcp_reset_xmit_timer(sk, TCP_TIME_DACK, TCP_DELACK_MAX);
inet_csk_schedule_ack(sk);
inet_csk(sk)->icsk_ack.ato = TCP_ATO_MIN;
inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK, TCP_DELACK_MAX);
return;
}
......@@ -2011,6 +2015,7 @@ int tcp_write_wakeup(struct sock *sk)
*/
void tcp_send_probe0(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
int err;
......@@ -2019,16 +2024,16 @@ void tcp_send_probe0(struct sock *sk)
if (tp->packets_out || !sk->sk_send_head) {
/* Cancel probe timer, if it is not required. */
tp->probes_out = 0;
tp->backoff = 0;
icsk->icsk_backoff = 0;
return;
}
if (err <= 0) {
if (tp->backoff < sysctl_tcp_retries2)
tp->backoff++;
if (icsk->icsk_backoff < sysctl_tcp_retries2)
icsk->icsk_backoff++;
tp->probes_out++;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RTO_MAX));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff, TCP_RTO_MAX));
} else {
/* If packet was not sent due to local congestion,
* do not backoff and do not remember probes_out.
......@@ -2038,8 +2043,9 @@ void tcp_send_probe0(struct sock *sk)
*/
if (!tp->probes_out)
tp->probes_out=1;
tcp_reset_xmit_timer (sk, TCP_TIME_PROBE0,
min(tp->rto << tp->backoff, TCP_RESOURCE_PROBE_INTERVAL));
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
min(icsk->icsk_rto << icsk->icsk_backoff,
TCP_RESOURCE_PROBE_INTERVAL));
}
}
......
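The probe0 rearming above, min(icsk_rto << icsk_backoff, TCP_RTO_MAX),
is capped exponential backoff: each unanswered zero-window probe doubles
the timeout until the cap is hit. A standalone illustration, with
assumed values standing in for the kernel constants:

#include <stdio.h>

int main(void)
{
        const unsigned int rto = 3;        /* seconds; stands in for icsk_rto */
        const unsigned int rto_max = 120;  /* stands in for TCP_RTO_MAX */
        unsigned int backoff;

        for (backoff = 0; backoff <= 7; backoff++) {
                unsigned int timeout = rto << backoff;
                if (timeout > rto_max)
                        timeout = rto_max;  /* min(rto << backoff, rto_max) */
                printf("backoff=%u timeout=%us\n", backoff, timeout);
        }
        return 0;
}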
......@@ -1043,7 +1043,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
u32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr;
u32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
int sk_ipv6only = ipv6_only_sock(sk);
int sk2_ipv6only = tcp_v6_ipv6only(sk2);
int sk2_ipv6only = inet_v6_ipv6only(sk2);
int addr_type = ipv6_addr_type(sk_rcv_saddr6);
int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
......
......@@ -207,9 +207,9 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
tb->fastreuse = 0;
success:
if (!inet_sk(sk)->bind_hash)
if (!inet_csk(sk)->icsk_bind_hash)
inet_bind_hash(sk, tb, snum);
BUG_TRAP(inet_sk(sk)->bind_hash == tb);
BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
ret = 0;
fail_unlock:
......@@ -381,7 +381,7 @@ EXPORT_SYMBOL_GPL(tcp_v6_lookup);
* Open request hash tables.
*/
static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
static u32 tcp_v6_synq_hash(const struct in6_addr *raddr, const u16 rport, const u32 rnd)
{
u32 a, b, c;
......@@ -401,14 +401,15 @@ static u32 tcp_v6_synq_hash(struct in6_addr *raddr, u16 rport, u32 rnd)
return c & (TCP_SYNQ_HSIZE - 1);
}
static struct request_sock *tcp_v6_search_req(struct tcp_sock *tp,
static struct request_sock *tcp_v6_search_req(const struct sock *sk,
struct request_sock ***prevp,
__u16 rport,
struct in6_addr *raddr,
struct in6_addr *laddr,
int iif)
{
struct listen_sock *lopt = tp->accept_queue.listen_opt;
const struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
struct request_sock *req, **prev;
for (prev = &lopt->syn_table[tcp_v6_synq_hash(raddr, rport, lopt->hash_rnd)];
......@@ -619,7 +620,7 @@ static int tcp_v6_hash_connect(struct sock *sk)
}
head = &tcp_hashinfo.bhash[inet_bhashfn(snum, tcp_hashinfo.bhash_size)];
tb = inet_sk(sk)->bind_hash;
tb = inet_csk(sk)->icsk_bind_hash;
spin_lock_bh(&head->lock);
if (sk_head(&tb->owners) == sk && !sk->sk_bind_node.next) {
......@@ -925,7 +926,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
if (sock_owned_by_user(sk))
goto out;
req = tcp_v6_search_req(tp, &prev, th->dest, &hdr->daddr,
req = tcp_v6_search_req(sk, &prev, th->dest, &hdr->daddr,
&hdr->saddr, tcp_v6_iif(skb));
if (!req)
goto out;
......@@ -940,7 +941,7 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
goto out;
}
tcp_synq_drop(sk, req, prev);
inet_csk_reqsk_queue_drop(sk, req, prev);
goto out;
case TCP_SYN_SENT:
......@@ -1245,11 +1246,10 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
{
struct request_sock *req, **prev;
struct tcphdr *th = skb->h.th;
struct tcp_sock *tp = tcp_sk(sk);
struct sock *nsk;
/* Find possible connection requests. */
req = tcp_v6_search_req(tp, &prev, th->source, &skb->nh.ipv6h->saddr,
req = tcp_v6_search_req(sk, &prev, th->source, &skb->nh.ipv6h->saddr,
&skb->nh.ipv6h->daddr, tcp_v6_iif(skb));
if (req)
return tcp_check_req(sk, skb, req, prev);
......@@ -1278,12 +1278,12 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk,struct sk_buff *skb)
static void tcp_v6_synq_add(struct sock *sk, struct request_sock *req)
{
struct tcp_sock *tp = tcp_sk(sk);
struct listen_sock *lopt = tp->accept_queue.listen_opt;
u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
struct inet_connection_sock *icsk = inet_csk(sk);
struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
const u32 h = tcp_v6_synq_hash(&tcp6_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port, lopt->hash_rnd);
reqsk_queue_hash_req(&tp->accept_queue, h, req, TCP_TIMEOUT_INIT);
tcp_synq_added(sk);
reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, TCP_TIMEOUT_INIT);
inet_csk_reqsk_queue_added(sk, TCP_TIMEOUT_INIT);
}
......@@ -1308,13 +1308,13 @@ static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
/*
* There are no SYN attacks on IPv6, yet...
*/
if (tcp_synq_is_full(sk) && !isn) {
if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
if (net_ratelimit())
printk(KERN_INFO "TCPv6: dropping request, synflood is possible\n");
goto drop;
}
if (sk_acceptq_is_full(sk) && tcp_synq_young(sk) > 1)
if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
goto drop;
req = reqsk_alloc(&tcp6_request_sock_ops);
......@@ -2015,7 +2015,7 @@ static int tcp_v6_init_sock(struct sock *sk)
tcp_init_xmit_timers(sk);
tcp_prequeue_init(tp);
tp->rto = TCP_TIMEOUT_INIT;
inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
tp->mdev = TCP_TIMEOUT_INIT;
/* So many TCP implementations out there (incorrectly) count the
......@@ -2098,18 +2098,20 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
unsigned long timer_expires;
struct inet_sock *inet = inet_sk(sp);
struct tcp_sock *tp = tcp_sk(sp);
const struct inet_connection_sock *icsk = inet_csk(sp);
struct ipv6_pinfo *np = inet6_sk(sp);
dest = &np->daddr;
src = &np->rcv_saddr;
destp = ntohs(inet->dport);
srcp = ntohs(inet->sport);
if (tp->pending == TCP_TIME_RETRANS) {
if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
timer_active = 1;
timer_expires = tp->timeout;
} else if (tp->pending == TCP_TIME_PROBE0) {
timer_expires = icsk->icsk_timeout;
} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
timer_active = 4;
timer_expires = tp->timeout;
timer_expires = icsk->icsk_timeout;
} else if (timer_pending(&sp->sk_timer)) {
timer_active = 2;
timer_expires = sp->sk_timer.expires;
......@@ -2130,12 +2132,14 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
tp->write_seq-tp->snd_una, tp->rcv_nxt-tp->copied_seq,
timer_active,
jiffies_to_clock_t(timer_expires - jiffies),
tp->retransmits,
icsk->icsk_retransmits,
sock_i_uid(sp),
tp->probes_out,
sock_i_ino(sp),
atomic_read(&sp->sk_refcnt), sp,
tp->rto, tp->ack.ato, (tp->ack.quick<<1)|tp->ack.pingpong,
icsk->icsk_rto,
icsk->icsk_ack.ato,
(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
tp->snd_cwnd, tp->snd_ssthresh>=0xFFFF?-1:tp->snd_ssthresh
);
}
......@@ -2227,7 +2231,7 @@ struct proto tcpv6_prot = {
.close = tcp_close,
.connect = tcp_v6_connect,
.disconnect = tcp_disconnect,
.accept = tcp_accept,
.accept = inet_csk_accept,
.ioctl = tcp_ioctl,
.init = tcp_v6_init_sock,
.destroy = tcp_v6_destroy_sock,
......