Commit 8292a17a authored by Arnaldo Carvalho de Melo, committed by David S. Miller

[ICSK]: Rename struct tcp_func to struct inet_connection_sock_af_ops

And move it to struct inet_connection_sock. DCCP will use it in the
upcoming changesets.
Signed-off-by: Arnaldo Carvalho de Melo <acme@mandriva.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ca304b61
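Before the hunks themselves, a summary of the shape of the change: the per-address-family operations table is renamed from struct tcp_func to struct inet_connection_sock_af_ops and moved from struct tcp_sock (tp->af_specific) to struct inet_connection_sock (inet_csk(sk)->icsk_af_ops), and call sites are updated accordingly. The sketch below is a minimal, stand-alone user-space model of that move, assuming simplified fake_* types invented for the example; it is for orientation only and is not the kernel implementation (the real inet_csk() is a cast of struct sock *, not a member accessor).

/*
 * Illustrative, stand-alone C model of the change -- NOT kernel code.
 * All fake_* identifiers are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

struct fake_sk_buff { int len; };

/* The per-address-family ops table (formerly "struct tcp_func"). */
struct inet_connection_sock_af_ops {
	int	(*queue_xmit)(struct fake_sk_buff *skb, int ipfragok);
	uint16_t net_header_len;
};

/*
 * Shared connection-oriented socket state: the ops pointer now lives
 * here, so any connection-oriented protocol (TCP today, DCCP in the
 * upcoming changesets) can reuse it.
 */
struct inet_connection_sock {
	struct inet_connection_sock_af_ops *icsk_af_ops;
};

/* TCP-private state embeds the shared part; "af_specific" is gone. */
struct tcp_sock {
	struct inet_connection_sock inet_conn;
};

/* Rough stand-in for the kernel's inet_csk() accessor. */
static struct inet_connection_sock *inet_csk(struct tcp_sock *sk)
{
	return &sk->inet_conn;
}

static int fake_ipv4_queue_xmit(struct fake_sk_buff *skb, int ipfragok)
{
	printf("queueing %d bytes (ipfragok=%d)\n", skb->len, ipfragok);
	return 0;
}

static struct inet_connection_sock_af_ops fake_ipv4_specific = {
	.queue_xmit	= fake_ipv4_queue_xmit,
	.net_header_len	= 20,	/* IPv4 header */
};

int main(void)
{
	struct tcp_sock sk = { .inet_conn = { .icsk_af_ops = &fake_ipv4_specific } };
	struct fake_sk_buff skb = { .len = 100 };

	/* Old call sites read  tp->af_specific->queue_xmit(skb, 0);
	 * new ones read        icsk->icsk_af_ops->queue_xmit(skb, 0). */
	return inet_csk(&sk)->icsk_af_ops->queue_xmit(&skb, 0);
}

Because struct inet_connection_sock is shared by all connection-oriented INET sockets, DCCP can later point icsk_af_ops at its own table instead of growing a private copy of the structure, which is the motivation stated in the commit message.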
@@ -295,8 +295,6 @@ struct tcp_sock {
struct sk_buff_head out_of_order_queue; /* Out of order segments go here */
struct tcp_func *af_specific; /* Operations which are AF_INET{4,6} specific */
__u32 rcv_wnd; /* Current receiver window */
__u32 rcv_wup; /* rcv_nxt on last window update sent */
__u32 write_seq; /* Tail(+1) of data held in tcp send buffer */
......
@@ -15,6 +15,7 @@
#ifndef _INET_CONNECTION_SOCK_H
#define _INET_CONNECTION_SOCK_H
#include <linux/compiler.h>
#include <linux/ip.h>
#include <linux/string.h>
#include <linux/timer.h>
@@ -29,6 +30,29 @@ struct inet_bind_bucket;
struct inet_hashinfo;
struct tcp_congestion_ops;
/*
* Pointers to address related TCP functions
* (i.e. things that depend on the address family)
*/
struct inet_connection_sock_af_ops {
int (*queue_xmit)(struct sk_buff *skb, int ipfragok);
void (*send_check)(struct sock *sk, int len,
struct sk_buff *skb);
int (*rebuild_header)(struct sock *sk);
int (*conn_request)(struct sock *sk, struct sk_buff *skb);
struct sock *(*syn_recv_sock)(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
int (*remember_stamp)(struct sock *sk);
__u16 net_header_len;
int (*setsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int optlen);
int (*getsockopt)(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
int sockaddr_len;
};
/** inet_connection_sock - INET connection oriented sock
*
* @icsk_accept_queue: FIFO of established children
@@ -37,6 +61,7 @@ struct tcp_congestion_ops;
* @icsk_retransmit_timer: Resend (no ack)
* @icsk_rto: Retransmit timeout
* @icsk_ca_ops Pluggable congestion control hook
* @icsk_af_ops Operations which are AF_INET{4,6} specific
* @icsk_ca_state: Congestion control state
* @icsk_retransmits: Number of unrecovered [RTO] timeouts
* @icsk_pending: Scheduled timer event
@@ -55,6 +80,7 @@ struct inet_connection_sock {
struct timer_list icsk_delack_timer;
__u32 icsk_rto;
struct tcp_congestion_ops *icsk_ca_ops;
struct inet_connection_sock_af_ops *icsk_af_ops;
__u8 icsk_ca_state;
__u8 icsk_retransmits;
__u8 icsk_pending;
......
@@ -224,53 +224,6 @@ extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
/*
* Pointers to address related TCP functions
* (i.e. things that depend on the address family)
*/
struct tcp_func {
int (*queue_xmit) (struct sk_buff *skb,
int ipfragok);
void (*send_check) (struct sock *sk,
struct tcphdr *th,
int len,
struct sk_buff *skb);
int (*rebuild_header) (struct sock *sk);
int (*conn_request) (struct sock *sk,
struct sk_buff *skb);
struct sock * (*syn_recv_sock) (struct sock *sk,
struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst);
int (*remember_stamp) (struct sock *sk);
__u16 net_header_len;
int (*setsockopt) (struct sock *sk,
int level,
int optname,
char __user *optval,
int optlen);
int (*getsockopt) (struct sock *sk,
int level,
int optname,
char __user *optval,
int __user *optlen);
void (*addr2sockaddr) (struct sock *sk,
struct sockaddr *);
int sockaddr_len;
};
/*
* The next routines deal with comparing 32 bit unsigned ints
* and worry about wraparound (automatic with unsigned arithmetic).
@@ -405,8 +358,7 @@ extern void tcp_parse_options(struct sk_buff *skb,
* TCP v4 functions exported for the inet6 API
*/
extern void tcp_v4_send_check(struct sock *sk,
struct tcphdr *th, int len,
extern void tcp_v4_send_check(struct sock *sk, int len,
struct sk_buff *skb);
extern int tcp_v4_conn_request(struct sock *sk,
......
@@ -44,7 +44,7 @@ extern int datagram_send_ctl(struct msghdr *msg,
/*
* address family specific functions
*/
extern struct tcp_func ipv4_specific;
extern struct inet_connection_sock_af_ops ipv4_specific;
extern int inet6_destroy_sock(struct sock *sk);
......
@@ -173,10 +173,10 @@ static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
struct dst_entry *dst)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
struct sock *child;
child = tp->af_specific->syn_recv_sock(sk, skb, req, dst);
child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst);
if (child)
inet_csk_reqsk_queue_add(sk, req, child);
else
......
@@ -1696,8 +1696,8 @@ int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
int err = 0;
if (level != SOL_TCP)
return tp->af_specific->setsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->setsockopt(sk, level, optname,
optval, optlen);
/* This is a string value all the others are int's */
if (optname == TCP_CONGESTION) {
@@ -1939,8 +1939,8 @@ int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
int val, len;
if (level != SOL_TCP)
return tp->af_specific->getsockopt(sk, level, optname,
optval, optlen);
return icsk->icsk_af_ops->getsockopt(sk, level, optname,
optval, optlen);
if (get_user(len, optlen))
return -EFAULT;
......
@@ -4071,8 +4071,10 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
mb();
tcp_set_state(sk, TCP_ESTABLISHED);
icsk = inet_csk(sk);
/* Make sure socket is routed, for correct metrics. */
tp->af_specific->rebuild_header(sk);
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
@@ -4098,8 +4100,6 @@ static int tcp_rcv_synsent_state_process(struct sock *sk, struct sk_buff *skb,
sk_wake_async(sk, 0, POLL_OUT);
}
icsk = inet_csk(sk);
if (sk->sk_write_pending ||
icsk->icsk_accept_queue.rskq_defer_accept ||
icsk->icsk_ack.pingpong) {
@@ -4220,6 +4220,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
struct tcphdr *th, unsigned len)
{
struct tcp_sock *tp = tcp_sk(sk);
struct inet_connection_sock *icsk = inet_csk(sk);
int queued = 0;
tp->rx_opt.saw_tstamp = 0;
@@ -4236,7 +4237,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
goto discard;
if(th->syn) {
if(tp->af_specific->conn_request(sk, skb) < 0)
if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
return 1;
/* Now we have several options: In theory there is
@@ -4349,7 +4350,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
/* Make sure socket is routed, for
* correct metrics.
*/
tp->af_specific->rebuild_header(sk);
icsk->icsk_af_ops->rebuild_header(sk);
tcp_init_metrics(sk);
......
@@ -86,8 +86,7 @@ int sysctl_tcp_low_latency;
/* Socket used for sending RSTs */
static struct socket *tcp_socket;
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb);
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb);
struct inet_hashinfo __cacheline_aligned tcp_hashinfo = {
.lhash_lock = RW_LOCK_UNLOCKED,
@@ -645,10 +644,10 @@ void tcp_v4_err(struct sk_buff *skb, u32 info)
}
/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb)
void tcp_v4_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
struct inet_sock *inet = inet_sk(sk);
struct tcphdr *th = skb->h.th;
if (skb->ip_summed == CHECKSUM_HW) {
th->check = ~tcp_v4_check(th, len, inet->saddr, inet->daddr, 0);
@@ -1383,7 +1382,7 @@ int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw)
return 0;
}
struct tcp_func ipv4_specific = {
struct inet_connection_sock_af_ops ipv4_specific = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -1434,7 +1433,7 @@ static int tcp_v4_init_sock(struct sock *sk)
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
tp->af_specific = &ipv4_specific;
icsk->icsk_af_ops = &ipv4_specific;
sk->sk_sndbuf = sysctl_tcp_wmem[1];
sk->sk_rcvbuf = sysctl_tcp_rmem[1];
......
@@ -274,18 +274,18 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
struct inet_timewait_sock *tw = NULL;
const struct inet_connection_sock *icsk = inet_csk(sk);
const struct tcp_sock *tp = tcp_sk(sk);
int recycle_ok = 0;
if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
recycle_ok = tp->af_specific->remember_stamp(sk);
recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);
if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
tw = inet_twsk_alloc(sk, state);
if (tw != NULL) {
struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
const struct inet_connection_sock *icsk = inet_csk(sk);
const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
@@ -456,7 +456,6 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
struct request_sock **prev)
{
struct tcphdr *th = skb->h.th;
struct tcp_sock *tp = tcp_sk(sk);
u32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
int paws_reject = 0;
struct tcp_options_received tmp_opt;
@@ -613,7 +612,8 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
* ESTABLISHED STATE. If it will be dropped after
* socket is created, wait for troubles.
*/
child = tp->af_specific->syn_recv_sock(sk, skb, req, NULL);
child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb,
req, NULL);
if (child == NULL)
goto listen_overflow;
......
@@ -371,7 +371,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
TCP_ECN_send(sk, tp, skb, tcp_header_size);
}
tp->af_specific->send_check(sk, th, skb->len, skb);
icsk->icsk_af_ops->send_check(sk, skb->len, skb);
if (likely(tcb->flags & TCPCB_FLAG_ACK))
tcp_event_ack_sent(sk, tcp_skb_pcount(skb));
@@ -381,7 +381,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
TCP_INC_STATS(TCP_MIB_OUTSEGS);
err = tp->af_specific->queue_xmit(skb, 0);
err = icsk->icsk_af_ops->queue_xmit(skb, 0);
if (unlikely(err <= 0))
return err;
@@ -638,12 +638,11 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu)
{
struct tcp_sock *tp = tcp_sk(sk);
int mss_now;
/* Calculate base mss without TCP options:
It is MMS_S - sizeof(tcphdr) of rfc1122
*/
mss_now = pmtu - tp->af_specific->net_header_len - sizeof(struct tcphdr);
int mss_now = (pmtu - inet_csk(sk)->icsk_af_ops->net_header_len -
sizeof(struct tcphdr));
/* Clamp it (mss_clamp does not include tcp options) */
if (mss_now > tp->rx_opt.mss_clamp)
@@ -705,9 +704,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
xmit_size_goal = mss_now;
if (doing_tso) {
xmit_size_goal = 65535 -
tp->af_specific->net_header_len -
tp->ext_header_len - tp->tcp_header_len;
xmit_size_goal = (65535 -
inet_csk(sk)->icsk_af_ops->net_header_len -
tp->ext_header_len - tp->tcp_header_len);
if (tp->max_window &&
(xmit_size_goal > (tp->max_window >> 1)))
@@ -1422,7 +1421,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
(sysctl_tcp_retrans_collapse != 0))
tcp_retrans_try_collapse(sk, skb, cur_mss);
if(tp->af_specific->rebuild_header(sk))
if (inet_csk(sk)->icsk_af_ops->rebuild_header(sk))
return -EHOSTUNREACH; /* Routing failure or similar. */
/* Some Solaris stacks overoptimize and ignore the FIN on a
......
@@ -170,7 +170,7 @@ int ipv6_setsockopt(struct sock *sk, int level, int optname,
sock_prot_inc_use(&tcp_prot);
local_bh_enable();
sk->sk_prot = &tcp_prot;
tp->af_specific = &ipv4_specific;
inet_csk(sk)->icsk_af_ops = &ipv4_specific;
sk->sk_socket->ops = &inet_stream_ops;
sk->sk_family = PF_INET;
tcp_sync_mss(sk, tp->pmtu_cookie);
......
@@ -68,14 +68,14 @@
static void tcp_v6_send_reset(struct sk_buff *skb);
static void tcp_v6_reqsk_send_ack(struct sk_buff *skb, struct request_sock *req);
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
static void tcp_v6_send_check(struct sock *sk, int len,
struct sk_buff *skb);
static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb);
static int tcp_v6_xmit(struct sk_buff *skb, int ipfragok);
static struct tcp_func ipv6_mapped;
static struct tcp_func ipv6_specific;
static struct inet_connection_sock_af_ops ipv6_mapped;
static struct inet_connection_sock_af_ops ipv6_specific;
int inet6_csk_bind_conflict(const struct sock *sk,
const struct inet_bind_bucket *tb)
@@ -107,9 +107,7 @@ static int tcp_v6_get_port(struct sock *sk, unsigned short snum)
static void tcp_v6_hash(struct sock *sk)
{
if (sk->sk_state != TCP_CLOSE) {
struct tcp_sock *tp = tcp_sk(sk);
if (tp->af_specific == &ipv6_mapped) {
if (inet_csk(sk)->icsk_af_ops == &ipv6_mapped) {
tcp_prot.hash(sk);
return;
}
@@ -417,14 +415,14 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
sin.sin_port = usin->sin6_port;
sin.sin_addr.s_addr = usin->sin6_addr.s6_addr32[3];
tp->af_specific = &ipv6_mapped;
inet_csk(sk)->icsk_af_ops = &ipv6_mapped;
sk->sk_backlog_rcv = tcp_v4_do_rcv;
err = tcp_v4_connect(sk, (struct sockaddr *)&sin, sizeof(sin));
if (err) {
tp->ext_header_len = exthdrlen;
tp->af_specific = &ipv6_specific;
inet_csk(sk)->icsk_af_ops = &ipv6_specific;
sk->sk_backlog_rcv = tcp_v6_do_rcv;
goto failure;
} else {
@@ -751,10 +749,10 @@ static int ipv6_opt_accepted(struct sock *sk, struct sk_buff *skb)
}
static void tcp_v6_send_check(struct sock *sk, struct tcphdr *th, int len,
struct sk_buff *skb)
static void tcp_v6_send_check(struct sock *sk, int len, struct sk_buff *skb)
{
struct ipv6_pinfo *np = inet6_sk(sk);
struct tcphdr *th = skb->h.th;
if (skb->ip_summed == CHECKSUM_HW) {
th->check = ~csum_ipv6_magic(&np->saddr, &np->daddr, len, IPPROTO_TCP, 0);
@@ -1070,7 +1068,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
ipv6_addr_copy(&newnp->rcv_saddr, &newnp->saddr);
newtp->af_specific = &ipv6_mapped;
inet_csk(newsk)->icsk_af_ops = &ipv6_mapped;
newsk->sk_backlog_rcv = tcp_v4_do_rcv;
newnp->pktoptions = NULL;
newnp->opt = NULL;
@@ -1084,7 +1082,7 @@ static struct sock * tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
*/
/* It is tricky place. Until this moment IPv4 tcp
worked with IPv6 af_tcp.af_specific.
worked with IPv6 icsk.icsk_af_ops.
Sync it now.
*/
tcp_sync_mss(newsk, newtp->pmtu_cookie);
@@ -1631,7 +1629,7 @@ static int tcp_v6_remember_stamp(struct sock *sk)
return 0;
}
static struct tcp_func ipv6_specific = {
static struct inet_connection_sock_af_ops ipv6_specific = {
.queue_xmit = tcp_v6_xmit,
.send_check = tcp_v6_send_check,
.rebuild_header = tcp_v6_rebuild_header,
@@ -1650,7 +1648,7 @@ static struct tcp_func ipv6_specific = {
* TCP over IPv4 via INET6 API
*/
static struct tcp_func ipv6_mapped = {
static struct inet_connection_sock_af_ops ipv6_mapped = {
.queue_xmit = ip_queue_xmit,
.send_check = tcp_v4_send_check,
.rebuild_header = inet_sk_rebuild_header,
@@ -1700,7 +1698,7 @@ static int tcp_v6_init_sock(struct sock *sk)
sk->sk_state = TCP_CLOSE;
tp->af_specific = &ipv6_specific;
icsk->icsk_af_ops = &ipv6_specific;
icsk->icsk_ca_ops = &tcp_init_congestion_ops;
sk->sk_write_space = sk_stream_write_space;
sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
......
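A pattern visible in the tcp_ipv6.c hunks above: when an AF_INET6 TCP socket ends up talking to a v4-mapped peer, it simply repoints its per-socket ops table (previously tp->af_specific = &ipv6_mapped, now inet_csk(sk)->icsk_af_ops = &ipv6_mapped). The stand-alone sketch below illustrates that single-pointer swap with invented demo_ types; it does not reproduce the kernel structures.

#include <stdio.h>

/* Cut-down stand-in for the ops table; only what the example needs. */
struct inet_connection_sock_af_ops {
	const char  *name;
	unsigned int net_header_len;
};

static struct inet_connection_sock_af_ops demo_ipv6_specific = {
	.name		= "ipv6_specific",
	.net_header_len	= 40,	/* IPv6 header */
};

static struct inet_connection_sock_af_ops demo_ipv6_mapped = {
	.name		= "ipv6_mapped",
	.net_header_len	= 20,	/* IPv4 header */
};

/* Cut-down stand-in for struct inet_connection_sock. */
struct demo_icsk {
	struct inet_connection_sock_af_ops *icsk_af_ops;
};

int main(void)
{
	struct demo_icsk icsk = { .icsk_af_ops = &demo_ipv6_specific };

	/* An AF_INET6 socket falling back to a v4-mapped peer: the whole
	 * switch is one pointer store, as in tcp_v6_connect() above. */
	icsk.icsk_af_ops = &demo_ipv6_mapped;

	printf("using %s ops, net_header_len=%u\n",
	       icsk.icsk_af_ops->name, icsk.icsk_af_ops->net_header_len);
	return 0;
}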