Commit 9e412ba7 authored by Ilpo Järvinen, committed by David S. Miller

[TCP]: Sed magic converts func(sk, tp, ...) -> func(sk, ...)

This is (mostly) automated change using magic:

sed -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e '/struct sock \*sk/ N' -e '/struct sock \*sk/ N'
    -e 's|struct sock \*sk,[\n\t ]*struct tcp_sock \*tp\([^{]*\n{\n\)|
	  struct sock \*sk\1\tstruct tcp_sock *tp = tcp_sk(sk);\n|g'
    -e 's|struct sock \*sk, struct tcp_sock \*tp|
	  struct sock \*sk|g' -e 's|sk, tp\([^-]\)|sk\1|g'

Fixed four unused variable (tp) warnings that were introduced.

In addition, manually added newlines after local variables and
tweaked function arguments positioning.

$ gcc --version
gcc (GCC) 4.1.1 20060525 (Red Hat 4.1.1-1)
...
$ codiff -fV built-in.o.old built-in.o.new
net/ipv4/route.c:
  rt_cache_flush |  +14
 1 function changed, 14 bytes added

net/ipv4/tcp.c:
  tcp_setsockopt |   -5
  tcp_sendpage   |  -25
  tcp_sendmsg    |  -16
 3 functions changed, 46 bytes removed

net/ipv4/tcp_input.c:
  tcp_try_undo_recovery |   +3
  tcp_try_undo_dsack    |   +2
  tcp_mark_head_lost    |  -12
  tcp_ack               |  -15
  tcp_event_data_recv   |  -32
  tcp_rcv_state_process |  -10
  tcp_rcv_established   |   +1
 7 functions changed, 6 bytes added, 69 bytes removed, diff: -63

net/ipv4/tcp_output.c:
  update_send_head          |   -9
  tcp_transmit_skb          |  +19
  tcp_cwnd_validate         |   +1
  tcp_write_wakeup          |  -17
  __tcp_push_pending_frames |  -25
  tcp_push_one              |   -8
  tcp_send_fin              |   -4
 7 functions changed, 20 bytes added, 63 bytes removed, diff: -43

built-in.o.new:
 18 functions changed, 40 bytes added, 178 bytes removed, diff: -138
Signed-off-by: Ilpo Järvinen <ilpo.jarvinen@helsinki.fi>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 38b4da38
...@@ -420,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, ...@@ -420,9 +420,9 @@ extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
/* tcp_output.c */ /* tcp_output.c */
extern void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, extern void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
unsigned int cur_mss, int nonagle); int nonagle);
extern int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp); extern int tcp_may_send_now(struct sock *sk);
extern int tcp_retransmit_skb(struct sock *, struct sk_buff *); extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *); extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *); extern void tcp_simple_retransmit(struct sock *);
...@@ -479,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp) ...@@ -479,8 +479,10 @@ static inline void tcp_fast_path_on(struct tcp_sock *tp)
__tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale); __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
} }
static inline void tcp_fast_path_check(struct sock *sk, struct tcp_sock *tp) static inline void tcp_fast_path_check(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
if (skb_queue_empty(&tp->out_of_order_queue) && if (skb_queue_empty(&tp->out_of_order_queue) &&
tp->rcv_wnd && tp->rcv_wnd &&
atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf && atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
...@@ -591,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count, ...@@ -591,10 +593,10 @@ static inline void tcp_dec_pcount_approx(__u32 *count,
} }
} }
static inline void tcp_packets_out_inc(struct sock *sk, static inline void tcp_packets_out_inc(struct sock *sk,
struct tcp_sock *tp,
const struct sk_buff *skb) const struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
int orig = tp->packets_out; int orig = tp->packets_out;
tp->packets_out += tcp_skb_pcount(skb); tp->packets_out += tcp_skb_pcount(skb);
...@@ -778,18 +780,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss, ...@@ -778,18 +780,21 @@ static inline void tcp_minshall_update(struct tcp_sock *tp, int mss,
tp->snd_sml = TCP_SKB_CB(skb)->end_seq; tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
} }
static inline void tcp_check_probe_timer(struct sock *sk, struct tcp_sock *tp) static inline void tcp_check_probe_timer(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_connection_sock *icsk = inet_csk(sk);
if (!tp->packets_out && !icsk->icsk_pending) if (!tp->packets_out && !icsk->icsk_pending)
inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0, inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
icsk->icsk_rto, TCP_RTO_MAX); icsk->icsk_rto, TCP_RTO_MAX);
} }
static inline void tcp_push_pending_frames(struct sock *sk, static inline void tcp_push_pending_frames(struct sock *sk)
struct tcp_sock *tp)
{ {
__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk, 1), tp->nonagle); struct tcp_sock *tp = tcp_sk(sk);
__tcp_push_pending_frames(sk, tcp_current_mss(sk, 1), tp->nonagle);
} }
static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq) static inline void tcp_init_wl(struct tcp_sock *tp, u32 ack, u32 seq)
......
...@@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp, ...@@ -27,9 +27,10 @@ static inline void TCP_ECN_send_synack(struct tcp_sock *tp,
TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE; TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_ECE;
} }
static inline void TCP_ECN_send_syn(struct sock *sk, struct tcp_sock *tp, static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb)
struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
tp->ecn_flags = 0; tp->ecn_flags = 0;
if (sysctl_tcp_ecn) { if (sysctl_tcp_ecn) {
TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR; TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_ECE|TCPCB_FLAG_CWR;
...@@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th) ...@@ -44,9 +45,11 @@ TCP_ECN_make_synack(struct request_sock *req, struct tcphdr *th)
th->ece = 1; th->ece = 1;
} }
static inline void TCP_ECN_send(struct sock *sk, struct tcp_sock *tp, static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb,
struct sk_buff *skb, int tcp_header_len) int tcp_header_len)
{ {
struct tcp_sock *tp = tcp_sk(sk);
if (tp->ecn_flags & TCP_ECN_OK) { if (tp->ecn_flags & TCP_ECN_OK) {
/* Not-retransmitted data segment: set ECT and inject CWR. */ /* Not-retransmitted data segment: set ECT and inject CWR. */
if (skb->len != tcp_header_len && if (skb->len != tcp_header_len &&
......
...@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp) ...@@ -460,9 +460,9 @@ static inline int forced_push(struct tcp_sock *tp)
return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1)); return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
} }
static inline void skb_entail(struct sock *sk, struct tcp_sock *tp, static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
skb->csum = 0; skb->csum = 0;
...@@ -486,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags, ...@@ -486,15 +486,17 @@ static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
} }
} }
static inline void tcp_push(struct sock *sk, struct tcp_sock *tp, int flags, static inline void tcp_push(struct sock *sk, int flags, int mss_now,
int mss_now, int nonagle) int nonagle)
{ {
struct tcp_sock *tp = tcp_sk(sk);
if (tcp_send_head(sk)) { if (tcp_send_head(sk)) {
struct sk_buff *skb = tcp_write_queue_tail(sk); struct sk_buff *skb = tcp_write_queue_tail(sk);
if (!(flags & MSG_MORE) || forced_push(tp)) if (!(flags & MSG_MORE) || forced_push(tp))
tcp_mark_push(tp, skb); tcp_mark_push(tp, skb);
tcp_mark_urg(tp, flags, skb); tcp_mark_urg(tp, flags, skb);
__tcp_push_pending_frames(sk, tp, mss_now, __tcp_push_pending_frames(sk, mss_now,
(flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle); (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
} }
} }
...@@ -540,7 +542,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse ...@@ -540,7 +542,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (!skb) if (!skb)
goto wait_for_memory; goto wait_for_memory;
skb_entail(sk, tp, skb); skb_entail(sk, skb);
copy = size_goal; copy = size_goal;
} }
...@@ -586,7 +588,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse ...@@ -586,7 +588,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
if (forced_push(tp)) { if (forced_push(tp)) {
tcp_mark_push(tp, skb); tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk)) } else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now); tcp_push_one(sk, mss_now);
continue; continue;
...@@ -595,7 +597,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse ...@@ -595,7 +597,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory: wait_for_memory:
if (copied) if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error; goto do_error;
...@@ -606,7 +608,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse ...@@ -606,7 +608,7 @@ static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffse
out: out:
if (copied) if (copied)
tcp_push(sk, tp, flags, mss_now, tp->nonagle); tcp_push(sk, flags, mss_now, tp->nonagle);
return copied; return copied;
do_error: do_error:
...@@ -637,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, ...@@ -637,8 +639,9 @@ ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
#define TCP_PAGE(sk) (sk->sk_sndmsg_page) #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
#define TCP_OFF(sk) (sk->sk_sndmsg_off) #define TCP_OFF(sk) (sk->sk_sndmsg_off)
static inline int select_size(struct sock *sk, struct tcp_sock *tp) static inline int select_size(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
int tmp = tp->mss_cache; int tmp = tp->mss_cache;
if (sk->sk_route_caps & NETIF_F_SG) { if (sk->sk_route_caps & NETIF_F_SG) {
...@@ -714,7 +717,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -714,7 +717,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (!sk_stream_memory_free(sk)) if (!sk_stream_memory_free(sk))
goto wait_for_sndbuf; goto wait_for_sndbuf;
skb = sk_stream_alloc_pskb(sk, select_size(sk, tp), skb = sk_stream_alloc_pskb(sk, select_size(sk),
0, sk->sk_allocation); 0, sk->sk_allocation);
if (!skb) if (!skb)
goto wait_for_memory; goto wait_for_memory;
...@@ -725,7 +728,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -725,7 +728,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (sk->sk_route_caps & NETIF_F_ALL_CSUM) if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
skb->ip_summed = CHECKSUM_PARTIAL; skb->ip_summed = CHECKSUM_PARTIAL;
skb_entail(sk, tp, skb); skb_entail(sk, skb);
copy = size_goal; copy = size_goal;
} }
...@@ -830,7 +833,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -830,7 +833,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
if (forced_push(tp)) { if (forced_push(tp)) {
tcp_mark_push(tp, skb); tcp_mark_push(tp, skb);
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH); __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
} else if (skb == tcp_send_head(sk)) } else if (skb == tcp_send_head(sk))
tcp_push_one(sk, mss_now); tcp_push_one(sk, mss_now);
continue; continue;
...@@ -839,7 +842,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -839,7 +842,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
set_bit(SOCK_NOSPACE, &sk->sk_socket->flags); set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory: wait_for_memory:
if (copied) if (copied)
tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH); tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
if ((err = sk_stream_wait_memory(sk, &timeo)) != 0) if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
goto do_error; goto do_error;
...@@ -851,7 +854,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -851,7 +854,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
out: out:
if (copied) if (copied)
tcp_push(sk, tp, flags, mss_now, tp->nonagle); tcp_push(sk, flags, mss_now, tp->nonagle);
TCP_CHECK_TIMER(sk); TCP_CHECK_TIMER(sk);
release_sock(sk); release_sock(sk);
return copied; return copied;
...@@ -1389,7 +1392,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, ...@@ -1389,7 +1392,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
skip_copy: skip_copy:
if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
tp->urg_data = 0; tp->urg_data = 0;
tcp_fast_path_check(sk, tp); tcp_fast_path_check(sk);
} }
if (used + offset < skb->len) if (used + offset < skb->len)
continue; continue;
...@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, ...@@ -1830,7 +1833,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
* for currently queued segments. * for currently queued segments.
*/ */
tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk, tp); tcp_push_pending_frames(sk);
} else { } else {
tp->nonagle &= ~TCP_NAGLE_OFF; tp->nonagle &= ~TCP_NAGLE_OFF;
} }
...@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level, ...@@ -1854,7 +1857,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
tp->nonagle &= ~TCP_NAGLE_CORK; tp->nonagle &= ~TCP_NAGLE_CORK;
if (tp->nonagle&TCP_NAGLE_OFF) if (tp->nonagle&TCP_NAGLE_OFF)
tp->nonagle |= TCP_NAGLE_PUSH; tp->nonagle |= TCP_NAGLE_PUSH;
tcp_push_pending_frames(sk, tp); tcp_push_pending_frames(sk);
} }
break; break;
......
This diff is collapsed.
...@@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512; ...@@ -62,12 +62,13 @@ int sysctl_tcp_base_mss __read_mostly = 512;
/* By default, RFC2861 behavior. */ /* By default, RFC2861 behavior. */
int sysctl_tcp_slow_start_after_idle __read_mostly = 1; int sysctl_tcp_slow_start_after_idle __read_mostly = 1;
static void update_send_head(struct sock *sk, struct tcp_sock *tp, static void update_send_head(struct sock *sk, struct sk_buff *skb)
struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
tcp_advance_send_head(sk, skb); tcp_advance_send_head(sk, skb);
tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; tp->snd_nxt = TCP_SKB_CB(skb)->end_seq;
tcp_packets_out_inc(sk, tp, skb); tcp_packets_out_inc(sk, skb);
} }
/* SND.NXT, if window was not shrunk. /* SND.NXT, if window was not shrunk.
...@@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp, ...@@ -76,8 +77,10 @@ static void update_send_head(struct sock *sk, struct tcp_sock *tp,
* Anything in between SND.UNA...SND.UNA+SND.WND also can be already * Anything in between SND.UNA...SND.UNA+SND.WND also can be already
* invalid. OK, let's make this for now: * invalid. OK, let's make this for now:
*/ */
static inline __u32 tcp_acceptable_seq(struct sock *sk, struct tcp_sock *tp) static inline __u32 tcp_acceptable_seq(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt)) if (!before(tp->snd_una+tp->snd_wnd, tp->snd_nxt))
return tp->snd_nxt; return tp->snd_nxt;
else else
...@@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, ...@@ -516,7 +519,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
md5 ? &md5_hash_location : md5 ? &md5_hash_location :
#endif #endif
NULL); NULL);
TCP_ECN_send(sk, tp, skb, tcp_header_size); TCP_ECN_send(sk, skb, tcp_header_size);
} }
#ifdef CONFIG_TCP_MD5SIG #ifdef CONFIG_TCP_MD5SIG
...@@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed) ...@@ -927,8 +930,9 @@ unsigned int tcp_current_mss(struct sock *sk, int large_allowed)
/* Congestion window validation. (RFC2861) */ /* Congestion window validation. (RFC2861) */
static void tcp_cwnd_validate(struct sock *sk, struct tcp_sock *tp) static void tcp_cwnd_validate(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
__u32 packets_out = tp->packets_out; __u32 packets_out = tp->packets_out;
if (packets_out >= tp->snd_cwnd) { if (packets_out >= tp->snd_cwnd) {
...@@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb, ...@@ -1076,8 +1080,9 @@ static unsigned int tcp_snd_test(struct sock *sk, struct sk_buff *skb,
return cwnd_quota; return cwnd_quota;
} }
int tcp_may_send_now(struct sock *sk, struct tcp_sock *tp) int tcp_may_send_now(struct sock *sk)
{ {
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb = tcp_send_head(sk); struct sk_buff *skb = tcp_send_head(sk);
return (skb && return (skb &&
...@@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, ...@@ -1144,8 +1149,9 @@ static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len,
* *
* This algorithm is from John Heffner. * This algorithm is from John Heffner.
*/ */
static int tcp_tso_should_defer(struct sock *sk, struct tcp_sock *tp, struct sk_buff *skb) static int tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)
{ {
struct tcp_sock *tp = tcp_sk(sk);
const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_connection_sock *icsk = inet_csk(sk);
u32 send_win, cong_win, limit, in_flight; u32 send_win, cong_win, limit, in_flight;
...@@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk) ...@@ -1324,7 +1330,7 @@ static int tcp_mtu_probe(struct sock *sk)
/* Decrement cwnd here because we are sending /* Decrement cwnd here because we are sending
* effectively two packets. */ * effectively two packets. */
tp->snd_cwnd--; tp->snd_cwnd--;
update_send_head(sk, tp, nskb); update_send_head(sk, nskb);
icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len); icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq; tp->mtu_probe.probe_seq_start = TCP_SKB_CB(nskb)->seq;
...@@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) ...@@ -1387,7 +1393,7 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
nonagle : TCP_NAGLE_PUSH)))) nonagle : TCP_NAGLE_PUSH))))
break; break;
} else { } else {
if (tcp_tso_should_defer(sk, tp, skb)) if (tcp_tso_should_defer(sk, skb))
break; break;
} }
...@@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) ...@@ -1416,14 +1422,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
/* Advance the send_head. This one is sent out. /* Advance the send_head. This one is sent out.
* This call will increment packets_out. * This call will increment packets_out.
*/ */
update_send_head(sk, tp, skb); update_send_head(sk, skb);
tcp_minshall_update(tp, mss_now, skb); tcp_minshall_update(tp, mss_now, skb);
sent_pkts++; sent_pkts++;
} }
if (likely(sent_pkts)) { if (likely(sent_pkts)) {
tcp_cwnd_validate(sk, tp); tcp_cwnd_validate(sk);
return 0; return 0;
} }
return !tp->packets_out && tcp_send_head(sk); return !tp->packets_out && tcp_send_head(sk);
...@@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle) ...@@ -1433,14 +1439,14 @@ static int tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle)
* TCP_CORK or attempt at coalescing tiny packets. * TCP_CORK or attempt at coalescing tiny packets.
* The socket must be locked by the caller. * The socket must be locked by the caller.
*/ */
void __tcp_push_pending_frames(struct sock *sk, struct tcp_sock *tp, void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
unsigned int cur_mss, int nonagle) int nonagle)
{ {
struct sk_buff *skb = tcp_send_head(sk); struct sk_buff *skb = tcp_send_head(sk);
if (skb) { if (skb) {
if (tcp_write_xmit(sk, cur_mss, nonagle)) if (tcp_write_xmit(sk, cur_mss, nonagle))
tcp_check_probe_timer(sk, tp); tcp_check_probe_timer(sk);
} }
} }
...@@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now) ...@@ -1484,8 +1490,8 @@ void tcp_push_one(struct sock *sk, unsigned int mss_now)
TCP_SKB_CB(skb)->when = tcp_time_stamp; TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) { if (likely(!tcp_transmit_skb(sk, skb, 1, sk->sk_allocation))) {
update_send_head(sk, tp, skb); update_send_head(sk, skb);
tcp_cwnd_validate(sk, tp); tcp_cwnd_validate(sk);
return; return;
} }
} }
...@@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk) ...@@ -1933,7 +1939,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
* segments to send. * segments to send.
*/ */
if (tcp_may_send_now(sk, tp)) if (tcp_may_send_now(sk))
return; return;
if (tp->forward_skb_hint) { if (tp->forward_skb_hint) {
...@@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk) ...@@ -2023,7 +2029,7 @@ void tcp_send_fin(struct sock *sk)
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq + 1;
tcp_queue_skb(sk, skb); tcp_queue_skb(sk, skb);
} }
__tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_OFF); __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_OFF);
} }
/* We get here when a process closes a file descriptor (either due to /* We get here when a process closes a file descriptor (either due to
...@@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk) ...@@ -2033,7 +2039,6 @@ void tcp_send_fin(struct sock *sk)
*/ */
void tcp_send_active_reset(struct sock *sk, gfp_t priority) void tcp_send_active_reset(struct sock *sk, gfp_t priority)
{ {
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb; struct sk_buff *skb;
/* NOTE: No TCP options attached and we never retransmit this. */ /* NOTE: No TCP options attached and we never retransmit this. */
...@@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority) ...@@ -2053,7 +2058,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
skb_shinfo(skb)->gso_type = 0; skb_shinfo(skb)->gso_type = 0;
/* Send it off. */ /* Send it off. */
TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk, tp); TCP_SKB_CB(skb)->seq = tcp_acceptable_seq(sk);
TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq; TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(skb)->seq;
TCP_SKB_CB(skb)->when = tcp_time_stamp; TCP_SKB_CB(skb)->when = tcp_time_stamp;
if (tcp_transmit_skb(sk, skb, 0, priority)) if (tcp_transmit_skb(sk, skb, 0, priority))
...@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk) ...@@ -2271,7 +2276,7 @@ int tcp_connect(struct sock *sk)
skb_reserve(buff, MAX_TCP_HEADER); skb_reserve(buff, MAX_TCP_HEADER);
TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN; TCP_SKB_CB(buff)->flags = TCPCB_FLAG_SYN;
TCP_ECN_send_syn(sk, tp, buff); TCP_ECN_send_syn(sk, buff);
TCP_SKB_CB(buff)->sacked = 0; TCP_SKB_CB(buff)->sacked = 0;
skb_shinfo(buff)->gso_segs = 1; skb_shinfo(buff)->gso_segs = 1;
skb_shinfo(buff)->gso_size = 0; skb_shinfo(buff)->gso_size = 0;
...@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk) ...@@ -2363,7 +2368,6 @@ void tcp_send_ack(struct sock *sk)
{ {
/* If we have been reset, we may not send again. */ /* If we have been reset, we may not send again. */
if (sk->sk_state != TCP_CLOSE) { if (sk->sk_state != TCP_CLOSE) {
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *buff; struct sk_buff *buff;
/* We are not putting this on the write queue, so /* We are not putting this on the write queue, so
...@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk) ...@@ -2389,7 +2393,7 @@ void tcp_send_ack(struct sock *sk)
skb_shinfo(buff)->gso_type = 0; skb_shinfo(buff)->gso_type = 0;
/* Send it off, this clears delayed acks for us. */ /* Send it off, this clears delayed acks for us. */
TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk, tp); TCP_SKB_CB(buff)->seq = TCP_SKB_CB(buff)->end_seq = tcp_acceptable_seq(sk);
TCP_SKB_CB(buff)->when = tcp_time_stamp; TCP_SKB_CB(buff)->when = tcp_time_stamp;
tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC); tcp_transmit_skb(sk, buff, 0, GFP_ATOMIC);
} }
...@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk) ...@@ -2467,7 +2471,7 @@ int tcp_write_wakeup(struct sock *sk)
TCP_SKB_CB(skb)->when = tcp_time_stamp; TCP_SKB_CB(skb)->when = tcp_time_stamp;
err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC);
if (!err) { if (!err) {
update_send_head(sk, tp, skb); update_send_head(sk, skb);
} }
return err; return err;
} else { } else {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment