Commit b6b6d653 authored by Eric Dumazet's avatar Eric Dumazet Committed by David S. Miller

inet: remove icsk_ack.blocked

TCP has been using it to work around the possibility of tcp_delack_timer()
finding the socket owned by user.

After commit 6f458dfb ("tcp: improve latencies of timer triggered events")
we added TCP_DELACK_TIMER_DEFERRED atomic bit for more immediate recovery,
so we can get rid of icsk_ack.blocked.

This frees space that following patch will reuse.
Signed-off-by: Eric Dumazet <edumazet@google.com>
Acked-by: Soheil Hassas Yeganeh <soheil@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20c168be
...@@ -110,7 +110,7 @@ struct inet_connection_sock { ...@@ -110,7 +110,7 @@ struct inet_connection_sock {
__u8 pending; /* ACK is pending */ __u8 pending; /* ACK is pending */
__u8 quick; /* Scheduled number of quick acks */ __u8 quick; /* Scheduled number of quick acks */
__u8 pingpong; /* The session is interactive */ __u8 pingpong; /* The session is interactive */
__u8 blocked; /* Delayed ACK was blocked by socket lock */ /* one byte hole. */
__u32 ato; /* Predicted tick of soft clock */ __u32 ato; /* Predicted tick of soft clock */
unsigned long timeout; /* Currently scheduled timeout */ unsigned long timeout; /* Currently scheduled timeout */
__u32 lrcvtime; /* timestamp of last received data packet */ __u32 lrcvtime; /* timestamp of last received data packet */
...@@ -198,7 +198,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what) ...@@ -198,7 +198,7 @@ static inline void inet_csk_clear_xmit_timer(struct sock *sk, const int what)
sk_stop_timer(sk, &icsk->icsk_retransmit_timer); sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
#endif #endif
} else if (what == ICSK_TIME_DACK) { } else if (what == ICSK_TIME_DACK) {
icsk->icsk_ack.blocked = icsk->icsk_ack.pending = 0; icsk->icsk_ack.pending = 0;
#ifdef INET_CSK_CLEAR_TIMERS #ifdef INET_CSK_CLEAR_TIMERS
sk_stop_timer(sk, &icsk->icsk_delack_timer); sk_stop_timer(sk, &icsk->icsk_delack_timer);
#endif #endif
......
...@@ -176,7 +176,6 @@ static void dccp_delack_timer(struct timer_list *t) ...@@ -176,7 +176,6 @@ static void dccp_delack_timer(struct timer_list *t)
bh_lock_sock(sk); bh_lock_sock(sk);
if (sock_owned_by_user(sk)) { if (sock_owned_by_user(sk)) {
/* Try again later. */ /* Try again later. */
icsk->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
sk_reset_timer(sk, &icsk->icsk_delack_timer, sk_reset_timer(sk, &icsk->icsk_delack_timer,
jiffies + TCP_DELACK_MIN); jiffies + TCP_DELACK_MIN);
......
...@@ -564,7 +564,7 @@ void inet_csk_clear_xmit_timers(struct sock *sk) ...@@ -564,7 +564,7 @@ void inet_csk_clear_xmit_timers(struct sock *sk)
{ {
struct inet_connection_sock *icsk = inet_csk(sk); struct inet_connection_sock *icsk = inet_csk(sk);
icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0; icsk->icsk_pending = icsk->icsk_ack.pending = 0;
sk_stop_timer(sk, &icsk->icsk_retransmit_timer); sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
sk_stop_timer(sk, &icsk->icsk_delack_timer); sk_stop_timer(sk, &icsk->icsk_delack_timer);
......
...@@ -1538,10 +1538,8 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied) ...@@ -1538,10 +1538,8 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
if (inet_csk_ack_scheduled(sk)) { if (inet_csk_ack_scheduled(sk)) {
const struct inet_connection_sock *icsk = inet_csk(sk); const struct inet_connection_sock *icsk = inet_csk(sk);
/* Delayed ACKs frequently hit locked sockets during bulk
* receive. */ if (/* Once-per-two-segments ACK was not sent by tcp_input.c */
if (icsk->icsk_ack.blocked ||
/* Once-per-two-segments ACK was not sent by tcp_input.c */
tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss || tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
/* /*
* If this read emptied read buffer, we send ACK, if * If this read emptied read buffer, we send ACK, if
......
...@@ -3911,11 +3911,8 @@ void tcp_send_delayed_ack(struct sock *sk) ...@@ -3911,11 +3911,8 @@ void tcp_send_delayed_ack(struct sock *sk)
/* Use new timeout only if there wasn't a older one earlier. */ /* Use new timeout only if there wasn't a older one earlier. */
if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) { if (icsk->icsk_ack.pending & ICSK_ACK_TIMER) {
/* If delack timer was blocked or is about to expire, /* If delack timer is about to expire, send ACK now. */
* send ACK now. if (time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
*/
if (icsk->icsk_ack.blocked ||
time_before_eq(icsk->icsk_ack.timeout, jiffies + (ato >> 2))) {
tcp_send_ack(sk); tcp_send_ack(sk);
return; return;
} }
......
...@@ -331,7 +331,6 @@ static void tcp_delack_timer(struct timer_list *t) ...@@ -331,7 +331,6 @@ static void tcp_delack_timer(struct timer_list *t)
if (!sock_owned_by_user(sk)) { if (!sock_owned_by_user(sk)) {
tcp_delack_timer_handler(sk); tcp_delack_timer_handler(sk);
} else { } else {
icsk->icsk_ack.blocked = 1;
__NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED); __NET_INC_STATS(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
/* deleguate our work to tcp_release_cb() */ /* deleguate our work to tcp_release_cb() */
if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags)) if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED, &sk->sk_tsq_flags))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment