Commit caa20d9a authored by Stephen Hemminger, committed by David S. Miller

[TCP]: spelling fixes

Minor spelling fixes for TCP code.
Signed-off-by: Stephen Hemminger <shemminger@osdl.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 326f36e9
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -89,10 +89,10 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
  */
 #define TCP_SYN_RETRIES	 5	/* number of times to retry active opening a
-				 * connection: ~180sec is RFC minumum	*/
+				 * connection: ~180sec is RFC minimum	*/
 #define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
-				 * connection: ~180sec is RFC minumum	*/
+				 * connection: ~180sec is RFC minimum	*/
 #define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
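As an aside, the "~180sec is RFC minimum" figure falls straight out of
exponential backoff on the initial retransmission timeout. A standalone
sketch of the arithmetic, assuming the classic 3 second initial RTO of
this era (TCP_TIMEOUT_INIT); plain userspace C, not part of the patch:

#include <stdio.h>

int main(void)
{
	int rto = 3, elapsed = 0, i;

	for (i = 0; i < 5; i++) {	/* TCP_SYN_RETRIES == 5 */
		elapsed += rto;		/* wait out the current RTO */
		rto *= 2;		/* back off, then retransmit */
	}
	/* the attempt is abandoned only after the last RTO also expires */
	printf("gives up after ~%d sec\n", elapsed + rto);	/* ~189 */
	return 0;
}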
@@ -180,7 +180,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
 /* Flags in tp->nonagle */
 #define TCP_NAGLE_OFF	1	/* Nagle's algo is disabled */
 #define TCP_NAGLE_CORK	2	/* Socket is corked	    */
-#define TCP_NAGLE_PUSH	4	/* Cork is overriden for already queued data */
+#define TCP_NAGLE_PUSH	4	/* Cork is overridden for already queued data */
 
 extern struct inet_timewait_death_row tcp_death_row;
@@ -552,13 +552,13 @@ extern u32 __tcp_select_window(struct sock *sk);
 
 /* TCP timestamps are only 32-bits, this causes a slight
  * complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below.  We decidely
+ * of jiffies in the buffer control blocks below.  We decidedly
  * only use of the low 32-bits of jiffies and hide the ugly
  * casts with the following macro.
  */
 #define tcp_time_stamp		((__u32)(jiffies))
 
-/* This is what the send packet queueing engine uses to pass
+/* This is what the send packet queuing engine uses to pass
  * TCP per-packet control information to the transmission
  * code.  We also store the host-order sequence numbers in
  * here too.  This is 36 bytes on 32-bit architectures,
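For illustration, the truncation this macro performs on a 64-bit machine
can be reproduced in ordinary userspace C (standalone sketch, not kernel
code; the jiffies value is invented):

#include <stdio.h>
#include <stdint.h>

#define tcp_time_stamp_of(j)	((uint32_t)(j))	/* mirrors ((__u32)(jiffies)) */

int main(void)
{
	uint64_t jiffies = 0x123456789abcdef0ULL;	/* pretend 64-bit jiffies */

	/* only the low 32 bits survive, so all timestamp arithmetic
	 * must be done modulo 2^32 as well */
	printf("0x%08x\n", (unsigned)tcp_time_stamp_of(jiffies));	/* 0x9abcdef0 */
	return 0;
}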
@@ -598,7 +598,7 @@ struct tcp_skb_cb {
 #define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
 #define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
 
-#define TCPCB_URG		0x20	/* Urgent pointer advenced here	*/
+#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/
 
 #define TCPCB_AT_TAIL		(TCPCB_URG)
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1640,7 +1640,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	} else if (tcp_need_reset(old_state) ||
 		   (tp->snd_nxt != tp->write_seq &&
 		    (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
-		/* The last check adjusts for discrepance of Linux wrt. RFC
+		/* The last check adjusts for discrepancy of Linux wrt. RFC
 		 * states
 		 */
 		tcp_send_active_reset(sk, gfp_any());
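The (1 << old_state) expression above tests membership in a set of states
with a single mask operation. A standalone sketch of the idiom, using the
state numbering the kernel has long used (TCP_LAST_ACK = 9, TCP_CLOSING = 11):

#include <stdio.h>

enum { TCP_LAST_ACK = 9, TCP_CLOSING = 11 };
#define TCPF_LAST_ACK	(1 << TCP_LAST_ACK)
#define TCPF_CLOSING	(1 << TCP_CLOSING)

int main(void)
{
	int old_state = TCP_LAST_ACK;

	/* one branch checks "is the state CLOSING or LAST_ACK" */
	if ((1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))
		printf("needs an active reset on disconnect\n");
	return 0;
}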
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -42,7 +42,7 @@
  *		Andi Kleen	:	Moved open_request checking here
  *					and process RSTs for open_requests.
  *		Andi Kleen	:	Better prune_queue, and other fixes.
- *		Andrey Savochkin:	Fix RTT measurements in the presnce of
+ *		Andrey Savochkin:	Fix RTT measurements in the presence of
  *					timestamps.
  *		Andrey Savochkin:	Check sequence numbers correctly when
  *					removing SACKs due to in sequence incoming
@@ -224,7 +224,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
  * of receiver window. Check #2.
  *
  * The scheme does not work when sender sends good segments opening
- * window and then starts to feed us spagetti. But it should work
+ * window and then starts to feed us spaghetti. But it should work
  * in common situations. Otherwise, we have to rely on queue collapsing.
  */
@@ -278,7 +278,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 	int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
 
 	/* Try to select rcvbuf so that 4 mss-sized segments
-	 * will fit to window and correspoding skbs will fit to our rcvbuf.
+	 * will fit to window and corresponding skbs will fit to our rcvbuf.
 	 * (was 3; 4 is minimum to allow fast retransmit to work.)
 	 */
 	while (tcp_win_from_space(rcvmem) < tp->advmss)
@@ -287,7 +287,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
 	sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
 }
 
-/* 4. Try to fixup all. It is made iimediately after connection enters
+/* 4. Try to fixup all. It is made immediately after connection enters
  * established state.
  */
 static void tcp_init_buffer_space(struct sock *sk)
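The sizing above, reconstructed as a standalone sketch; tcp_win_from_space()
here is a toy stand-in with a fixed per-skb overhead, and the MSS and sysctl
values are invented:

#include <stdio.h>

#define SKB_OVERHEAD	256	/* assumed per-skb bookkeeping overhead */

static int tcp_win_from_space(int space)
{
	return space - SKB_OVERHEAD;	/* toy model, not the kernel's */
}

int main(void)
{
	int advmss = 1460;		/* typical Ethernet MSS */
	int rmem_max = 174760;		/* assumed sysctl_tcp_rmem[2] */
	int rcvmem = advmss + SKB_OVERHEAD;
	int rcvbuf;

	/* grow rcvmem until one advmss of payload fits the window */
	while (tcp_win_from_space(rcvmem) < advmss)
		rcvmem += 128;

	/* room for 4 such segments, so fast retransmit can work */
	rcvbuf = 4 * rcvmem < rmem_max ? 4 * rcvmem : rmem_max;
	printf("rcvbuf = %d bytes\n", rcvbuf);
	return 0;
}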
@@ -367,8 +367,8 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 	 * are stalled on filesystem I/O.
 	 *
 	 * Also, since we are only going for a minimum in the
-	 * non-timestamp case, we do not smoothe things out
-	 * else with timestamps disabled convergance takes too
+	 * non-timestamp case, we do not smooth things out
+	 * else with timestamps disabled convergence takes too
 	 * long.
 	 */
 	if (!win_dep) {
@@ -377,7 +377,7 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
 		} else if (m < new_sample)
 			new_sample = m << 3;
 	} else {
-		/* No previous mesaure. */
+		/* No previous measure. */
 		new_sample = m << 3;
 	}
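The m << 3 seen here keeps receive-side RTT samples in 8x fixed point so a
1/8-gain integer EWMA loses no precision. A standalone sketch of that
convention (sample values invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t new_sample = 0;
	int32_t samples[] = { 40, 36, 44 };	/* RTT samples in jiffies */
	int i;

	for (i = 0; i < 3; i++) {
		int32_t m = samples[i];

		if (!new_sample)
			new_sample = m << 3;	/* no previous measure */
		else				/* new = 7/8 new + 1/8 m */
			new_sample += m - (new_sample >> 3);
	}
	printf("rcv_rtt ~= %d jiffies\n", new_sample >> 3);	/* ~40 */
	return 0;
}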
@@ -506,7 +506,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
 		if (icsk->icsk_ack.ato > icsk->icsk_rto)
 			icsk->icsk_ack.ato = icsk->icsk_rto;
 	} else if (m > icsk->icsk_rto) {
-		/* Too long gap. Apparently sender falled to
+		/* Too long gap. Apparently sender failed to
 		 * restart window, so that we send ACKs quickly.
 		 */
 		tcp_incr_quickack(sk);
@@ -546,7 +546,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
  *
  * Funny. This algorithm seems to be very broken.
  * These formulae increase RTO, when it should be decreased, increase
- * too slowly, when it should be incresed fastly, decrease too fastly
+ * too slowly, when it should be increased fastly, decrease too fastly
  * etc. I guess in BSD RTO takes ONE value, so that it is absolutely
  * does not matter how to _calculate_ it. Seems, it was trap
  * that VJ failed to avoid. 8)
@@ -607,14 +607,14 @@ static inline void tcp_set_rto(struct sock *sk)
 	 * at least by solaris and freebsd. "Erratic ACKs" has _nothing_
 	 * to do with delayed acks, because at cwnd>2 true delack timeout
 	 * is invisible. Actually, Linux-2.4 also generates erratic
-	 * ACKs in some curcumstances.
+	 * ACKs in some circumstances.
 	 */
 	inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
 
 	/* 2. Fixups made earlier cannot be right.
 	 *    If we do not estimate RTO correctly without them,
 	 *    all the algo is pure shit and should be replaced
-	 *    with correct one. It is exaclty, which we pretend to do.
+	 *    with correct one. It is exactly, which we pretend to do.
 	 */
 }
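tp->srtt is likewise stored left-shifted by 3, so the assignment above is
the classic VJ rto = srtt + 4*mdev with the deviation term pre-scaled. A
simplified standalone sketch of the estimator feeding it; this omits the
kernel's mdev_max/rttvar refinements and the sample values are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int32_t srtt = 100 << 3;	/* smoothed RTT, 8x fixed point */
	int32_t mdev = 25 << 2;		/* mean deviation, 4x fixed point */
	int32_t samples[] = { 110, 90, 130 };	/* measured RTTs, jiffies */
	int i;

	for (i = 0; i < 3; i++) {
		int32_t m = samples[i];

		m -= srtt >> 3;		/* error against current estimate */
		srtt += m;		/* srtt <- 7/8 srtt + 1/8 sample */
		if (m < 0)
			m = -m;
		m -= mdev >> 2;
		mdev += m;		/* mdev <- 3/4 mdev + 1/4 |error| */
	}
	/* rttvar ~= mdev here; the 4x scaling makes this srtt + 4 deviations */
	printf("rto = %d jiffies\n", (srtt >> 3) + mdev);
	return 0;
}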
@@ -772,7 +772,7 @@ static void tcp_init_metrics(struct sock *sk)
 	 * to make it more realistic.
 	 *
 	 * A bit of theory. RTT is time passed after "normal" sized packet
-	 * is sent until it is ACKed. In normal curcumstances sending small
+	 * is sent until it is ACKed. In normal circumstances sending small
 	 * packets force peer to delay ACKs and calculation is correct too.
 	 * The algorithm is adaptive and, provided we follow specs, it
 	 * NEVER underestimate RTT. BUT! If peer tries to make some clever
@@ -1899,7 +1899,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
 }
 
 /* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Superceeds RFC1323)
+ * with this code. (Supersedes RFC1323)
  */
 static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
 {
@@ -1912,7 +1912,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
 	 * 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
 	 *
 	 * Changed: reset backoff as soon as we see the first valid sample.
-	 * If we do not, we get strongly overstimated rto. With timestamps
+	 * If we do not, we get strongly overestimated rto. With timestamps
 	 * samples are accepted even from very old segments: f.e., when rtt=1
 	 * increases to 8, we retransmit 5 times and after 8 seconds delayed
 	 * answer arrives rto becomes 120 seconds! If at least one of segments
@@ -2268,7 +2268,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
 	}
 
 	/* F-RTO affects on two new ACKs following RTO.
-	 * At latest on third ACK the TCP behavor is back to normal.
+	 * At latest on third ACK the TCP behavior is back to normal.
 	 */
 	tp->frto_counter = (tp->frto_counter + 1) % 3;
 }
@@ -2344,7 +2344,7 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
 		tcp_process_frto(sk, prior_snd_una);
 
 	if (tcp_ack_is_dubious(sk, flag)) {
-		/* Advanve CWND, if state allows this. */
+		/* Advance CWND, if state allows this. */
 		if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
 			tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
 		tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
@@ -3133,7 +3133,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
 {
 	struct sk_buff *skb;
 
-	/* First, check that queue is collapsable and find
+	/* First, check that queue is collapsible and find
 	 * the point where collapsing can be useful. */
 	for (skb = head; skb != tail; ) {
 		/* No new bits? It is possible on ofo queue. */
@@ -3441,7 +3441,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk)
 
 /*
  *	This routine is only called when we have urgent data
- *	signalled. Its the 'slow' part of tcp_urg. It could be
+ *	signaled. Its the 'slow' part of tcp_urg. It could be
  *	moved inline now as tcp_urg is only called from one
  *	place. We handle URGent data wrong. We have to - as
  *	BSD still doesn't use the correction from RFC961.
@@ -3486,7 +3486,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
 	 * urgent. To do this requires some care. We cannot just ignore
 	 * tp->copied_seq since we would read the last urgent byte again
 	 * as data, nor can we alter copied_seq until this data arrives
-	 * or we break the sematics of SIOCATMARK (and thus sockatmark())
+	 * or we break the semantics of SIOCATMARK (and thus sockatmark())
 	 *
 	 * NOTE. Double Dutch. Rendering to plain English: author of comment
 	 * above did something sort of	send("A", MSG_OOB); send("B", MSG_OOB);
@@ -3631,7 +3631,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
 	tp->rx_opt.saw_tstamp = 0;
 
 	/*	pred_flags is 0xS?10 << 16 + snd_wnd
-	 *	if header_predition is to be made
+	 *	if header_prediction is to be made
 	 *	'S' will always be tp->tcp_header_len >> 2
 	 *	'?' will be 0 for the fast path, otherwise pred_flags is 0 to
 	 *	turn it off (when there are holes in the receive
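A standalone sketch of how that prediction word is assembled, modeled on
the kernel's __tcp_fast_path_on() (header length and window are invented):

#include <stdio.h>

int main(void)
{
	unsigned tcp_header_len = 20;	/* no options in steady state */
	unsigned snd_wnd = 0x7fff;	/* expected send window */

	/* doff (header_len >> 2) fills the 'S' nibble in bits 28-31,
	 * 0x10 << 16 is the ACK bit, the low 16 bits hold the window */
	unsigned pred_flags = ((tcp_header_len >> 2) << 28)
			    | (0x10 << 16)
			    | snd_wnd;

	printf("pred_flags = 0x%08x\n", pred_flags);	/* 0x50107fff */
	return 0;
}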
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -39,7 +39,7 @@
  *					request_sock handling and moved
  *					most of it into the af independent code.
  *					Added tail drop and some other bugfixes.
- *					Added new listen sematics.
+ *					Added new listen semantics.
  *		Mike McLagan	:	Routing by source
  *	Juan Jose Ciarlante:		ip_dynaddr bits
  *		Andi Kleen:		various fixes.
@@ -1210,7 +1210,7 @@ int tcp_v4_rcv(struct sk_buff *skb)
 
 	/* An explanation is required here, I think.
 	 * Packet length and doff are validated by header prediction,
-	 * provided case of th->doff==0 is elimineted.
+	 * provided case of th->doff==0 is eliminated.
 	 * So, we defer the checks. */
 	if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
 	     tcp_v4_checksum_init(skb)))
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -158,7 +158,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 		/* I am shamed, but failed to make it more elegant.
 		 * Yes, it is direct reference to IP, which is impossible
 		 * to generalize to IPv6. Taking into account that IPv6
-		 * do not undertsnad recycling in any case, it not
+		 * do not understand recycling in any case, it not
 		 * a big problem in practice. --ANK */
 		if (tw->tw_family == AF_INET &&
 		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
@@ -194,7 +194,7 @@ tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
 	/* In window segment, it may be only reset or bare ack. */
 
 	if (th->rst) {
-		/* This is TIME_WAIT assasination, in two flavors.
+		/* This is TIME_WAIT assassination, in two flavors.
 		 * Oh well... nobody has a sufficient solution to this
 		 * protocol bug yet.
 		 */
@@ -551,7 +551,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
 	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
 	 *		    and the incoming segment acknowledges something not yet
-	 *		    sent (the segment carries an unaccaptable ACK) ...
+	 *		    sent (the segment carries an unacceptable ACK) ...
 	 *		    a reset is sent."
 	 *
 	 * Invalid ACK: reset will be sent by listening socket
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -599,7 +599,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
    for TCP options, but includes only bare TCP header.
 
    tp->rx_opt.mss_clamp is mss negotiated at connection setup.
-   It is minumum of user_mss and mss received with SYN.
+   It is minimum of user_mss and mss received with SYN.
    It also does not include TCP options.
 
    tp->pmtu_cookie is last pmtu, seen by this function.
@@ -1171,7 +1171,7 @@ u32 __tcp_select_window(struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
-	/* MSS for the peer's data. Previous verions used mss_clamp
+	/* MSS for the peer's data. Previous versions used mss_clamp
 	 * here. I don't know if the value based on our guesses
 	 * of peer's MSS is better for the performance. It's more correct
 	 * but may be worse for the performance because of rcv_mss
@@ -1361,7 +1361,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
 	int err;
 
 	/* Do not sent more than we queued. 1/4 is reserved for possible
-	 * copying overhead: frgagmentation, tunneling, mangling etc.
+	 * copying overhead: fragmentation, tunneling, mangling etc.
 	 */
 	if (atomic_read(&sk->sk_wmem_alloc) >
 	    min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
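A worked example of that headroom check (all numbers invented):
retransmission is deferred once allocated write memory exceeds the queued
bytes plus a 25% copying reserve, capped by the send buffer:

#include <stdio.h>

int main(void)
{
	int wmem_alloc = 130000;	/* bytes currently allocated */
	int wmem_queued = 100000;	/* bytes sitting in the queue */
	int sndbuf = 262144;		/* socket send buffer limit */

	int limit = wmem_queued + (wmem_queued >> 2);	/* +25% reserve */
	if (limit > sndbuf)
		limit = sndbuf;

	printf("retransmit %s\n",
	       wmem_alloc > limit ? "deferred" : "allowed");	/* deferred */
	return 0;
}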
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -58,7 +58,7 @@ static void tcp_write_err(struct sock *sk)
  * to prevent DoS attacks. It is called when a retransmission timeout
  * or zero probe timeout occurs on orphaned socket.
  *
- * Criterium is still not confirmed experimentally and may change.
+ * Criteria is still not confirmed experimentally and may change.
  * We kill the socket, if:
  * 1. If number of orphaned sockets exceeds an administratively configured
  *    limit.
@@ -132,7 +132,7 @@ static int tcp_write_timeout(struct sock *sk)
    hole detection. :-(
 
    It is place to make it. It is not made. I do not want
-   to make it. It is disguisting. It does not work in any
+   to make it. It is disgusting. It does not work in any
    case. Let me to cite the same draft, which requires for
    us to implement this: