Commit e26925ec authored by David S. Miller

Merge branch 'tcp-TCP-TS-option-use-1-ms-clock'

Eric Dumazet says:

====================
tcp: TCP TS option use 1 ms clock

The TCP Timestamps option is defined in RFC 7323.

Traditionally on Linux, it has been tied to the internal
'jiffies' variable, because jiffies was a cheap and good-enough
generator.

Unfortunately, some distros use HZ=250 or even HZ=100, leading
to coarse and not very useful TCP timestamps.

For TCP flows in the datacenter, Google has used usec resolution for
more than two years with great success [1]:
RCVBUF autotuning, for example, is more precise.

This series converts tp->tcp_mstamp to a plain u64 value storing
a 1 usec TCP clock.

This choice will allow us to upstream the 1 usec TS option, as
discussed at IETF 97.

Kathleen Nichols [2] and others advocate for 1 ms TS clocks for
network analysis. (1 ms is the finest timestamp clock tick recommended
by RFC 7323.)

[1] https://www.ietf.org/proceedings/97/slides/slides-97-tcpm-tcp-options-for-low-latency-00.pdf
[2] http://netseminar.stanford.edu/seminars/02_02_17.pdf
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 9d4f97f9 9a568de4
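
To illustrate the resolution gain, here is a minimal userspace sketch of the clock derivation the series introduces. This is not kernel code: clock_gettime(CLOCK_MONOTONIC) stands in for the kernel's local_clock(), and the constants mirror the ones added in net/tcp.h below.

/* Userspace sketch; clock_gettime() stands in for local_clock(). */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

#define NSEC_PER_USEC 1000ULL
#define USEC_PER_SEC  1000000ULL
#define TCP_TS_HZ     1000ULL	/* 1 ms TS option clock, as in the series */

static uint64_t tcp_clock_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static uint64_t tcp_clock_us(void)
{
	return tcp_clock_ns() / NSEC_PER_USEC;
}

int main(void)
{
	uint64_t us = tcp_clock_us();

	/* 32-bit TS option value, one tick per ms */
	printf("usec clock : %llu\n", (unsigned long long)us);
	printf("TSval (1ms): %u\n", (uint32_t)(us / (USEC_PER_SEC / TCP_TS_HZ)));
	/* with HZ=100, one jiffy is 10 ms: 10x coarser than the new clock */
	printf("HZ=100 tick: %u\n", (uint32_t)(us / 10000));
	return 0;
}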
include/linux/skbuff.h
@@ -506,66 +506,6 @@ typedef unsigned int sk_buff_data_t;
 typedef unsigned char *sk_buff_data_t;
 #endif
 
-/**
- * struct skb_mstamp - multi resolution time stamps
- * @stamp_us: timestamp in us resolution
- * @stamp_jiffies: timestamp in jiffies
- */
-struct skb_mstamp {
-	union {
-		u64		v64;
-		struct {
-			u32	stamp_us;
-			u32	stamp_jiffies;
-		};
-	};
-};
-
-/**
- * skb_mstamp_get - get current timestamp
- * @cl: place to store timestamps
- */
-static inline void skb_mstamp_get(struct skb_mstamp *cl)
-{
-	u64 val = local_clock();
-
-	do_div(val, NSEC_PER_USEC);
-	cl->stamp_us = (u32)val;
-	cl->stamp_jiffies = (u32)jiffies;
-}
-
-/**
- * skb_mstamp_delta - compute the difference in usec between two skb_mstamp
- * @t1: pointer to newest sample
- * @t0: pointer to oldest sample
- */
-static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
-				      const struct skb_mstamp *t0)
-{
-	s32 delta_us = t1->stamp_us - t0->stamp_us;
-	u32 delta_jiffies = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	/* If delta_us is negative, this might be because interval is too big,
-	 * or local_clock() drift is too big : fallback using jiffies.
-	 */
-	if (delta_us <= 0 ||
-	    delta_jiffies >= (INT_MAX / (USEC_PER_SEC / HZ)))
-		delta_us = jiffies_to_usecs(delta_jiffies);
-
-	return delta_us;
-}
-
-static inline bool skb_mstamp_after(const struct skb_mstamp *t1,
-				    const struct skb_mstamp *t0)
-{
-	s32 diff = t1->stamp_jiffies - t0->stamp_jiffies;
-
-	if (!diff)
-		diff = t1->stamp_us - t0->stamp_us;
-
-	return diff > 0;
-}
-
 /**
  * struct sk_buff - socket buffer
  * @next: Next buffer in list
@@ -646,7 +586,7 @@ struct sk_buff {
 		union {
 			ktime_t		tstamp;
-			struct skb_mstamp skb_mstamp;
+			u64		skb_mstamp;
 		};
 	};
 	struct rb_node	rbnode; /* used in netem & tcp stack */
...
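
With a single u64 usec stamp, the dual-stamp fallback logic removed above becomes unnecessary: the delta helper collapses to one clamped subtraction. A small userspace sketch of the idea, mirroring the tcp_stamp_us_delta() helper added in net/tcp.h below (not the kernel implementation itself):

/* Sketch: delta between two u64 usec stamps, clamped at zero. */
#include <stdint.h>
#include <stdio.h>

static uint32_t stamp_us_delta(uint64_t t1, uint64_t t0)
{
	int64_t delta = (int64_t)(t1 - t0);

	return delta > 0 ? (uint32_t)delta : 0;	/* clamp negative to 0 */
}

int main(void)
{
	printf("%u\n", stamp_us_delta(1000050, 1000000));	/* 50 usec */
	printf("%u\n", stamp_us_delta(1000000, 1000050));	/* clamped: 0 */
	return 0;
}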
include/linux/tcp.h
@@ -123,7 +123,7 @@ struct tcp_request_sock_ops;
 struct tcp_request_sock {
 	struct inet_request_sock 	req;
 	const struct tcp_request_sock_ops *af_specific;
-	struct skb_mstamp		snt_synack; /* first SYNACK sent time */
+	u64				snt_synack; /* first SYNACK sent time */
 	bool				tfo_listener;
 	u32				txhash;
 	u32				rcv_isn;
@@ -211,7 +211,7 @@ struct tcp_sock {
 	/* Information of the most recently (s)acked skb */
 	struct tcp_rack {
-		struct skb_mstamp mstamp; /* (Re)sent time of the skb */
+		u64 mstamp; /* (Re)sent time of the skb */
 		u32 rtt_us;  /* Associated RTT */
 		u32 end_seq; /* Ending TCP sequence of the skb */
 		u8 advanced; /* mstamp advanced since last lost marking */
@@ -240,7 +240,7 @@ struct tcp_sock {
 	u32	tlp_high_seq;	/* snd_nxt at the time of TLP retransmit. */
 
 /* RTT measurement */
-	struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
+	u64	tcp_mstamp;	/* most recent packet received/sent */
 	u32	srtt_us;	/* smoothed round trip time << 3 in usecs */
 	u32	mdev_us;	/* medium deviation */
 	u32	mdev_max_us;	/* maximal mdev for the last rtt period */
@@ -280,8 +280,8 @@ struct tcp_sock {
 	u32	delivered;	/* Total data packets delivered incl. rexmits */
 	u32	lost;		/* Total data packets lost incl. rexmits */
 	u32	app_limited;	/* limited until "delivered" reaches this val */
-	struct skb_mstamp first_tx_mstamp;  /* start of window send phase */
-	struct skb_mstamp delivered_mstamp; /* time we reached "delivered" */
+	u64	first_tx_mstamp;  /* start of window send phase */
+	u64	delivered_mstamp; /* time we reached "delivered" */
 	u32	rate_delivered;    /* saved rate sample: packets delivered */
 	u32	rate_interval_us;  /* saved rate sample: time elapsed */
@@ -335,16 +335,16 @@ struct tcp_sock {
 
 /* Receiver side RTT estimation */
 	struct {
 		u32	rtt_us;
 		u32	seq;
-		struct skb_mstamp time;
+		u64	time;
 	} rcv_rtt_est;
 
 /* Receiver queue space */
 	struct {
 		int	space;
 		u32	seq;
-		struct skb_mstamp time;
+		u64	time;
 	} rcvq_space;
 
 /* TCP-specific MTU probe information. */
...
include/net/tcp.h
@@ -519,7 +519,7 @@ static inline u32 tcp_cookie_time(void)
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 			      u16 *mssp);
 __u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-__u32 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req);
 bool cookie_timestamp_decode(struct tcp_options_received *opt);
 bool cookie_ecn_ok(const struct tcp_options_received *opt,
 		   const struct net *net, const struct dst_entry *dst);
@@ -700,17 +700,61 @@ u32 __tcp_select_window(struct sock *sk);
 void tcp_send_window_probe(struct sock *sk);
 
-/* TCP timestamps are only 32-bits, this causes a slight
- * complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below.  We decided
- * to use only the low 32-bits of jiffies and hide the ugly
- * casts with the following macro.
- */
-#define tcp_time_stamp		((__u32)(jiffies))
+/* TCP uses 32bit jiffies to save some space.
+ * Note that this is different from tcp_time_stamp, which
+ * historically has been the same until linux-4.13.
+ */
+#define tcp_jiffies32 ((u32)jiffies)
+
+/*
+ * Deliver a 32bit value for TCP timestamp option (RFC 7323)
+ * It is no longer tied to jiffies, but to 1 ms clock.
+ * Note: double check if you want to use tcp_jiffies32 instead of this.
+ */
+#define TCP_TS_HZ	1000
+
+static inline u64 tcp_clock_ns(void)
+{
+	return local_clock();
+}
+
+static inline u64 tcp_clock_us(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_USEC);
+}
+
+/* This should only be used in contexts where tp->tcp_mstamp is up to date */
+static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
+{
+	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
+static inline u32 tcp_time_stamp_raw(void)
+{
+	return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+}
+
+/* Refresh 1us clock of a TCP socket,
+ * ensuring monotically increasing values.
+ */
+static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
+{
+	u64 val = tcp_clock_us();
+
+	if (val > tp->tcp_mstamp)
+		tp->tcp_mstamp = val;
+}
+
+static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
+{
+	return max_t(s64, t1 - t0, 0);
+}
 
 static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
 {
-	return skb->skb_mstamp.stamp_jiffies;
+	return div_u64(skb->skb_mstamp, USEC_PER_SEC / TCP_TS_HZ);
 }
 
@@ -775,9 +819,9 @@ struct tcp_skb_cb {
 			/* pkts S/ACKed so far upon tx of skb, incl retrans: */
 			__u32 delivered;
 			/* start of send pipeline phase */
-			struct skb_mstamp first_tx_mstamp;
+			u64 first_tx_mstamp;
 			/* when we reached the "delivered" count */
-			struct skb_mstamp delivered_mstamp;
+			u64 delivered_mstamp;
 		} tx;   /* only used for outgoing skbs */
 		union {
 			struct inet_skb_parm	h4;
@@ -893,7 +937,7 @@ struct ack_sample {
  * A sample is invalid if "delivered" or "interval_us" is negative.
  */
 struct rate_sample {
-	struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
+	u64  prior_mstamp; /* starting timestamp for interval */
 	u32  prior_delivered;	/* tp->delivered at "prior_mstamp" */
 	s32  delivered;		/* number of packets delivered over interval */
 	long interval_us;	/* time for tp->delivered to incr "delivered" */
@@ -1242,7 +1286,7 @@ static inline void tcp_slow_start_after_idle_check(struct sock *sk)
 	if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
 	    ca_ops->cong_control)
 		return;
-	delta = tcp_time_stamp - tp->lsndtime;
+	delta = tcp_jiffies32 - tp->lsndtime;
 	if (delta > inet_csk(sk)->icsk_rto)
 		tcp_cwnd_restart(sk, delta);
 }
@@ -1304,8 +1348,8 @@ static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
 {
 	const struct inet_connection_sock *icsk = &tp->inet_conn;
 
-	return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
-			  tcp_time_stamp - tp->rcv_tstamp);
+	return min_t(u32, tcp_jiffies32 - icsk->icsk_ack.lrcvtime,
+			  tcp_jiffies32 - tp->rcv_tstamp);
 }
 
 static inline int tcp_fin_time(const struct sock *sk)
@@ -1859,7 +1903,7 @@ void tcp_init(void);
 /* tcp_recovery.c */
 extern void tcp_rack_mark_lost(struct sock *sk);
 extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-			     const struct skb_mstamp *xmit_time);
+			     u64 xmit_time);
 extern void tcp_rack_reo_timeout(struct sock *sk);
 
 /*
...
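
The intended usage pattern for the new helpers is refresh-once-then-read: tcp_mstamp_refresh() only moves tp->tcp_mstamp forward, and tcp_time_stamp() derives the 1 ms TSval from the cached stamp with a single divide. A userspace mock of that pattern (the struct and the clock variable are stand-ins, not the kernel types):

/* Sketch of refresh-then-read; clock_us stands in for tcp_clock_us(). */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL
#define TCP_TS_HZ    1000ULL

struct tcp_sock_mock { uint64_t tcp_mstamp; };

static uint64_t clock_us;	/* stand-in for tcp_clock_us() */

static void tcp_mstamp_refresh(struct tcp_sock_mock *tp)
{
	/* only move forward, so cached deltas can never go negative */
	if (clock_us > tp->tcp_mstamp)
		tp->tcp_mstamp = clock_us;
}

static uint32_t tcp_time_stamp(const struct tcp_sock_mock *tp)
{
	return (uint32_t)(tp->tcp_mstamp / (USEC_PER_SEC / TCP_TS_HZ));
}

int main(void)
{
	struct tcp_sock_mock tp = { 0 };

	clock_us = 5000;		/* 5 ms since boot */
	tcp_mstamp_refresh(&tp);
	printf("TSval = %u\n", tcp_time_stamp(&tp));	/* 5 */

	clock_us = 4000;		/* a clock that stepped backwards */
	tcp_mstamp_refresh(&tp);
	printf("TSval = %u\n", tcp_time_stamp(&tp));	/* still 5 */
	return 0;
}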
net/dccp/ccids/ccid2.c
@@ -233,7 +233,7 @@ static void ccid2_hc_tx_packet_sent(struct sock *sk, unsigned int len)
 {
 	struct dccp_sock *dp = dccp_sk(sk);
 	struct ccid2_hc_tx_sock *hc = ccid2_hc_tx_sk(sk);
-	const u32 now = ccid2_time_stamp;
+	const u32 now = ccid2_jiffies32;
 	struct ccid2_seq *next;
 
 	/* slow-start after idle periods (RFC 2581, RFC 2861) */
@@ -466,7 +466,7 @@ static void ccid2_new_ack(struct sock *sk, struct ccid2_seq *seqp,
 	 * The cleanest solution is to not use the ccid2s_sent field at all
 	 * and instead use DCCP timestamps: requires changes in other places.
 	 */
-	ccid2_rtt_estimator(sk, ccid2_time_stamp - seqp->ccid2s_sent);
+	ccid2_rtt_estimator(sk, ccid2_jiffies32 - seqp->ccid2s_sent);
 }
 
 static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
@@ -478,7 +478,7 @@ static void ccid2_congestion_event(struct sock *sk, struct ccid2_seq *seqp)
 		return;
 	}
 
-	hc->tx_last_cong = ccid2_time_stamp;
+	hc->tx_last_cong = ccid2_jiffies32;
 
 	hc->tx_cwnd      = hc->tx_cwnd / 2 ? : 1U;
 	hc->tx_ssthresh  = max(hc->tx_cwnd, 2U);
@@ -731,7 +731,7 @@ static int ccid2_hc_tx_init(struct ccid *ccid, struct sock *sk)
 	hc->tx_rto	 = DCCP_TIMEOUT_INIT;
 	hc->tx_rpdupack  = -1;
-	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_time_stamp;
+	hc->tx_last_cong = hc->tx_lsndtime = hc->tx_cwnd_stamp = ccid2_jiffies32;
 	hc->tx_cwnd_used = 0;
 	setup_timer(&hc->tx_rtotimer, ccid2_hc_tx_rto_expire,
 		    (unsigned long)sk);
...
net/dccp/ccids/ccid2.h
@@ -27,7 +27,7 @@
  * CCID-2 timestamping faces the same issues as TCP timestamping.
  * Hence we reuse/share as much of the code as possible.
  */
-#define ccid2_time_stamp	tcp_time_stamp
+#define ccid2_jiffies32	((u32)jiffies)
 
 /* NUMDUPACK parameter from RFC 4341, p. 6 */
 #define NUMDUPACK	3
...
net/ipv4/syncookies.c
@@ -66,10 +66,10 @@ static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport,
  * Since subsequent timestamps use the normal tcp_time_stamp value, we
  * must make sure that the resulting initial timestamp is <= tcp_time_stamp.
  */
-__u32 cookie_init_timestamp(struct request_sock *req)
+u64 cookie_init_timestamp(struct request_sock *req)
 {
 	struct inet_request_sock *ireq;
-	u32 ts, ts_now = tcp_time_stamp;
+	u32 ts, ts_now = tcp_time_stamp_raw();
 	u32 options = 0;
 
 	ireq = inet_rsk(req);
@@ -88,7 +88,7 @@ __u32 cookie_init_timestamp(struct request_sock *req)
 		ts <<= TSBITS;
 		ts |= options;
 	}
-	return ts;
+	return (u64)ts * (USEC_PER_SEC / TCP_TS_HZ);
 }
 
@@ -343,7 +343,7 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok		= tcp_opt.wscale_ok;
 	ireq->tstamp_ok		= tcp_opt.saw_tstamp;
 	req->ts_recent		= tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64	= 0;
+	treq->snt_synack	= 0;
 	treq->tfo_listener	= false;
 
 	ireq->ir_iif = inet_request_bound_dev_if(sk, skb);
...
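
The cookie path still computes a 32-bit ms-resolution TSval, but now scales it to usec before it lands in skb->skb_mstamp, so tcp_skb_timestamp() dividing by USEC_PER_SEC/TCP_TS_HZ recovers the exact cookie value. A quick arithmetic check of that round trip (illustrative value only):

/* Sketch of the syncookie TSval round trip: ms -> usec -> ms. */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_SEC 1000000ULL
#define TCP_TS_HZ    1000ULL

int main(void)
{
	uint32_t ts = 0x12345678;	/* ms-based cookie TSval */
	uint64_t mstamp = (uint64_t)ts * (USEC_PER_SEC / TCP_TS_HZ);
	uint32_t back = (uint32_t)(mstamp / (USEC_PER_SEC / TCP_TS_HZ));

	printf("round trip ok: %d\n", ts == back);	/* prints 1 */
	return 0;
}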
net/ipv4/tcp.c
@@ -386,7 +386,7 @@ void tcp_init_sock(struct sock *sk)
 
 	icsk->icsk_rto = TCP_TIMEOUT_INIT;
 	tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
-	minmax_reset(&tp->rtt_min, tcp_time_stamp, ~0U);
+	minmax_reset(&tp->rtt_min, tcp_jiffies32, ~0U);
 
 	/* So many TCP implementations out there (incorrectly) count the
 	 * initial SYN frame in their delayed-ACK and congestion control
@@ -2706,7 +2706,7 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
 		if (!tp->repair)
 			err = -EPERM;
 		else
-			tp->tsoffset = val - tcp_time_stamp;
+			tp->tsoffset = val - tcp_time_stamp_raw();
 		break;
 	case TCP_REPAIR_WINDOW:
 		err = tcp_repair_set_window(tp, optval, optlen);
@@ -2757,7 +2757,7 @@ static void tcp_get_info_chrono_stats(const struct tcp_sock *tp,
 	for (i = TCP_CHRONO_BUSY; i < __TCP_CHRONO_MAX; ++i) {
 		stats[i] = tp->chrono_stat[i - 1];
 		if (i == tp->chrono_type)
-			stats[i] += tcp_time_stamp - tp->chrono_start;
+			stats[i] += tcp_jiffies32 - tp->chrono_start;
 		stats[i] *= USEC_PER_SEC / HZ;
 		total += stats[i];
 	}
@@ -2841,7 +2841,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_retrans = tp->retrans_out;
 	info->tcpi_fackets = tp->fackets_out;
 
-	now = tcp_time_stamp;
+	now = tcp_jiffies32;
 	info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
 	info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
 	info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
@@ -3072,7 +3072,7 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
 		break;
 
 	case TCP_TIMESTAMP:
-		val = tcp_time_stamp + tp->tsoffset;
+		val = tcp_time_stamp_raw() + tp->tsoffset;
 		break;
 	case TCP_NOTSENT_LOWAT:
 		val = tp->notsent_lowat;
...
net/ipv4/tcp_bbr.c
@@ -91,7 +91,7 @@ struct bbr {
 	struct minmax bw;	/* Max recent delivery rate in pkts/uS << 24 */
 	u32	rtt_cnt;	    /* count of packet-timed rounds elapsed */
 	u32     next_rtt_delivered; /* scb->tx.delivered at end of round */
-	struct skb_mstamp cycle_mstamp;  /* time of this cycle phase start */
+	u64	cycle_mstamp;	     /* time of this cycle phase start */
 	u32     mode:3,		     /* current bbr_mode in state machine */
 		prev_ca_state:3,     /* CA state on previous ACK */
 		packet_conservation:1,  /* use packet conservation? */
@@ -411,7 +411,7 @@ static bool bbr_is_next_cycle_phase(struct sock *sk,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 	bool is_full_length =
-		skb_mstamp_us_delta(&tp->delivered_mstamp, &bbr->cycle_mstamp) >
+		tcp_stamp_us_delta(tp->delivered_mstamp, bbr->cycle_mstamp) >
 		bbr->min_rtt_us;
 	u32 inflight, bw;
 
@@ -497,7 +497,7 @@ static void bbr_reset_lt_bw_sampling_interval(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 
-	bbr->lt_last_stamp = tp->delivered_mstamp.stamp_jiffies;
+	bbr->lt_last_stamp = div_u64(tp->delivered_mstamp, USEC_PER_MSEC);
 	bbr->lt_last_delivered = tp->delivered;
 	bbr->lt_last_lost = tp->lost;
 	bbr->lt_rtt_cnt = 0;
@@ -551,7 +551,7 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 	struct bbr *bbr = inet_csk_ca(sk);
 	u32 lost, delivered;
 	u64 bw;
-	s32 t;
+	u32 t;
 
 	if (bbr->lt_use_bw) {	/* already using long-term rate, lt_bw? */
 		if (bbr->mode == BBR_PROBE_BW && bbr->round_start &&
@@ -603,15 +603,15 @@ static void bbr_lt_bw_sampling(struct sock *sk, const struct rate_sample *rs)
 		return;
 
 	/* Find average delivery rate in this sampling interval. */
-	t = (s32)(tp->delivered_mstamp.stamp_jiffies - bbr->lt_last_stamp);
-	if (t < 1)
-		return;		/* interval is less than one jiffy, so wait */
-	t = jiffies_to_usecs(t);
-	/* Interval long enough for jiffies_to_usecs() to return a bogus 0? */
-	if (t < 1) {
+	t = div_u64(tp->delivered_mstamp, USEC_PER_MSEC) - bbr->lt_last_stamp;
+	if ((s32)t < 1)
+		return;		/* interval is less than one ms, so wait */
+	/* Check if can multiply without overflow */
+	if (t >= ~0U / USEC_PER_MSEC) {
 		bbr_reset_lt_bw_sampling(sk);  /* interval too long; reset */
 		return;
 	}
+	t *= USEC_PER_MSEC;
 	bw = (u64)delivered * BW_UNIT;
 	do_div(bw, t);
 	bbr_lt_bw_interval_done(sk, bw);
@@ -730,12 +730,12 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 	bool filter_expired;
 
 	/* Track min RTT seen in the min_rtt_win_sec filter window: */
-	filter_expired = after(tcp_time_stamp,
+	filter_expired = after(tcp_jiffies32,
 			       bbr->min_rtt_stamp + bbr_min_rtt_win_sec * HZ);
 	if (rs->rtt_us >= 0 &&
 	    (rs->rtt_us <= bbr->min_rtt_us || filter_expired)) {
 		bbr->min_rtt_us = rs->rtt_us;
-		bbr->min_rtt_stamp = tcp_time_stamp;
+		bbr->min_rtt_stamp = tcp_jiffies32;
 	}
 
 	if (bbr_probe_rtt_mode_ms > 0 && filter_expired &&
@@ -754,7 +754,7 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 		/* Maintain min packets in flight for max(200 ms, 1 round). */
 		if (!bbr->probe_rtt_done_stamp &&
 		    tcp_packets_in_flight(tp) <= bbr_cwnd_min_target) {
-			bbr->probe_rtt_done_stamp = tcp_time_stamp +
+			bbr->probe_rtt_done_stamp = tcp_jiffies32 +
 				msecs_to_jiffies(bbr_probe_rtt_mode_ms);
 			bbr->probe_rtt_round_done = 0;
 			bbr->next_rtt_delivered = tp->delivered;
@@ -762,8 +762,8 @@ static void bbr_update_min_rtt(struct sock *sk, const struct rate_sample *rs)
 			if (bbr->round_start)
 				bbr->probe_rtt_round_done = 1;
 			if (bbr->probe_rtt_round_done &&
-			    after(tcp_time_stamp, bbr->probe_rtt_done_stamp)) {
-				bbr->min_rtt_stamp = tcp_time_stamp;
+			    after(tcp_jiffies32, bbr->probe_rtt_done_stamp)) {
+				bbr->min_rtt_stamp = tcp_jiffies32;
 				bbr->restore_cwnd = 1;  /* snap to prior_cwnd */
 				bbr_reset_mode(sk);
 			}
@@ -810,7 +810,7 @@ static void bbr_init(struct sock *sk)
 	bbr->probe_rtt_done_stamp = 0;
 	bbr->probe_rtt_round_done = 0;
 	bbr->min_rtt_us = tcp_min_rtt(tp);
-	bbr->min_rtt_stamp = tcp_time_stamp;
+	bbr->min_rtt_stamp = tcp_jiffies32;
 
 	minmax_reset(&bbr->bw, bbr->rtt_cnt, 0);  /* init max bw to 0 */
 
@@ -825,7 +825,7 @@ static void bbr_init(struct sock *sk)
 	bbr->idle_restart = 0;
 	bbr->full_bw = 0;
 	bbr->full_bw_cnt = 0;
-	bbr->cycle_mstamp.v64 = 0;
+	bbr->cycle_mstamp = 0;
 	bbr->cycle_idx = 0;
 	bbr_reset_lt_bw_sampling(sk);
 	bbr_reset_startup_mode(sk);
...
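
In the BBR long-term sampling path above, interval timestamps are now milliseconds derived from the usec stamp, and the ms-to-usec conversion is guarded against 32-bit overflow before multiplying. A standalone sketch of that guard, using a hypothetical interval_to_us() helper to isolate the check:

/* Sketch: reject intervals whose ms->usec conversion would overflow u32. */
#include <stdint.h>
#include <stdio.h>

#define USEC_PER_MSEC 1000U

static int interval_to_us(uint32_t t_ms, uint32_t *t_us)
{
	if (t_ms >= ~0U / USEC_PER_MSEC)	/* t_ms * 1000 would overflow */
		return -1;			/* caller resets sampling */
	*t_us = t_ms * USEC_PER_MSEC;
	return 0;
}

int main(void)
{
	uint32_t us;

	printf("%d\n", interval_to_us(250, &us));	/* 0: us = 250000 */
	printf("%d\n", interval_to_us(5000000, &us));	/* -1: too long */
	return 0;
}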
net/ipv4/tcp_bic.c
@@ -84,14 +84,14 @@ static void bictcp_init(struct sock *sk)
 static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
 {
 	if (ca->last_cwnd == cwnd &&
-	    (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
 		return;
 
 	ca->last_cwnd = cwnd;
-	ca->last_time = tcp_time_stamp;
+	ca->last_time = tcp_jiffies32;
 
 	if (ca->epoch_start == 0) /* record the beginning of an epoch */
-		ca->epoch_start = tcp_time_stamp;
+		ca->epoch_start = tcp_jiffies32;
 
 	/* start off normal */
 	if (cwnd <= low_window) {
...
net/ipv4/tcp_cubic.c
@@ -155,7 +155,7 @@ static void bictcp_cwnd_event(struct sock *sk, enum tcp_ca_event event)
 {
 	if (event == CA_EVENT_TX_START) {
 		struct bictcp *ca = inet_csk_ca(sk);
-		u32 now = tcp_time_stamp;
+		u32 now = tcp_jiffies32;
 		s32 delta;
 
 		delta = now - tcp_sk(sk)->lsndtime;
@@ -231,21 +231,21 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 	ca->ack_cnt += acked;	/* count the number of ACKed packets */
 
 	if (ca->last_cwnd == cwnd &&
-	    (s32)(tcp_time_stamp - ca->last_time) <= HZ / 32)
+	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
 		return;
 
 	/* The CUBIC function can update ca->cnt at most once per jiffy.
 	 * On all cwnd reduction events, ca->epoch_start is set to 0,
 	 * which will force a recalculation of ca->cnt.
 	 */
-	if (ca->epoch_start && tcp_time_stamp == ca->last_time)
+	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
 		goto tcp_friendliness;
 
 	ca->last_cwnd = cwnd;
-	ca->last_time = tcp_time_stamp;
+	ca->last_time = tcp_jiffies32;
 
 	if (ca->epoch_start == 0) {
-		ca->epoch_start = tcp_time_stamp;	/* record beginning */
+		ca->epoch_start = tcp_jiffies32;	/* record beginning */
 		ca->ack_cnt = acked;			/* start counting */
 		ca->tcp_cwnd = cwnd;			/* syn with cubic */
@@ -276,7 +276,7 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
 	 * if the cwnd < 1 million packets !!!
 	 */
 
-	t = (s32)(tcp_time_stamp - ca->epoch_start);
+	t = (s32)(tcp_jiffies32 - ca->epoch_start);
 	t += msecs_to_jiffies(ca->delay_min >> 3);
 	/* change the unit from HZ to bictcp_HZ */
 	t <<= BICTCP_HZ;
@@ -448,7 +448,7 @@ static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 		return;
 
 	/* Discard delay samples right after fast recovery */
-	if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ)
+	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
 		return;
 
 	delay = (sample->rtt_us << 3) / USEC_PER_MSEC;
...
net/ipv4/tcp_htcp.c
@@ -104,7 +104,7 @@ static void measure_achieved_throughput(struct sock *sk,
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	const struct tcp_sock *tp = tcp_sk(sk);
 	struct htcp *ca = inet_csk_ca(sk);
-	u32 now = tcp_time_stamp;
+	u32 now = tcp_jiffies32;
 
 	if (icsk->icsk_ca_state == TCP_CA_Open)
 		ca->pkts_acked = sample->pkts_acked;
...
net/ipv4/tcp_input.c (large diff collapsed in this view)
net/ipv4/tcp_ipv4.c
@@ -376,8 +376,9 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 	struct sock *sk;
 	struct sk_buff *skb;
 	struct request_sock *fastopen;
-	__u32 seq, snd_una;
-	__u32 remaining;
+	u32 seq, snd_una;
+	s32 remaining;
+	u32 delta_us;
 	int err;
 	struct net *net = dev_net(icmp_skb->dev);
@@ -483,11 +484,12 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
 		skb = tcp_write_queue_head(sk);
 		BUG_ON(!skb);
 
+		tcp_mstamp_refresh(tp);
+		delta_us = (u32)(tp->tcp_mstamp - skb->skb_mstamp);
 		remaining = icsk->icsk_rto -
-			    min(icsk->icsk_rto,
-				tcp_time_stamp - tcp_skb_timestamp(skb));
+			    usecs_to_jiffies(delta_us);
 
-		if (remaining) {
+		if (remaining > 0) {
 			inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
 						  remaining, TCP_RTO_MAX);
 		} else {
@@ -811,7 +813,7 @@ static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v4_send_ack(sk, skb,
 			tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent,
 			tw->tw_bound_dev_if,
 			tcp_twsk_md5_key(tcptw),
@@ -839,7 +841,7 @@ static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 	tcp_v4_send_ack(sk, skb, seq,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent,
 			0,
 			tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
...
net/ipv4/tcp_lp.c
@@ -37,7 +37,7 @@
 #include <net/tcp.h>
 
 /* resolution of owd */
-#define LP_RESOL       1000
+#define LP_RESOL       TCP_TS_HZ
 
 /**
  * enum tcp_lp_state
@@ -147,9 +147,9 @@ static u32 tcp_lp_remote_hz_estimator(struct sock *sk)
 	    tp->rx_opt.rcv_tsecr == lp->local_ref_time)
 		goto out;
 
-	m = HZ * (tp->rx_opt.rcv_tsval -
-		  lp->remote_ref_time) / (tp->rx_opt.rcv_tsecr -
-					  lp->local_ref_time);
+	m = TCP_TS_HZ *
+	    (tp->rx_opt.rcv_tsval - lp->remote_ref_time) /
+	    (tp->rx_opt.rcv_tsecr - lp->local_ref_time);
 	if (m < 0)
 		m = -m;
@@ -194,7 +194,7 @@ static u32 tcp_lp_owd_calculator(struct sock *sk)
 	if (lp->flag & LP_VALID_RHZ) {
 		owd =
 		    tp->rx_opt.rcv_tsval * (LP_RESOL / lp->remote_hz) -
-		    tp->rx_opt.rcv_tsecr * (LP_RESOL / HZ);
+		    tp->rx_opt.rcv_tsecr * (LP_RESOL / TCP_TS_HZ);
 		if (owd < 0)
 			owd = -owd;
 	}
@@ -264,18 +264,19 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct lp *lp = inet_csk_ca(sk);
+	u32 now = tcp_time_stamp(tp);
 	u32 delta;
 
 	if (sample->rtt_us > 0)
 		tcp_lp_rtt_sample(sk, sample->rtt_us);
 
 	/* calc inference */
-	delta = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
+	delta = now - tp->rx_opt.rcv_tsecr;
 	if ((s32)delta > 0)
 		lp->inference = 3 * delta;
 
 	/* test if within inference */
-	if (lp->last_drop && (tcp_time_stamp - lp->last_drop < lp->inference))
+	if (lp->last_drop && (now - lp->last_drop < lp->inference))
 		lp->flag |= LP_WITHIN_INF;
 	else
 		lp->flag &= ~LP_WITHIN_INF;
@@ -312,7 +313,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 		tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
 
 	/* record this drop time */
-	lp->last_drop = tcp_time_stamp;
+	lp->last_drop = now;
 }
 
 static struct tcp_congestion_ops tcp_lp __read_mostly = {
...
net/ipv4/tcp_metrics.c
@@ -524,7 +524,7 @@ void tcp_init_metrics(struct sock *sk)
 		tp->snd_cwnd = 1;
 	else
 		tp->snd_cwnd = tcp_init_cwnd(tp, dst);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	tp->snd_cwnd_stamp = tcp_jiffies32;
 }
 
 bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst)
...
net/ipv4/tcp_minisocks.c
@@ -445,9 +445,9 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->srtt_us = 0;
 		newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
-		minmax_reset(&newtp->rtt_min, tcp_time_stamp, ~0U);
+		minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
 		newicsk->icsk_rto = TCP_TIMEOUT_INIT;
-		newicsk->icsk_ack.lrcvtime = tcp_time_stamp;
+		newicsk->icsk_ack.lrcvtime = tcp_jiffies32;
 
 		newtp->packets_out = 0;
 		newtp->retrans_out = 0;
@@ -455,7 +455,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fackets_out = 0;
 		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
 		newtp->tlp_high_seq = 0;
-		newtp->lsndtime = treq->snt_synack.stamp_jiffies;
+		newtp->lsndtime = tcp_jiffies32;
 		newsk->sk_txhash = treq->txhash;
 		newtp->last_oow_ack_time = 0;
 		newtp->total_retrans = req->num_retrans;
@@ -526,7 +526,7 @@ struct sock *tcp_create_openreq_child(const struct sock *sk,
 		newtp->fastopen_req = NULL;
 		newtp->fastopen_rsk = NULL;
 		newtp->syn_data_acked = 0;
-		newtp->rack.mstamp.v64 = 0;
+		newtp->rack.mstamp = 0;
 		newtp->rack.advanced = 0;
 
 		__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
...
net/ipv4/tcp_output.c
@@ -151,7 +151,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
 	while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
 		cwnd >>= 1;
 	tp->snd_cwnd = max(cwnd, restart_cwnd);
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	tp->snd_cwnd_stamp = tcp_jiffies32;
 	tp->snd_cwnd_used = 0;
 }
 
@@ -160,7 +160,7 @@ static void tcp_event_data_sent(struct tcp_sock *tp,
 				struct sock *sk)
 {
 	struct inet_connection_sock *icsk = inet_csk(sk);
-	const u32 now = tcp_time_stamp;
+	const u32 now = tcp_jiffies32;
 
 	if (tcp_packets_in_flight(tp) == 0)
 		tcp_ca_event(sk, CA_EVENT_TX_START);
@@ -997,8 +997,8 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
 	BUG_ON(!skb || !tcp_skb_pcount(skb));
 	tp = tcp_sk(sk);
 
+	skb->skb_mstamp = tp->tcp_mstamp;
 	if (clone_it) {
-		skb_mstamp_get(&skb->skb_mstamp);
 		TCP_SKB_CB(skb)->tx.in_flight = TCP_SKB_CB(skb)->end_seq
 			- tp->snd_una;
 		tcp_rate_skb_sent(sk, skb);
@@ -1475,7 +1475,7 @@ void tcp_mtup_init(struct sock *sk)
 	icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, net->ipv4.sysctl_tcp_base_mss);
 	icsk->icsk_mtup.probe_size = 0;
 	if (icsk->icsk_mtup.enabled)
-		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
 }
 EXPORT_SYMBOL(tcp_mtup_init);
 
@@ -1576,7 +1576,7 @@ static void tcp_cwnd_application_limited(struct sock *sk)
 		}
 		tp->snd_cwnd_used = 0;
 	}
-	tp->snd_cwnd_stamp = tcp_time_stamp;
+	tp->snd_cwnd_stamp = tcp_jiffies32;
 }
 
 static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
@@ -1597,14 +1597,14 @@ static void tcp_cwnd_validate(struct sock *sk, bool is_cwnd_limited)
 	if (tcp_is_cwnd_limited(sk)) {
 		/* Network is feed fully. */
 		tp->snd_cwnd_used = 0;
-		tp->snd_cwnd_stamp = tcp_time_stamp;
+		tp->snd_cwnd_stamp = tcp_jiffies32;
 	} else {
 		/* Network starves. */
 		if (tp->packets_out > tp->snd_cwnd_used)
 			tp->snd_cwnd_used = tp->packets_out;
 
 		if (sysctl_tcp_slow_start_after_idle &&
-		    (s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
+		    (s32)(tcp_jiffies32 - tp->snd_cwnd_stamp) >= inet_csk(sk)->icsk_rto &&
 		    !ca_ops->cong_control)
 			tcp_cwnd_application_limited(sk);
@@ -1906,7 +1906,6 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	const struct inet_connection_sock *icsk = inet_csk(sk);
 	u32 age, send_win, cong_win, limit, in_flight;
 	struct tcp_sock *tp = tcp_sk(sk);
-	struct skb_mstamp now;
 	struct sk_buff *head;
 	int win_divisor;
 
@@ -1919,7 +1918,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	/* Avoid bursty behavior by allowing defer
 	 * only if the last write was recent.
 	 */
-	if ((s32)(tcp_time_stamp - tp->lsndtime) > 0)
+	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 0)
 		goto send_now;
 
 	in_flight = tcp_packets_in_flight(tp);
@@ -1962,8 +1961,8 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
 	}
 
 	head = tcp_write_queue_head(sk);
-	skb_mstamp_get(&now);
-	age = skb_mstamp_us_delta(&now, &head->skb_mstamp);
+
+	age = tcp_stamp_us_delta(tp->tcp_mstamp, head->skb_mstamp);
 	/* If next ACK is likely to come too late (half srtt), do not defer */
 	if (age < (tp->srtt_us >> 4))
 		goto send_now;
@@ -1988,7 +1987,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 	s32 delta;
 
 	interval = net->ipv4.sysctl_tcp_probe_interval;
-	delta = tcp_time_stamp - icsk->icsk_mtup.probe_timestamp;
+	delta = tcp_jiffies32 - icsk->icsk_mtup.probe_timestamp;
 	if (unlikely(delta >= interval * HZ)) {
 		int mss = tcp_current_mss(sk);
 
@@ -2000,7 +1999,7 @@ static inline void tcp_mtu_check_reprobe(struct sock *sk)
 		icsk->icsk_mtup.search_low = tcp_mss_to_mtu(sk, mss);
 
 		/* Update probe time stamp */
-		icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+		icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
 	}
 }
 
@@ -2203,7 +2202,7 @@ static bool tcp_small_queue_check(struct sock *sk, const struct sk_buff *skb,
 static void tcp_chrono_set(struct tcp_sock *tp, const enum tcp_chrono new)
 {
-	const u32 now = tcp_time_stamp;
+	const u32 now = tcp_jiffies32;
 
 	if (tp->chrono_type > TCP_CHRONO_UNSPEC)
 		tp->chrono_stat[tp->chrono_type - 1] += now - tp->chrono_start;
@@ -2280,6 +2279,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 	}
 
 	max_segs = tcp_tso_segs(sk, mss_now);
+	tcp_mstamp_refresh(tp);
 	while ((skb = tcp_send_head(sk))) {
 		unsigned int limit;
 
@@ -2291,7 +2291,7 @@ static bool tcp_write_xmit(struct sock *sk, unsigned int mss_now, int nonagle,
 		if (unlikely(tp->repair) && tp->repair_queue == TCP_SEND_QUEUE) {
 			/* "skb_mstamp" is used as a start point for the retransmit timer */
-			skb_mstamp_get(&skb->skb_mstamp);
+			skb->skb_mstamp = tp->tcp_mstamp;
 			goto repair; /* Skip network transmission */
 		}
 
@@ -2418,10 +2418,10 @@ bool tcp_schedule_loss_probe(struct sock *sk)
 	timeout = max_t(u32, timeout, msecs_to_jiffies(10));
 
 	/* If RTO is shorter, just schedule TLP in its place. */
-	tlp_time_stamp = tcp_time_stamp + timeout;
+	tlp_time_stamp = tcp_jiffies32 + timeout;
 	rto_time_stamp = (u32)inet_csk(sk)->icsk_timeout;
 	if ((s32)(tlp_time_stamp - rto_time_stamp) > 0) {
-		s32 delta = rto_time_stamp - tcp_time_stamp;
+		s32 delta = rto_time_stamp - tcp_jiffies32;
 		if (delta > 0)
 			timeout = delta;
 	}
@@ -2879,7 +2879,7 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs)
 		     skb_headroom(skb) >= 0xFFFF)) {
 		struct sk_buff *nskb;
 
-		skb_mstamp_get(&skb->skb_mstamp);
+		skb->skb_mstamp = tp->tcp_mstamp;
 		nskb = __pskb_copy(skb, MAX_TCP_HEADER, GFP_ATOMIC);
 		err = nskb ? tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) :
 			     -ENOBUFS;
@@ -3095,7 +3095,7 @@ void tcp_send_active_reset(struct sock *sk, gfp_t priority)
 	skb_reserve(skb, MAX_TCP_HEADER);
 	tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk),
 			     TCPHDR_ACK | TCPHDR_RST);
-	skb_mstamp_get(&skb->skb_mstamp);
+	tcp_mstamp_refresh(tcp_sk(sk));
 	/* Send it off. */
 	if (tcp_transmit_skb(sk, skb, 0, priority))
 		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPABORTFAILED);
@@ -3191,10 +3191,10 @@ struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
 	memset(&opts, 0, sizeof(opts));
 #ifdef CONFIG_SYN_COOKIES
 	if (unlikely(req->cookie_ts))
-		skb->skb_mstamp.stamp_jiffies = cookie_init_timestamp(req);
+		skb->skb_mstamp = cookie_init_timestamp(req);
 	else
 #endif
-		skb_mstamp_get(&skb->skb_mstamp);
+		skb->skb_mstamp = tcp_clock_us();
 
 #ifdef CONFIG_TCP_MD5SIG
 	rcu_read_lock();
@@ -3324,7 +3324,7 @@ static void tcp_connect_init(struct sock *sk)
 	if (likely(!tp->repair))
 		tp->rcv_nxt = 0;
 	else
-		tp->rcv_tstamp = tcp_time_stamp;
+		tp->rcv_tstamp = tcp_jiffies32;
 	tp->rcv_wup = tp->rcv_nxt;
 	tp->copied_seq = tp->rcv_nxt;
 
@@ -3453,7 +3453,8 @@ int tcp_connect(struct sock *sk)
 		return -ENOBUFS;
 
 	tcp_init_nondata_skb(buff, tp->write_seq++, TCPHDR_SYN);
-	tp->retrans_stamp = tcp_time_stamp;
+	tcp_mstamp_refresh(tp);
+	tp->retrans_stamp = tcp_time_stamp(tp);
 	tcp_connect_queue_skb(sk, buff);
 	tcp_ecn_send_syn(sk, buff);
 
@@ -3572,7 +3573,6 @@ void tcp_send_ack(struct sock *sk)
 	skb_set_tcp_pure_ack(buff);
 
 	/* Send it off, this clears delayed acks for us. */
-	skb_mstamp_get(&buff->skb_mstamp);
 	tcp_transmit_skb(sk, buff, 0, (__force gfp_t)0);
 }
 EXPORT_SYMBOL_GPL(tcp_send_ack);
@@ -3606,15 +3606,16 @@ static int tcp_xmit_probe_skb(struct sock *sk, int urgent, int mib)
 	 * send it.
 	 */
 	tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK);
-	skb_mstamp_get(&skb->skb_mstamp);
 	NET_INC_STATS(sock_net(sk), mib);
 	return tcp_transmit_skb(sk, skb, 0, (__force gfp_t)0);
 }
 
+/* Called from setsockopt( ... TCP_REPAIR ) */
 void tcp_send_window_probe(struct sock *sk)
 {
 	if (sk->sk_state == TCP_ESTABLISHED) {
 		tcp_sk(sk)->snd_wl1 = tcp_sk(sk)->rcv_nxt - 1;
+		tcp_mstamp_refresh(tcp_sk(sk));
 		tcp_xmit_probe_skb(sk, 0, LINUX_MIB_TCPWINPROBE);
 	}
 }
...
net/ipv4/tcp_rate.c
@@ -78,7 +78,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct tcp_skb_cb *scb = TCP_SKB_CB(skb);
 
-	if (!scb->tx.delivered_mstamp.v64)
+	if (!scb->tx.delivered_mstamp)
 		return;
 
 	if (!rs->prior_delivered ||
@@ -89,9 +89,9 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 		rs->is_retrans	     = scb->sacked & TCPCB_RETRANS;
 
 		/* Find the duration of the "send phase" of this window: */
-		rs->interval_us      = skb_mstamp_us_delta(
-						&skb->skb_mstamp,
-						&scb->tx.first_tx_mstamp);
+		rs->interval_us      = tcp_stamp_us_delta(
+						skb->skb_mstamp,
+						scb->tx.first_tx_mstamp);
 
 		/* Record send time of most recently ACKed packet: */
 		tp->first_tx_mstamp  = skb->skb_mstamp;
@@ -101,7 +101,7 @@ void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
 	 * we don't need to reset since it'll be freed soon.
 	 */
 	if (scb->sacked & TCPCB_SACKED_ACKED)
-		scb->tx.delivered_mstamp.v64 = 0;
+		scb->tx.delivered_mstamp = 0;
 }
 
 /* Update the connection delivery information and generate a rate sample. */
@@ -125,7 +125,7 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	rs->acked_sacked = delivered;	/* freshly ACKed or SACKed */
 	rs->losses = lost;		/* freshly marked lost */
 	/* Return an invalid sample if no timing information is available. */
-	if (!rs->prior_mstamp.v64) {
+	if (!rs->prior_mstamp) {
 		rs->delivered = -1;
 		rs->interval_us = -1;
 		return;
@@ -138,8 +138,8 @@ void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
 	 * longer phase.
 	 */
 	snd_us = rs->interval_us;				/* send phase */
-	ack_us = skb_mstamp_us_delta(&tp->tcp_mstamp,
-				     &rs->prior_mstamp); /* ack phase */
+	ack_us = tcp_stamp_us_delta(tp->tcp_mstamp,
+				    rs->prior_mstamp); /* ack phase */
 	rs->interval_us = max(snd_us, ack_us);
 
 	/* Normally we expect interval_us >= min-rtt.
...
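
tcp_rate_gen() keeps taking the longer of the send phase and the ack phase as the sample interval, so a short burst of ACKs cannot inflate the computed delivery rate; only the delta helper changed. A toy computation showing that choice (illustrative numbers, not kernel code):

/* Sketch: interval = max(send phase, ack phase), as in tcp_rate_gen(). */
#include <stdint.h>
#include <stdio.h>

static uint32_t us_delta(uint64_t t1, uint64_t t0)
{
	return t1 > t0 ? (uint32_t)(t1 - t0) : 0;
}

int main(void)
{
	uint64_t first_tx = 1000, last_tx = 1800;	/* send phase: 800 us */
	uint64_t prior_ack = 1500, now = 1900;		/* ack phase: 400 us */
	uint32_t snd_us = us_delta(last_tx, first_tx);
	uint32_t ack_us = us_delta(now, prior_ack);
	uint32_t interval_us = snd_us > ack_us ? snd_us : ack_us;

	printf("interval_us = %u\n", interval_us);	/* 800 */
	return 0;
}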
@@ -17,12 +17,9 @@ static void tcp_rack_mark_skb_lost(struct sock *sk, struct sk_buff *skb)
 	}
 }
-static bool tcp_rack_sent_after(const struct skb_mstamp *t1,
-				const struct skb_mstamp *t2,
-				u32 seq1, u32 seq2)
+static bool tcp_rack_sent_after(u64 t1, u64 t2, u32 seq1, u32 seq2)
 {
-	return skb_mstamp_after(t1, t2) ||
-	       (t1->v64 == t2->v64 && after(seq1, seq2));
+	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
 }
 /* RACK loss detection (IETF draft draft-ietf-tcpm-rack-01):
@@ -72,14 +69,14 @@ static void tcp_rack_detect_loss(struct sock *sk, u32 *reo_timeout)
 		    scb->sacked & TCPCB_SACKED_ACKED)
 			continue;
-		if (tcp_rack_sent_after(&tp->rack.mstamp, &skb->skb_mstamp,
+		if (tcp_rack_sent_after(tp->rack.mstamp, skb->skb_mstamp,
 					tp->rack.end_seq, scb->end_seq)) {
 			/* Step 3 in draft-cheng-tcpm-rack-00.txt:
 			 * A packet is lost if its elapsed time is beyond
 			 * the recent RTT plus the reordering window.
 			 */
-			u32 elapsed = skb_mstamp_us_delta(&tp->tcp_mstamp,
-							  &skb->skb_mstamp);
+			u32 elapsed = tcp_stamp_us_delta(tp->tcp_mstamp,
+							 skb->skb_mstamp);
 			s32 remaining = tp->rack.rtt_us + reo_wnd - elapsed;
 			if (remaining < 0) {
@@ -127,16 +124,16 @@ void tcp_rack_mark_lost(struct sock *sk)
  * draft-cheng-tcpm-rack-00.txt
  */
 void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
-		      const struct skb_mstamp *xmit_time)
+		      u64 xmit_time)
 {
 	u32 rtt_us;
-	if (tp->rack.mstamp.v64 &&
-	    !tcp_rack_sent_after(xmit_time, &tp->rack.mstamp,
+	if (tp->rack.mstamp &&
+	    !tcp_rack_sent_after(xmit_time, tp->rack.mstamp,
 				 end_seq, tp->rack.end_seq))
 		return;
-	rtt_us = skb_mstamp_us_delta(&tp->tcp_mstamp, xmit_time);
+	rtt_us = tcp_stamp_us_delta(tp->tcp_mstamp, xmit_time);
 	if (sacked & TCPCB_RETRANS) {
 		/* If the sacked packet was retransmitted, it's ambiguous
 		 * whether the retransmission or the original (or the prior
@@ -152,7 +149,7 @@ void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
 		return;
 	}
 	tp->rack.rtt_us = rtt_us;
-	tp->rack.mstamp = *xmit_time;
+	tp->rack.mstamp = xmit_time;
 	tp->rack.end_seq = end_seq;
 	tp->rack.advanced = 1;
 }
@@ -166,7 +163,6 @@ void tcp_rack_reo_timeout(struct sock *sk)
 	u32 timeout, prior_inflight;
 	prior_inflight = tcp_packets_in_flight(tp);
-	skb_mstamp_get(&tp->tcp_mstamp);
 	tcp_rack_detect_loss(sk, &timeout);
 	if (prior_inflight != tcp_packets_in_flight(tp)) {
 		if (inet_csk(sk)->icsk_ca_state != TCP_CA_Recovery) {
......
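With u64 stamps, RACK's "sent after" test becomes a plain integer compare, with sequence order breaking ties for packets transmitted within the same microsecond. A self-contained user-space sketch of the semantics (after() here mirrors the kernel's wrap-safe sequence compare; names otherwise match the hunk above):

#include <stdbool.h>
#include <stdint.h>

/* Wrap-safe "seq1 is newer than seq2" compare, as in the kernel. */
static bool after(uint32_t seq1, uint32_t seq2)
{
	return (int32_t)(seq2 - seq1) < 0;
}

/* "t1/seq1 was sent after t2/seq2": compare the 1 usec stamps first,
 * fall back to sequence order on a tie.
 */
static bool tcp_rack_sent_after(uint64_t t1, uint64_t t2,
				uint32_t seq1, uint32_t seq2)
{
	return t1 > t2 || (t1 == t2 && after(seq1, seq2));
}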
@@ -63,7 +63,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 	/* If peer does not open window for long time, or did not transmit
 	 * anything for long time, penalize it. */
-	if ((s32)(tcp_time_stamp - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
+	if ((s32)(tcp_jiffies32 - tp->lsndtime) > 2*TCP_RTO_MAX || !do_reset)
 		shift++;
 	/* If some dubious ICMP arrived, penalize even more. */
@@ -73,7 +73,7 @@ static int tcp_out_of_resources(struct sock *sk, bool do_reset)
 	if (tcp_check_oom(sk, shift)) {
 		/* Catch exceptional cases, when connection requires reset.
 		 *      1. Last segment was sent recently. */
-		if ((s32)(tcp_time_stamp - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
+		if ((s32)(tcp_jiffies32 - tp->lsndtime) <= TCP_TIMEWAIT_LEN ||
 		    /*  2. Window is closed. */
 		    (!tp->snd_wnd && !tp->packets_out))
 			do_reset = true;
@@ -115,7 +115,7 @@ static void tcp_mtu_probing(struct inet_connection_sock *icsk, struct sock *sk)
 	if (net->ipv4.sysctl_tcp_mtu_probing) {
 		if (!icsk->icsk_mtup.enabled) {
 			icsk->icsk_mtup.enabled = 1;
-			icsk->icsk_mtup.probe_timestamp = tcp_time_stamp;
+			icsk->icsk_mtup.probe_timestamp = tcp_jiffies32;
 			tcp_sync_mss(sk, icsk->icsk_pmtu_cookie);
 		} else {
 			struct net *net = sock_net(sk);
@@ -153,8 +153,8 @@ static bool retransmits_timed_out(struct sock *sk,
 				  unsigned int timeout,
 				  bool syn_set)
 {
-	unsigned int linear_backoff_thresh, start_ts;
 	unsigned int rto_base = syn_set ? TCP_TIMEOUT_INIT : TCP_RTO_MIN;
+	unsigned int linear_backoff_thresh, start_ts;
 	if (!inet_csk(sk)->icsk_retransmits)
 		return false;
@@ -172,7 +172,7 @@ static bool retransmits_timed_out(struct sock *sk,
 		timeout = ((2 << linear_backoff_thresh) - 1) * rto_base +
 			  (boundary - linear_backoff_thresh) * TCP_RTO_MAX;
 	}
-	return (tcp_time_stamp - start_ts) >= timeout;
+	return (tcp_time_stamp(tcp_sk(sk)) - start_ts) >= jiffies_to_msecs(timeout);
 }
 /* A write timeout has occurred. Process the after effects. */
@@ -339,9 +339,9 @@ static void tcp_probe_timer(struct sock *sk)
 	 */
 	start_ts = tcp_skb_timestamp(tcp_send_head(sk));
 	if (!start_ts)
-		skb_mstamp_get(&tcp_send_head(sk)->skb_mstamp);
+		tcp_send_head(sk)->skb_mstamp = tp->tcp_mstamp;
 	else if (icsk->icsk_user_timeout &&
-		 (s32)(tcp_time_stamp - start_ts) > icsk->icsk_user_timeout)
+		 (s32)(tcp_time_stamp(tp) - start_ts) > icsk->icsk_user_timeout)
 		goto abort;
 	max_probes = sock_net(sk)->ipv4.sysctl_tcp_retries2;
@@ -451,7 +451,7 @@ void tcp_retransmit_timer(struct sock *sk)
 			    tp->snd_una, tp->snd_nxt);
 		}
 #endif
-	if (tcp_time_stamp - tp->rcv_tstamp > TCP_RTO_MAX) {
+	if (tcp_jiffies32 - tp->rcv_tstamp > TCP_RTO_MAX) {
 		tcp_write_err(sk);
 		goto out;
 	}
@@ -561,6 +561,7 @@ void tcp_write_timer_handler(struct sock *sk)
 		goto out;
 	}
+	tcp_mstamp_refresh(tcp_sk(sk));
 	event = icsk->icsk_pending;
 	switch (event) {
......
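The timer code now mixes two clocks: tcp_jiffies32 for coarse RTO-style deadlines, and the 1 ms TS clock (derived from tp->tcp_mstamp) wherever a value is compared against TS-option or millisecond quantities, which is why retransmits_timed_out() converts its jiffies budget with jiffies_to_msecs(). A sketch of the helpers this relies on, following the cover letter's design rather than a verbatim copy of include/net/tcp.h:

#define TCP_TS_HZ	1000			/* TS option ticks at 1 ms */
#define tcp_jiffies32	((u32)jiffies)		/* cheap coarse clock */

/* tp->tcp_mstamp caches local_clock() scaled to usec; it is refreshed
 * once per event, e.g. at the top of the write timer handler above.
 */
static inline void tcp_mstamp_refresh(struct tcp_sock *tp)
{
	tp->tcp_mstamp = div_u64(local_clock(), NSEC_PER_USEC);
}

/* TS values derive from the cached usec stamp by dividing down to ms. */
static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
{
	return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}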
@@ -68,7 +68,7 @@ static void tcp_westwood_init(struct sock *sk)
 	w->cumul_ack = 0;
 	w->reset_rtt_min = 1;
 	w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
-	w->rtt_win_sx = tcp_time_stamp;
+	w->rtt_win_sx = tcp_jiffies32;
 	w->snd_una = tcp_sk(sk)->snd_una;
 	w->first_ack = 1;
 }
@@ -116,7 +116,7 @@ static void tcp_westwood_pkts_acked(struct sock *sk,
 static void westwood_update_window(struct sock *sk)
 {
 	struct westwood *w = inet_csk_ca(sk);
-	s32 delta = tcp_time_stamp - w->rtt_win_sx;
+	s32 delta = tcp_jiffies32 - w->rtt_win_sx;
 	/* Initialize w->snd_una with the first acked sequence number in order
 	 * to fix mismatch between tp->snd_una and w->snd_una for the first
@@ -140,7 +140,7 @@ static void westwood_update_window(struct sock *sk)
 		westwood_filter(w, delta);
 		w->bk = 0;
-		w->rtt_win_sx = tcp_time_stamp;
+		w->rtt_win_sx = tcp_jiffies32;
 	}
 }
......
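Westwood only needs to know when at least one RTT's worth of time has elapsed since its bandwidth-sampling window opened, so the coarse tcp_jiffies32 clock is sufficient and cheaper than the usec clock. A user-space sketch of the wrap-safe window test implied by the s32 delta idiom above (function name hypothetical):

#include <stdbool.h>
#include <stdint.h>

/* Signed-delta compare: correct across u32 wraparound as long as the
 * real elapsed time stays under ~2^31 jiffies.
 */
static bool window_elapsed(uint32_t now_jiffies, uint32_t win_start,
			   uint32_t rtt_jiffies)
{
	return (int32_t)(now_jiffies - win_start) > (int32_t)rtt_jiffies;
}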
@@ -211,7 +211,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
 	ireq->wscale_ok = tcp_opt.wscale_ok;
 	ireq->tstamp_ok = tcp_opt.saw_tstamp;
 	req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0;
-	treq->snt_synack.v64 = 0;
+	treq->snt_synack = 0;
 	treq->rcv_isn = ntohl(th->seq) - 1;
 	treq->snt_isn = cookie;
 	treq->ts_off = 0;
......
@@ -949,7 +949,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
 	tcp_v6_send_ack(sk, skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
 			tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
-			tcp_time_stamp + tcptw->tw_ts_offset,
+			tcp_time_stamp_raw() + tcptw->tw_ts_offset,
 			tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
 			tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
@@ -971,7 +971,7 @@ static void tcp_v6_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
 			tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
 			tcp_rsk(req)->rcv_nxt,
 			req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
-			tcp_time_stamp + tcp_rsk(req)->ts_off,
+			tcp_time_stamp_raw() + tcp_rsk(req)->ts_off,
 			req->ts_recent, sk->sk_bound_dev_if,
 			tcp_v6_md5_do_lookup(sk, &ipv6_hdr(skb)->daddr),
 			0, 0);
......
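Timewait and request sockets carry no full tcp_sock, hence no cached tp->tcp_mstamp, so these paths read the 1 ms TS clock directly. A sketch of what tcp_time_stamp_raw() amounts to, assuming the same clock source as tp->tcp_mstamp (check include/net/tcp.h for the exact form):

/* Read local_clock() on the spot and scale straight from ns to the
 * 1 ms TS tick (TCP_TS_HZ == 1000).
 */
static inline u32 tcp_time_stamp_raw(void)
{
	return div_u64(local_clock(), NSEC_PER_SEC / TCP_TS_HZ);
}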
@@ -152,7 +152,7 @@ void synproxy_init_timestamp_cookie(const struct xt_synproxy_info *info,
 					    struct synproxy_options *opts)
 {
 	opts->tsecr = opts->tsval;
-	opts->tsval = tcp_time_stamp_raw() & ~0x3f;
+	opts->tsval = tcp_time_stamp_raw() & ~0x3f;
 	if (opts->options & XT_SYNPROXY_OPT_WSCALE) {
 		opts->tsval |= opts->wscale;
......
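synproxy reclaims the low 6 bits of the tsval it advertises as cookie storage for the peer's options, which is why they are cleared before the flags are OR-ed back in; with the 1 ms clock this costs 64 ms of timestamp granularity. A user-space sketch of the packing; only the wscale bits are visible in the hunk above, so the SACK and ECN bit positions here are this sketch's reading of the rest of synproxy_init_timestamp_cookie(), not something shown in the diff:

#include <stdint.h>

/* Assumed layout: [3:0] wscale (0xf when absent), [4] SACK ok, [5] ECN. */
static uint32_t synproxy_pack_tsval(uint32_t ts_ms, int wscale,
				    int sack_ok, int ecn)
{
	uint32_t tsval = ts_ms & ~0x3fu;	/* free the low 6 bits */

	tsval |= (wscale >= 0) ? ((uint32_t)wscale & 0xf) : 0xf;
	if (sack_ok)
		tsval |= 1u << 4;
	if (ecn)
		tsval |= 1u << 5;
	return tsval;
}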