Commit f1722a1b authored by Yuchung Cheng's avatar Yuchung Cheng Committed by David S. Miller

tcp: consolidate congestion control undo functions

Most TCP congestion controls are using identical logic to undo
cwnd except BBR. This patch consolidates these similar functions
to the one used currently by Reno and others.
Suggested-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 4faf7839
...@@ -49,7 +49,6 @@ MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wma ...@@ -49,7 +49,6 @@ MODULE_PARM_DESC(smooth_part, "log(B/(B*Smin))/log(B/(B-1))+B, # of RTT from Wma
struct bictcp { struct bictcp {
u32 cnt; /* increase cwnd by 1 after ACKs */ u32 cnt; /* increase cwnd by 1 after ACKs */
u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 last_max_cwnd; /* last maximum snd_cwnd */
u32 loss_cwnd; /* congestion window at last loss */
u32 last_cwnd; /* the last snd_cwnd */ u32 last_cwnd; /* the last snd_cwnd */
u32 last_time; /* time when updated last_cwnd */ u32 last_time; /* time when updated last_cwnd */
u32 epoch_start; /* beginning of an epoch */ u32 epoch_start; /* beginning of an epoch */
...@@ -72,7 +71,6 @@ static void bictcp_init(struct sock *sk) ...@@ -72,7 +71,6 @@ static void bictcp_init(struct sock *sk)
struct bictcp *ca = inet_csk_ca(sk); struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca); bictcp_reset(ca);
ca->loss_cwnd = 0;
if (initial_ssthresh) if (initial_ssthresh)
tcp_sk(sk)->snd_ssthresh = initial_ssthresh; tcp_sk(sk)->snd_ssthresh = initial_ssthresh;
...@@ -172,22 +170,12 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) ...@@ -172,22 +170,12 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
else else
ca->last_max_cwnd = tp->snd_cwnd; ca->last_max_cwnd = tp->snd_cwnd;
ca->loss_cwnd = tp->snd_cwnd;
if (tp->snd_cwnd <= low_window) if (tp->snd_cwnd <= low_window)
return max(tp->snd_cwnd >> 1U, 2U); return max(tp->snd_cwnd >> 1U, 2U);
else else
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
} }
static u32 bictcp_undo_cwnd(struct sock *sk)
{
const struct tcp_sock *tp = tcp_sk(sk);
const struct bictcp *ca = inet_csk_ca(sk);
return max(tp->snd_cwnd, ca->loss_cwnd);
}
static void bictcp_state(struct sock *sk, u8 new_state) static void bictcp_state(struct sock *sk, u8 new_state)
{ {
if (new_state == TCP_CA_Loss) if (new_state == TCP_CA_Loss)
...@@ -214,7 +202,7 @@ static struct tcp_congestion_ops bictcp __read_mostly = { ...@@ -214,7 +202,7 @@ static struct tcp_congestion_ops bictcp __read_mostly = {
.ssthresh = bictcp_recalc_ssthresh, .ssthresh = bictcp_recalc_ssthresh,
.cong_avoid = bictcp_cong_avoid, .cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state, .set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd, .undo_cwnd = tcp_reno_undo_cwnd,
.pkts_acked = bictcp_acked, .pkts_acked = bictcp_acked,
.owner = THIS_MODULE, .owner = THIS_MODULE,
.name = "bic", .name = "bic",
......
...@@ -85,7 +85,6 @@ struct cdg { ...@@ -85,7 +85,6 @@ struct cdg {
u8 state; u8 state;
u8 delack; u8 delack;
u32 rtt_seq; u32 rtt_seq;
u32 undo_cwnd;
u32 shadow_wnd; u32 shadow_wnd;
u16 backoff_cnt; u16 backoff_cnt;
u16 sample_cnt; u16 sample_cnt;
...@@ -330,8 +329,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk) ...@@ -330,8 +329,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
struct cdg *ca = inet_csk_ca(sk); struct cdg *ca = inet_csk_ca(sk);
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
ca->undo_cwnd = tp->snd_cwnd;
if (ca->state == CDG_BACKOFF) if (ca->state == CDG_BACKOFF)
return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10); return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
...@@ -344,13 +341,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk) ...@@ -344,13 +341,6 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
return max(2U, tp->snd_cwnd >> 1); return max(2U, tp->snd_cwnd >> 1);
} }
static u32 tcp_cdg_undo_cwnd(struct sock *sk)
{
struct cdg *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->undo_cwnd);
}
static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev) static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
{ {
struct cdg *ca = inet_csk_ca(sk); struct cdg *ca = inet_csk_ca(sk);
...@@ -403,7 +393,7 @@ struct tcp_congestion_ops tcp_cdg __read_mostly = { ...@@ -403,7 +393,7 @@ struct tcp_congestion_ops tcp_cdg __read_mostly = {
.cong_avoid = tcp_cdg_cong_avoid, .cong_avoid = tcp_cdg_cong_avoid,
.cwnd_event = tcp_cdg_cwnd_event, .cwnd_event = tcp_cdg_cwnd_event,
.pkts_acked = tcp_cdg_acked, .pkts_acked = tcp_cdg_acked,
.undo_cwnd = tcp_cdg_undo_cwnd, .undo_cwnd = tcp_reno_undo_cwnd,
.ssthresh = tcp_cdg_ssthresh, .ssthresh = tcp_cdg_ssthresh,
.release = tcp_cdg_release, .release = tcp_cdg_release,
.init = tcp_cdg_init, .init = tcp_cdg_init,
......
...@@ -83,7 +83,6 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse ...@@ -83,7 +83,6 @@ MODULE_PARM_DESC(hystart_ack_delta, "spacing between ack's indicating train (mse
struct bictcp { struct bictcp {
u32 cnt; /* increase cwnd by 1 after ACKs */ u32 cnt; /* increase cwnd by 1 after ACKs */
u32 last_max_cwnd; /* last maximum snd_cwnd */ u32 last_max_cwnd; /* last maximum snd_cwnd */
u32 loss_cwnd; /* congestion window at last loss */
u32 last_cwnd; /* the last snd_cwnd */ u32 last_cwnd; /* the last snd_cwnd */
u32 last_time; /* time when updated last_cwnd */ u32 last_time; /* time when updated last_cwnd */
u32 bic_origin_point;/* origin point of bic function */ u32 bic_origin_point;/* origin point of bic function */
...@@ -142,7 +141,6 @@ static void bictcp_init(struct sock *sk) ...@@ -142,7 +141,6 @@ static void bictcp_init(struct sock *sk)
struct bictcp *ca = inet_csk_ca(sk); struct bictcp *ca = inet_csk_ca(sk);
bictcp_reset(ca); bictcp_reset(ca);
ca->loss_cwnd = 0;
if (hystart) if (hystart)
bictcp_hystart_reset(sk); bictcp_hystart_reset(sk);
...@@ -366,18 +364,9 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk) ...@@ -366,18 +364,9 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
else else
ca->last_max_cwnd = tp->snd_cwnd; ca->last_max_cwnd = tp->snd_cwnd;
ca->loss_cwnd = tp->snd_cwnd;
return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U); return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
} }
static u32 bictcp_undo_cwnd(struct sock *sk)
{
struct bictcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static void bictcp_state(struct sock *sk, u8 new_state) static void bictcp_state(struct sock *sk, u8 new_state)
{ {
if (new_state == TCP_CA_Loss) { if (new_state == TCP_CA_Loss) {
...@@ -470,7 +459,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = { ...@@ -470,7 +459,7 @@ static struct tcp_congestion_ops cubictcp __read_mostly = {
.ssthresh = bictcp_recalc_ssthresh, .ssthresh = bictcp_recalc_ssthresh,
.cong_avoid = bictcp_cong_avoid, .cong_avoid = bictcp_cong_avoid,
.set_state = bictcp_state, .set_state = bictcp_state,
.undo_cwnd = bictcp_undo_cwnd, .undo_cwnd = tcp_reno_undo_cwnd,
.cwnd_event = bictcp_cwnd_event, .cwnd_event = bictcp_cwnd_event,
.pkts_acked = bictcp_acked, .pkts_acked = bictcp_acked,
.owner = THIS_MODULE, .owner = THIS_MODULE,
......
...@@ -94,7 +94,6 @@ static const struct hstcp_aimd_val { ...@@ -94,7 +94,6 @@ static const struct hstcp_aimd_val {
struct hstcp { struct hstcp {
u32 ai; u32 ai;
u32 loss_cwnd;
}; };
static void hstcp_init(struct sock *sk) static void hstcp_init(struct sock *sk)
...@@ -153,22 +152,14 @@ static u32 hstcp_ssthresh(struct sock *sk) ...@@ -153,22 +152,14 @@ static u32 hstcp_ssthresh(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct hstcp *ca = inet_csk_ca(sk); struct hstcp *ca = inet_csk_ca(sk);
ca->loss_cwnd = tp->snd_cwnd;
/* Do multiplicative decrease */ /* Do multiplicative decrease */
return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U); return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
} }
static u32 hstcp_cwnd_undo(struct sock *sk)
{
const struct hstcp *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static struct tcp_congestion_ops tcp_highspeed __read_mostly = { static struct tcp_congestion_ops tcp_highspeed __read_mostly = {
.init = hstcp_init, .init = hstcp_init,
.ssthresh = hstcp_ssthresh, .ssthresh = hstcp_ssthresh,
.undo_cwnd = hstcp_cwnd_undo, .undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = hstcp_cong_avoid, .cong_avoid = hstcp_cong_avoid,
.owner = THIS_MODULE, .owner = THIS_MODULE,
......
...@@ -48,7 +48,6 @@ struct illinois { ...@@ -48,7 +48,6 @@ struct illinois {
u32 end_seq; /* right edge of current RTT */ u32 end_seq; /* right edge of current RTT */
u32 alpha; /* Additive increase */ u32 alpha; /* Additive increase */
u32 beta; /* Muliplicative decrease */ u32 beta; /* Muliplicative decrease */
u32 loss_cwnd; /* cwnd on loss */
u16 acked; /* # packets acked by current ACK */ u16 acked; /* # packets acked by current ACK */
u8 rtt_above; /* average rtt has gone above threshold */ u8 rtt_above; /* average rtt has gone above threshold */
u8 rtt_low; /* # of rtts measurements below threshold */ u8 rtt_low; /* # of rtts measurements below threshold */
...@@ -297,18 +296,10 @@ static u32 tcp_illinois_ssthresh(struct sock *sk) ...@@ -297,18 +296,10 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
struct illinois *ca = inet_csk_ca(sk); struct illinois *ca = inet_csk_ca(sk);
ca->loss_cwnd = tp->snd_cwnd;
/* Multiplicative decrease */ /* Multiplicative decrease */
return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U); return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
} }
static u32 tcp_illinois_cwnd_undo(struct sock *sk)
{
const struct illinois *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
/* Extract info for Tcp socket info provided via netlink. */ /* Extract info for Tcp socket info provided via netlink. */
static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
union tcp_cc_info *info) union tcp_cc_info *info)
...@@ -336,7 +327,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr, ...@@ -336,7 +327,7 @@ static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
static struct tcp_congestion_ops tcp_illinois __read_mostly = { static struct tcp_congestion_ops tcp_illinois __read_mostly = {
.init = tcp_illinois_init, .init = tcp_illinois_init,
.ssthresh = tcp_illinois_ssthresh, .ssthresh = tcp_illinois_ssthresh,
.undo_cwnd = tcp_illinois_cwnd_undo, .undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_illinois_cong_avoid, .cong_avoid = tcp_illinois_cong_avoid,
.set_state = tcp_illinois_state, .set_state = tcp_illinois_state,
.get_info = tcp_illinois_info, .get_info = tcp_illinois_info,
......
...@@ -86,7 +86,6 @@ struct tcpnv { ...@@ -86,7 +86,6 @@ struct tcpnv {
* < 0 => less than 1 packet/RTT */ * < 0 => less than 1 packet/RTT */
u8 available8; u8 available8;
u16 available16; u16 available16;
u32 loss_cwnd; /* cwnd at last loss */
u8 nv_allow_cwnd_growth:1, /* whether cwnd can grow */ u8 nv_allow_cwnd_growth:1, /* whether cwnd can grow */
nv_reset:1, /* whether to reset values */ nv_reset:1, /* whether to reset values */
nv_catchup:1; /* whether we are growing because nv_catchup:1; /* whether we are growing because
...@@ -121,7 +120,6 @@ static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk) ...@@ -121,7 +120,6 @@ static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
ca->nv_reset = 0; ca->nv_reset = 0;
ca->loss_cwnd = 0;
ca->nv_no_cong_cnt = 0; ca->nv_no_cong_cnt = 0;
ca->nv_rtt_cnt = 0; ca->nv_rtt_cnt = 0;
ca->nv_last_rtt = 0; ca->nv_last_rtt = 0;
...@@ -177,19 +175,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked) ...@@ -177,19 +175,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
static u32 tcpnv_recalc_ssthresh(struct sock *sk) static u32 tcpnv_recalc_ssthresh(struct sock *sk)
{ {
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct tcpnv *ca = inet_csk_ca(sk);
ca->loss_cwnd = tp->snd_cwnd;
return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U); return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
} }
static u32 tcpnv_undo_cwnd(struct sock *sk)
{
struct tcpnv *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static void tcpnv_state(struct sock *sk, u8 new_state) static void tcpnv_state(struct sock *sk, u8 new_state)
{ {
struct tcpnv *ca = inet_csk_ca(sk); struct tcpnv *ca = inet_csk_ca(sk);
...@@ -446,7 +435,7 @@ static struct tcp_congestion_ops tcpnv __read_mostly = { ...@@ -446,7 +435,7 @@ static struct tcp_congestion_ops tcpnv __read_mostly = {
.ssthresh = tcpnv_recalc_ssthresh, .ssthresh = tcpnv_recalc_ssthresh,
.cong_avoid = tcpnv_cong_avoid, .cong_avoid = tcpnv_cong_avoid,
.set_state = tcpnv_state, .set_state = tcpnv_state,
.undo_cwnd = tcpnv_undo_cwnd, .undo_cwnd = tcp_reno_undo_cwnd,
.pkts_acked = tcpnv_acked, .pkts_acked = tcpnv_acked,
.get_info = tcpnv_get_info, .get_info = tcpnv_get_info,
......
...@@ -15,10 +15,6 @@ ...@@ -15,10 +15,6 @@
#define TCP_SCALABLE_AI_CNT 50U #define TCP_SCALABLE_AI_CNT 50U
#define TCP_SCALABLE_MD_SCALE 3 #define TCP_SCALABLE_MD_SCALE 3
struct scalable {
u32 loss_cwnd;
};
static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
{ {
struct tcp_sock *tp = tcp_sk(sk); struct tcp_sock *tp = tcp_sk(sk);
...@@ -36,23 +32,13 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked) ...@@ -36,23 +32,13 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
static u32 tcp_scalable_ssthresh(struct sock *sk) static u32 tcp_scalable_ssthresh(struct sock *sk)
{ {
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct scalable *ca = inet_csk_ca(sk);
ca->loss_cwnd = tp->snd_cwnd;
return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U); return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
} }
static u32 tcp_scalable_cwnd_undo(struct sock *sk)
{
const struct scalable *ca = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
}
static struct tcp_congestion_ops tcp_scalable __read_mostly = { static struct tcp_congestion_ops tcp_scalable __read_mostly = {
.ssthresh = tcp_scalable_ssthresh, .ssthresh = tcp_scalable_ssthresh,
.undo_cwnd = tcp_scalable_cwnd_undo, .undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_scalable_cong_avoid, .cong_avoid = tcp_scalable_cong_avoid,
.owner = THIS_MODULE, .owner = THIS_MODULE,
......
...@@ -30,7 +30,6 @@ struct veno { ...@@ -30,7 +30,6 @@ struct veno {
u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */ u32 basertt; /* the min of all Veno rtt measurements seen (in usec) */
u32 inc; /* decide whether to increase cwnd */ u32 inc; /* decide whether to increase cwnd */
u32 diff; /* calculate the diff rate */ u32 diff; /* calculate the diff rate */
u32 loss_cwnd; /* cwnd when loss occured */
}; };
/* There are several situations when we must "re-start" Veno: /* There are several situations when we must "re-start" Veno:
...@@ -194,7 +193,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk) ...@@ -194,7 +193,6 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
const struct tcp_sock *tp = tcp_sk(sk); const struct tcp_sock *tp = tcp_sk(sk);
struct veno *veno = inet_csk_ca(sk); struct veno *veno = inet_csk_ca(sk);
veno->loss_cwnd = tp->snd_cwnd;
if (veno->diff < beta) if (veno->diff < beta)
/* in "non-congestive state", cut cwnd by 1/5 */ /* in "non-congestive state", cut cwnd by 1/5 */
return max(tp->snd_cwnd * 4 / 5, 2U); return max(tp->snd_cwnd * 4 / 5, 2U);
...@@ -203,17 +201,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk) ...@@ -203,17 +201,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)
return max(tp->snd_cwnd >> 1U, 2U); return max(tp->snd_cwnd >> 1U, 2U);
} }
static u32 tcp_veno_cwnd_undo(struct sock *sk)
{
const struct veno *veno = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, veno->loss_cwnd);
}
static struct tcp_congestion_ops tcp_veno __read_mostly = { static struct tcp_congestion_ops tcp_veno __read_mostly = {
.init = tcp_veno_init, .init = tcp_veno_init,
.ssthresh = tcp_veno_ssthresh, .ssthresh = tcp_veno_ssthresh,
.undo_cwnd = tcp_veno_cwnd_undo, .undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_veno_cong_avoid, .cong_avoid = tcp_veno_cong_avoid,
.pkts_acked = tcp_veno_pkts_acked, .pkts_acked = tcp_veno_pkts_acked,
.set_state = tcp_veno_state, .set_state = tcp_veno_state,
......
...@@ -37,7 +37,6 @@ struct yeah { ...@@ -37,7 +37,6 @@ struct yeah {
u32 fast_count; u32 fast_count;
u32 pkts_acked; u32 pkts_acked;
u32 loss_cwnd;
}; };
static void tcp_yeah_init(struct sock *sk) static void tcp_yeah_init(struct sock *sk)
...@@ -220,22 +219,14 @@ static u32 tcp_yeah_ssthresh(struct sock *sk) ...@@ -220,22 +219,14 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
yeah->fast_count = 0; yeah->fast_count = 0;
yeah->reno_count = max(yeah->reno_count>>1, 2U); yeah->reno_count = max(yeah->reno_count>>1, 2U);
yeah->loss_cwnd = tp->snd_cwnd;
return max_t(int, tp->snd_cwnd - reduction, 2); return max_t(int, tp->snd_cwnd - reduction, 2);
} }
static u32 tcp_yeah_cwnd_undo(struct sock *sk)
{
const struct yeah *yeah = inet_csk_ca(sk);
return max(tcp_sk(sk)->snd_cwnd, yeah->loss_cwnd);
}
static struct tcp_congestion_ops tcp_yeah __read_mostly = { static struct tcp_congestion_ops tcp_yeah __read_mostly = {
.init = tcp_yeah_init, .init = tcp_yeah_init,
.ssthresh = tcp_yeah_ssthresh, .ssthresh = tcp_yeah_ssthresh,
.undo_cwnd = tcp_yeah_cwnd_undo, .undo_cwnd = tcp_reno_undo_cwnd,
.cong_avoid = tcp_yeah_cong_avoid, .cong_avoid = tcp_yeah_cong_avoid,
.set_state = tcp_vegas_state, .set_state = tcp_vegas_state,
.cwnd_event = tcp_vegas_cwnd_event, .cwnd_event = tcp_vegas_cwnd_event,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment