tcp: add accessors to read/set tp->snd_cwnd
We had various bugs over the years with code breaking the assumption
that tp->snd_cwnd is greater than zero.

Lately, syzbot reported the WARN_ON_ONCE(!tp->prior_cwnd) added in commit
8b8a321ff72c ("tcp: fix zero cwnd in tcp_cwnd_reduction") can trigger,
and without a repro we would have to spend considerable time finding
the bug.

Instead of complaining too late, we want to catch where
and when tp->snd_cwnd is set to an illegal value.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Suggested-by: Yuchung Cheng <ycheng@google.com>
Cc: Neal Cardwell <ncardwell@google.com>
Acked-by: Yuchung Cheng <ycheng@google.com>
Link: https://lore.kernel.org/r/20220405233538.947344-1-eric.dumazet@gmail.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
Signed-off-by: Juhyung Park <qkrwngud825@gmail.com>
parent 33eb09a194
commit c8a587ff65
27 changed files with 207 additions and 191 deletions
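The change is mechanical: every read of tp->snd_cwnd becomes tcp_snd_cwnd(tp) and every write becomes tcp_snd_cwnd_set(tp, val), so the WARN_ON_ONCE() inside the setter fires at the exact call site that computes an illegal window, instead of much later when some other check trips. A minimal user-space sketch of the same accessor pattern (the struct below and the use of assert() are simplified stand-ins for the kernel's tcp_sock and WARN_ON_ONCE(), not kernel code):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Simplified stand-in for the kernel's struct tcp_sock. */
	struct tcp_sock {
		uint32_t snd_cwnd;	/* sending congestion window, in packets */
	};

	/* Read accessor: trivial today, but gives a single choke point. */
	static inline uint32_t tcp_snd_cwnd(const struct tcp_sock *tp)
	{
		return tp->snd_cwnd;
	}

	/* Write accessor: flags an illegal window at the offending call site.
	 * assert() stands in for the kernel's WARN_ON_ONCE(). The (int) cast
	 * also catches a huge u32 produced by an unsigned underflow, since a
	 * value with the top bit set reads as negative.
	 */
	static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, uint32_t val)
	{
		assert((int32_t)val > 0);
		tp->snd_cwnd = val;
	}

	int main(void)
	{
		struct tcp_sock tp = { .snd_cwnd = 10 };

		/* old style: tp.snd_cwnd++;  converted style: */
		tcp_snd_cwnd_set(&tp, tcp_snd_cwnd(&tp) + 1);
		printf("cwnd = %u\n", tcp_snd_cwnd(&tp));	/* prints: cwnd = 11 */
		return 0;
	}

The kernel setter only warns and still stores the value, so live connections keep working while the backtrace identifies the buggy caller.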
@@ -1260,9 +1260,20 @@ static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
 
 #define TCP_INFINITE_SSTHRESH	0x7fffffff
 
+static inline u32 tcp_snd_cwnd(const struct tcp_sock *tp)
+{
+	return tp->snd_cwnd;
+}
+
+static inline void tcp_snd_cwnd_set(struct tcp_sock *tp, u32 val)
+{
+	WARN_ON_ONCE((int)val <= 0);
+	tp->snd_cwnd = val;
+}
+
 static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
 {
-	return tp->snd_cwnd < tp->snd_ssthresh;
+	return tcp_snd_cwnd(tp) < tp->snd_ssthresh;
 }
 
 static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
@@ -1288,8 +1299,8 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 		return tp->snd_ssthresh;
 	else
 		return max(tp->snd_ssthresh,
-			   ((tp->snd_cwnd >> 1) +
-			    (tp->snd_cwnd >> 2)));
+			   ((tcp_snd_cwnd(tp) >> 1) +
+			    (tcp_snd_cwnd(tp) >> 2)));
 }
 
 /* Use define here intentionally to get WARN_ON location shown at the caller */
@@ -1334,7 +1345,7 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
 
 	/* If in slow start, ensure cwnd grows to twice what was ACKed. */
 	if (tcp_in_slow_start(tp))
-		return tp->snd_cwnd < 2 * tp->max_packets_out;
+		return tcp_snd_cwnd(tp) < 2 * tp->max_packets_out;
 
 	return false;
 }

@@ -268,7 +268,7 @@ TRACE_EVENT(tcp_probe,
 		__entry->data_len = skb->len - __tcp_hdrlen(th);
 		__entry->snd_nxt = tp->snd_nxt;
 		__entry->snd_una = tp->snd_una;
-		__entry->snd_cwnd = tp->snd_cwnd;
+		__entry->snd_cwnd = tcp_snd_cwnd(tp);
 		__entry->snd_wnd = tp->snd_wnd;
 		__entry->rcv_wnd = tp->rcv_wnd;
 		__entry->ssthresh = tcp_current_ssthresh(sk);

@@ -4883,7 +4883,7 @@ static int _bpf_setsockopt(struct sock *sk, int level, int optname,
 			if (val <= 0 || tp->data_segs_out > tp->syn_data)
 				ret = -EINVAL;
 			else
-				tp->snd_cwnd = val;
+				tcp_snd_cwnd_set(tp, val);
 			break;
 		case TCP_BPF_SNDCWND_CLAMP:
 			if (val <= 0) {

@@ -433,7 +433,7 @@ void tcp_init_sock(struct sock *sk)
 	 * algorithms that we must have the following bandaid to talk
 	 * efficiently to them. -DaveM
 	 */
-	tp->snd_cwnd = TCP_INIT_CWND;
+	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 
 	/* There's a bubble in the pipe until at least the first ACK. */
 	tp->app_limited = ~0U;
@@ -2819,7 +2819,7 @@ int tcp_disconnect(struct sock *sk, int flags)
 	icsk->icsk_rto_min = TCP_RTO_MIN;
 	icsk->icsk_delack_max = TCP_DELACK_MAX;
 	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
-	tp->snd_cwnd = TCP_INIT_CWND;
+	tcp_snd_cwnd_set(tp, TCP_INIT_CWND);
 	tp->snd_cwnd_cnt = 0;
 	tp->is_cwnd_limited = 0;
 	tp->max_packets_out = 0;
@@ -3533,7 +3533,7 @@ void tcp_get_info(struct sock *sk, struct tcp_info *info)
 	info->tcpi_max_pacing_rate = rate64;
 
 	info->tcpi_reordering = tp->reordering;
-	info->tcpi_snd_cwnd = tp->snd_cwnd;
+	info->tcpi_snd_cwnd = tcp_snd_cwnd(tp);
 
 	if (info->tcpi_state == TCP_LISTEN) {
 		/* listeners aliased fields :
@@ -3691,7 +3691,7 @@ struct sk_buff *tcp_get_timestamping_opt_stats(const struct sock *sk,
 	rate64 = tcp_compute_delivery_rate(tp);
 	nla_put_u64_64bit(stats, TCP_NLA_DELIVERY_RATE, rate64, TCP_NLA_PAD);
 
-	nla_put_u32(stats, TCP_NLA_SND_CWND, tp->snd_cwnd);
+	nla_put_u32(stats, TCP_NLA_SND_CWND, tcp_snd_cwnd(tp));
 	nla_put_u32(stats, TCP_NLA_REORDERING, tp->reordering);
 	nla_put_u32(stats, TCP_NLA_MIN_RTT, tcp_min_rtt(tp));
 

@@ -274,7 +274,7 @@ static void bbr_init_pacing_rate_from_rtt(struct sock *sk)
 	} else {			 /* no RTT sample yet */
 		rtt_us = USEC_PER_MSEC;	 /* use nominal default RTT */
 	}
-	bw = (u64)tp->snd_cwnd * BW_UNIT;
+	bw = (u64)tcp_snd_cwnd(tp) * BW_UNIT;
 	do_div(bw, rtt_us);
 	sk->sk_pacing_rate = bbr_bw_to_pacing_rate(sk, bw, bbr_high_gain);
 }
@@ -321,9 +321,9 @@ static void bbr_save_cwnd(struct sock *sk)
 	struct bbr *bbr = inet_csk_ca(sk);
 
 	if (bbr->prev_ca_state < TCP_CA_Recovery && bbr->mode != BBR_PROBE_RTT)
-		bbr->prior_cwnd = tp->snd_cwnd;  /* this cwnd is good enough */
+		bbr->prior_cwnd = tcp_snd_cwnd(tp);  /* this cwnd is good enough */
 	else  /* loss recovery or BBR_PROBE_RTT have temporarily cut cwnd */
-		bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);
+		bbr->prior_cwnd = max(bbr->prior_cwnd, tcp_snd_cwnd(tp));
 }
 
 static void bbr_cwnd_event(struct sock *sk, enum tcp_ca_event event)
@@ -480,7 +480,7 @@ static bool bbr_set_cwnd_to_recover_or_restore(
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
 	u8 prev_state = bbr->prev_ca_state, state = inet_csk(sk)->icsk_ca_state;
-	u32 cwnd = tp->snd_cwnd;
+	u32 cwnd = tcp_snd_cwnd(tp);
 
 	/* An ACK for P pkts should release at most 2*P packets. We do this
 	 * in two steps. First, here we deduct the number of lost packets.
@@ -518,7 +518,7 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct bbr *bbr = inet_csk_ca(sk);
-	u32 cwnd = tp->snd_cwnd, target_cwnd = 0;
+	u32 cwnd = tcp_snd_cwnd(tp), target_cwnd = 0;
 
 	if (!acked)
 		goto done;  /* no packet fully ACKed; just apply caps */
@@ -542,9 +542,9 @@ static void bbr_set_cwnd(struct sock *sk, const struct rate_sample *rs,
 		cwnd = max(cwnd, bbr_cwnd_min_target);
 
 done:
-	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);	/* apply global cap */
+	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));	/* apply global cap */
 	if (bbr->mode == BBR_PROBE_RTT)  /* drain queue, refresh min_rtt */
-		tp->snd_cwnd = min(tp->snd_cwnd, bbr_cwnd_min_target);
+		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), bbr_cwnd_min_target));
 }
 
 /* End cycle phase if it's time and/or we hit the phase's in-flight target. */
@@ -854,7 +854,7 @@ static void bbr_update_ack_aggregation(struct sock *sk,
 	bbr->ack_epoch_acked = min_t(u32, 0xFFFFF,
 				     bbr->ack_epoch_acked + rs->acked_sacked);
 	extra_acked = bbr->ack_epoch_acked - expected_acked;
-	extra_acked = min(extra_acked, tp->snd_cwnd);
+	extra_acked = min(extra_acked, tcp_snd_cwnd(tp));
 	if (extra_acked > bbr->extra_acked[bbr->extra_acked_win_idx])
 		bbr->extra_acked[bbr->extra_acked_win_idx] = extra_acked;
 }
@@ -912,7 +912,7 @@ static void bbr_check_probe_rtt_done(struct sock *sk)
 		return;
 
 	bbr->min_rtt_stamp = tcp_jiffies32;  /* wait a while until PROBE_RTT */
-	tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);
+	tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp), bbr->prior_cwnd));
 	bbr_reset_mode(sk);
 }
 
@@ -1091,7 +1091,7 @@ static u32 bbr_undo_cwnd(struct sock *sk)
 	bbr->full_bw = 0;   /* spurious slow-down; reset full pipe detection */
 	bbr->full_bw_cnt = 0;
 	bbr_reset_lt_bw_sampling(sk);
-	return tcp_sk(sk)->snd_cwnd;
+	return tcp_snd_cwnd(tcp_sk(sk));
 }
 
 /* Entering loss recovery, so save cwnd for when we exit or undo recovery. */

@@ -150,7 +150,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	bictcp_update(ca, tp->snd_cwnd);
+	bictcp_update(ca, tcp_snd_cwnd(tp));
 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
@@ -166,16 +166,16 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
 	ca->epoch_start = 0;	/* end of epoch */
 
 	/* Wmax and fast convergence */
-	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
-		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
 			/ (2 * BICTCP_BETA_SCALE);
 	else
-		ca->last_max_cwnd = tp->snd_cwnd;
+		ca->last_max_cwnd = tcp_snd_cwnd(tp);
 
-	if (tp->snd_cwnd <= low_window)
-		return max(tp->snd_cwnd >> 1U, 2U);
+	if (tcp_snd_cwnd(tp) <= low_window)
+		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
 	else
-		return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+		return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)

@@ -161,8 +161,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 					      LINUX_MIB_TCPHYSTARTTRAINDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTTRAINCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 				return;
 			}
 		}
@@ -180,8 +180,8 @@ static void tcp_cdg_hystart_update(struct sock *sk)
 					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTDELAYCWND,
-				      tp->snd_cwnd);
-			tp->snd_ssthresh = tp->snd_cwnd;
+				      tcp_snd_cwnd(tp));
+			tp->snd_ssthresh = tcp_snd_cwnd(tp);
 		}
 	}
 }
@@ -252,7 +252,7 @@ static bool tcp_cdg_backoff(struct sock *sk, u32 grad)
 			return false;
 	}
 
-	ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
+	ca->shadow_wnd = max(ca->shadow_wnd, tcp_snd_cwnd(tp));
 	ca->state = CDG_BACKOFF;
 	tcp_enter_cwr(sk);
 	return true;
@@ -285,14 +285,14 @@ static void tcp_cdg_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	}
 
 	if (!tcp_is_cwnd_limited(sk)) {
-		ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
+		ca->shadow_wnd = min(ca->shadow_wnd, tcp_snd_cwnd(tp));
 		return;
 	}
 
-	prior_snd_cwnd = tp->snd_cwnd;
+	prior_snd_cwnd = tcp_snd_cwnd(tp);
 	tcp_reno_cong_avoid(sk, ack, acked);
 
-	incr = tp->snd_cwnd - prior_snd_cwnd;
+	incr = tcp_snd_cwnd(tp) - prior_snd_cwnd;
 	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
 }
 
@@ -331,15 +331,15 @@ static u32 tcp_cdg_ssthresh(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 
 	if (ca->state == CDG_BACKOFF)
-		return max(2U, (tp->snd_cwnd * min(1024U, backoff_beta)) >> 10);
+		return max(2U, (tcp_snd_cwnd(tp) * min(1024U, backoff_beta)) >> 10);
 
 	if (ca->state == CDG_NONFULL && use_tolerance)
-		return tp->snd_cwnd;
+		return tcp_snd_cwnd(tp);
 
-	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
+	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tcp_snd_cwnd(tp));
 	if (use_shadow)
-		return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
-	return max(2U, tp->snd_cwnd >> 1);
+		return max3(2U, ca->shadow_wnd, tcp_snd_cwnd(tp) >> 1);
+	return max(2U, tcp_snd_cwnd(tp) >> 1);
 }
 
 static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
@@ -357,7 +357,7 @@ static void tcp_cdg_cwnd_event(struct sock *sk, const enum tcp_ca_event ev)
 
 		ca->gradients = gradients;
 		ca->rtt_seq = tp->snd_nxt;
-		ca->shadow_wnd = tp->snd_cwnd;
+		ca->shadow_wnd = tcp_snd_cwnd(tp);
 		break;
 	case CA_EVENT_COMPLETE_CWR:
 		ca->state = CDG_UNKNOWN;
@@ -381,7 +381,7 @@ static void tcp_cdg_init(struct sock *sk)
 		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
 					GFP_NOWAIT | __GFP_NOWARN);
 	ca->rtt_seq = tp->snd_nxt;
-	ca->shadow_wnd = tp->snd_cwnd;
+	ca->shadow_wnd = tcp_snd_cwnd(tp);
 }
 
 static void tcp_cdg_release(struct sock *sk)

@@ -396,10 +396,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name, bool load,
 */
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked)
{
-	u32 cwnd = min(tp->snd_cwnd + acked, tp->snd_ssthresh);
+	u32 cwnd = min(tcp_snd_cwnd(tp) + acked, tp->snd_ssthresh);
 
-	acked -= cwnd - tp->snd_cwnd;
-	tp->snd_cwnd = min(cwnd, tp->snd_cwnd_clamp);
+	acked -= cwnd - tcp_snd_cwnd(tp);
+	tcp_snd_cwnd_set(tp, min(cwnd, tp->snd_cwnd_clamp));
 
 	return acked;
}
@@ -413,7 +413,7 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 	/* If credits accumulated at a higher w, apply them gently now. */
 	if (tp->snd_cwnd_cnt >= w) {
 		tp->snd_cwnd_cnt = 0;
-		tp->snd_cwnd++;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 	}
 
 	tp->snd_cwnd_cnt += acked;
@@ -421,9 +421,9 @@ void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked)
 		u32 delta = tp->snd_cwnd_cnt / w;
 
 		tp->snd_cwnd_cnt -= delta * w;
-		tp->snd_cwnd += delta;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + delta);
 	}
-	tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_cwnd_clamp);
+	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
}
EXPORT_SYMBOL_GPL(tcp_cong_avoid_ai);
 
@@ -448,7 +448,7 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		return;
 	}
 	/* In dangerous area, increase slowly. */
-	tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+	tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
 
@@ -457,7 +457,7 @@ u32 tcp_reno_ssthresh(struct sock *sk)
{
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd >> 1U, 2U);
+	return max(tcp_snd_cwnd(tp) >> 1U, 2U);
}
EXPORT_SYMBOL_GPL(tcp_reno_ssthresh);
 
@@ -465,7 +465,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk)
{
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tp->snd_cwnd, tp->prior_cwnd);
+	return max(tcp_snd_cwnd(tp), tp->prior_cwnd);
}
EXPORT_SYMBOL_GPL(tcp_reno_undo_cwnd);
 

@@ -341,7 +341,7 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	bictcp_update(ca, tp->snd_cwnd, acked);
+	bictcp_update(ca, tcp_snd_cwnd(tp), acked);
 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
 }
 
@@ -353,13 +353,13 @@ static u32 bictcp_recalc_ssthresh(struct sock *sk)
 	ca->epoch_start = 0;	/* end of epoch */
 
 	/* Wmax and fast convergence */
-	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
-		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
+	if (tcp_snd_cwnd(tp) < ca->last_max_cwnd && fast_convergence)
+		ca->last_max_cwnd = (tcp_snd_cwnd(tp) * (BICTCP_BETA_SCALE + beta))
 			/ (2 * BICTCP_BETA_SCALE);
 	else
-		ca->last_max_cwnd = tp->snd_cwnd;
+		ca->last_max_cwnd = tcp_snd_cwnd(tp);
 
-	return max((tp->snd_cwnd * beta) / BICTCP_BETA_SCALE, 2U);
+	return max((tcp_snd_cwnd(tp) * beta) / BICTCP_BETA_SCALE, 2U);
 }
 
 static void bictcp_state(struct sock *sk, u8 new_state)
@@ -420,13 +420,13 @@ static void hystart_update(struct sock *sk, u32 delay)
 			ca->found = 1;
 			pr_debug("hystart_ack_train (%u > %u) delay_min %u (+ ack_delay %u) cwnd %u\n",
 				 now - ca->round_start, threshold,
-				 ca->delay_min, hystart_ack_delay(sk), tp->snd_cwnd);
+				 ca->delay_min, hystart_ack_delay(sk), tcp_snd_cwnd(tp));
 			NET_INC_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTTRAINDETECT);
 			NET_ADD_STATS(sock_net(sk),
 				      LINUX_MIB_TCPHYSTARTTRAINCWND,
-				      tp->snd_cwnd);
-			tp->snd_ssthresh = tp->snd_cwnd;
+				      tcp_snd_cwnd(tp));
+			tp->snd_ssthresh = tcp_snd_cwnd(tp);
 		}
 	}
 }
@@ -445,8 +445,8 @@ static void hystart_update(struct sock *sk, u32 delay)
 					      LINUX_MIB_TCPHYSTARTDELAYDETECT);
 				NET_ADD_STATS(sock_net(sk),
 					      LINUX_MIB_TCPHYSTARTDELAYCWND,
-					      tp->snd_cwnd);
-				tp->snd_ssthresh = tp->snd_cwnd;
+					      tcp_snd_cwnd(tp));
+				tp->snd_ssthresh = tcp_snd_cwnd(tp);
 			}
 		}
 	}
@@ -476,7 +476,7 @@ static void bictcp_acked(struct sock *sk, const struct ack_sample *sample)
 
 	/* hystart triggers when cwnd is larger than some threshold */
 	if (!ca->found && tcp_in_slow_start(tp) && hystart &&
-	    tp->snd_cwnd >= hystart_low_window)
+	    tcp_snd_cwnd(tp) >= hystart_low_window)
 		hystart_update(sk, delay);
 }
 

@@ -115,8 +115,8 @@ static u32 dctcp_ssthresh(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
+	ca->loss_cwnd = tcp_snd_cwnd(tp);
+	return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * ca->dctcp_alpha) >> 11U), 2U);
 }
 
 static void dctcp_update_alpha(struct sock *sk, u32 flags)
@@ -157,8 +157,8 @@ static void dctcp_react_to_loss(struct sock *sk)
 	struct dctcp *ca = inet_csk_ca(sk);
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	ca->loss_cwnd = tp->snd_cwnd;
-	tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);
+	ca->loss_cwnd = tcp_snd_cwnd(tp);
+	tp->snd_ssthresh = max(tcp_snd_cwnd(tp) >> 1U, 2U);
 }
 
 static void dctcp_state(struct sock *sk, u8 new_state)
@@ -220,8 +220,9 @@ static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
 static u32 dctcp_cwnd_undo(struct sock *sk)
 {
 	const struct dctcp *ca = inet_csk_ca(sk);
+	struct tcp_sock *tp = tcp_sk(sk);
 
-	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
+	return max(tcp_snd_cwnd(tp), ca->loss_cwnd);
 }
 
 static struct tcp_congestion_ops dctcp __read_mostly = {

@@ -127,22 +127,22 @@ static void hstcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 *     snd_cwnd <=
 		 *     hstcp_aimd_vals[ca->ai].cwnd
 		 */
-		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
-			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
+		if (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd) {
+			while (tcp_snd_cwnd(tp) > hstcp_aimd_vals[ca->ai].cwnd &&
 			       ca->ai < HSTCP_AIMD_MAX - 1)
 				ca->ai++;
-		} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
-			while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
+		} else if (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd) {
+			while (ca->ai && tcp_snd_cwnd(tp) <= hstcp_aimd_vals[ca->ai-1].cwnd)
 				ca->ai--;
 		}
 
 		/* Do additive increase */
-		if (tp->snd_cwnd < tp->snd_cwnd_clamp) {
+		if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
 			/* cwnd = cwnd + a(w) / cwnd */
 			tp->snd_cwnd_cnt += ca->ai + 1;
-			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-				tp->snd_cwnd_cnt -= tp->snd_cwnd;
-				tp->snd_cwnd++;
+			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+				tp->snd_cwnd_cnt -= tcp_snd_cwnd(tp);
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			}
 		}
 	}
@@ -154,7 +154,7 @@ static u32 hstcp_ssthresh(struct sock *sk)
 	struct hstcp *ca = inet_csk_ca(sk);
 
 	/* Do multiplicative decrease */
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
+	return max(tcp_snd_cwnd(tp) - ((tcp_snd_cwnd(tp) * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
 }
 
 static struct tcp_congestion_ops tcp_highspeed __read_mostly = {

@@ -124,7 +124,7 @@ static void measure_achieved_throughput(struct sock *sk,
 
 	ca->packetcount += sample->pkts_acked;
 
-	if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
+	if (ca->packetcount >= tcp_snd_cwnd(tp) - (ca->alpha >> 7 ? : 1) &&
 	    now - ca->lasttime >= ca->minRTT &&
 	    ca->minRTT > 0) {
 		__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
@@ -225,7 +225,7 @@ static u32 htcp_recalc_ssthresh(struct sock *sk)
 	const struct htcp *ca = inet_csk_ca(sk);
 
 	htcp_param_update(sk);
-	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
+	return max((tcp_snd_cwnd(tp) * ca->beta) >> 7, 2U);
 }
 
 static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
@@ -242,9 +242,9 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		/* In dangerous area, increase slowly.
 		 * In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
 		 */
-		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
-			if (tp->snd_cwnd < tp->snd_cwnd_clamp)
-				tp->snd_cwnd++;
+		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tcp_snd_cwnd(tp)) {
+			if (tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp)
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			tp->snd_cwnd_cnt = 0;
 			htcp_alpha_update(ca);
 		} else

@@ -54,7 +54,7 @@ static void hybla_init(struct sock *sk)
 	ca->rho2_7ls = 0;
 	ca->snd_cwnd_cents = 0;
 	ca->hybla_en = true;
-	tp->snd_cwnd = 2;
+	tcp_snd_cwnd_set(tp, 2);
 	tp->snd_cwnd_clamp = 65535;
 
 	/* 1st Rho measurement based on initial srtt */
@@ -62,7 +62,7 @@ static void hybla_init(struct sock *sk)
 
 	/* set minimum rtt as this is the 1st ever seen */
 	ca->minrtt_us = tp->srtt_us;
-	tp->snd_cwnd = ca->rho;
+	tcp_snd_cwnd_set(tp, ca->rho);
 }
 
 static void hybla_state(struct sock *sk, u8 ca_state)
@@ -137,31 +137,31 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * as long as increment is estimated as (rho<<7)/window
 		 * it already is <<7 and we can easily count its fractions.
 		 */
-		increment = ca->rho2_7ls / tp->snd_cwnd;
+		increment = ca->rho2_7ls / tcp_snd_cwnd(tp);
 		if (increment < 128)
 			tp->snd_cwnd_cnt++;
 	}
 
 	odd = increment % 128;
-	tp->snd_cwnd += increment >> 7;
+	tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + (increment >> 7));
 	ca->snd_cwnd_cents += odd;
 
 	/* check when fractions goes >=128 and increase cwnd by 1. */
 	while (ca->snd_cwnd_cents >= 128) {
-		tp->snd_cwnd++;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 		ca->snd_cwnd_cents -= 128;
 		tp->snd_cwnd_cnt = 0;
 	}
 	/* check when cwnd has not been incremented for a while */
-	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tp->snd_cwnd) {
-		tp->snd_cwnd++;
+	if (increment == 0 && odd == 0 && tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 		tp->snd_cwnd_cnt = 0;
 	}
 	/* clamp down slowstart cwnd to ssthresh value. */
 	if (is_slowstart)
-		tp->snd_cwnd = min(tp->snd_cwnd, tp->snd_ssthresh);
+		tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_ssthresh));
 
-	tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
+	tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp), tp->snd_cwnd_clamp));
 }
 
 static struct tcp_congestion_ops tcp_hybla __read_mostly = {

@@ -224,7 +224,7 @@ static void update_params(struct sock *sk)
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
 
-	if (tp->snd_cwnd < win_thresh) {
+	if (tcp_snd_cwnd(tp) < win_thresh) {
 		ca->alpha = ALPHA_BASE;
 		ca->beta = BETA_BASE;
 	} else if (ca->cnt_rtt > 0) {
@@ -284,9 +284,9 @@ static void tcp_illinois_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 *   tp->snd_cwnd += alpha/tp->snd_cwnd
 		 */
 		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
-		if (delta >= tp->snd_cwnd) {
-			tp->snd_cwnd = min(tp->snd_cwnd + delta / tp->snd_cwnd,
-					   (u32)tp->snd_cwnd_clamp);
+		if (delta >= tcp_snd_cwnd(tp)) {
+			tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp) + delta / tcp_snd_cwnd(tp),
+						 (u32)tp->snd_cwnd_clamp));
 			tp->snd_cwnd_cnt = 0;
 		}
 	}
@@ -296,9 +296,11 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 	struct illinois *ca = inet_csk_ca(sk);
+	u32 decr;
 
 	/* Multiplicative decrease */
-	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
+	decr = (tcp_snd_cwnd(tp) * ca->beta) >> BETA_SHIFT;
+	return max(tcp_snd_cwnd(tp) - decr, 2U);
 }
 
 /* Extract info for Tcp socket info provided via netlink. */

@@ -426,7 +426,7 @@ static void tcp_sndbuf_expand(struct sock *sk)
 	per_mss = roundup_pow_of_two(per_mss) +
 		  SKB_DATA_ALIGN(sizeof(struct sk_buff));
 
-	nr_segs = max_t(u32, TCP_INIT_CWND, tp->snd_cwnd);
+	nr_segs = max_t(u32, TCP_INIT_CWND, tcp_snd_cwnd(tp));
 	nr_segs = max_t(u32, nr_segs, tp->reordering + 1);
 
 	/* Fast Recovery (RFC 5681 3.2) :
@@ -914,12 +914,12 @@ static void tcp_update_pacing_rate(struct sock *sk)
 	 * If snd_cwnd >= (tp->snd_ssthresh / 2), we are approaching
	 * end of slow start and should slow down.
 	 */
-	if (tp->snd_cwnd < tp->snd_ssthresh / 2)
+	if (tcp_snd_cwnd(tp) < tp->snd_ssthresh / 2)
 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ss_ratio;
 	else
 		rate *= sock_net(sk)->ipv4.sysctl_tcp_pacing_ca_ratio;
 
-	rate *= max(tp->snd_cwnd, tp->packets_out);
+	rate *= max(tcp_snd_cwnd(tp), tp->packets_out);
 
 	if (likely(tp->srtt_us))
 		do_div(rate, tp->srtt_us);
@@ -2169,12 +2169,12 @@ void tcp_enter_loss(struct sock *sk)
 	    !after(tp->high_seq, tp->snd_una) ||
 	    (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
 		tp->prior_ssthresh = tcp_current_ssthresh(sk);
-		tp->prior_cwnd = tp->snd_cwnd;
+		tp->prior_cwnd = tcp_snd_cwnd(tp);
 		tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
 		tcp_ca_event(sk, CA_EVENT_LOSS);
 		tcp_init_undo(tp);
 	}
-	tp->snd_cwnd = tcp_packets_in_flight(tp) + 1;
+	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + 1);
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
@@ -2485,7 +2485,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		pr_debug("Undo %s %pI4/%u c%u l%u ss%u/%u p%u\n",
 			 msg,
 			 &inet->inet_daddr, ntohs(inet->inet_dport),
-			 tp->snd_cwnd, tcp_left_out(tp),
+			 tcp_snd_cwnd(tp), tcp_left_out(tp),
 			 tp->snd_ssthresh, tp->prior_ssthresh,
 			 tp->packets_out);
 	}
@@ -2494,7 +2494,7 @@ static void DBGUNDO(struct sock *sk, const char *msg)
 		pr_debug("Undo %s %pI6/%u c%u l%u ss%u/%u p%u\n",
 			 msg,
 			 &sk->sk_v6_daddr, ntohs(inet->inet_dport),
-			 tp->snd_cwnd, tcp_left_out(tp),
+			 tcp_snd_cwnd(tp), tcp_left_out(tp),
 			 tp->snd_ssthresh, tp->prior_ssthresh,
 			 tp->packets_out);
 	}
@@ -2519,7 +2519,7 @@ static void tcp_undo_cwnd_reduction(struct sock *sk, bool unmark_loss)
 	if (tp->prior_ssthresh) {
 		const struct inet_connection_sock *icsk = inet_csk(sk);
 
-		tp->snd_cwnd = icsk->icsk_ca_ops->undo_cwnd(sk);
+		tcp_snd_cwnd_set(tp, icsk->icsk_ca_ops->undo_cwnd(sk));
 
 		if (tp->prior_ssthresh > tp->snd_ssthresh) {
 			tp->snd_ssthresh = tp->prior_ssthresh;
@@ -2637,7 +2637,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk)
 	tp->high_seq = tp->snd_nxt;
 	tp->tlp_high_seq = 0;
 	tp->snd_cwnd_cnt = 0;
-	tp->prior_cwnd = tp->snd_cwnd;
+	tp->prior_cwnd = tcp_snd_cwnd(tp);
 	tp->prr_delivered = 0;
 	tp->prr_out = 0;
 	tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
@@ -2668,7 +2668,7 @@ void tcp_cwnd_reduction(struct sock *sk, int newly_acked_sacked, int flag)
 	}
 	/* Force a fast retransmit upon entering fast recovery */
 	sndcnt = max(sndcnt, (tp->prr_out ? 0 : 1));
-	tp->snd_cwnd = tcp_packets_in_flight(tp) + sndcnt;
+	tcp_snd_cwnd_set(tp, tcp_packets_in_flight(tp) + sndcnt);
 }
 
 static inline void tcp_end_cwnd_reduction(struct sock *sk)
@@ -2681,7 +2681,7 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 	/* Reset cwnd to ssthresh in CWR or Recovery (unless it's undone) */
 	if (tp->snd_ssthresh < TCP_INFINITE_SSTHRESH &&
 	    (inet_csk(sk)->icsk_ca_state == TCP_CA_CWR || tp->undo_marker)) {
-		tp->snd_cwnd = tp->snd_ssthresh;
+		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
 		tp->snd_cwnd_stamp = tcp_jiffies32;
 	}
 	tcp_ca_event(sk, CA_EVENT_COMPLETE_CWR);
@@ -2749,10 +2749,10 @@ static void tcp_mtup_probe_success(struct sock *sk)
 
 	tp->prior_ssthresh = tcp_current_ssthresh(sk);
 
-	val = (u64)tp->snd_cwnd * tcp_mss_to_mtu(sk, tp->mss_cache);
+	val = (u64)tcp_snd_cwnd(tp) * tcp_mss_to_mtu(sk, tp->mss_cache);
 	do_div(val, icsk->icsk_mtup.probe_size);
 	WARN_ON_ONCE((u32)val != val);
-	tp->snd_cwnd = max_t(u32, 1U, val);
+	tcp_snd_cwnd_set(tp, max_t(u32, 1U, val));
 
 	tp->snd_cwnd_cnt = 0;
 	tp->snd_cwnd_stamp = tcp_jiffies32;
@@ -3061,7 +3061,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 	    tp->snd_una == tp->mtu_probe.probe_seq_start) {
 		tcp_mtup_probe_failed(sk);
 		/* Restores the reduction we did in tcp_mtup_probe() */
-		tp->snd_cwnd++;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 		tcp_simple_retransmit(sk);
 		return;
 	}
@@ -5468,7 +5468,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;
 
 	/* If we filled the congestion window, do not expand. */
-	if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
+	if (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp))
 		return false;
 
 	return true;
@@ -6036,9 +6036,9 @@ void tcp_init_transfer(struct sock *sk, int bpf_op, struct sk_buff *skb)
 	 * retransmission has occurred.
 	 */
 	if (tp->total_retrans > 1 && tp->undo_marker)
-		tp->snd_cwnd = 1;
+		tcp_snd_cwnd_set(tp, 1);
 	else
-		tp->snd_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
+		tcp_snd_cwnd_set(tp, tcp_init_cwnd(tp, __sk_dst_get(sk)));
 	tp->snd_cwnd_stamp = tcp_jiffies32;
 
 	bpf_skops_established(sk, bpf_op, skb);

@@ -2638,7 +2638,7 @@ static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
 		jiffies_to_clock_t(icsk->icsk_rto),
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sk),
-		tp->snd_cwnd,
+		tcp_snd_cwnd(tp),
 		state == TCP_LISTEN ?
 		    fastopenq->max_qlen :
 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));

@@ -290,7 +290,7 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 		lp->flag &= ~LP_WITHIN_THR;
 
 	pr_debug("TCP-LP: %05o|%5u|%5u|%15u|%15u|%15u\n", lp->flag,
-		 tp->snd_cwnd, lp->remote_hz, lp->owd_min, lp->owd_max,
+		 tcp_snd_cwnd(tp), lp->remote_hz, lp->owd_min, lp->owd_max,
 		 lp->sowd >> 3);
 
 	if (lp->flag & LP_WITHIN_THR)
@@ -306,12 +306,12 @@ static void tcp_lp_pkts_acked(struct sock *sk, const struct ack_sample *sample)
 	/* happened within inference
 	 * drop snd_cwnd into 1 */
 	if (lp->flag & LP_WITHIN_INF)
-		tp->snd_cwnd = 1U;
+		tcp_snd_cwnd_set(tp, 1U);
 
 	/* happened after inference
 	 * cut snd_cwnd into half */
 	else
-		tp->snd_cwnd = max(tp->snd_cwnd >> 1U, 1U);
+		tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp) >> 1U, 1U));
 
 	/* record this drop time */
 	lp->last_drop = now;

@@ -408,15 +408,15 @@ void tcp_update_metrics(struct sock *sk)
 		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH)) {
 			val = tcp_metric_get(tm, TCP_METRIC_SSTHRESH);
-			if (val && (tp->snd_cwnd >> 1) > val)
+			if (val && (tcp_snd_cwnd(tp) >> 1) > val)
 				tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
-					       tp->snd_cwnd >> 1);
+					       tcp_snd_cwnd(tp) >> 1);
 		}
 		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 			val = tcp_metric_get(tm, TCP_METRIC_CWND);
-			if (tp->snd_cwnd > val)
+			if (tcp_snd_cwnd(tp) > val)
 				tcp_metric_set(tm, TCP_METRIC_CWND,
-					       tp->snd_cwnd);
+					       tcp_snd_cwnd(tp));
 		}
 	} else if (!tcp_in_slow_start(tp) &&
 		   icsk->icsk_ca_state == TCP_CA_Open) {
@@ -424,10 +424,10 @@ void tcp_update_metrics(struct sock *sk)
 		if (!READ_ONCE(net->ipv4.sysctl_tcp_no_ssthresh_metrics_save) &&
 		    !tcp_metric_locked(tm, TCP_METRIC_SSTHRESH))
 			tcp_metric_set(tm, TCP_METRIC_SSTHRESH,
-				       max(tp->snd_cwnd >> 1, tp->snd_ssthresh));
+				       max(tcp_snd_cwnd(tp) >> 1, tp->snd_ssthresh));
 		if (!tcp_metric_locked(tm, TCP_METRIC_CWND)) {
 			val = tcp_metric_get(tm, TCP_METRIC_CWND);
-			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tp->snd_cwnd) >> 1);
+			tcp_metric_set(tm, TCP_METRIC_CWND, (val + tcp_snd_cwnd(tp)) >> 1);
 		}
 	} else {
 		/* Else slow start did not finish, cwnd is non-sense,

@@ -198,10 +198,10 @@ static void tcpnv_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 	}
 
 	if (ca->cwnd_growth_factor < 0) {
-		cnt = tp->snd_cwnd << -ca->cwnd_growth_factor;
+		cnt = tcp_snd_cwnd(tp) << -ca->cwnd_growth_factor;
 		tcp_cong_avoid_ai(tp, cnt, acked);
 	} else {
-		cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);
+		cnt = max(4U, tcp_snd_cwnd(tp) >> ca->cwnd_growth_factor);
 		tcp_cong_avoid_ai(tp, cnt, acked);
 	}
 }
@@ -210,7 +210,7 @@ static u32 tcpnv_recalc_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);
 
-	return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);
+	return max((tcp_snd_cwnd(tp) * nv_loss_dec_factor) >> 10, 2U);
 }
 
 static void tcpnv_state(struct sock *sk, u8 new_state)
@@ -258,7 +258,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		return;
 
 	/* Stop cwnd growth if we were in catch up mode */
-	if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) {
+	if (ca->nv_catchup && tcp_snd_cwnd(tp) >= nv_min_cwnd) {
 		ca->nv_catchup = 0;
 		ca->nv_allow_cwnd_growth = 0;
 	}
@@ -372,7 +372,7 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		 * if cwnd < max_win, grow cwnd
 		 * else leave the same
 		 */
-		if (tp->snd_cwnd > max_win) {
+		if (tcp_snd_cwnd(tp) > max_win) {
 			/* there is congestion, check that it is ok
 			 * to make a CA decision
 			 * 1. We should have at least nv_dec_eval_min_calls
@@ -399,20 +399,20 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 			ca->nv_allow_cwnd_growth = 0;
 			tp->snd_ssthresh =
 				(nv_ssthresh_factor * max_win) >> 3;
-			if (tp->snd_cwnd - max_win > 2) {
+			if (tcp_snd_cwnd(tp) - max_win > 2) {
 				/* gap > 2, we do exponential cwnd decrease */
 				int dec;
 
-				dec = max(2U, ((tp->snd_cwnd - max_win) *
+				dec = max(2U, ((tcp_snd_cwnd(tp) - max_win) *
 					       nv_cong_dec_mult) >> 7);
-				tp->snd_cwnd -= dec;
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - dec);
 			} else if (nv_cong_dec_mult > 0) {
-				tp->snd_cwnd = max_win;
+				tcp_snd_cwnd_set(tp, max_win);
 			}
 			if (ca->cwnd_growth_factor > 0)
 				ca->cwnd_growth_factor = 0;
 			ca->nv_no_cong_cnt = 0;
-		} else if (tp->snd_cwnd <= max_win - nv_pad_buffer) {
+		} else if (tcp_snd_cwnd(tp) <= max_win - nv_pad_buffer) {
 			/* There is no congestion, grow cwnd if allowed*/
 			if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
 				return;
@@ -445,8 +445,8 @@ static void tcpnv_acked(struct sock *sk, const struct ack_sample *sample)
 		 * (it wasn't before, if it is now is because nv
 		 *  decreased it).
 		 */
-		if (tp->snd_cwnd < nv_min_cwnd)
-			tp->snd_cwnd = nv_min_cwnd;
+		if (tcp_snd_cwnd(tp) < nv_min_cwnd)
+			tcp_snd_cwnd_set(tp, nv_min_cwnd);
 	}
 }
 

@ -150,7 +150,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
|
||||||
{
|
{
|
||||||
struct tcp_sock *tp = tcp_sk(sk);
|
struct tcp_sock *tp = tcp_sk(sk);
|
||||||
u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
|
u32 restart_cwnd = tcp_init_cwnd(tp, __sk_dst_get(sk));
|
||||||
u32 cwnd = tp->snd_cwnd;
|
u32 cwnd = tcp_snd_cwnd(tp);
|
||||||
|
|
||||||
tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
|
tcp_ca_event(sk, CA_EVENT_CWND_RESTART);
|
||||||
|
|
||||||
|
@ -159,7 +159,7 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta)
|
||||||
|
|
||||||
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
|
while ((delta -= inet_csk(sk)->icsk_rto) > 0 && cwnd > restart_cwnd)
|
||||||
cwnd >>= 1;
|
cwnd >>= 1;
|
||||||
tp->snd_cwnd = max(cwnd, restart_cwnd);
|
tcp_snd_cwnd_set(tp, max(cwnd, restart_cwnd));
|
||||||
tp->snd_cwnd_stamp = tcp_jiffies32;
|
tp->snd_cwnd_stamp = tcp_jiffies32;
|
||||||
tp->snd_cwnd_used = 0;
|
tp->snd_cwnd_used = 0;
|
||||||
}
|
}
|
||||||
|
@ -1019,7 +1019,7 @@ static void tcp_tsq_write(struct sock *sk)
|
||||||
struct tcp_sock *tp = tcp_sk(sk);
|
struct tcp_sock *tp = tcp_sk(sk);
|
||||||
|
|
||||||
if (tp->lost_out > tp->retrans_out &&
|
if (tp->lost_out > tp->retrans_out &&
|
||||||
tp->snd_cwnd > tcp_packets_in_flight(tp)) {
|
tcp_snd_cwnd(tp) > tcp_packets_in_flight(tp)) {
|
||||||
tcp_mstamp_refresh(tp);
|
tcp_mstamp_refresh(tp);
|
||||||
tcp_xmit_retransmit_queue(sk);
|
tcp_xmit_retransmit_queue(sk);
|
||||||
}
|
}
|
||||||
|
@ -1900,9 +1900,9 @@ static void tcp_cwnd_application_limited(struct sock *sk)
|
||||||
/* Limited by application or receiver window. */
|
/* Limited by application or receiver window. */
|
||||||
u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
|
u32 init_win = tcp_init_cwnd(tp, __sk_dst_get(sk));
|
||||||
u32 win_used = max(tp->snd_cwnd_used, init_win);
|
u32 win_used = max(tp->snd_cwnd_used, init_win);
|
||||||
if (win_used < tp->snd_cwnd) {
|
if (win_used < tcp_snd_cwnd(tp)) {
|
||||||
tp->snd_ssthresh = tcp_current_ssthresh(sk);
|
tp->snd_ssthresh = tcp_current_ssthresh(sk);
|
||||||
tp->snd_cwnd = (tp->snd_cwnd + win_used) >> 1;
|
tcp_snd_cwnd_set(tp, (tcp_snd_cwnd(tp) + win_used) >> 1);
|
||||||
}
|
}
|
||||||
tp->snd_cwnd_used = 0;
|
tp->snd_cwnd_used = 0;
|
||||||
}
|
}
|
||||||
|
@ -2079,7 +2079,7 @@ static inline unsigned int tcp_cwnd_test(const struct tcp_sock *tp,
|
||||||
return 1;
|
return 1;
|
||||||
|
|
||||||
in_flight = tcp_packets_in_flight(tp);
|
in_flight = tcp_packets_in_flight(tp);
|
||||||
cwnd = tp->snd_cwnd;
|
cwnd = tcp_snd_cwnd(tp);
|
||||||
if (in_flight >= cwnd)
|
if (in_flight >= cwnd)
|
||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
|
@ -2235,12 +2235,12 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
|
||||||
in_flight = tcp_packets_in_flight(tp);
|
in_flight = tcp_packets_in_flight(tp);
|
||||||
|
|
||||||
BUG_ON(tcp_skb_pcount(skb) <= 1);
|
BUG_ON(tcp_skb_pcount(skb) <= 1);
|
||||||
BUG_ON(tp->snd_cwnd <= in_flight);
|
BUG_ON(tcp_snd_cwnd(tp) <= in_flight);
|
||||||
|
|
||||||
send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
|
send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq;
|
||||||
|
|
||||||
/* From in_flight test above, we know that cwnd > in_flight. */
|
/* From in_flight test above, we know that cwnd > in_flight. */
|
||||||
cong_win = (tp->snd_cwnd - in_flight) * tp->mss_cache;
|
cong_win = (tcp_snd_cwnd(tp) - in_flight) * tp->mss_cache;
|
||||||
|
|
||||||
limit = min(send_win, cong_win);
|
limit = min(send_win, cong_win);
|
||||||
|
|
||||||
|
@ -2254,7 +2254,7 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb,
|
||||||
|
|
||||||
win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
|
win_divisor = READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_tso_win_divisor);
|
||||||
if (win_divisor) {
|
if (win_divisor) {
|
||||||
u32 chunk = min(tp->snd_wnd, tp->snd_cwnd * tp->mss_cache);
|
u32 chunk = min(tp->snd_wnd, tcp_snd_cwnd(tp) * tp->mss_cache);
|
||||||
|
|
||||||
/* If at least some fraction of a window is available,
|
/* If at least some fraction of a window is available,
|
||||||
* just use it.
|
* just use it.
|
||||||
|
@@ -2382,7 +2382,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	if (likely(!icsk->icsk_mtup.enabled ||
 		   icsk->icsk_mtup.probe_size ||
 		   inet_csk(sk)->icsk_ca_state != TCP_CA_Open ||
-		   tp->snd_cwnd < 11 ||
+		   tcp_snd_cwnd(tp) < 11 ||
 		   tp->rx_opt.num_sacks || tp->rx_opt.dsack))
 		return -1;

@@ -2418,7 +2418,7 @@ static int tcp_mtu_probe(struct sock *sk)
 		return 0;

 	/* Do we need to wait to drain cwnd? With none in flight, don't stall */
-	if (tcp_packets_in_flight(tp) + 2 > tp->snd_cwnd) {
+	if (tcp_packets_in_flight(tp) + 2 > tcp_snd_cwnd(tp)) {
 		if (!tcp_packets_in_flight(tp))
 			return -1;
 		else

@@ -2489,7 +2489,7 @@ static int tcp_mtu_probe(struct sock *sk)
 	if (!tcp_transmit_skb(sk, nskb, 1, GFP_ATOMIC)) {
 		/* Decrement cwnd here because we are sending
 		 * effectively two packets. */
-		tp->snd_cwnd--;
+		tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
 		tcp_event_new_data_sent(sk, nskb);

 		icsk->icsk_mtup.probe_size = tcp_mss_to_mtu(sk, nskb->len);
@@ -2762,7 +2762,7 @@ repair:
 	else
 		tcp_chrono_stop(sk, TCP_CHRONO_RWND_LIMITED);

-	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tp->snd_cwnd);
+	is_cwnd_limited |= (tcp_packets_in_flight(tp) >= tcp_snd_cwnd(tp));
 	if (likely(sent_pkts || is_cwnd_limited))
 		tcp_cwnd_validate(sk, is_cwnd_limited);

@@ -2869,7 +2869,7 @@ void tcp_send_loss_probe(struct sock *sk)
 	if (unlikely(!skb)) {
 		WARN_ONCE(tp->packets_out,
 			  "invalid inflight: %u state %u cwnd %u mss %d\n",
-			  tp->packets_out, sk->sk_state, tp->snd_cwnd, mss);
+			  tp->packets_out, sk->sk_state, tcp_snd_cwnd(tp), mss);
 		inet_csk(sk)->icsk_pending = 0;
 		return;
 	}
@@ -3381,7 +3381,7 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
 		if (!hole)
 			tp->retransmit_skb_hint = skb;

-		segs = tp->snd_cwnd - tcp_packets_in_flight(tp);
+		segs = tcp_snd_cwnd(tp) - tcp_packets_in_flight(tp);
 		if (segs <= 0)
 			break;
 		sacked = TCP_SKB_CB(skb)->sacked;
@@ -195,7 +195,7 @@ void tcp_rate_check_app_limited(struct sock *sk)
 	    /* Nothing in sending host's qdisc queues or NIC tx queue. */
 	    sk_wmem_alloc_get(sk) < SKB_TRUESIZE(1) &&
 	    /* We are not limited by CWND. */
-	    tcp_packets_in_flight(tp) < tp->snd_cwnd &&
+	    tcp_packets_in_flight(tp) < tcp_snd_cwnd(tp) &&
 	    /* All lost packets have been retransmitted. */
 	    tp->lost_out <= tp->retrans_out)
 		tp->app_limited =
@@ -27,7 +27,7 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		if (!acked)
 			return;
 	}
-	tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+	tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
 			  acked);
 }

@@ -35,7 +35,7 @@ static u32 tcp_scalable_ssthresh(struct sock *sk)
 {
 	const struct tcp_sock *tp = tcp_sk(sk);

-	return max(tp->snd_cwnd - (tp->snd_cwnd>>TCP_SCALABLE_MD_SCALE), 2U);
+	return max(tcp_snd_cwnd(tp) - (tcp_snd_cwnd(tp)>>TCP_SCALABLE_MD_SCALE), 2U);
 }

 static struct tcp_congestion_ops tcp_scalable __read_mostly = {
@@ -159,7 +159,7 @@ EXPORT_SYMBOL_GPL(tcp_vegas_cwnd_event);

 static inline u32 tcp_vegas_ssthresh(struct tcp_sock *tp)
 {
-	return min(tp->snd_ssthresh, tp->snd_cwnd);
+	return min(tp->snd_ssthresh, tcp_snd_cwnd(tp));
 }

 static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)

@@ -217,14 +217,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		 * This is:
 		 *     (actual rate in segments) * baseRTT
 		 */
-		target_cwnd = (u64)tp->snd_cwnd * vegas->baseRTT;
+		target_cwnd = (u64)tcp_snd_cwnd(tp) * vegas->baseRTT;
 		do_div(target_cwnd, rtt);

 		/* Calculate the difference between the window we had,
 		 * and the window we would like to have. This quantity
 		 * is the "Diff" from the Arizona Vegas papers.
 		 */
-		diff = tp->snd_cwnd * (rtt-vegas->baseRTT) / vegas->baseRTT;
+		diff = tcp_snd_cwnd(tp) * (rtt-vegas->baseRTT) / vegas->baseRTT;

 		if (diff > gamma && tcp_in_slow_start(tp)) {
 			/* Going too fast. Time to slow down

@@ -238,7 +238,8 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			 * truncation robs us of full link
 			 * utilization.
 			 */
-			tp->snd_cwnd = min(tp->snd_cwnd, (u32)target_cwnd+1);
+			tcp_snd_cwnd_set(tp, min(tcp_snd_cwnd(tp),
+						 (u32)target_cwnd + 1));
 			tp->snd_ssthresh = tcp_vegas_ssthresh(tp);

 		} else if (tcp_in_slow_start(tp)) {

@@ -254,14 +255,14 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 				/* The old window was too fast, so
 				 * we slow down.
 				 */
-				tp->snd_cwnd--;
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - 1);
 				tp->snd_ssthresh
 					= tcp_vegas_ssthresh(tp);
 			} else if (diff < alpha) {
 				/* We don't have enough extra packets
 				 * in the network, so speed up.
 				 */
-				tp->snd_cwnd++;
+				tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 			} else {
 				/* Sending just as fast as we
 				 * should be.

@@ -269,10 +270,10 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 		}
 	}

-	if (tp->snd_cwnd < 2)
-		tp->snd_cwnd = 2;
-	else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
-		tp->snd_cwnd = tp->snd_cwnd_clamp;
+	if (tcp_snd_cwnd(tp) < 2)
+		tcp_snd_cwnd_set(tp, 2);
+	else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+		tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);

 	tp->snd_ssthresh = tcp_current_ssthresh(sk);
 }
@@ -146,11 +146,11 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)

 		rtt = veno->minrtt;

-		target_cwnd = (u64)tp->snd_cwnd * veno->basertt;
+		target_cwnd = (u64)tcp_snd_cwnd(tp) * veno->basertt;
 		target_cwnd <<= V_PARAM_SHIFT;
 		do_div(target_cwnd, rtt);

-		veno->diff = (tp->snd_cwnd << V_PARAM_SHIFT) - target_cwnd;
+		veno->diff = (tcp_snd_cwnd(tp) << V_PARAM_SHIFT) - target_cwnd;

 		if (tcp_in_slow_start(tp)) {
 			/* Slow start. */

@@ -164,15 +164,15 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			/* In the "non-congestive state", increase cwnd
 			 * every rtt.
 			 */
-			tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+			tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
 		} else {
 			/* In the "congestive state", increase cwnd
 			 * every other rtt.
 			 */
-			if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+			if (tp->snd_cwnd_cnt >= tcp_snd_cwnd(tp)) {
 				if (veno->inc &&
-				    tp->snd_cwnd < tp->snd_cwnd_clamp) {
-					tp->snd_cwnd++;
+				    tcp_snd_cwnd(tp) < tp->snd_cwnd_clamp) {
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) + 1);
 					veno->inc = 0;
 				} else
 					veno->inc = 1;

@@ -181,10 +181,10 @@ static void tcp_veno_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 			tp->snd_cwnd_cnt += acked;
 		}
 done:
-		if (tp->snd_cwnd < 2)
-			tp->snd_cwnd = 2;
-		else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
-			tp->snd_cwnd = tp->snd_cwnd_clamp;
+		if (tcp_snd_cwnd(tp) < 2)
+			tcp_snd_cwnd_set(tp, 2);
+		else if (tcp_snd_cwnd(tp) > tp->snd_cwnd_clamp)
+			tcp_snd_cwnd_set(tp, tp->snd_cwnd_clamp);
 	}
 	/* Wipe the slate clean for the next rtt. */
 	/* veno->cntrtt = 0; */

@@ -199,10 +199,10 @@ static u32 tcp_veno_ssthresh(struct sock *sk)

 	if (veno->diff < beta)
 		/* in "non-congestive state", cut cwnd by 1/5 */
-		return max(tp->snd_cwnd * 4 / 5, 2U);
+		return max(tcp_snd_cwnd(tp) * 4 / 5, 2U);
 	else
 		/* in "congestive state", cut cwnd by 1/2 */
-		return max(tp->snd_cwnd >> 1U, 2U);
+		return max(tcp_snd_cwnd(tp) >> 1U, 2U);
 }

 static struct tcp_congestion_ops tcp_veno __read_mostly = {
@@ -244,7 +244,8 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)

 	switch (event) {
 	case CA_EVENT_COMPLETE_CWR:
-		tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
+		tcp_snd_cwnd_set(tp, tp->snd_ssthresh);
 		break;
 	case CA_EVENT_LOSS:
 		tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
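The Westwood hunk above is the one place in this series that is not a line-for-line substitution: the old code chained two assignments, and a function-call setter cannot sit inside a chain, so the write is split into two statements (ssthresh first, then cwnd through the setter). A minimal userspace sketch of the same transformation (mock_tp and mock_snd_cwnd_set below are illustrative stand-ins, not kernel code):

/* Userspace sketch: why "tp->snd_cwnd = tp->snd_ssthresh = val;" cannot
 * survive the conversion as a single statement once cwnd writes must go
 * through a setter. Mock types only; the kernel setter also checks val.
 */
#include <stdio.h>

struct mock_tp {
	unsigned int snd_cwnd;
	unsigned int snd_ssthresh;
};

static void mock_snd_cwnd_set(struct mock_tp *tp, unsigned int val)
{
	tp->snd_cwnd = val;
}

int main(void)
{
	struct mock_tp tp = { .snd_cwnd = 10, .snd_ssthresh = 20 };
	unsigned int bw_rttmin = 7;	/* stand-in for tcp_westwood_bw_rttmin() */

	/* old form: tp.snd_cwnd = tp.snd_ssthresh = bw_rttmin; */
	tp.snd_ssthresh = bw_rttmin;		 /* first the plain field write */
	mock_snd_cwnd_set(&tp, tp.snd_ssthresh); /* then cwnd via the setter */

	printf("cwnd=%u ssthresh=%u\n", tp.snd_cwnd, tp.snd_ssthresh);
	return 0;
}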
@@ -71,11 +71,11 @@ static void tcp_yeah_cong_avoid(struct sock *sk, u32 ack, u32 acked)

 	if (!yeah->doing_reno_now) {
 		/* Scalable */
-		tcp_cong_avoid_ai(tp, min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT),
+		tcp_cong_avoid_ai(tp, min(tcp_snd_cwnd(tp), TCP_SCALABLE_AI_CNT),
 				  acked);
 	} else {
 		/* Reno */
-		tcp_cong_avoid_ai(tp, tp->snd_cwnd, acked);
+		tcp_cong_avoid_ai(tp, tcp_snd_cwnd(tp), acked);
 	}

 	/* The key players are v_vegas.beg_snd_una and v_beg_snd_nxt.

@@ -130,7 +130,7 @@ do_vegas:
 			/* Compute excess number of packets above bandwidth
 			 * Avoid doing full 64 bit divide.
 			 */
-			bw = tp->snd_cwnd;
+			bw = tcp_snd_cwnd(tp);
 			bw *= rtt - yeah->vegas.baseRTT;
 			do_div(bw, rtt);
 			queue = bw;

@@ -138,20 +138,20 @@ do_vegas:
 			if (queue > TCP_YEAH_ALPHA ||
 			    rtt - yeah->vegas.baseRTT > (yeah->vegas.baseRTT / TCP_YEAH_PHY)) {
 				if (queue > TCP_YEAH_ALPHA &&
-				    tp->snd_cwnd > yeah->reno_count) {
+				    tcp_snd_cwnd(tp) > yeah->reno_count) {
 					u32 reduction = min(queue / TCP_YEAH_GAMMA ,
-							    tp->snd_cwnd >> TCP_YEAH_EPSILON);
+							    tcp_snd_cwnd(tp) >> TCP_YEAH_EPSILON);

-					tp->snd_cwnd -= reduction;
+					tcp_snd_cwnd_set(tp, tcp_snd_cwnd(tp) - reduction);

-					tp->snd_cwnd = max(tp->snd_cwnd,
-							   yeah->reno_count);
+					tcp_snd_cwnd_set(tp, max(tcp_snd_cwnd(tp),
+								 yeah->reno_count));

-					tp->snd_ssthresh = tp->snd_cwnd;
+					tp->snd_ssthresh = tcp_snd_cwnd(tp);
 				}

 				if (yeah->reno_count <= 2)
-					yeah->reno_count = max(tp->snd_cwnd>>1, 2U);
+					yeah->reno_count = max(tcp_snd_cwnd(tp)>>1, 2U);
 				else
 					yeah->reno_count++;

@@ -176,7 +176,7 @@ do_vegas:
 		 */
 		yeah->vegas.beg_snd_una  = yeah->vegas.beg_snd_nxt;
 		yeah->vegas.beg_snd_nxt  = tp->snd_nxt;
-		yeah->vegas.beg_snd_cwnd = tp->snd_cwnd;
+		yeah->vegas.beg_snd_cwnd = tcp_snd_cwnd(tp);

 		/* Wipe the slate clean for the next RTT. */
 		yeah->vegas.cntRTT = 0;

@@ -193,16 +193,16 @@ static u32 tcp_yeah_ssthresh(struct sock *sk)
 	if (yeah->doing_reno_now < TCP_YEAH_RHO) {
 		reduction = yeah->lastQ;

-		reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));
+		reduction = min(reduction, max(tcp_snd_cwnd(tp)>>1, 2U));

-		reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);
+		reduction = max(reduction, tcp_snd_cwnd(tp) >> TCP_YEAH_DELTA);
 	} else
-		reduction = max(tp->snd_cwnd>>1, 2U);
+		reduction = max(tcp_snd_cwnd(tp)>>1, 2U);

 	yeah->fast_count = 0;
 	yeah->reno_count = max(yeah->reno_count>>1, 2U);

-	return max_t(int, tp->snd_cwnd - reduction, 2);
+	return max_t(int, tcp_snd_cwnd(tp) - reduction, 2);
 }

 static struct tcp_congestion_ops tcp_yeah __read_mostly = {
@@ -2038,7 +2038,7 @@ static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
 		   jiffies_to_clock_t(icsk->icsk_rto),
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | inet_csk_in_pingpong_mode(sp),
-		   tp->snd_cwnd,
+		   tcp_snd_cwnd(tp),
 		   state == TCP_LISTEN ?
 			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
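With these hunks applied, every remaining read of tp->snd_cwnd in the converted files goes through tcp_snd_cwnd() and every write through tcp_snd_cwnd_set(), so a zero or wrapped-negative window warns at the offending call site instead of tripping a check much later. A rough userspace model of the behaviour the setter adds (the WARN_ON_ONCE stub, mock_tcp_sock, and both mock accessors are assumptions for illustration, not the kernel implementation):

#include <stdio.h>

/* One-shot warning stub standing in for the kernel's WARN_ON_ONCE(). */
#define WARN_ON_ONCE(cond) do {					\
	static int warned;					\
	if ((cond) && !warned) {				\
		warned = 1;					\
		fprintf(stderr, "WARNING at %s:%d: %s\n",	\
			__FILE__, __LINE__, #cond);		\
	}							\
} while (0)

struct mock_tcp_sock {
	unsigned int snd_cwnd;
};

static unsigned int mock_snd_cwnd(const struct mock_tcp_sock *tp)
{
	return tp->snd_cwnd;
}

static void mock_snd_cwnd_set(struct mock_tcp_sock *tp, unsigned int val)
{
	/* the (int) cast also flags huge u32 values that wrap negative */
	WARN_ON_ONCE((int)val <= 0);
	tp->snd_cwnd = val;
}

int main(void)
{
	struct mock_tcp_sock tp = { .snd_cwnd = 10 };

	mock_snd_cwnd_set(&tp, mock_snd_cwnd(&tp) - 1);	/* fine: 10 -> 9 */
	mock_snd_cwnd_set(&tp, mock_snd_cwnd(&tp) - 9);	/* 0: warns here */
	return 0;
}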