tcp: disable RFC6675 loss detection
This patch disables RFC6675 loss detection and makes sysctl net.ipv4.tcp_recovery = 1 control a binary choice between RACK (1) and RFC6675 (0).

Signed-off-by: Yuchung Cheng <ycheng@google.com>
Signed-off-by: Neal Cardwell <ncardwell@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: Soheil Hassas Yeganeh <soheil@google.com>
Reviewed-by: Priyaranjan Jha <priyarjha@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 20b654dfe1
commit b38a51fec1

2 changed files with 10 additions and 5 deletions
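As context for the message above, a minimal user-space sketch of what the binary choice means in practice. The helper name, the procfs path handling, and the error handling are assumptions made for illustration only; they are not part of this patch.

/*
 * Illustrative sketch (not kernel code): after this change,
 * net.ipv4.tcp_recovery behaves as a binary choice between
 * RACK (1) and RFC6675 (0) loss detection. This writes the chosen
 * value through the standard procfs mapping of the sysctl.
 */
#include <stdio.h>

static int set_tcp_recovery(unsigned int value)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_recovery", "w");

	if (!f)
		return -1;		/* needs root; knob may be absent on old kernels */
	fprintf(f, "%u\n", value);	/* 1 = RACK, 0 = RFC6675 */
	return fclose(f);
}

int main(void)
{
	return set_tcp_recovery(1) ? 1 : 0;	/* keep the default: RACK */
}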
Documentation/networking/ip-sysctl.txt
@@ -449,7 +449,8 @@ tcp_recovery - INTEGER
 	features.
 
 	RACK: 0x1 enables the RACK loss detection for fast detection of lost
-	      retransmissions and tail drops.
+	      retransmissions and tail drops. It also subsumes and disables
+	      RFC6675 recovery for SACK connections.
 	RACK: 0x2 makes RACK's reordering window static (min_rtt/4).
 	RACK: 0x4 disables RACK's DUPACK threshold heuristic
 
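As a side note to the bits documented above, a small illustrative decode of a tcp_recovery bitmap value. The constant names below are local to this sketch and simply mirror the documented bit values; they are not taken from kernel headers.

/* Illustrative decode of the tcp_recovery bitmap described above. */
#include <stdio.h>

#define RACK_LOSS_DETECTION	0x1	/* RACK loss detection; subsumes RFC6675 */
#define RACK_STATIC_REO_WND	0x2	/* static reordering window (min_rtt/4) */
#define RACK_NO_DUPTHRESH	0x4	/* disable DUPACK threshold heuristic */

static void decode_tcp_recovery(unsigned int val)
{
	printf("RACK loss detection: %s\n",
	       val & RACK_LOSS_DETECTION ? "on (RFC6675 disabled)" : "off (RFC6675)");
	printf("static reordering window: %s\n",
	       val & RACK_STATIC_REO_WND ? "yes" : "no");
	printf("DUPACK threshold heuristic: %s\n",
	       val & RACK_NO_DUPTHRESH ? "disabled" : "enabled");
}

int main(void)
{
	decode_tcp_recovery(0x1);	/* the default value */
	return 0;
}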
net/ipv4/tcp_input.c
@@ -2035,6 +2035,11 @@ static inline int tcp_dupack_heuristics(const struct tcp_sock *tp)
 	return tp->sacked_out + 1;
 }
 
+static bool tcp_is_rack(const struct sock *sk)
+{
+	return sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION;
+}
+
 /* Linux NewReno/SACK/ECN state machine.
  * --------------------------------------
  *
@@ -2141,7 +2146,7 @@ static bool tcp_time_to_recover(struct sock *sk, int flag)
 		return true;
 
 	/* Not-A-Trick#2 : Classic rule... */
-	if (tcp_dupack_heuristics(tp) > tp->reordering)
+	if (!tcp_is_rack(sk) && tcp_dupack_heuristics(tp) > tp->reordering)
 		return true;
 
 	return false;
@@ -2722,8 +2727,7 @@ static void tcp_rack_identify_loss(struct sock *sk, int *ack_flag)
 {
 	struct tcp_sock *tp = tcp_sk(sk);
 
-	/* Use RACK to detect loss */
-	if (sock_net(sk)->ipv4.sysctl_tcp_recovery & TCP_RACK_LOSS_DETECTION) {
+	if (tcp_is_rack(sk)) {
 		u32 prior_retrans = tp->retrans_out;
 
 		tcp_rack_mark_lost(sk);
@@ -2862,7 +2866,7 @@ static void tcp_fastretrans_alert(struct sock *sk, const u32 prior_snd_una,
 		fast_rexmit = 1;
 	}
 
-	if (do_lost)
+	if (!tcp_is_rack(sk) && do_lost)
 		tcp_update_scoreboard(sk, fast_rexmit);
 	*rexmit = REXMIT_LOST;
 }