mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-23 15:27:29 +00:00
tcp: improve latencies of timer triggered events
Modern TCP stack highly depends on tcp_write_timer() having a small latency, but current implementation doesn't exactly meet the expectations. When a timer fires but finds the socket is owned by the user, it rearms itself for an additional delay hoping next run will be more successful. tcp_write_timer() for example uses a 50ms delay for next try, and it defeats many attempts to get predictable TCP behavior in terms of latencies. Use the recently introduced tcp_release_cb(), so that the user owning the socket will call various handlers right before socket release. This will permit us to post a followup patch to address the tcp_tso_should_defer() syndrome (some deferred packets have to wait for the RTO timer to be transmitted, while cwnd should allow us to send them sooner) Signed-off-by: Eric Dumazet <edumazet@google.com> Cc: Tom Herbert <therbert@google.com> Cc: Yuchung Cheng <ycheng@google.com> Cc: Neal Cardwell <ncardwell@google.com> Cc: Nandita Dukkipati <nanditad@google.com> Cc: H.K. Jerry Chu <hkchu@google.com> Cc: John Heffner <johnwheffner@gmail.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
9dc274151a
commit
6f458dfb40
4 changed files with 71 additions and 51 deletions
|
@ -32,17 +32,6 @@ int sysctl_tcp_retries2 __read_mostly = TCP_RETR2;
|
|||
int sysctl_tcp_orphan_retries __read_mostly;
|
||||
int sysctl_tcp_thin_linear_timeouts __read_mostly;
|
||||
|
||||
static void tcp_write_timer(unsigned long);
|
||||
static void tcp_delack_timer(unsigned long);
|
||||
static void tcp_keepalive_timer (unsigned long data);
|
||||
|
||||
void tcp_init_xmit_timers(struct sock *sk)
|
||||
{
|
||||
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
|
||||
&tcp_keepalive_timer);
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_init_xmit_timers);
|
||||
|
||||
static void tcp_write_err(struct sock *sk)
|
||||
{
|
||||
sk->sk_err = sk->sk_err_soft ? : ETIMEDOUT;
|
||||
|
@ -205,21 +194,11 @@ static int tcp_write_timeout(struct sock *sk)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static void tcp_delack_timer(unsigned long data)
|
||||
void tcp_delack_timer_handler(struct sock *sk)
|
||||
{
|
||||
struct sock *sk = (struct sock *)data;
|
||||
struct tcp_sock *tp = tcp_sk(sk);
|
||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
|
||||
bh_lock_sock(sk);
|
||||
if (sock_owned_by_user(sk)) {
|
||||
/* Try again later. */
|
||||
icsk->icsk_ack.blocked = 1;
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
|
||||
sk_reset_timer(sk, &icsk->icsk_delack_timer, jiffies + TCP_DELACK_MIN);
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
sk_mem_reclaim_partial(sk);
|
||||
|
||||
if (sk->sk_state == TCP_CLOSE || !(icsk->icsk_ack.pending & ICSK_ACK_TIMER))
|
||||
|
@ -260,7 +239,21 @@ static void tcp_delack_timer(unsigned long data)
|
|||
out:
|
||||
if (sk_under_memory_pressure(sk))
|
||||
sk_mem_reclaim(sk);
|
||||
out_unlock:
|
||||
}
|
||||
|
||||
static void tcp_delack_timer(unsigned long data)
|
||||
{
|
||||
struct sock *sk = (struct sock *)data;
|
||||
|
||||
bh_lock_sock(sk);
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
tcp_delack_timer_handler(sk);
|
||||
} else {
|
||||
inet_csk(sk)->icsk_ack.blocked = 1;
|
||||
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_DELAYEDACKLOCKED);
|
||||
/* deleguate our work to tcp_release_cb() */
|
||||
set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
|
||||
}
|
||||
bh_unlock_sock(sk);
|
||||
sock_put(sk);
|
||||
}
|
||||
|
@ -450,19 +443,11 @@ out_reset_timer:
|
|||
out:;
|
||||
}
|
||||
|
||||
static void tcp_write_timer(unsigned long data)
|
||||
void tcp_write_timer_handler(struct sock *sk)
|
||||
{
|
||||
struct sock *sk = (struct sock *)data;
|
||||
struct inet_connection_sock *icsk = inet_csk(sk);
|
||||
int event;
|
||||
|
||||
bh_lock_sock(sk);
|
||||
if (sock_owned_by_user(sk)) {
|
||||
/* Try again later */
|
||||
sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + (HZ / 20));
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
if (sk->sk_state == TCP_CLOSE || !icsk->icsk_pending)
|
||||
goto out;
|
||||
|
||||
|
@ -485,7 +470,19 @@ static void tcp_write_timer(unsigned long data)
|
|||
|
||||
out:
|
||||
sk_mem_reclaim(sk);
|
||||
out_unlock:
|
||||
}
|
||||
|
||||
static void tcp_write_timer(unsigned long data)
|
||||
{
|
||||
struct sock *sk = (struct sock *)data;
|
||||
|
||||
bh_lock_sock(sk);
|
||||
if (!sock_owned_by_user(sk)) {
|
||||
tcp_write_timer_handler(sk);
|
||||
} else {
|
||||
/* deleguate our work to tcp_release_cb() */
|
||||
set_bit(TCP_WRITE_TIMER_DEFERRED, &tcp_sk(sk)->tsq_flags);
|
||||
}
|
||||
bh_unlock_sock(sk);
|
||||
sock_put(sk);
|
||||
}
|
||||
|
@ -602,3 +599,10 @@ out:
|
|||
bh_unlock_sock(sk);
|
||||
sock_put(sk);
|
||||
}
|
||||
|
||||
void tcp_init_xmit_timers(struct sock *sk)
|
||||
{
|
||||
inet_csk_init_xmit_timers(sk, &tcp_write_timer, &tcp_delack_timer,
|
||||
&tcp_keepalive_timer);
|
||||
}
|
||||
EXPORT_SYMBOL(tcp_init_xmit_timers);
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue