mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-21 22:32:45 +00:00
tcp: introduce tcp_under_memory_pressure()
Introduce an optimized version of sk_under_memory_pressure() for TCP. Our intent is to use it in fast paths.

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
a6c5ea4ccf
commit
b8da51ebb1
4 changed files with 15 additions and 7 deletions
|
@@ -359,7 +359,7 @@ static void tcp_grow_window(struct sock *sk, const struct sk_buff *skb)
 	/* Check #1 */
 	if (tp->rcv_ssthresh < tp->window_clamp &&
 	    (int)tp->rcv_ssthresh < tcp_space(sk) &&
-	    !sk_under_memory_pressure(sk)) {
+	    !tcp_under_memory_pressure(sk)) {
 		int incr;

 		/* Check #2. Increase window, if skb with such overhead
@@ -446,7 +446,7 @@ static void tcp_clamp_window(struct sock *sk)

 	if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
 	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
-	    !sk_under_memory_pressure(sk) &&
+	    !tcp_under_memory_pressure(sk) &&
 	    sk_memory_allocated(sk) < sk_prot_mem_limits(sk, 0)) {
 		sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
 				    sysctl_tcp_rmem[2]);
@@ -4781,7 +4781,7 @@ static int tcp_prune_queue(struct sock *sk)

 	if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf)
 		tcp_clamp_window(sk);
-	else if (sk_under_memory_pressure(sk))
+	else if (tcp_under_memory_pressure(sk))
 		tp->rcv_ssthresh = min(tp->rcv_ssthresh, 4U * tp->advmss);

 	tcp_collapse_ofo_queue(sk);
@@ -4825,7 +4825,7 @@ static bool tcp_should_expand_sndbuf(const struct sock *sk)
 		return false;

 	/* If we are under global TCP memory pressure, do not expand. */
-	if (sk_under_memory_pressure(sk))
+	if (tcp_under_memory_pressure(sk))
 		return false;

 	/* If we are under soft global TCP memory pressure, do not expand. */
|
Loading…
Add table
Add a link
Reference in a new issue