tcp: Combat per-cpu skew in orphan tests.
As reported by Anton Blanchard, when we use percpu_counter_read_positive() to make our orphan socket limit checks, the check can be off by up to num_cpus_online() * batch (which is 32 by default), which on a 128 cpu machine can be as large as the default orphan limit itself.

Fix this by doing the full expensive sum check if the optimized check triggers.

Reported-by: Anton Blanchard <anton@samba.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Eric Dumazet <eric.dumazet@gmail.com>
parent b2bc85631e
commit ad1af0fedb

3 changed files with 19 additions and 12 deletions
@@ -268,11 +268,21 @@ static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
 	return seq3 - seq2 >= seq1 - seq2;
 }
 
-static inline int tcp_too_many_orphans(struct sock *sk, int num)
+static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
 {
-	return (num > sysctl_tcp_max_orphans) ||
-		(sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
-		 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2]);
+	struct percpu_counter *ocp = sk->sk_prot->orphan_count;
+	int orphans = percpu_counter_read_positive(ocp);
+
+	if (orphans << shift > sysctl_tcp_max_orphans) {
+		orphans = percpu_counter_sum_positive(ocp);
+		if (orphans << shift > sysctl_tcp_max_orphans)
+			return true;
+	}
+
+	if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
+	    atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])
+		return true;
+	return false;
 }
 
 /* syncookies: remember time of last synqueue overflow */
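
For context, here is a minimal userspace sketch of the pattern the hunk above adopts: take the cheap, possibly skewed read first, and only fall back to the exact (expensive) per-cpu sum when the cheap read claims the limit is exceeded. This is not the kernel code; fake_percpu_counter, the NR_CPUS/BATCH values, and the limit are illustrative stand-ins, and the real helper's shift argument (used by callers to tighten the limit) is omitted for brevity.

#include <stdio.h>

#define NR_CPUS 128   /* illustrative: the machine size from the report */
#define BATCH   32    /* illustrative: the default percpu_counter batch */

/* Stand-in for the kernel's percpu_counter: a shared total plus
 * per-cpu deltas that have not been folded in yet. */
struct fake_percpu_counter {
	long count;            /* shared, folded-in total */
	long pcpu[NR_CPUS];    /* per-cpu deltas, each bounded by BATCH */
};

/* Cheap read: looks only at the shared total, so it can be off by
 * up to NR_CPUS * BATCH in either direction. */
static long read_positive(const struct fake_percpu_counter *c)
{
	return c->count > 0 ? c->count : 0;
}

/* Exact but expensive: walks every per-cpu delta. */
static long sum_positive(const struct fake_percpu_counter *c)
{
	long sum = c->count;
	for (int i = 0; i < NR_CPUS; i++)
		sum += c->pcpu[i];
	return sum > 0 ? sum : 0;
}

/* The shape of the fix: only pay for the exact sum when the cheap
 * read claims we are over the limit, so a skewed fast read cannot
 * by itself trigger the "too many orphans" path. */
static int too_many_orphans(const struct fake_percpu_counter *c, long limit)
{
	if (read_positive(c) > limit)
		return sum_positive(c) > limit;
	return 0;
}

int main(void)
{
	struct fake_percpu_counter c = { .count = 4000 };
	long limit = 3000;     /* illustrative limit */

	/* Worst case: every CPU holds an unfolded negative delta, so the
	 * shared total overstates the real count by NR_CPUS * (BATCH - 1). */
	for (int i = 0; i < NR_CPUS; i++)
		c.pcpu[i] = -(BATCH - 1);

	printf("cheap read = %ld, exact sum = %ld, over limit? %d\n",
	       read_positive(&c), sum_positive(&c), too_many_orphans(&c, limit));
	/* Prints: cheap read = 4000, exact sum = 32, over limit? 0 */
	return 0;
}

With these illustrative numbers the cheap read overstates the true count by 128 * 31 = 3968, which is why the single fast comparison alone could falsely report the orphan limit as exceeded on a large machine.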