tun: properly test for IFF_UP
Same reasons as the ones explained in commit 4179cb5a4c
("vxlan: test dev->flags & IFF_UP before calling netif_rx()"):
netif_rx_ni() or napi_gro_frags() must be called under a strict contract.

At device dismantle phase, core networking clears IFF_UP and
flush_all_backlogs() is called after an rcu grace period to make sure
no incoming packet might be in a cpu backlog and still referencing the
device. A similar protocol is used for the gro layer.

Most drivers call netif_rx() from their interrupt handler, and since
interrupts are disabled at device dismantle, netif_rx() does not have
to check dev->flags & IFF_UP. Virtual drivers do not have this
guarantee, and must therefore make the check themselves.

Fixes: 1bd4978a88 ("tun: honor IFF_UP in tun_get_user()")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: syzbot <syzkaller@googlegroups.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent daa5c4d016
commit 4477138fa0
1 changed file with 11 additions and 4 deletions
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1763,9 +1763,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	int skb_xdp = 1;
 	bool frags = tun_napi_frags_enabled(tfile);
 
-	if (!(tun->dev->flags & IFF_UP))
-		return -EIO;
-
 	if (!(tun->flags & IFF_NO_PI)) {
 		if (len < sizeof(pi))
 			return -EINVAL;
@@ -1867,6 +1864,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 			err = skb_copy_datagram_from_iter(skb, 0, from, len);
 
 		if (err) {
+			err = -EFAULT;
+drop:
 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
 			kfree_skb(skb);
 			if (frags) {
@@ -1874,7 +1873,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 				mutex_unlock(&tfile->napi_mutex);
 			}
 
-			return -EFAULT;
+			return err;
 		}
 	}
 
@@ -1958,6 +1957,12 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	    !tfile->detached)
 		rxhash = __skb_get_hash_symmetric(skb);
 
+	rcu_read_lock();
+	if (unlikely(!(tun->dev->flags & IFF_UP))) {
+		err = -EIO;
+		goto drop;
+	}
+
 	if (frags) {
 		/* Exercise flow dissector code path. */
 		u32 headlen = eth_get_headlen(skb->data, skb_headlen(skb));
@@ -1965,6 +1970,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 		if (unlikely(headlen > skb_headlen(skb))) {
 			this_cpu_inc(tun->pcpu_stats->rx_dropped);
 			napi_free_frags(&tfile->napi);
+			rcu_read_unlock();
 			mutex_unlock(&tfile->napi_mutex);
 			WARN_ON(1);
 			return -ENOMEM;
@@ -1992,6 +1998,7 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
 	} else {
 		netif_rx_ni(skb);
 	}
+	rcu_read_unlock();
 
 	stats = get_cpu_ptr(tun->pcpu_stats);
 	u64_stats_update_begin(&stats->syncp);