ipv4: add (struct uncached_list)->quarantine list
This is an optimization to keep the per-cpu lists as short as possible: whenever rt_flush_dev() changes one rtable dst.dev matching the disappearing device, it can transfer the object to a quarantine list, waiting for a final rt_del_uncached_list().

Signed-off-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent ba55ef8163
commit 29e5375d7f

1 changed file with 9 additions and 3 deletions
net/ipv4/route.c

@@ -1485,6 +1485,7 @@ static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
 struct uncached_list {
 	spinlock_t		lock;
 	struct list_head	head;
+	struct list_head	quarantine;
 };
 
 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
@@ -1506,7 +1507,7 @@ void rt_del_uncached_list(struct rtable *rt)
 		struct uncached_list *ul = rt->rt_uncached_list;
 
 		spin_lock_bh(&ul->lock);
-		list_del(&rt->rt_uncached);
+		list_del_init(&rt->rt_uncached);
 		spin_unlock_bh(&ul->lock);
 	}
 }
@@ -1521,20 +1522,24 @@ static void ipv4_dst_destroy(struct dst_entry *dst)
 
 void rt_flush_dev(struct net_device *dev)
 {
-	struct rtable *rt;
+	struct rtable *rt, *safe;
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
+		if (list_empty(&ul->head))
+			continue;
+
 		spin_lock_bh(&ul->lock);
-		list_for_each_entry(rt, &ul->head, rt_uncached) {
+		list_for_each_entry_safe(rt, safe, &ul->head, rt_uncached) {
 			if (rt->dst.dev != dev)
 				continue;
 			rt->dst.dev = blackhole_netdev;
 			dev_replace_track(dev, blackhole_netdev,
 					  &rt->dst.dev_tracker,
 					  GFP_ATOMIC);
+			list_move(&rt->rt_uncached, &ul->quarantine);
 		}
 		spin_unlock_bh(&ul->lock);
 	}
@@ -3706,6 +3711,7 @@ int __init ip_rt_init(void)
 		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
 
 		INIT_LIST_HEAD(&ul->head);
+		INIT_LIST_HEAD(&ul->quarantine);
 		spin_lock_init(&ul->lock);
 	}
 #ifdef CONFIG_IP_ROUTE_CLASSID
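The patch relies on a property of the kernel's doubly linked list_head: an entry moved with list_move() from the per-cpu ->head list to ->quarantine is still unlinked correctly by a later list_del_init(), because the node's prev/next pointers always refer to whichever list currently holds it. Below is a minimal userspace sketch of that property; the helper names mirror include/linux/list.h but are simplified re-implementations written here only for illustration, not kernel code.

/*
 * Sketch: a node parked on a quarantine list is still removed cleanly
 * by list_del_init(), while the original "head" list stays short.
 */
#include <stdio.h>

struct list_head { struct list_head *prev, *next; };

static void INIT_LIST_HEAD(struct list_head *h) { h->prev = h->next = h; }

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static void list_del_init(struct list_head *n)
{
	n->prev->next = n->next;
	n->next->prev = n->prev;
	INIT_LIST_HEAD(n);		/* node stays safe to delete again */
}

static void list_move(struct list_head *n, struct list_head *h)
{
	list_del_init(n);
	list_add_tail(n, h);
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
	struct list_head head, quarantine, rt;

	INIT_LIST_HEAD(&head);
	INIT_LIST_HEAD(&quarantine);

	/* rt_add_uncached_list(): entry lives on the per-cpu head list. */
	INIT_LIST_HEAD(&rt);
	list_add_tail(&rt, &head);

	/* rt_flush_dev(): park the dying entry on the quarantine list. */
	list_move(&rt, &quarantine);
	printf("head empty: %d, quarantine empty: %d\n",
	       list_empty(&head), list_empty(&quarantine));

	/* rt_del_uncached_list(): unlinks from whichever list holds it. */
	list_del_init(&rt);
	printf("head empty: %d, quarantine empty: %d\n",
	       list_empty(&head), list_empty(&quarantine));
	return 0;
}

Running the sketch shows the head list already empty after the quarantine move, and the final delete emptying the quarantine list as well; this is why rt_flush_dev() only ever has to walk live entries on ->head, while rt_del_uncached_list() needs no change beyond switching to list_del_init().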