mirror of
https://github.com/Fishwaldo/Star64_linux.git
synced 2025-07-23 07:12:09 +00:00
net: frag, move LRU list maintenance outside of rwlock
Updating the fragmentation queues' LRU (Least-Recently-Used) list required taking the hash writer lock. However, the LRU list isn't tied to the hash at all, so we can use a separate lock for it. Original-idea-by: Florian Westphal <fw@strlen.de> Signed-off-by: Jesper Dangaard Brouer <brouer@redhat.com> Signed-off-by: David S. Miller <davem@davemloft.net>
This commit is contained in:
parent
6d7b857d54
commit
3ef0eb0db4
5 changed files with 33 additions and 14 deletions
|
@ -6,6 +6,7 @@
|
|||
struct netns_frags {
|
||||
int nqueues;
|
||||
struct list_head lru_list;
|
||||
spinlock_t lru_lock;
|
||||
|
||||
/* The percpu_counter "mem" need to be cacheline aligned.
|
||||
* mem.count must not share cacheline with other writers
|
||||
|
@ -116,4 +117,25 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
|
|||
return percpu_counter_sum_positive(&nf->mem);
|
||||
}
|
||||
|
||||
static inline void inet_frag_lru_move(struct inet_frag_queue *q)
|
||||
{
|
||||
spin_lock(&q->net->lru_lock);
|
||||
list_move_tail(&q->lru_list, &q->net->lru_list);
|
||||
spin_unlock(&q->net->lru_lock);
|
||||
}
|
||||
|
||||
static inline void inet_frag_lru_del(struct inet_frag_queue *q)
|
||||
{
|
||||
spin_lock(&q->net->lru_lock);
|
||||
list_del(&q->lru_list);
|
||||
spin_unlock(&q->net->lru_lock);
|
||||
}
|
||||
|
||||
static inline void inet_frag_lru_add(struct netns_frags *nf,
|
||||
struct inet_frag_queue *q)
|
||||
{
|
||||
spin_lock(&nf->lru_lock);
|
||||
list_add_tail(&q->lru_list, &nf->lru_list);
|
||||
spin_unlock(&nf->lru_lock);
|
||||
}
|
||||
#endif
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue