inet: frags: better deal with smp races
Multiple cpus might attempt to insert a new fragment in rhashtable,
if for example RPS is buggy, as reported by 배석진 in
https://patchwork.ozlabs.org/patch/994601/
We use rhashtable_lookup_get_insert_key() instead of
rhashtable_insert_fast() to let cpus losing the race
free their own inet_frag_queue and use the one that
was inserted by another cpu.
Fixes: 648700f76b ("inet: frags: use rhashtables for reassembly units")
Signed-off-by: Eric Dumazet <edumazet@google.com>
Reported-by: 배석진 <soukjin.bae@samsung.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
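
For context, a minimal sketch of the calling convention the patch relies on (the identifiers mirror the patched inet_frag_create() in the diff below; this is an illustration of the pattern, not a standalone, buildable unit). rhashtable_lookup_get_insert_key() returns NULL when the new object was inserted, the already-present object when another CPU inserted the same key first, and an ERR_PTR() when the insert itself failed:

        /* Try to insert q; the return value distinguishes three cases:
         *   NULL            - q is now in the table, this CPU won the race
         *   existing object - another CPU already inserted the same key
         *   ERR_PTR(-Exxx)  - the insert itself failed
         */
        *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
                                                 &q->node, f->rhash_params);
        if (*prev) {
                /* Error or lost the race: tear down our own queue and let
                 * the caller use whatever ended up in *prev.
                 */
                q->flags |= INET_FRAG_COMPLETE;
                inet_frag_kill(q);
                inet_frag_destroy(q);
                return NULL;
        }
        return q;

inet_frag_find() then either returns the queue it just created (when *prev stays NULL) or takes a reference on *prev, the queue inserted by the CPU that won the race, as the second hunk shows.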
parent e12c225258
commit 0d5b9311ba
1 changed file with 15 additions and 14 deletions
net/ipv4/inet_fragment.c

@@ -178,21 +178,22 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-                                                void *arg)
+                                                void *arg,
+                                                struct inet_frag_queue **prev)
 {
         struct inet_frags *f = nf->f;
         struct inet_frag_queue *q;
-        int err;
 
         q = inet_frag_alloc(nf, f, arg);
-        if (!q)
+        if (!q) {
+                *prev = ERR_PTR(-ENOMEM);
                 return NULL;
-
+        }
         mod_timer(&q->timer, jiffies + nf->timeout);
 
-        err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
-                                     f->rhash_params);
-        if (err < 0) {
+        *prev = rhashtable_lookup_get_insert_key(&nf->rhashtable, &q->key,
+                                                 &q->node, f->rhash_params);
+        if (*prev) {
                 q->flags |= INET_FRAG_COMPLETE;
                 inet_frag_kill(q);
                 inet_frag_destroy(q);
@@ -204,22 +205,22 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 /* TODO : call from rcu_read_lock() and no longer use refcount_inc_not_zero() */
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
 {
-        struct inet_frag_queue *fq;
+        struct inet_frag_queue *fq = NULL, *prev;
 
         if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
                 return NULL;
 
         rcu_read_lock();
 
-        fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
-        if (fq) {
+        prev = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+        if (!prev)
+                fq = inet_frag_create(nf, key, &prev);
+        if (prev && !IS_ERR(prev)) {
+                fq = prev;
                 if (!refcount_inc_not_zero(&fq->refcnt))
                         fq = NULL;
-                rcu_read_unlock();
-                return fq;
         }
         rcu_read_unlock();
-
-        return inet_frag_create(nf, key);
+        return fq;
 }
 EXPORT_SYMBOL(inet_frag_find);