Mirror of https://github.com/Fishwaldo/Star64_linux.git (synced 2025-06-21 06:01:23 +00:00)
bpf: Refine memcg-based memory accounting for hashtab maps
Include percpu objects and the size of map metadata into the accounting.

Signed-off-by: Roman Gushchin <guro@fb.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20201201215900.3569844-13-guro@fb.com
parent 1440290adf
commit 881456811a
1 changed file with 14 additions and 10 deletions
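The diff below replaces direct __alloc_percpu_gfp() and kmalloc_node() calls with the bpf_map_alloc_percpu() and bpf_map_kmalloc_node() helpers introduced earlier in this series, so per-cpu objects are charged to the map's memory cgroup, and adds __GFP_ACCOUNT to the map metadata allocation. As a simplified sketch (not the verbatim upstream code, which lives in kernel/bpf/syscall.c under CONFIG_MEMCG_KMEM), the per-cpu helper works roughly like this:

#include <linux/bpf.h>
#include <linux/memcontrol.h>
#include <linux/percpu.h>
#include <linux/sched/mm.h>

/* Sketch of the accounted per-cpu allocator this patch switches to:
 * temporarily make the map's memcg the active one so the accounted
 * allocation (__GFP_ACCOUNT) is charged to the map's owner rather than
 * to whatever task happens to be running.
 */
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
				    size_t align, gfp_t flags)
{
	struct mem_cgroup *old_memcg;
	void __percpu *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = __alloc_percpu_gfp(size, align, flags | __GFP_ACCOUNT);
	set_active_memcg(old_memcg);

	return ptr;
}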
kernel/bpf/hashtab.c

@@ -292,7 +292,8 @@ static int prealloc_init(struct bpf_htab *htab)
 		u32 size = round_up(htab->map.value_size, 8);
 		void __percpu *pptr;
 
-		pptr = __alloc_percpu_gfp(size, 8, GFP_USER | __GFP_NOWARN);
+		pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
+					    GFP_USER | __GFP_NOWARN);
 		if (!pptr)
 			goto free_elems;
 		htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
@@ -346,7 +347,7 @@ static int alloc_extra_elems(struct bpf_htab *htab)
 	struct pcpu_freelist_node *l;
 	int cpu;
 
-	pptr = __alloc_percpu_gfp(sizeof(struct htab_elem *), 8,
-				  GFP_USER | __GFP_NOWARN);
+	pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
+				    GFP_USER | __GFP_NOWARN);
 	if (!pptr)
 		return -ENOMEM;
@@ -444,7 +445,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 	int err, i;
 	u64 cost;
 
-	htab = kzalloc(sizeof(*htab), GFP_USER);
+	htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
 	if (!htab)
 		return ERR_PTR(-ENOMEM);
 
@@ -502,8 +503,10 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
 		goto free_charge;
 
 	for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
-		htab->map_locked[i] = __alloc_percpu_gfp(sizeof(int),
-							 sizeof(int), GFP_USER);
+		htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
+							   sizeof(int),
+							   sizeof(int),
+							   GFP_USER);
 		if (!htab->map_locked[i])
 			goto free_map_locked;
 	}
@@ -925,7 +928,8 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 				l_new = ERR_PTR(-E2BIG);
 				goto dec_count;
 			}
-		l_new = kmalloc_node(htab->elem_size, GFP_ATOMIC | __GFP_NOWARN,
-				     htab->map.numa_node);
+		l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
+					     GFP_ATOMIC | __GFP_NOWARN,
+					     htab->map.numa_node);
 		if (!l_new) {
 			l_new = ERR_PTR(-ENOMEM);
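bpf_map_kmalloc_node() is the accounted counterpart used above for the non-percpu element allocation; since map updates can run in the context of an arbitrary task, the helper again charges the map's own memcg rather than the current task's. A rough sketch under the same caveats as the per-cpu helper above:

/* Sketch only: kmalloc_node() with __GFP_ACCOUNT, charged to the map's
 * memory cgroup via set_active_memcg() around the allocation.
 */
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size,
			   gfp_t flags, int node)
{
	struct mem_cgroup *old_memcg;
	void *ptr;

	old_memcg = set_active_memcg(map->memcg);
	ptr = kmalloc_node(size, flags | __GFP_ACCOUNT, node);
	set_active_memcg(old_memcg);

	return ptr;
}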
@@ -942,7 +946,7 @@ static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
 			pptr = htab_elem_get_ptr(l_new, key_size);
 		} else {
 			/* alloc_percpu zero-fills */
-			pptr = __alloc_percpu_gfp(size, 8,
-						  GFP_ATOMIC | __GFP_NOWARN);
+			pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
+						    GFP_ATOMIC | __GFP_NOWARN);
 			if (!pptr) {
 				kfree(l_new);
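One way to see the effect of the series from userspace is to create a hash map and compare memory.current of the creating process's cgroup before and after; with these patches the map's kernel memory is charged there. A minimal sketch, assuming libbpf >= 0.7, cgroup v2, and sufficient privileges (the map name and sizes below are arbitrary):

#include <stdio.h>
#include <bpf/bpf.h>

int main(void)
{
	/* Create a preallocated hash map; its kernel allocations are now
	 * accounted to this process's memory cgroup.
	 */
	int map_fd = bpf_map_create(BPF_MAP_TYPE_HASH, "memcg_demo",
				    sizeof(__u32), sizeof(__u64),
				    1 << 16, NULL);
	if (map_fd < 0) {
		perror("bpf_map_create");
		return 1;
	}

	/* Keep the map alive while inspecting the cgroup's memory.current. */
	getchar();
	return 0;
}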