bpf: introduce bpf_mem_alloc_size()
Introduce helpers to get the memory usage of bpf_mem_alloc, covering both
the bpf_mem_alloc pool and the size of in-use elements. Note that within
the bpf_mem_alloc pool we only count the free list and not the other
lists, because the other lists hold few elements; ignoring them keeps the
code simple.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
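
A hedged illustration (not part of this patch; the struct and function names
below are hypothetical) of how these helpers could be consumed: a map that
owns a bpf_mem_alloc might report its footprint by adding the allocator's
cached free-list size to the bytes it already tracks for in-use elements.

#include <linux/atomic.h>
#include <linux/bpf_mem_alloc.h>

/* Hypothetical map structure, for illustration only. */
struct example_map {
	struct bpf_mem_alloc ma;	/* element allocator */
	atomic64_t in_use_bytes;	/* charged when elements are allocated */
};

static unsigned long example_map_mem_usage(struct example_map *map)
{
	/* Free-list memory still held by the allocator's per-CPU caches,
	 * plus the elements the map has handed out and not yet freed.
	 */
	return bpf_mem_alloc_size(&map->ma) +
	       (unsigned long)atomic64_read(&map->in_use_bytes);
}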
laoar authored and intel-lab-lkp committed Feb 2, 2023
1 parent 1d9c688 commit 996f3e2
Showing 2 changed files with 72 additions and 0 deletions.
2 changes: 2 additions & 0 deletions include/linux/bpf_mem_alloc.h
@@ -24,5 +24,7 @@ void bpf_mem_free(struct bpf_mem_alloc *ma, void *ptr);
/* kmem_cache_alloc/free equivalent: */
void *bpf_mem_cache_alloc(struct bpf_mem_alloc *ma);
void bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr);
unsigned long bpf_mem_alloc_size(struct bpf_mem_alloc *ma);
unsigned long bpf_mem_cache_elem_size(struct bpf_mem_alloc *ma, void *ptr);

#endif /* _BPF_MEM_ALLOC_H */
70 changes: 70 additions & 0 deletions kernel/bpf/memalloc.c
@@ -224,6 +224,22 @@ static void free_one(struct bpf_mem_cache *c, void *obj)
	kfree(obj);
}

unsigned long bpf_mem_cache_size(struct bpf_mem_cache *c, void *obj)
{
	unsigned long size;

	if (!obj)
		return 0;

	if (c->percpu_size) {
		size = percpu_size(((void **)obj)[1]);
		size += ksize(obj);
		return size;
	}

	return ksize(obj);
}

static void __free_rcu(struct rcu_head *head)
{
struct bpf_mem_cache *c = container_of(head, struct bpf_mem_cache, rcu);
@@ -559,6 +575,41 @@ void bpf_mem_alloc_destroy(struct bpf_mem_alloc *ma)
	}
}

/* We only account the elements on the free list. */
static unsigned long bpf_mem_cache_free_size(struct bpf_mem_cache *c)
{
	return c->unit_size * c->free_cnt;
}

/* Get the free list size of a bpf_mem_alloc pool. */
unsigned long bpf_mem_alloc_size(struct bpf_mem_alloc *ma)
{
	struct bpf_mem_caches *cc;
	struct bpf_mem_cache *c;
	unsigned long size = 0;
	int cpu, i;

	if (ma->cache) {
		for_each_possible_cpu(cpu) {
			c = per_cpu_ptr(ma->cache, cpu);
			size += bpf_mem_cache_free_size(c);
		}
		size += percpu_size(ma->cache);
	}
	if (ma->caches) {
		for_each_possible_cpu(cpu) {
			cc = per_cpu_ptr(ma->caches, cpu);
			for (i = 0; i < NUM_CACHES; i++) {
				c = &cc->cache[i];
				size += bpf_mem_cache_free_size(c);
			}
		}
		size += percpu_size(ma->caches);
	}

	return size;
}

/* notrace is necessary here and in other functions to make sure
* bpf programs cannot attach to them and cause llist corruptions.
*/
@@ -675,3 +726,22 @@ void notrace bpf_mem_cache_free(struct bpf_mem_alloc *ma, void *ptr)

	unit_free(this_cpu_ptr(ma->cache), ptr);
}

/* Get the element size from the element pointer @ptr */
unsigned long notrace bpf_mem_cache_elem_size(struct bpf_mem_alloc *ma, void *ptr)
{
	struct llist_node *llnode;
	struct bpf_mem_cache *c;
	unsigned long size;

	if (!ptr)
		return 0;

	llnode = ptr - LLIST_NODE_SZ;
	migrate_disable();
	c = this_cpu_ptr(ma->cache);
	size = bpf_mem_cache_size(c, llnode);
	migrate_enable();

	return size;
}
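
Since bpf_mem_cache_elem_size() takes a live element pointer, a caller could,
for example, uncharge an element's size just before returning it to the cache.
The sketch below is illustrative only; example_map and its in_use_bytes field
are the same hypothetical names used in the earlier example.

static void example_map_elem_free(struct example_map *map, void *elem)
{
	/* Read the size before the element goes back to the cache. */
	unsigned long size = bpf_mem_cache_elem_size(&map->ma, elem);

	atomic64_sub(size, &map->in_use_bytes);
	bpf_mem_cache_free(&map->ma, elem);
}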
