Skip to content

Commit

Permalink
bpf: add and use bpf map free helpers
Browse files Browse the repository at this point in the history
New helpers are introduced for freeing bpf memory, replacing direct calls to the
generic free helpers (kfree, kvfree, free_percpu, kvfree_rcu). Routing all frees
through these wrappers will make it possible to track the freeing of bpf memory
in the future.

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
  • Loading branch information
laoar authored and intel-lab-lkp committed Jan 12, 2023
1 parent 2616ebb commit e11706d
Show file tree
Hide file tree
Showing 15 changed files with 68 additions and 48 deletions.
19 changes: 19 additions & 0 deletions include/linux/bpf.h
Expand Up @@ -1869,6 +1869,24 @@ int generic_map_delete_batch(struct bpf_map *map,
struct bpf_map *bpf_map_get_curr_or_next(u32 *id);
struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id);


/*
 * bpf_map_kfree - free bpf map memory allocated with the kmalloc family.
 *
 * Thin wrapper around kfree(); per this patch's intent, all frees of bpf
 * map memory go through these bpf_map_* helpers so that such frees can be
 * hooked/tracked in the future.
 */
static inline void bpf_map_kfree(const void *ptr)
{
	kfree(ptr);
}

/*
 * bpf_map_kvfree - free bpf map memory that may be kmalloc'ed or vmalloc'ed.
 *
 * Thin wrapper around kvfree(); exists so frees of bpf map memory can be
 * tracked in the future instead of calling the generic helper directly.
 */
static inline void bpf_map_kvfree(const void *ptr)
{
	kvfree(ptr);
}

/*
 * bpf_map_free_percpu - free per-cpu bpf map memory.
 *
 * Thin wrapper around free_percpu(); exists so frees of bpf map per-cpu
 * memory can be tracked in the future instead of calling the generic
 * helper directly.
 */
static inline void bpf_map_free_percpu(void __percpu *ptr)
{
	free_percpu(ptr);
}

/*
 * bpf_map_kfree_rcu - free bpf map memory after an RCU grace period.
 * Forwards to kvfree_rcu(); a macro (not an inline) because kvfree_rcu()
 * takes an optional rcu_head member name argument.
 */
#define bpf_map_kfree_rcu(ptr, rhf...) kvfree_rcu(ptr, ## rhf)

#ifdef CONFIG_MEMCG_KMEM
void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
int node);
Expand All @@ -1877,6 +1895,7 @@ void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size,
gfp_t flags);
void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size,
size_t align, gfp_t flags);

#else
static inline void *
bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags,
Expand Down
4 changes: 2 additions & 2 deletions kernel/bpf/arraymap.c
Expand Up @@ -24,7 +24,7 @@ static void bpf_array_free_percpu(struct bpf_array *array)
int i;

for (i = 0; i < array->map.max_entries; i++) {
free_percpu(array->pptrs[i]);
bpf_map_free_percpu(array->pptrs[i]);
cond_resched();
}
}
Expand Down Expand Up @@ -1132,7 +1132,7 @@ static void prog_array_map_free(struct bpf_map *map)
list_del_init(&elem->list);
kfree(elem);
}
kfree(aux);
bpf_map_kfree(aux);
fd_array_map_free(map);
}

Expand Down
2 changes: 1 addition & 1 deletion kernel/bpf/bpf_cgrp_storage.c
Expand Up @@ -64,7 +64,7 @@ void bpf_cgrp_storage_free(struct cgroup *cgroup)
rcu_read_unlock();

if (free_cgroup_storage)
kfree_rcu(local_storage, rcu);
bpf_map_kfree_rcu(local_storage, rcu);
}

static struct bpf_local_storage_data *
Expand Down
2 changes: 1 addition & 1 deletion kernel/bpf/bpf_inode_storage.c
Expand Up @@ -78,7 +78,7 @@ void bpf_inode_storage_free(struct inode *inode)
rcu_read_unlock();

if (free_inode_storage)
kfree_rcu(local_storage, rcu);
bpf_map_kfree_rcu(local_storage, rcu);
}

static void *bpf_fd_inode_storage_lookup_elem(struct bpf_map *map, void *key)
Expand Down
20 changes: 10 additions & 10 deletions kernel/bpf/bpf_local_storage.c
Expand Up @@ -93,9 +93,9 @@ void bpf_local_storage_free_rcu(struct rcu_head *rcu)
*/
local_storage = container_of(rcu, struct bpf_local_storage, rcu);
if (rcu_trace_implies_rcu_gp())
kfree(local_storage);
bpf_map_kfree(local_storage);
else
kfree_rcu(local_storage, rcu);
bpf_map_kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
Expand All @@ -104,9 +104,9 @@ static void bpf_selem_free_rcu(struct rcu_head *rcu)

selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
if (rcu_trace_implies_rcu_gp())
kfree(selem);
bpf_map_kfree(selem);
else
kfree_rcu(selem, rcu);
bpf_map_kfree_rcu(selem, rcu);
}

/* local_storage->lock must be held and selem->local_storage == local_storage.
Expand Down Expand Up @@ -162,7 +162,7 @@ static bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_stor
if (use_trace_rcu)
call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
else
kfree_rcu(selem, rcu);
bpf_map_kfree_rcu(selem, rcu);

return free_local_storage;
}
Expand Down Expand Up @@ -191,7 +191,7 @@ static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem,
call_rcu_tasks_trace(&local_storage->rcu,
bpf_local_storage_free_rcu);
else
kfree_rcu(local_storage, rcu);
bpf_map_kfree_rcu(local_storage, rcu);
}
}

Expand Down Expand Up @@ -358,7 +358,7 @@ int bpf_local_storage_alloc(void *owner,
return 0;

uncharge:
kfree(storage);
bpf_map_kfree(storage);
mem_uncharge(smap, owner, sizeof(*storage));
return err;
}
Expand Down Expand Up @@ -402,7 +402,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,

err = bpf_local_storage_alloc(owner, smap, selem, gfp_flags);
if (err) {
kfree(selem);
bpf_map_kfree(selem);
mem_uncharge(smap, owner, smap->elem_size);
return ERR_PTR(err);
}
Expand Down Expand Up @@ -496,7 +496,7 @@ bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
raw_spin_unlock_irqrestore(&local_storage->lock, flags);
if (selem) {
mem_uncharge(smap, owner, smap->elem_size);
kfree(selem);
bpf_map_kfree(selem);
}
return ERR_PTR(err);
}
Expand Down Expand Up @@ -713,6 +713,6 @@ void bpf_local_storage_map_free(struct bpf_map *map,
*/
synchronize_rcu();

kvfree(smap->buckets);
bpf_map_kvfree(smap->buckets);
bpf_map_area_free(smap);
}
2 changes: 1 addition & 1 deletion kernel/bpf/bpf_task_storage.c
Expand Up @@ -91,7 +91,7 @@ void bpf_task_storage_free(struct task_struct *task)
rcu_read_unlock();

if (free_task_storage)
kfree_rcu(local_storage, rcu);
bpf_map_kfree_rcu(local_storage, rcu);
}

static void *bpf_pid_task_storage_lookup_elem(struct bpf_map *map, void *key)
Expand Down
13 changes: 6 additions & 7 deletions kernel/bpf/cpumap.c
Expand Up @@ -164,8 +164,8 @@ static void put_cpu_map_entry(struct bpf_cpu_map_entry *rcpu)
/* The queue should be empty at this point */
__cpu_map_ring_cleanup(rcpu->queue);
ptr_ring_cleanup(rcpu->queue, NULL);
kfree(rcpu->queue);
kfree(rcpu);
bpf_map_kfree(rcpu->queue);
bpf_map_kfree(rcpu);
}
}

Expand Down Expand Up @@ -484,11 +484,11 @@ __cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
free_ptr_ring:
ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
kfree(rcpu->queue);
bpf_map_kfree(rcpu->queue);
free_bulkq:
free_percpu(rcpu->bulkq);
bpf_map_free_percpu(rcpu->bulkq);
free_rcu:
kfree(rcpu);
bpf_map_kfree(rcpu);
return NULL;
}

Expand All @@ -502,8 +502,7 @@ static void __cpu_map_entry_free(struct rcu_head *rcu)
* find this entry.
*/
rcpu = container_of(rcu, struct bpf_cpu_map_entry, rcu);

free_percpu(rcpu->bulkq);
bpf_map_free_percpu(rcpu->bulkq);
/* Cannot kthread_stop() here, last put free rcpu resources */
put_cpu_map_entry(rcpu);
}
Expand Down
10 changes: 6 additions & 4 deletions kernel/bpf/devmap.c
Expand Up @@ -218,7 +218,7 @@ static void dev_map_free(struct bpf_map *map)
if (dev->xdp_prog)
bpf_prog_put(dev->xdp_prog);
dev_put(dev->dev);
kfree(dev);
bpf_map_kfree(dev);
}
}

Expand All @@ -234,7 +234,7 @@ static void dev_map_free(struct bpf_map *map)
if (dev->xdp_prog)
bpf_prog_put(dev->xdp_prog);
dev_put(dev->dev);
kfree(dev);
bpf_map_kfree(dev);
}

bpf_map_area_free(dtab->netdev_map);
Expand Down Expand Up @@ -791,12 +791,14 @@ static void *dev_map_hash_lookup_elem(struct bpf_map *map, void *key)
/*
 * RCU callback that tears down a devmap entry: drop its XDP prog reference
 * (if any), release the netdev reference, then free the entry itself via
 * the bpf map free helper.
 *
 * As rendered, this hunk kept both the removed kfree(dev) line and the
 * added bpf_map_kfree(dev) line (a double free), and read dev->dtab into
 * an unused local after dev had already been freed (use-after-free).
 * Reconstructed here: free exactly once, and drop the dead 'dtab' local.
 */
static void __dev_map_entry_free(struct rcu_head *rcu)
{
	struct bpf_dtab_netdev *dev;

	dev = container_of(rcu, struct bpf_dtab_netdev, rcu);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	bpf_map_kfree(dev);
}

static int dev_map_delete_elem(struct bpf_map *map, void *key)
Expand Down Expand Up @@ -881,7 +883,7 @@ static struct bpf_dtab_netdev *__dev_map_alloc_node(struct net *net,
err_put_dev:
dev_put(dev->dev);
err_out:
kfree(dev);
bpf_map_kfree(dev);
return ERR_PTR(-EINVAL);
}

Expand Down
8 changes: 4 additions & 4 deletions kernel/bpf/hashtab.c
Expand Up @@ -266,7 +266,7 @@ static void htab_free_elems(struct bpf_htab *htab)

pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
htab->map.key_size);
free_percpu(pptr);
bpf_map_free_percpu(pptr);
cond_resched();
}
free_elems:
Expand Down Expand Up @@ -584,7 +584,7 @@ static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
if (htab->use_percpu_counter)
percpu_counter_destroy(&htab->pcount);
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
free_percpu(htab->map_locked[i]);
bpf_map_free_percpu(htab->map_locked[i]);
bpf_map_area_free(htab->buckets);
bpf_mem_alloc_destroy(&htab->pcpu_ma);
bpf_mem_alloc_destroy(&htab->ma);
Expand Down Expand Up @@ -1511,14 +1511,14 @@ static void htab_map_free(struct bpf_map *map)
prealloc_destroy(htab);
}

free_percpu(htab->extra_elems);
bpf_map_free_percpu(htab->extra_elems);
bpf_map_area_free(htab->buckets);
bpf_mem_alloc_destroy(&htab->pcpu_ma);
bpf_mem_alloc_destroy(&htab->ma);
if (htab->use_percpu_counter)
percpu_counter_destroy(&htab->pcount);
for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
free_percpu(htab->map_locked[i]);
bpf_map_free_percpu(htab->map_locked[i]);
lockdep_unregister_key(&htab->lockdep_key);
bpf_map_area_free(htab);
}
Expand Down
2 changes: 1 addition & 1 deletion kernel/bpf/helpers.c
Expand Up @@ -1372,7 +1372,7 @@ void bpf_timer_cancel_and_free(void *val)
*/
if (this_cpu_read(hrtimer_running) != t)
hrtimer_cancel(&t->timer);
kfree(t);
bpf_map_kfree(t);
}

BPF_CALL_2(bpf_kptr_xchg, void *, map_value, void *, ptr)
Expand Down
12 changes: 6 additions & 6 deletions kernel/bpf/local_storage.c
Expand Up @@ -174,7 +174,7 @@ static int cgroup_storage_update_elem(struct bpf_map *map, void *key,
check_and_init_map_value(map, new->data);

new = xchg(&storage->buf, new);
kfree_rcu(new, rcu);
bpf_map_kfree_rcu(new, rcu);

return 0;
}
Expand Down Expand Up @@ -526,7 +526,7 @@ struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog,
return storage;

enomem:
kfree(storage);
bpf_map_kfree(storage);
return ERR_PTR(-ENOMEM);
}

Expand All @@ -535,17 +535,17 @@ static void free_shared_cgroup_storage_rcu(struct rcu_head *rcu)
struct bpf_cgroup_storage *storage =
container_of(rcu, struct bpf_cgroup_storage, rcu);

kfree(storage->buf);
kfree(storage);
bpf_map_kfree(storage->buf);
bpf_map_kfree(storage);
}

/*
 * RCU callback that frees a per-cpu cgroup storage: release the per-cpu
 * buffer, then the storage struct itself, via the bpf map free helpers.
 *
 * As rendered, this hunk contained both the removed lines
 * (free_percpu()/kfree()) and the added lines
 * (bpf_map_free_percpu()/bpf_map_kfree()), which would free each object
 * twice. Reconstructed to the intended post-patch body: each object is
 * freed exactly once, through the tracking wrappers.
 */
static void free_percpu_cgroup_storage_rcu(struct rcu_head *rcu)
{
	struct bpf_cgroup_storage *storage =
		container_of(rcu, struct bpf_cgroup_storage, rcu);

	bpf_map_free_percpu(storage->percpu_buf);
	bpf_map_kfree(storage);
}

void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
Expand Down
14 changes: 7 additions & 7 deletions kernel/bpf/lpm_trie.c
Expand Up @@ -379,7 +379,7 @@ static int trie_update_elem(struct bpf_map *map,
trie->n_entries--;

rcu_assign_pointer(*slot, new_node);
kfree_rcu(node, rcu);
bpf_map_kfree_rcu(node, rcu);

goto out;
}
Expand Down Expand Up @@ -421,8 +421,8 @@ static int trie_update_elem(struct bpf_map *map,
if (new_node)
trie->n_entries--;

kfree(new_node);
kfree(im_node);
bpf_map_kfree(new_node);
bpf_map_kfree(im_node);
}

spin_unlock_irqrestore(&trie->lock, irq_flags);
Expand Down Expand Up @@ -503,8 +503,8 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
else
rcu_assign_pointer(
*trim2, rcu_access_pointer(parent->child[0]));
kfree_rcu(parent, rcu);
kfree_rcu(node, rcu);
bpf_map_kfree_rcu(parent, rcu);
bpf_map_kfree_rcu(node, rcu);
goto out;
}

Expand All @@ -518,7 +518,7 @@ static int trie_delete_elem(struct bpf_map *map, void *_key)
rcu_assign_pointer(*trim, rcu_access_pointer(node->child[1]));
else
RCU_INIT_POINTER(*trim, NULL);
kfree_rcu(node, rcu);
bpf_map_kfree_rcu(node, rcu);

out:
spin_unlock_irqrestore(&trie->lock, irq_flags);
Expand Down Expand Up @@ -602,7 +602,7 @@ static void trie_free(struct bpf_map *map)
continue;
}

kfree(node);
bpf_map_kfree(node);
RCU_INIT_POINTER(*slot, NULL);
break;
}
Expand Down
4 changes: 2 additions & 2 deletions net/core/bpf_sk_storage.c
Expand Up @@ -64,7 +64,7 @@ void bpf_sk_storage_free(struct sock *sk)
rcu_read_unlock();

if (free_sk_storage)
kfree_rcu(sk_storage, rcu);
bpf_map_kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
Expand Down Expand Up @@ -203,7 +203,7 @@ int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
} else {
ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
if (ret) {
kfree(copy_selem);
bpf_map_kfree(copy_selem);
atomic_sub(smap->elem_size,
&newsk->sk_omem_alloc);
bpf_map_put(map);
Expand Down
2 changes: 1 addition & 1 deletion net/core/sock_map.c
Expand Up @@ -888,7 +888,7 @@ static void sock_hash_free_elem(struct bpf_shtab *htab,
struct bpf_shtab_elem *elem)
{
atomic_dec(&htab->count);
kfree_rcu(elem, rcu);
bpf_map_kfree_rcu(elem, rcu);
}

static void sock_hash_delete_from_link(struct bpf_map *map, struct sock *sk,
Expand Down

0 comments on commit e11706d

Please sign in to comment.