From 690e4c8dcf2f9ee6fe7684e6169c8186c3fb8aa4 Mon Sep 17 00:00:00 2001
From: Leon Hwang
Date: Thu, 16 Oct 2025 22:57:59 +0800
Subject: [PATCH 1/3] bpf: Fix possible memleak in [lru_,]percpu_hash map update

As [lru_,]percpu_hash maps support BPF_KPTR_{REF,PERCPU}, missing calls
to 'bpf_obj_free_fields()' in 'pcpu_copy_value()' can leak memory
referenced by BPF_KPTR_{REF,PERCPU} fields.

Fix this by calling 'bpf_obj_free_fields()' after 'copy_map_value[,_long]()'
in 'pcpu_copy_value()'.

Fixes: 65334e64a493 ("bpf: Support kptrs in percpu hashmap and percpu LRU hashmap")
Signed-off-by: Leon Hwang
---
 kernel/bpf/hashtab.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index c2fcd0cd51e51..26308adc9ccb3 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -950,12 +950,14 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
 	if (!onallcpus) {
 		/* copy true value_size bytes */
 		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
+		bpf_obj_free_fields(htab->map.record, this_cpu_ptr(pptr));
 	} else {
 		u32 size = round_up(htab->map.value_size, 8);
 		int off = 0, cpu;
 
 		for_each_possible_cpu(cpu) {
 			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
+			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
 			off += size;
 		}
 	}

From 9deb9ddc686ddc48b64e901f65106a757a2fff00 Mon Sep 17 00:00:00 2001
From: Leon Hwang
Date: Thu, 16 Oct 2025 22:58:00 +0800
Subject: [PATCH 2/3] bpf: Fix possible memleak when updating hash maps with
 BPF_F_LOCK

When updating hash maps with BPF_F_LOCK, the special fields were not
freed after being replaced. This could cause memory referenced by
BPF_KPTR_{REF,PERCPU} fields to leak.

Fix this by calling 'check_and_free_fields()' after
'copy_map_value_locked()' to properly release the old fields.

Fixes: 14a324f6a67e ("bpf: Wire up freeing of referenced kptr")
Signed-off-by: Leon Hwang
---
 kernel/bpf/hashtab.c | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 26308adc9ccb3..65009ea3e9379 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -1124,6 +1124,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			copy_map_value_locked(map, htab_elem_value(l_old, key_size),
 					      value, false);
+			check_and_free_fields(htab, l_old);
 			return 0;
 		}
 		/* fall through, grab the bucket lock and lookup again.
@@ -1152,6 +1153,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
 			copy_map_value_locked(map, htab_elem_value(l_old, key_size),
 					      value, false);
+			check_and_free_fields(htab, l_old);
 			ret = 0;
 			goto err;
 		}

From b19963c65562b68280e296ebb5117839499a6574 Mon Sep 17 00:00:00 2001
From: Leon Hwang
Date: Thu, 16 Oct 2025 22:58:01 +0800
Subject: [PATCH 3/3] selftests/bpf: Add test to verify no memleak when
 updating hash maps

Add two tests to verify that updating hash maps does not leak memory
when BPF_KPTR_REF objects are involved.

The test performs the following steps:

1. Call update_elem() to insert an initial value.
2. Use bpf_refcount_acquire() to increment the refcount.
3. Store the node pointer in the map value.
4. Add the node to a linked list.
5. Probe-read the refcount and verify it is *2*.
6. Call update_elem() again to trigger refcount decrement.
7. Verify that the field has been reset.
Signed-off-by: Leon Hwang
---
 .../bpf/prog_tests/refcounted_kptr.c          |  93 ++++++++++++++++
 .../selftests/bpf/progs/refcounted_kptr.c     | 101 ++++++++++++++++++
 2 files changed, 194 insertions(+)

diff --git a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
index d6bd5e16e6372..9dc9a425cc65d 100644
--- a/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
@@ -44,3 +44,96 @@ void test_refcounted_kptr_wrong_owner(void)
 	ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
 	refcounted_kptr__destroy(skel);
 }
+
+static void test_refcnt_leak(void *values, size_t values_sz, u64 flags, bool lock_hash)
+{
+	struct refcounted_kptr *skel;
+	int ret, fd, key = 0;
+	struct bpf_map *map;
+	LIBBPF_OPTS(bpf_test_run_opts, opts,
+		    .data_in = &pkt_v4,
+		    .data_size_in = sizeof(pkt_v4),
+		    .repeat = 1,
+	);
+
+	skel = refcounted_kptr__open_and_load();
+	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
+		return;
+
+	map = skel->maps.pcpu_hash;
+	if (lock_hash)
+		map = skel->maps.lock_hash;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem first"))
+		goto out;
+
+	fd = bpf_program__fd(skel->progs.pcpu_hash_refcount_leak);
+	if (lock_hash)
+		fd = bpf_program__fd(skel->progs.hash_lock_refcount_leak);
+
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	if (!ASSERT_OK(ret, "test_run_opts"))
+		goto out;
+	if (!ASSERT_EQ(opts.retval, 2, "retval refcount"))
+		goto out;
+
+	ret = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
+	if (!ASSERT_OK(ret, "bpf_map__update_elem second"))
+		goto out;
+
+	fd = bpf_program__fd(skel->progs.check_pcpu_hash_refcount);
+	if (lock_hash)
+		fd = bpf_program__fd(skel->progs.check_hash_lock_refcount);
+
+	ret = bpf_prog_test_run_opts(fd, &opts);
+	if (!ASSERT_OK(ret, "test_run_opts"))
+		goto out;
+	if (!ASSERT_EQ(opts.retval, 1, "retval"))
+		goto out;
+
+out:
+	refcounted_kptr__destroy(skel);
+}
+
+static void test_percpu_hash_refcount_leak(void)
+{
+	size_t values_sz;
+	u64 *values;
+	int cpu_nr;
+
+	cpu_nr = libbpf_num_possible_cpus();
+	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
+		return;
+
+	values = calloc(cpu_nr, sizeof(u64));
+	if (!ASSERT_OK_PTR(values, "calloc values"))
+		return;
+
+	values_sz = cpu_nr * sizeof(u64);
+	memset(values, 0, values_sz);
+
+	test_refcnt_leak(values, values_sz, 0, false);
+
+	free(values);
+}
+
+struct hash_lock_value {
+	struct bpf_spin_lock lock;
+	u64 node;
+};
+
+static void test_hash_lock_refcount_leak(void)
+{
+	struct hash_lock_value value = {};
+
+	test_refcnt_leak(&value, sizeof(value), BPF_F_LOCK, true);
+}
+
+void test_refcount_leak(void)
+{
+	if (test__start_subtest("percpu_hash_refcount_leak"))
+		test_percpu_hash_refcount_leak();
+	if (test__start_subtest("hash_lock_refcount_leak"))
+		test_hash_lock_refcount_leak();
+}
diff --git a/tools/testing/selftests/bpf/progs/refcounted_kptr.c b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
index 893a4fdb4b6e9..8c41fe53da9e3 100644
--- a/tools/testing/selftests/bpf/progs/refcounted_kptr.c
+++ b/tools/testing/selftests/bpf/progs/refcounted_kptr.c
@@ -568,4 +568,105 @@ int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
 	return 0;
 }
 
+static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
+			    struct node_data __kptr **node)
+{
+	struct node_data *n, *m;
+	u32 refcnt;
+	void *ref;
+
+	n = bpf_obj_new(typeof(*n));
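+	/* Refcount lifecycle exercised by the test: bpf_obj_new() returns a
+	 * node with refcount 1, bpf_refcount_acquire() below bumps it to 2,
+	 * one reference is then moved into the map value via bpf_kptr_xchg()
+	 * and the other is linked into the list, so the probe-read at the
+	 * end is expected to report 2.
+	 */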
+	if (!n)
+		return -1;
+
+	m = bpf_refcount_acquire(n);
+	n = bpf_kptr_xchg(node, n);
+	if (n) {
+		bpf_obj_drop(n);
+		bpf_obj_drop(m);
+		return -2;
+	}
+
+	bpf_spin_lock(lock);
+	bpf_list_push_front(head, &m->l);
+	ref = (void *) &m->ref;
+	bpf_spin_unlock(lock);
+
+	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), ref);
+	return refcnt;
+}
+
+static void *__lookup_map(void *map)
+{
+	int key = 0;
+
+	return bpf_map_lookup_elem(map, &key);
+}
+
+struct {
+	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
+	__type(key, int);
+	__type(value, struct map_value);
+	__uint(max_entries, 1);
+} pcpu_hash SEC(".maps");
+
+SEC("tc")
+int pcpu_hash_refcount_leak(void *ctx)
+{
+	struct map_value *v;
+
+	v = __lookup_map(&pcpu_hash);
+	if (!v)
+		return 0;
+
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_pcpu_hash_refcount(void *ctx)
+{
+	struct map_value *v;
+
+	v = __lookup_map(&pcpu_hash);
+	return v && v->node == NULL;
+}
+
+struct hash_lock_map_value {
+	struct node_data __kptr *node;
+	struct bpf_spin_lock lock;
+	int value;
+};
+
+struct {
+	__uint(type, BPF_MAP_TYPE_HASH);
+	__type(key, int);
+	__type(value, struct hash_lock_map_value);
+	__uint(max_entries, 1);
+} lock_hash SEC(".maps");
+
+SEC("tc")
+int hash_lock_refcount_leak(void *ctx)
+{
+	struct hash_lock_map_value *v;
+
+	v = __lookup_map(&lock_hash);
+	if (!v)
+		return 0;
+
+	bpf_spin_lock(&v->lock);
+	v->value = 42;
+	bpf_spin_unlock(&v->lock);
+
+	return __insert_in_list(&head, &lock, &v->node);
+}
+
+SEC("tc")
+int check_hash_lock_refcount(void *ctx)
+{
+	struct hash_lock_map_value *v;
+
+	v = __lookup_map(&lock_hash);
+	return v && v->node == NULL;
+}
+
 char _license[] SEC("license") = "GPL";
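
Note: for readers following the first fix, a consolidated view of
pcpu_copy_value() with the two added calls applied is sketched below. It is
reconstructed from the hunk in patch 1; the function's full parameter list
and the unchanged surrounding lines are assumed from context rather than
quoted from the tree.

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
			    void *value, bool onallcpus)
{
	if (!onallcpus) {
		/* copy true value_size bytes */
		copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
		/* copy_map_value() skips special fields, so release the
		 * objects still owned by the old value (kptrs etc.)
		 */
		bpf_obj_free_fields(htab->map.record, this_cpu_ptr(pptr));
	} else {
		u32 size = round_up(htab->map.value_size, 8);
		int off = 0, cpu;

		for_each_possible_cpu(cpu) {
			copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
			bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
			off += size;
		}
	}
}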