Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions kernel/bpf/hashtab.c
Original file line number Diff line number Diff line change
Expand Up @@ -950,12 +950,14 @@ static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
if (!onallcpus) {
/* copy true value_size bytes */
copy_map_value(&htab->map, this_cpu_ptr(pptr), value);
bpf_obj_free_fields(htab->map.record, this_cpu_ptr(pptr));
} else {
u32 size = round_up(htab->map.value_size, 8);
int off = 0, cpu;

for_each_possible_cpu(cpu) {
copy_map_value_long(&htab->map, per_cpu_ptr(pptr, cpu), value + off);
bpf_obj_free_fields(htab->map.record, per_cpu_ptr(pptr, cpu));
off += size;
}
}
Expand Down Expand Up @@ -1122,6 +1124,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
copy_map_value_locked(map,
htab_elem_value(l_old, key_size),
value, false);
check_and_free_fields(htab, l_old);
return 0;
}
/* fall through, grab the bucket lock and lookup again.
Expand Down Expand Up @@ -1150,6 +1153,7 @@ static long htab_map_update_elem(struct bpf_map *map, void *key, void *value,
copy_map_value_locked(map,
htab_elem_value(l_old, key_size),
value, false);
check_and_free_fields(htab, l_old);
ret = 0;
goto err;
}
Expand Down
93 changes: 93 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/refcounted_kptr.c
Original file line number Diff line number Diff line change
Expand Up @@ -44,3 +44,96 @@ void test_refcounted_kptr_wrong_owner(void)
ASSERT_OK(opts.retval, "rbtree_wrong_owner_remove_fail_a2 retval");
refcounted_kptr__destroy(skel);
}

/* Shared driver for both refcount-leak scenarios.
 *
 * Inserts @values into the selected map, runs the program that stashes a
 * refcounted node into the map value (expected retval 2 == refcount), then
 * overwrites the element and runs the checker program, which must observe
 * the kptr slot cleared (retval 1).  @lock_hash selects the BPF_F_LOCK
 * hash variant instead of the per-CPU hash.
 */
static void test_refcnt_leak(void *values, size_t values_sz, u64 flags, bool lock_hash)
{
	LIBBPF_OPTS(bpf_test_run_opts, opts,
		.data_in = &pkt_v4,
		.data_size_in = sizeof(pkt_v4),
		.repeat = 1,
	);
	struct refcounted_kptr *skel;
	struct bpf_map *map;
	int err, prog_fd, key = 0;

	skel = refcounted_kptr__open_and_load();
	if (!ASSERT_OK_PTR(skel, "refcounted_kptr__open_and_load"))
		return;

	map = lock_hash ? skel->maps.lock_hash : skel->maps.pcpu_hash;

	err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
	if (!ASSERT_OK(err, "bpf_map__update_elem first"))
		goto out;

	prog_fd = bpf_program__fd(lock_hash ? skel->progs.hash_lock_refcount_leak
					    : skel->progs.pcpu_hash_refcount_leak);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (!ASSERT_OK(err, "test_run_opts"))
		goto out;
	if (!ASSERT_EQ(opts.retval, 2, "retval refcount"))
		goto out;

	/* Overwrite the element; the update path must release the stashed ref. */
	err = bpf_map__update_elem(map, &key, sizeof(key), values, values_sz, flags);
	if (!ASSERT_OK(err, "bpf_map__update_elem second"))
		goto out;

	prog_fd = bpf_program__fd(lock_hash ? skel->progs.check_hash_lock_refcount
					    : skel->progs.check_pcpu_hash_refcount);
	err = bpf_prog_test_run_opts(prog_fd, &opts);
	if (!ASSERT_OK(err, "test_run_opts"))
		goto out;
	ASSERT_EQ(opts.retval, 1, "retval");
out:
	refcounted_kptr__destroy(skel);
}

/* Build a zeroed per-CPU value array (one u64 slot per possible CPU) and
 * run the per-CPU hash leak scenario through test_refcnt_leak().
 */
static void test_percpu_hash_refcount_leak(void)
{
	size_t values_sz;
	u64 *values;
	int cpu_nr;

	cpu_nr = libbpf_num_possible_cpus();
	if (!ASSERT_GT(cpu_nr, 0, "libbpf_num_possible_cpus"))
		return;

	/* calloc() already zero-initializes; no extra memset is needed. */
	values = calloc(cpu_nr, sizeof(u64));
	if (!ASSERT_OK_PTR(values, "calloc values"))
		return;

	values_sz = cpu_nr * sizeof(u64);

	test_refcnt_leak(values, values_sz, 0, false);

	free(values);
}

/* User-space stand-in for the BPF-side map value of lock_hash: a spin lock
 * plus a u64 slot occupying the kptr's space.  Only ever passed fully
 * zeroed, so the exact layout is not read back.
 * NOTE(review): field order differs from the BPF-side hash_lock_map_value
 * (which declares the kptr first) — harmless while the value is all zeros,
 * but confirm if non-zero data is ever supplied.
 */
struct hash_lock_value {
	struct bpf_spin_lock lock;
	u64 node;
};

static void test_hash_lock_refcount_leak(void)
{
struct hash_lock_value value = {};

test_refcnt_leak(&value, sizeof(value), BPF_F_LOCK, true);
}

/* Selftest entry point: registers one subtest per hash-map flavor. */
void test_refcount_leak(void)
{
	if (test__start_subtest("percpu_hash_refcount_leak"))
		test_percpu_hash_refcount_leak();
	if (test__start_subtest("hash_lock_refcount_leak"))
		test_hash_lock_refcount_leak();
}
101 changes: 101 additions & 0 deletions tools/testing/selftests/bpf/progs/refcounted_kptr.c
Original file line number Diff line number Diff line change
Expand Up @@ -568,4 +568,105 @@ int BPF_PROG(rbtree_sleepable_rcu_no_explicit_rcu_lock,
return 0;
}

/* Allocate a refcounted node and publish it twice: one reference is stashed
 * into the map-value kptr slot @node via bpf_kptr_xchg(), a second reference
 * is pushed onto the global list @head under @lock.
 *
 * Returns:
 *  -1 if bpf_obj_new() fails;
 *  -2 if @node already held a pointer (both references are dropped);
 *  otherwise the raw value read from the node's ref field — presumably its
 *  refcount (TODO confirm against struct node_data's declaration).
 */
static int __insert_in_list(struct bpf_list_head *head, struct bpf_spin_lock *lock,
			    struct node_data __kptr **node)
{
	struct node_data *n, *m;
	u32 refcnt;
	void *ref;

	n = bpf_obj_new(typeof(*n));
	if (!n)
		return -1;

	/* Take a second reference: one copy for the map, one for the list. */
	m = bpf_refcount_acquire(n);
	n = bpf_kptr_xchg(node, n);
	if (n) {
		/* Slot was occupied: release the old pointer and our extra ref. */
		bpf_obj_drop(n);
		bpf_obj_drop(m);
		return -2;
	}

	bpf_spin_lock(lock);
	bpf_list_push_front(head, &m->l);
	/* Record the address of the ref field while the reference is live. */
	ref = (void *) &m->ref;
	bpf_spin_unlock(lock);

	bpf_probe_read_kernel(&refcnt, sizeof(refcnt), ref);
	return refcnt;
}

/* Look up the single element (key 0) in @map; NULL if absent. */
static void *__lookup_map(void *map)
{
	int key = 0;

	return bpf_map_lookup_elem(map, &key);
}

/* Single-slot per-CPU hash map; struct map_value (declared elsewhere in
 * this file) carries the ->node kptr slot used by the programs below. */
struct {
	__uint(type, BPF_MAP_TYPE_PERCPU_HASH);
	__type(key, int);
	__type(value, struct map_value);
	__uint(max_entries, 1);
} pcpu_hash SEC(".maps");

/* Stash a refcounted node into this CPU's copy of the pcpu_hash value.
 * Retval is __insert_in_list()'s result (expected 2 by the user-space test). */
SEC("tc")
int pcpu_hash_refcount_leak(void *ctx)
{
	struct map_value *v;

	v = __lookup_map(&pcpu_hash);
	if (!v)
		return 0;

	return __insert_in_list(&head, &lock, &v->node);
}

/* Returns 1 iff the element exists and its kptr slot is NULL, i.e. the
 * map-update path released the previously stashed reference. */
SEC("tc")
int check_pcpu_hash_refcount(void *ctx)
{
	struct map_value *v;

	v = __lookup_map(&pcpu_hash);
	return v && v->node == NULL;
}

/* Map value for the BPF_F_LOCK test: a refcounted kptr slot, the spin lock
 * that BPF_F_LOCK updates take, and a plain integer payload. */
struct hash_lock_map_value {
	struct node_data __kptr *node;
	struct bpf_spin_lock lock;
	int value;
};

/* Single-slot hash map whose value embeds a bpf_spin_lock, allowing
 * user space to update it with BPF_F_LOCK. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__type(key, int);
	__type(value, struct hash_lock_map_value);
	__uint(max_entries, 1);
} lock_hash SEC(".maps");

/* Same leak scenario as pcpu_hash_refcount_leak, but for the lock_hash map:
 * touch the value under its spin lock, then stash a refcounted node into
 * its kptr slot.  Retval is __insert_in_list()'s result. */
SEC("tc")
int hash_lock_refcount_leak(void *ctx)
{
	struct hash_lock_map_value *v;

	v = __lookup_map(&lock_hash);
	if (!v)
		return 0;

	bpf_spin_lock(&v->lock);
	v->value = 42;
	bpf_spin_unlock(&v->lock);

	return __insert_in_list(&head, &lock, &v->node);
}

/* Returns 1 iff the lock_hash element exists and its kptr slot is NULL,
 * i.e. the BPF_F_LOCK update path released the stashed reference. */
SEC("tc")
int check_hash_lock_refcount(void *ctx)
{
	struct hash_lock_map_value *v;

	v = __lookup_map(&lock_hash);
	return v && v->node == NULL;
}

char _license[] SEC("license") = "GPL";
Loading