selftests/bpf: Add selftests for cpumask iter
Within the BPF program, we leverage the cpumask iterator to walk the
per-CPU runqueue data, specifically the 'nr_running' metric. Subsequently,
we expose this data to userspace by means of a seq file.

The cpumask being iterated is derived from the CPU affinity of the task
identified by a PID:

- PID of the init task (PID 1)
  CPU affinity is typically not set for the init task, so we can iterate
  across all possible CPUs. However, if you have set CPU affinity for the
  init task, set the cpumask of your current task to all-F (every bit set)
  and iterate through all possible CPUs via the current task instead (see
  the sketch after this list).
- PID of a task with defined CPU affinity
  The aim here is to iterate through a specific cpumask. This scenario
  aligns with tasks residing within a cpuset cgroup.
- Invalid PID (e.g., PID -1)
  No cpumask is available in this case.
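A hedged sketch of the workaround mentioned in the first scenario (not
part of this patch; the helper name is made up for illustration, and the
skeleton type comes from test_cpumask_iter.skel.h below): give the current
task a fully set cpumask, then target the current PID instead of PID 1.

#define _GNU_SOURCE
#include <sched.h>
#include <unistd.h>
#include "test_cpumask_iter.skel.h"

/* Illustrative only: allow the current task to run on every CPU, then use
 * it as the iteration target in place of the init task.
 */
static int target_current_task_all_cpus(struct test_cpumask_iter *skel)
{
	cpu_set_t set;
	int cpu;

	CPU_ZERO(&set);
	for (cpu = 0; cpu < CPU_SETSIZE; cpu++)
		CPU_SET(cpu, &set);
	/* The kernel trims the requested mask to the online CPUs. */
	if (sched_setaffinity(0, sizeof(set), &set))
		return -1;
	skel->bss->target_pid = getpid();
	return 0;
}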

The result is as follows:
  #62/1    cpumask_iter/init_pid:OK
  #62/2    cpumask_iter/invalid_pid:OK
  #62/3    cpumask_iter/self_pid_one_cpu:OK
  #62/4    cpumask_iter/self_pid_multi_cpus:OK
  #62      cpumask_iter:OK
  Summary: 1/4 PASSED, 0 SKIPPED, 0 FAILED

Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
Showing 3 changed files with 186 additions and 0 deletions.
132 changes: 132 additions & 0 deletions tools/testing/selftests/bpf/prog_tests/cpumask_iter.c
@@ -0,0 +1,132 @@
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

#include <test_progs.h>
#include "cgroup_helpers.h"
#include "test_cpumask_iter.skel.h"

static void verify_percpu_data(struct bpf_link *link, int nr_cpu_exp, int nr_running_exp)
{
	int iter_fd, len, item, nr_running, nr_cpus;
	static char buf[128];
	size_t left;
	char *p;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "iter_fd"))
		return;

	memset(buf, 0, sizeof(buf));
	left = ARRAY_SIZE(buf);
	p = buf;
	while ((len = read(iter_fd, p, left)) > 0) {
		p += len;
		left -= len;
	}

	item = sscanf(buf, "nr_running %u nr_cpus %u\n", &nr_running, &nr_cpus);
	if (nr_cpu_exp == -1) {
		ASSERT_EQ(item, -1, "seq_format");
		goto out;
	}

	ASSERT_EQ(item, 2, "seq_format");
	ASSERT_GE(nr_running, nr_running_exp, "nr_running");
	ASSERT_EQ(nr_cpus, nr_cpu_exp, "nr_cpus");

	/* read() after iter finishes should be ok. */
	if (len == 0)
		ASSERT_OK(read(iter_fd, buf, sizeof(buf)), "second_read");

out:
	close(iter_fd);
}

void test_cpumask_iter(void)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int nr_possible, cgrp_fd, pid, err, cnt, i;
	struct test_cpumask_iter *skel = NULL;
	union bpf_iter_link_info linfo;
	int cpu_ids[] = {1, 3, 4, 5};
	struct bpf_link *link;
	cpu_set_t set;

	skel = test_cpumask_iter__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_cpumask_iter__open_and_load"))
		return;

	if (setup_cgroup_environment())
		goto destroy;

	/* Utilize the cgroup iter to expose the output as a seq file */
	cgrp_fd = get_root_cgroup();
	if (!ASSERT_GE(cgrp_fd, 0, "create cgrp"))
		goto cleanup;

	memset(&linfo, 0, sizeof(linfo));
	linfo.cgroup.cgroup_fd = cgrp_fd;
	linfo.cgroup.order = BPF_CGROUP_ITER_SELF_ONLY;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);

	link = bpf_program__attach_iter(skel->progs.cpu_cgroup, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto close_fd;

	skel->bss->target_pid = 1;
	/* In case the init task has CPU affinity set */
	err = sched_getaffinity(1, sizeof(set), &set);
	if (!ASSERT_OK(err, "getaffinity"))
		goto free_link;

	cnt = CPU_COUNT(&set);
	nr_possible = bpf_num_possible_cpus();
	if (test__start_subtest("init_pid"))
		/* current task is running. */
		verify_percpu_data(link, cnt, cnt == nr_possible ? 1 : 0);

	skel->bss->target_pid = -1;
	if (test__start_subtest("invalid_pid"))
		verify_percpu_data(link, -1, -1);

	pid = getpid();
	skel->bss->target_pid = pid;
	CPU_ZERO(&set);
	CPU_SET(0, &set);
	err = sched_setaffinity(pid, sizeof(set), &set);
	if (!ASSERT_OK(err, "setaffinity"))
		goto free_link;

	if (test__start_subtest("self_pid_one_cpu"))
		verify_percpu_data(link, 1, 1);

	/* Assume there are at least 8 CPUs on the testbed */
	if (nr_possible < 8)
		goto free_link;

	CPU_ZERO(&set);
	/* Set the CPU affinity: 1,3-5 */
	for (i = 0; i < ARRAY_SIZE(cpu_ids); i++)
		CPU_SET(cpu_ids[i], &set);
	err = sched_setaffinity(pid, sizeof(set), &set);
	if (!ASSERT_OK(err, "setaffinity"))
		goto free_link;

	if (test__start_subtest("self_pid_multi_cpus"))
		verify_percpu_data(link, ARRAY_SIZE(cpu_ids), 1);
free_link:
	bpf_link__destroy(link);
close_fd:
	close(cgrp_fd);
cleanup:
	cleanup_cgroup_environment();
destroy:
	test_cpumask_iter__destroy(skel);
}
4 changes: 4 additions & 0 deletions tools/testing/selftests/bpf/progs/cpumask_common.h
@@ -55,6 +55,10 @@ void bpf_cpumask_copy(struct bpf_cpumask *dst, const struct cpumask *src) __ksym
u32 bpf_cpumask_any_distribute(const struct cpumask *src) __ksym;
u32 bpf_cpumask_any_and_distribute(const struct cpumask *src1, const struct cpumask *src2) __ksym;
u32 bpf_cpumask_weight(const struct cpumask *cpumask) __ksym;
u32 bpf_iter_cpumask_new(struct bpf_iter_cpumask *it, struct cpumask *mask) __ksym;
u32 *bpf_iter_cpumask_next(struct bpf_iter_cpumask *it) __ksym;
void bpf_iter_cpumask_destroy(struct bpf_iter_cpumask *it) __ksym;
bool bpf_cpumask_set_from_pid(struct cpumask *cpumask, u32 pid) __ksym;

void bpf_rcu_read_lock(void) __ksym;
void bpf_rcu_read_unlock(void) __ksym;
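For context, a hedged sketch (not part of this patch) of what the
bpf_for_each(cpumask, ...) macro used in test_cpumask_iter.c below boils
down to, in terms of the three iterator kfuncs declared above:

/* Count the CPUs in a mask with the open-coded iterator:
 * new -> next until NULL -> destroy.
 */
static __always_inline u32 count_cpus(struct cpumask *mask)
{
	struct bpf_iter_cpumask it;
	u32 *cpu, nr = 0;

	bpf_iter_cpumask_new(&it, mask);
	while ((cpu = bpf_iter_cpumask_next(&it)))
		nr++;
	bpf_iter_cpumask_destroy(&it);
	return nr;
}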
50 changes: 50 additions & 0 deletions tools/testing/selftests/bpf/progs/test_cpumask_iter.c
@@ -0,0 +1,50 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2023 Yafang Shao <laoar.shao@gmail.com> */

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

#include "cpumask_common.h"

extern const struct rq runqueues __ksym __weak;

int target_pid;

SEC("iter/cgroup")
int BPF_PROG(cpu_cgroup, struct bpf_iter_meta *meta, struct cgroup *cgrp)
{
	u32 *cpu, nr_running = 0, nr_cpus = 0;
	struct bpf_cpumask *mask;
	struct rq *rq;
	int ret;

	/* epilogue */
	if (cgrp == NULL)
		return 0;

	mask = bpf_cpumask_create();
	if (!mask)
		return 1;

	ret = bpf_cpumask_set_from_pid(&mask->cpumask, target_pid);
	if (!ret) {
		bpf_cpumask_release(mask);
		return 1;
	}

	bpf_for_each(cpumask, cpu, &mask->cpumask) {
		rq = (struct rq *)bpf_per_cpu_ptr(&runqueues, *cpu);
		if (!rq)
			continue;

		nr_running += rq->nr_running;
		nr_cpus += 1;
	}
	BPF_SEQ_PRINTF(meta->seq, "nr_running %u nr_cpus %u\n", nr_running, nr_cpus);

	bpf_cpumask_release(mask);
	return 0;
}

char _license[] SEC("license") = "GPL";
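Finally, a hedged sketch of consuming the output outside the selftest
harness: instead of bpf_iter_create() as in verify_percpu_data(), the
attached link can be pinned so the seq file is readable with plain cat
(the pin path below is hypothetical):

#include <stdlib.h>
#include <bpf/libbpf.h>

static void expose_as_seq_file(struct bpf_link *link)
{
	/* Creates a bpffs entry backed by the iterator program. */
	if (bpf_link__pin(link, "/sys/fs/bpf/cpumask_iter"))
		return;
	/* Every open+read of the file re-runs the iterator. */
	system("cat /sys/fs/bpf/cpumask_iter");
}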
