Skip to content

Commit 9066030

Browse files
Byte-Lab authored and Alexei Starovoitov committed
bpf: Add kfuncs for storing struct task_struct * as a kptr
Now that BPF supports adding new kernel functions with kfuncs, and storing kernel objects in maps with kptrs, we can add a set of kfuncs which allow struct task_struct objects to be stored in maps as referenced kptrs. The possible use cases for doing this are plentiful. During tracing, for example, it would be useful to be able to collect some tasks that performed a certain operation, and then periodically summarize who they are, which cgroup they're in, how much CPU time they've utilized, etc. In order to enable this, this patch adds three new kfuncs: struct task_struct *bpf_task_acquire(struct task_struct *p); struct task_struct *bpf_task_kptr_get(struct task_struct **pp); void bpf_task_release(struct task_struct *p); A follow-on patch will add selftests validating these kfuncs. Signed-off-by: David Vernet <void@manifault.com> Link: https://lore.kernel.org/r/20221120051004.3605026-4-void@manifault.com Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent 3f00c52 commit 9066030

File tree

1 file changed

+75
-3
lines changed

1 file changed

+75
-3
lines changed

kernel/bpf/helpers.c

Lines changed: 75 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1824,6 +1824,63 @@ struct bpf_list_node *bpf_list_pop_back(struct bpf_list_head *head)
18241824
return __bpf_list_del(head, true);
18251825
}
18261826

1827+
/**
1828+
* bpf_task_acquire - Acquire a reference to a task. A task acquired by this
1829+
* kfunc which is not stored in a map as a kptr, must be released by calling
1830+
* bpf_task_release().
1831+
* @p: The task on which a reference is being acquired.
1832+
*/
1833+
struct task_struct *bpf_task_acquire(struct task_struct *p)
1834+
{
1835+
refcount_inc(&p->rcu_users);
1836+
return p;
1837+
}
1838+
1839+
/**
 * bpf_task_kptr_get - Acquire a reference on a struct task_struct kptr. A task
 * kptr acquired by this kfunc which is not subsequently stored in a map, must
 * be released by calling bpf_task_release().
 * @pp: A pointer to a task kptr on which a reference is being acquired.
 *
 * Return: the task with an additional rcu_users reference held, or NULL if
 * the kptr slot was empty or the task's refcount had already dropped to zero.
 */
struct task_struct *bpf_task_kptr_get(struct task_struct **pp)
{
	struct task_struct *p;

	rcu_read_lock();
	/* Snapshot the kptr exactly once; the map slot may be concurrently
	 * exchanged / cleared by another context.
	 */
	p = READ_ONCE(*pp);

	/* Another context could remove the task from the map and release it at
	 * any time, including after we've done the lookup above. This is safe
	 * because we're in an RCU read region, so the task is guaranteed to
	 * remain valid until at least the rcu_read_unlock() below.
	 */
	if (p && !refcount_inc_not_zero(&p->rcu_users))
		/* If the task had been removed from the map and freed as
		 * described above, refcount_inc_not_zero() will return false.
		 * The task will be freed at some point after the current RCU
		 * gp has ended, so just return NULL to the user.
		 */
		p = NULL;
	rcu_read_unlock();

	return p;
}
1868+
1869+
/**
 * bpf_task_release - Release the reference acquired on a struct task_struct *.
 * If this kfunc is invoked in an RCU read region, the task_struct is
 * guaranteed to not be freed until the current grace period has ended, even if
 * its refcount drops to 0.
 * @p: The task on which a reference is being released.
 */
void bpf_task_release(struct task_struct *p)
{
	/* Tolerate NULL so this can also serve as the kptr destructor for
	 * slots that were never populated.
	 */
	if (p)
		put_task_struct_rcu_user(p);
}
1883+
18271884
__diag_pop();
18281885

18291886
BTF_SET8_START(generic_btf_ids)
@@ -1836,21 +1893,36 @@ BTF_ID_FLAGS(func, bpf_list_push_front)
18361893
BTF_ID_FLAGS(func, bpf_list_push_back)
BTF_ID_FLAGS(func, bpf_list_pop_front, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_list_pop_back, KF_ACQUIRE | KF_RET_NULL)
/* Task kptr kfuncs: acquire demands a trusted pointer arg; kptr_get may
 * return NULL (see bpf_task_kptr_get); release pairs with both acquirers.
 */
BTF_ID_FLAGS(func, bpf_task_acquire, KF_ACQUIRE | KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, bpf_task_kptr_get, KF_ACQUIRE | KF_KPTR_GET | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_task_release, KF_RELEASE)
BTF_SET8_END(generic_btf_ids)

static const struct btf_kfunc_id_set generic_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &generic_btf_ids,
};

/* BTF ID pair consumed by kfunc_init(): [0] is the kptr type
 * (struct task_struct), [1] is its destructor kfunc (bpf_task_release).
 */
BTF_ID_LIST(generic_dtor_ids)
BTF_ID(struct, task_struct)
BTF_ID(func, bpf_task_release)
1909+
18461910
/* Register the generic kfunc set for the supported program types, then hook
 * up bpf_task_release as the destructor for struct task_struct map kptrs.
 * Returns 0 on success or the first failing registration's error code.
 */
static int __init kfunc_init(void)
{
	const struct btf_id_dtor_kfunc generic_dtors[] = {
		{
			.btf_id = generic_dtor_ids[0],
			.kfunc_btf_id = generic_dtor_ids[1]
		},
	};
	int ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &generic_kfunc_set);
	if (ret)
		return ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &generic_kfunc_set);
	if (ret)
		return ret;

	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS, &generic_kfunc_set);
	if (ret)
		return ret;

	return register_btf_id_dtor_kfuncs(generic_dtors,
					   ARRAY_SIZE(generic_dtors),
					   THIS_MODULE);
}

late_initcall(kfunc_init);

0 commit comments

Comments
 (0)