Skip to content

Commit

Permalink
bpf: per-cgroup lsm flavor
Browse files Browse the repository at this point in the history
Allow attaching to lsm hooks in the cgroup context.

Attaching to per-cgroup LSM works exactly like attaching
to other per-cgroup hooks. New BPF_LSM_CGROUP is added
to trigger new mode; the actual lsm hook we attach to is
signaled via existing attach_btf_id.

For the hooks that have 'struct socket' as their first argument,
we use the cgroup associated with that socket. For the rest,
we use 'current' cgroup (this is all on default hierarchy == v2 only).
Note that for the hooks that work on 'struct sock' we still
take the cgroup from 'current' because most of the time,
the 'sock' argument is not properly initialized.

Behind the scenes, we allocate a shim program that is attached
to the trampoline and runs cgroup effective BPF programs array.
This shim has some rudimentary ref counting and can be shared
between several programs attaching to the same per-cgroup lsm hook.

Note that this patch bloats cgroup size because we add 211
cgroup_bpf_attach_type(s) for simplicity's sake. This will be
addressed in the subsequent patch.

Signed-off-by: Stanislav Fomichev <sdf@google.com>
  • Loading branch information
fomichev authored and intel-lab-lkp committed Mar 28, 2022
1 parent db5ffb0 commit cf70645
Show file tree
Hide file tree
Showing 11 changed files with 384 additions and 12 deletions.
6 changes: 6 additions & 0 deletions include/linux/bpf-cgroup-defs.h
Expand Up @@ -10,6 +10,8 @@

struct bpf_prog_array;

#define CGROUP_LSM_NUM 211 /* will be addressed in the next patch */

enum cgroup_bpf_attach_type {
CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
CGROUP_INET_INGRESS = 0,
Expand All @@ -35,6 +37,10 @@ enum cgroup_bpf_attach_type {
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
#ifdef CONFIG_BPF_LSM
CGROUP_LSM_START,
CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
#endif
MAX_CGROUP_BPF_ATTACH_TYPE
};

Expand Down
14 changes: 14 additions & 0 deletions include/linux/bpf.h
Expand Up @@ -807,6 +807,9 @@ static __always_inline __nocfi unsigned int bpf_dispatcher_nop_func(
#ifdef CONFIG_BPF_JIT
int bpf_trampoline_link_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_unlink_prog(struct bpf_prog *prog, struct bpf_trampoline *tr);
int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog);
struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info);
void bpf_trampoline_put(struct bpf_trampoline *tr);
Expand Down Expand Up @@ -865,6 +868,15 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog,
{
return -ENOTSUPP;
}
/*
 * !CONFIG_BPF_JIT stub: the cgroup LSM shim is attached via a BPF
 * trampoline, which requires the JIT, so linking is unsupported here.
 */
static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog,
						  struct bpf_attach_target_info *tgt_info)
{
	return -EOPNOTSUPP;
}
/*
 * !CONFIG_BPF_JIT stub: with no JIT there is no trampoline shim to
 * unlink, so this is a no-op.
 *
 * Fix: the original body had 'return -EOPNOTSUPP;' inside a void
 * function, which is a constraint violation (C11 6.8.6.4) and breaks
 * builds with -Werror=return-type; a void stub must simply return.
 */
static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog)
{
}
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
Expand Down Expand Up @@ -980,6 +992,7 @@ struct bpf_prog_aux {
u64 load_time; /* ns since boottime */
u32 verified_insns;
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
int cgroup_atype; /* enum cgroup_bpf_attach_type */
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
Expand Down Expand Up @@ -2383,6 +2396,7 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
int btf_id_set_index(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS 12

Expand Down
14 changes: 14 additions & 0 deletions include/linux/bpf_lsm.h
Expand Up @@ -42,6 +42,9 @@ extern const struct bpf_func_proto bpf_inode_storage_get_proto;
extern const struct bpf_func_proto bpf_inode_storage_delete_proto;
void bpf_inode_storage_free(struct inode *inode);

int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog, bpf_func_t *bpf_func);
int bpf_lsm_hook_idx(u32 btf_id);

#else /* !CONFIG_BPF_LSM */

static inline bool bpf_lsm_is_sleepable_hook(u32 btf_id)
Expand All @@ -65,6 +68,17 @@ static inline void bpf_inode_storage_free(struct inode *inode)
{
}

/*
 * !CONFIG_BPF_LSM stub: without the BPF LSM there are no per-cgroup
 * LSM hooks to shim, so the lookup always fails with -ENOENT.
 */
static inline int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
					   bpf_func_t *bpf_func)
{
	return -ENOENT;
}

/*
 * !CONFIG_BPF_LSM stub: no LSM hook BTF id set exists, so no btf_id
 * can have a valid index.
 */
static inline int bpf_lsm_hook_idx(u32 btf_id)
{
	return -EINVAL;
}

#endif /* CONFIG_BPF_LSM */

#endif /* _LINUX_BPF_LSM_H */
1 change: 1 addition & 0 deletions include/uapi/linux/bpf.h
Expand Up @@ -998,6 +998,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_TRACE_KPROBE_MULTI,
BPF_LSM_CGROUP,
__MAX_BPF_ATTACH_TYPE
};

Expand Down
92 changes: 92 additions & 0 deletions kernel/bpf/bpf_lsm.c
Expand Up @@ -35,6 +35,98 @@ BTF_SET_START(bpf_lsm_hooks)
#undef LSM_HOOK
BTF_SET_END(bpf_lsm_hooks)

/*
 * Trampoline shim for per-cgroup LSM hooks whose first argument is
 * 'struct socket': runs the effective cgroup BPF program array of the
 * cgroup that the socket's sock is associated with.
 */
static unsigned int __cgroup_bpf_run_lsm_socket(const void *ctx,
						const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	const u64 *args = ctx;	/* trampoline passes hook args as u64[] */
	struct socket *sock;
	struct cgroup *cgrp;
	struct sock *sk;
	int ret = 0;

	sock = (void *)args[BPF_REG_0];
	/*
	 * 'insn' points at the shim prog's insnsi[]; recover the owning
	 * bpf_prog (open-coded equivalent of container_of()).
	 */
	shim_prog = (const struct bpf_prog *)((void *)insn -
					      offsetof(struct bpf_prog, insnsi));

	if (unlikely(!sock))
		return 0;

	sk = sock->sk;
	if (unlikely(!sk))
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);
	if (likely(cgrp))
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[shim_prog->aux->cgroup_atype],
					    ctx, bpf_prog_run, 0);
	return ret;
}

/*
 * Trampoline shim for per-cgroup LSM hooks without a usable socket
 * argument: runs the effective BPF program array of the current
 * task's default-hierarchy (v2) cgroup.
 */
static unsigned int __cgroup_bpf_run_lsm_current(const void *ctx,
						 const struct bpf_insn *insn)
{
	const struct bpf_prog *shim_prog;
	struct cgroup *cgrp;
	int ret = 0;

	if (unlikely(!current))
		return 0;

	/*
	 * 'insn' points at the shim prog's insnsi[]; recover the owning
	 * bpf_prog (open-coded equivalent of container_of()).
	 */
	shim_prog = (const struct bpf_prog *)((void *)insn -
					      offsetof(struct bpf_prog, insnsi));

	rcu_read_lock();
	cgrp = task_dfl_cgroup(current);
	if (likely(cgrp))
		ret = BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[shim_prog->aux->cgroup_atype],
					    ctx, bpf_prog_run, 0);
	rcu_read_unlock();
	return ret;
}

/*
 * Pick the shim entry point for a BPF_LSM_CGROUP program: if the
 * attached-to LSM hook takes 'struct socket *' as its first argument,
 * use the socket-based shim; otherwise fall back to the shim that uses
 * the current task's cgroup.
 *
 * Returns 0 on success with *bpf_func set, -EINVAL on malformed or
 * unavailable BTF information.
 */
int bpf_lsm_find_cgroup_shim(const struct bpf_prog *prog,
			     bpf_func_t *bpf_func)
{
	const struct btf_type *func_proto = prog->aux->attach_func_proto;
	const struct btf_type *arg0_type;
	const struct btf *btf_vmlinux;
	const struct btf_param *params;
	s32 socket_id;

	if (!func_proto || !btf_type_is_func_proto(func_proto))
		return -EINVAL;

	/* Need at least one argument to inspect. */
	if (btf_type_vlen(func_proto) < 1)
		return -EINVAL;

	/* Parameters immediately follow the FUNC_PROTO descriptor. */
	params = (const struct btf_param *)(func_proto + 1);

	btf_vmlinux = bpf_get_btf_vmlinux();
	if (!btf_vmlinux)
		return -EINVAL;

	socket_id = btf_find_by_name_kind(btf_vmlinux, "socket", BTF_KIND_STRUCT);
	if (socket_id < 0)
		return -EINVAL;

	arg0_type = btf_type_resolve_ptr(btf_vmlinux, params[0].type, NULL);
	if (arg0_type == btf_type_by_id(btf_vmlinux, socket_id))
		*bpf_func = __cgroup_bpf_run_lsm_socket;
	else
		*bpf_func = __cgroup_bpf_run_lsm_current;

	return 0;
}

/*
 * Map an LSM hook's BTF id to its index within the bpf_lsm_hooks set;
 * returns -1 if the id is not a known LSM hook.
 */
int bpf_lsm_hook_idx(u32 btf_id)
{
	return btf_id_set_index(&bpf_lsm_hooks, btf_id);
}

int bpf_lsm_verify_prog(struct bpf_verifier_log *vlog,
const struct bpf_prog *prog)
{
Expand Down
11 changes: 11 additions & 0 deletions kernel/bpf/btf.c
Expand Up @@ -4971,6 +4971,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,

if (arg == nr_args) {
switch (prog->expected_attach_type) {
case BPF_LSM_CGROUP:
case BPF_LSM_MAC:
case BPF_TRACE_FEXIT:
/* When LSM programs are attached to void LSM hooks
Expand Down Expand Up @@ -6396,6 +6397,16 @@ static int btf_id_cmp_func(const void *a, const void *b)
return *pa - *pb;
}

/*
 * Binary-search 'id' in a sorted btf_id_set and return its position
 * within set->ids, or -1 when the id is not present.
 */
int btf_id_set_index(const struct btf_id_set *set, u32 id)
{
	const u32 *found;

	found = bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func);

	return found ? (int)(found - set->ids) : -1;
}

/* Membership test on a sorted btf_id_set via binary search. */
bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{
	const u32 *found = bsearch(&id, set->ids, set->cnt, sizeof(u32),
				   btf_id_cmp_func);

	return found != NULL;
}
Expand Down

0 comments on commit cf70645

Please sign in to comment.