Skip to content

Commit

Permalink
bpf: cgroup_sock lsm flavor
Browse files Browse the repository at this point in the history
Allow per-cgroup lsm attachment of a subset of hooks that operate
on the 'struct sock'

Expected usage:

1. attach raw tracepoint hook with expected_attach_type=BPF_LSM_CGROUP_SOCK
2. this sets up an fmod_ret trampoline that invokes __cgroup_bpf_run_lsm_sock
3. __cgroup_bpf_run_lsm_sock relies on existing cgroup_bpf->effective
   array which is extended to include new slots for lsm hooks
4. attach same program to the cgroup_fd

Current limitation:
- abusing x86 jit, not generic
- no proper error handling (detaching the tracepoint first will probably
  cause problems)
- 2 hooks for now for demonstration purposes
- lsm specific; maybe can be extended to fentry/fexit/fmod_ret

Signed-off-by: Stanislav Fomichev <sdf@google.com>
  • Loading branch information
fomichev authored and intel-lab-lkp committed Feb 16, 2022
1 parent 8cbf062 commit 054de95
Show file tree
Hide file tree
Showing 10 changed files with 84 additions and 12 deletions.
27 changes: 19 additions & 8 deletions arch/x86/net/bpf_jit_comp.c
Original file line number Diff line number Diff line change
Expand Up @@ -1742,6 +1742,8 @@ static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
-(stack_size - i * 8));
}

extern int __cgroup_bpf_run_lsm_sock(u64 *, const struct bpf_prog *);

static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
struct bpf_prog *p, int stack_size, bool save_ret)
{
Expand All @@ -1767,14 +1769,23 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,

/* arg1: lea rdi, [rbp - stack_size] */
EMIT4(0x48, 0x8D, 0x7D, -stack_size);
/* arg2: progs[i]->insnsi for interpreter */
if (!p->jited)
emit_mov_imm64(&prog, BPF_REG_2,
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);
/* call JITed bpf program or interpreter */
if (emit_call(&prog, p->bpf_func, prog))
return -EINVAL;

if (p->expected_attach_type == BPF_LSM_CGROUP_SOCK) {
/* arg2: progs[i] */
emit_mov_imm64(&prog, BPF_REG_2, (long) p >> 32, (u32) (long) p);
if (emit_call(&prog, __cgroup_bpf_run_lsm_sock, prog))
return -EINVAL;
} else {
/* arg2: progs[i]->insnsi for interpreter */
if (!p->jited)
emit_mov_imm64(&prog, BPF_REG_2,
(long) p->insnsi >> 32,
(u32) (long) p->insnsi);

/* call JITed bpf program or interpreter */
if (emit_call(&prog, p->bpf_func, prog))
return -EINVAL;
}

/*
* BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
Expand Down
4 changes: 4 additions & 0 deletions include/linux/bpf-cgroup-defs.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@

struct bpf_prog_array;

#define CGROUP_LSM_SOCK_NUM 2

enum cgroup_bpf_attach_type {
CGROUP_BPF_ATTACH_TYPE_INVALID = -1,
CGROUP_INET_INGRESS = 0,
Expand All @@ -35,6 +37,8 @@ enum cgroup_bpf_attach_type {
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
CGROUP_LSM_SOCK_START,
CGROUP_LSM_SOCK_END = CGROUP_LSM_SOCK_START + CGROUP_LSM_SOCK_NUM,
MAX_CGROUP_BPF_ATTACH_TYPE
};

Expand Down
2 changes: 2 additions & 0 deletions include/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -975,6 +975,7 @@ struct bpf_prog_aux {
u64 load_time; /* ns since boottime */
u32 verified_insns;
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
int cgroup_atype; /* enum cgroup_bpf_attach_type */
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
Expand Down Expand Up @@ -2367,6 +2368,7 @@ void *bpf_arch_text_copy(void *dst, void *src, size_t len);

struct btf_id_set;
bool btf_id_set_contains(const struct btf_id_set *set, u32 id);
int btf_id_set_index(const struct btf_id_set *set, u32 id);

#define MAX_BPRINTF_VARARGS 12

Expand Down
1 change: 1 addition & 0 deletions include/uapi/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -997,6 +997,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_LSM_CGROUP_SOCK,
__MAX_BPF_ATTACH_TYPE
};

Expand Down
10 changes: 10 additions & 0 deletions kernel/bpf/btf.c
Original file line number Diff line number Diff line change
Expand Up @@ -4928,6 +4928,7 @@ bool btf_ctx_access(int off, int size, enum bpf_access_type type,

if (arg == nr_args) {
switch (prog->expected_attach_type) {
case BPF_LSM_CGROUP_SOCK:
case BPF_LSM_MAC:
case BPF_TRACE_FEXIT:
/* When LSM programs are attached to void LSM hooks
Expand Down Expand Up @@ -6338,6 +6339,15 @@ static int btf_id_cmp_func(const void *a, const void *b)
return *pa - *pb;
}

/* Return the position of @id within @set->ids, or -1 if absent.
 * @set->ids must be sorted ascending (same precondition as
 * btf_id_set_contains(), which shares the bsearch comparator).
 */
int btf_id_set_index(const struct btf_id_set *set, u32 id)
{
	const u32 *found;

	found = bsearch(&id, set->ids, set->cnt, sizeof(u32),
			btf_id_cmp_func);

	return found ? found - set->ids : -1;
}

bool btf_id_set_contains(const struct btf_id_set *set, u32 id)
{
return bsearch(&id, set->ids, set->cnt, sizeof(u32), btf_id_cmp_func) != NULL;
Expand Down
43 changes: 40 additions & 3 deletions kernel/bpf/cgroup.c
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
#include <linux/string.h>
#include <linux/bpf.h>
#include <linux/bpf-cgroup.h>
#include <linux/btf_ids.h>
#include <net/sock.h>
#include <net/bpf_sk_storage.h>

Expand Down Expand Up @@ -417,6 +418,11 @@ static struct bpf_prog_list *find_attach_entry(struct list_head *progs,
return NULL;
}

BTF_SET_START(lsm_cgroup_sock)
BTF_ID(func, bpf_lsm_socket_post_create)
BTF_ID(func, bpf_lsm_socket_bind)
BTF_SET_END(lsm_cgroup_sock)

/**
* __cgroup_bpf_attach() - Attach the program or the link to a cgroup, and
* propagate the change to descendants
Expand Down Expand Up @@ -455,9 +461,24 @@ static int __cgroup_bpf_attach(struct cgroup *cgrp,
/* replace_prog implies BPF_F_REPLACE, and vice versa */
return -EINVAL;

atype = to_cgroup_bpf_attach_type(type);
if (atype < 0)
return -EINVAL;
if (prog->type == BPF_PROG_TYPE_LSM &&
prog->expected_attach_type == BPF_LSM_CGROUP_SOCK) {
int idx;

BUG_ON(lsm_cgroup_sock.cnt != CGROUP_LSM_SOCK_NUM);

idx = btf_id_set_index(&lsm_cgroup_sock, prog->aux->attach_btf_id);
if (idx < 0)
return -EINVAL;

atype = CGROUP_LSM_SOCK_START + idx;

prog->aux->cgroup_atype = atype;
} else {
atype = to_cgroup_bpf_attach_type(type);
if (atype < 0)
return -EINVAL;
}

progs = &cgrp->bpf.progs[atype];

Expand Down Expand Up @@ -1091,6 +1112,22 @@ int __cgroup_bpf_run_filter_skb(struct sock *sk,
}
EXPORT_SYMBOL(__cgroup_bpf_run_filter_skb);

/* Trampoline shim for BPF_LSM_CGROUP_SOCK programs.
 *
 * @regs: saved hook arguments from the fmod_ret trampoline; slot 0
 *        holds the 'struct socket *' argument of the LSM hook.
 * @prog: program whose aux->cgroup_atype selects the effective array
 *        slot to run.
 *
 * Returns 0 when the socket has no 'struct sock' attached yet;
 * otherwise the result of running the cgroup's effective prog array.
 */
int __cgroup_bpf_run_lsm_sock(u64 *regs, const struct bpf_prog *prog)
{
	struct socket *sock;
	struct cgroup *cgrp;
	struct sock *sk;

	sock = (void *)regs[BPF_REG_0];

	sk = sock->sk;
	if (!sk)
		return 0;

	cgrp = sock_cgroup_ptr(&sk->sk_cgrp_data);

	return BPF_PROG_RUN_ARRAY_CG(cgrp->bpf.effective[prog->aux->cgroup_atype],
				     regs, bpf_prog_run, 0);
}

/**
* __cgroup_bpf_run_filter_sk() - Run a program on a sock
* @sk: sock structure to manipulate
Expand Down
6 changes: 5 additions & 1 deletion kernel/bpf/syscall.c
Original file line number Diff line number Diff line change
Expand Up @@ -2724,7 +2724,8 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
}
break;
case BPF_PROG_TYPE_LSM:
if (prog->expected_attach_type != BPF_LSM_MAC) {
if (prog->expected_attach_type != BPF_LSM_MAC &&
prog->expected_attach_type != BPF_LSM_CGROUP_SOCK) {
err = -EINVAL;
goto out_put_prog;
}
Expand Down Expand Up @@ -3184,6 +3185,8 @@ attach_type_to_prog_type(enum bpf_attach_type attach_type)
return BPF_PROG_TYPE_SK_LOOKUP;
case BPF_XDP:
return BPF_PROG_TYPE_XDP;
case BPF_LSM_CGROUP_SOCK:
return BPF_PROG_TYPE_LSM;
default:
return BPF_PROG_TYPE_UNSPEC;
}
Expand Down Expand Up @@ -3237,6 +3240,7 @@ static int bpf_prog_attach(const union bpf_attr *attr)
case BPF_PROG_TYPE_CGROUP_SOCKOPT:
case BPF_PROG_TYPE_CGROUP_SYSCTL:
case BPF_PROG_TYPE_SOCK_OPS:
case BPF_PROG_TYPE_LSM:
ret = cgroup_bpf_prog_attach(attr, ptype, prog);
break;
default:
Expand Down
1 change: 1 addition & 0 deletions kernel/bpf/trampoline.c
Original file line number Diff line number Diff line change
Expand Up @@ -406,6 +406,7 @@ static enum bpf_tramp_prog_type bpf_attach_type_to_tramp(struct bpf_prog *prog)
return BPF_TRAMP_MODIFY_RETURN;
case BPF_TRACE_FEXIT:
return BPF_TRAMP_FEXIT;
case BPF_LSM_CGROUP_SOCK:
case BPF_LSM_MAC:
if (!prog->aux->attach_func_proto->type)
/* The function returns void, we cannot modify its
Expand Down
1 change: 1 addition & 0 deletions kernel/bpf/verifier.c
Original file line number Diff line number Diff line change
Expand Up @@ -14105,6 +14105,7 @@ int bpf_check_attach_target(struct bpf_verifier_log *log,
fallthrough;
case BPF_MODIFY_RETURN:
case BPF_LSM_MAC:
case BPF_LSM_CGROUP_SOCK:
case BPF_TRACE_FENTRY:
case BPF_TRACE_FEXIT:
if (!btf_type_is_func(t)) {
Expand Down
1 change: 1 addition & 0 deletions tools/include/uapi/linux/bpf.h
Original file line number Diff line number Diff line change
Expand Up @@ -997,6 +997,7 @@ enum bpf_attach_type {
BPF_SK_REUSEPORT_SELECT,
BPF_SK_REUSEPORT_SELECT_OR_MIGRATE,
BPF_PERF_EVENT,
BPF_LSM_CGROUP_SOCK,
__MAX_BPF_ATTACH_TYPE
};

Expand Down

0 comments on commit 054de95

Please sign in to comment.