Skip to content

Commit

Permalink
net: netfilter: Add unstable CT lookup helper for XDP and TC-BPF
Browse files Browse the repository at this point in the history
This change adds conntrack lookup helpers using the unstable kfunc call
interface for the XDP and TC-BPF hooks.

Also add acquire/release functions (the acquire helper randomly returns
NULL) and, for the TC hook, exercise the RET_PTR_TO_BTF_ID_OR_NULL path
so that the BPF program caller has to check for NULL before
dereferencing the pointer. These will be used in selftests.

Export get_net_ns_by_id and btf_type_by_id as nf_conntrack needs to call
them.

[ NOTE: Currently the btf_type check does not work, due to the problem
described in the thread mentioned in the comments ]

Signed-off-by: Kumar Kartikeya Dwivedi <memxor@gmail.com>
  • Loading branch information
kkdwivedi authored and intel-lab-lkp committed Oct 30, 2021
1 parent b6df0d5 commit f52e281
Show file tree
Hide file tree
Showing 7 changed files with 392 additions and 0 deletions.
22 changes: 22 additions & 0 deletions include/linux/bpf.h
Expand Up @@ -1647,6 +1647,10 @@ int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner);
bool bpf_prog_test_is_acquire_kfunc(u32 kfunc_id, struct module *owner);
bool bpf_prog_test_is_release_kfunc(u32 kfunc_id, struct module *owner);
enum bpf_return_type bpf_prog_test_get_kfunc_return_type(u32 kfunc_id,
struct module *owner);
bool btf_ctx_access(int off, int size, enum bpf_access_type type,
const struct bpf_prog *prog,
struct bpf_insn_access_aux *info);
Expand Down Expand Up @@ -1874,6 +1878,24 @@ static inline bool bpf_prog_test_check_kfunc_call(u32 kfunc_id,
return false;
}

/* Stub returning false — presumably the !CONFIG_BPF_SYSCALL fallback
 * branch (the enclosing #ifdef is outside this hunk; confirm in tree).
 */
static inline bool bpf_prog_test_is_acquire_kfunc(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}

/* Stub returning false — presumably the !CONFIG_BPF_SYSCALL fallback
 * branch (the enclosing #ifdef is outside this hunk; confirm in tree).
 */
static inline bool bpf_prog_test_is_release_kfunc(u32 kfunc_id,
						  struct module *owner)
{
	return false;
}

/* Stub: __BPF_RET_TYPE_MAX is the "no known return type" sentinel used
 * by the non-stub implementation in net/bpf/test_run.c as well.
 */
static inline enum bpf_return_type
bpf_prog_test_get_kfunc_return_type(u32 kfunc_id, struct module *owner)
{
	return __BPF_RET_TYPE_MAX;
}

/* No-op stub for builds without BPF syscall support. */
static inline void bpf_map_put(struct bpf_map *map)
{
}
Expand Down
1 change: 1 addition & 0 deletions include/linux/btf.h
Expand Up @@ -321,5 +321,6 @@ static inline int bpf_btf_mod_struct_access(struct kfunc_btf_id_list *klist,

extern struct kfunc_btf_id_list bpf_tcp_ca_kfunc_list;
extern struct kfunc_btf_id_list prog_test_kfunc_list;
extern struct kfunc_btf_id_list xdp_kfunc_list;

#endif
2 changes: 2 additions & 0 deletions kernel/bpf/btf.c
Expand Up @@ -735,6 +735,7 @@ const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id)
return NULL;
return btf->types[type_id];
}
EXPORT_SYMBOL_GPL(btf_type_by_id);

/*
* Regular int is not a bit field and it must be either
Expand Down Expand Up @@ -6502,3 +6503,4 @@ int bpf_btf_mod_struct_access(struct kfunc_btf_id_list *klist,

DEFINE_KFUNC_BTF_ID_LIST(bpf_tcp_ca_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(prog_test_kfunc_list);
DEFINE_KFUNC_BTF_ID_LIST(xdp_kfunc_list);
55 changes: 55 additions & 0 deletions net/bpf/test_run.c
Expand Up @@ -232,6 +232,28 @@ struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
return sk;
}

struct prog_test_ref_kfunc {
int a;
int b;
};

static struct prog_test_ref_kfunc prog_test_struct;

/* Test acquire kfunc: hands out the static test object, but fails
 * (returns NULL) on odd jiffies so that BPF programs are forced to
 * NULL-check the result before dereferencing it.
 */
noinline struct prog_test_ref_kfunc *bpf_kfunc_call_test_acquire(char *ptr)
{
	/* Succeed only when the jiffies counter is even. */
	if (!(get_jiffies_64() % 2)) {
		prog_test_struct.a = 42;
		prog_test_struct.b = 108;
		return &prog_test_struct;
	}
	return NULL;
}

/* Test release kfunc paired with bpf_kfunc_call_test_acquire(). The
 * object is statically allocated, so there is nothing to free; the
 * function exists only so the verifier has a release point to track.
 */
noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	/* Redundant "return;" at the end of a void function removed. */
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);
Expand All @@ -240,15 +262,48 @@ BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_SET_END(test_sk_kfunc_ids)

/* Ordered BTF ID list: [0] = acquire kfunc, [1] = release kfunc.
 * The index positions are relied upon by the lookup helpers below.
 */
BTF_ID_LIST(test_sk_acq_rel)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)

/* True when @kfunc_id is callable from a test program: either one of
 * the built-in test kfuncs, or a kfunc registered by module @owner on
 * prog_test_kfunc_list.
 */
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id) ||
	       bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
}

/* True when @kfunc_id is an acquire kfunc. A module-provided kfunc
 * (@owner set) is checked against prog_test_kfunc_list; the built-in
 * case matches only bpf_kfunc_call_test_acquire.
 */
bool bpf_prog_test_is_acquire_kfunc(u32 kfunc_id, struct module *owner)
{
	if (owner)
		return bpf_is_mod_acquire_kfunc(&prog_test_kfunc_list,
						kfunc_id, owner);
	return kfunc_id == test_sk_acq_rel[0];
}

/* True when @kfunc_id is a release kfunc. A module-provided kfunc
 * (@owner set) is checked against prog_test_kfunc_list; the built-in
 * case matches only bpf_kfunc_call_test_release.
 */
bool bpf_prog_test_is_release_kfunc(u32 kfunc_id, struct module *owner)
{
	if (owner)
		return bpf_is_mod_release_kfunc(&prog_test_kfunc_list,
						kfunc_id, owner);
	return kfunc_id == test_sk_acq_rel[1];
}

/* Return type override for test kfuncs. Module kfuncs (@owner set)
 * are resolved via prog_test_kfunc_list; of the built-ins, only the
 * acquire kfunc gets RET_PTR_TO_BTF_ID_OR_NULL (forcing the caller to
 * NULL-check). __BPF_RET_TYPE_MAX means "no override".
 */
enum bpf_return_type bpf_prog_test_get_kfunc_return_type(u32 kfunc_id,
							 struct module *owner)
{
	if (owner)
		return bpf_get_mod_kfunc_return_type(&prog_test_kfunc_list,
						     kfunc_id, owner);
	if (kfunc_id == test_sk_acq_rel[0])
		return RET_PTR_TO_BTF_ID_OR_NULL;
	return __BPF_RET_TYPE_MAX;
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
u32 headroom, u32 tailroom)
{
Expand Down
56 changes: 56 additions & 0 deletions net/core/filter.c
Expand Up @@ -9948,24 +9948,80 @@ const struct bpf_prog_ops sk_filter_prog_ops = {
.test_run = bpf_prog_test_run_skb,
};

static int xdp_btf_struct_access(struct bpf_verifier_log *log,
const struct btf *btf,
const struct btf_type *t, int off,
int size, enum bpf_access_type atype,
u32 *next_btf_id);

/* TC cls_act verifier ops: kfunc hooks reuse the bpf_prog_test_*
 * helpers from net/bpf/test_run.c so selftests can exercise the
 * acquire/release and RET_PTR_TO_BTF_ID_OR_NULL paths on this hook.
 */
const struct bpf_verifier_ops tc_cls_act_verifier_ops = {
	.get_func_proto = tc_cls_act_func_proto,
	.is_valid_access = tc_cls_act_is_valid_access,
	.convert_ctx_access = tc_cls_act_convert_ctx_access,
	.gen_prologue = tc_cls_act_prologue,
	.gen_ld_abs = bpf_gen_ld_abs,
	.check_kfunc_call = bpf_prog_test_check_kfunc_call,
	.is_acquire_kfunc = bpf_prog_test_is_acquire_kfunc,
	.is_release_kfunc = bpf_prog_test_is_release_kfunc,
	.get_kfunc_return_type = bpf_prog_test_get_kfunc_return_type,
	/* reuse the XDP callback, there is nothing xdp specific in it */
	.btf_struct_access = xdp_btf_struct_access,
};

/* TC cls_act prog ops: test_run reuses the generic skb runner. */
const struct bpf_prog_ops tc_cls_act_prog_ops = {
	.test_run = bpf_prog_test_run_skb,
};

/* Acquire-kfunc check for XDP: delegate to module registrations on
 * xdp_kfunc_list (no built-in XDP acquire kfuncs here).
 */
static bool xdp_is_acquire_kfunc(u32 kfunc_id, struct module *owner)
{
	return bpf_is_mod_acquire_kfunc(&xdp_kfunc_list, kfunc_id, owner);
}

/* Release-kfunc check for XDP: delegate to module registrations on
 * xdp_kfunc_list.
 */
static bool xdp_is_release_kfunc(u32 kfunc_id, struct module *owner)
{
	return bpf_is_mod_release_kfunc(&xdp_kfunc_list, kfunc_id, owner);
}

/* Return-type lookup for XDP kfuncs, resolved via xdp_kfunc_list. */
static enum bpf_return_type xdp_get_kfunc_return_type(u32 kfunc_id,
						      struct module *owner)
{
	return bpf_get_mod_kfunc_return_type(&xdp_kfunc_list, kfunc_id, owner);
}

/* btf_struct_access callback shared by the XDP and TC verifier ops:
 * first give module kfunc lists registered on xdp_kfunc_list a chance
 * to vet the BTF pointer access, then fall back to the generic
 * btf_struct_access() when no module claimed it.
 */
static int xdp_btf_struct_access(struct bpf_verifier_log *log,
				 const struct btf *btf,
				 const struct btf_type *t, int off,
				 int size, enum bpf_access_type atype,
				 u32 *next_btf_id)
{
	/* __BPF_REG_TYPE_MAX is the "not handled by any module" sentinel. */
	int ret = __BPF_REG_TYPE_MAX;
	struct module *mod;

	/* Only read access is permitted through BTF pointers here. */
	if (atype != BPF_READ)
		return -EACCES;

	if (btf_is_module(btf)) {
		/* Pin the owning module while consulting its callback. */
		mod = btf_try_get_module(btf);
		if (!mod)
			return -ENXIO;
		ret = bpf_btf_mod_struct_access(&xdp_kfunc_list, mod, log, btf, t, off, size,
						atype, next_btf_id);
		module_put(mod);
	}
	/* Unclaimed by modules: use the vmlinux BTF access checker. */
	if (ret == __BPF_REG_TYPE_MAX)
		return btf_struct_access(log, btf, t, off, size, atype, next_btf_id);
	return ret;
}

/* XDP verifier ops: kfunc hooks resolve against xdp_kfunc_list, the
 * registration point used by nf_conntrack for its CT lookup kfuncs.
 */
const struct bpf_verifier_ops xdp_verifier_ops = {
	.get_func_proto = xdp_func_proto,
	.is_valid_access = xdp_is_valid_access,
	.convert_ctx_access = xdp_convert_ctx_access,
	.gen_prologue = bpf_noop_prologue,
	.is_acquire_kfunc = xdp_is_acquire_kfunc,
	.is_release_kfunc = xdp_is_release_kfunc,
	.get_kfunc_return_type = xdp_get_kfunc_return_type,
	.btf_struct_access = xdp_btf_struct_access,
};

const struct bpf_prog_ops xdp_prog_ops = {
Expand Down
1 change: 1 addition & 0 deletions net/core/net_namespace.c
Expand Up @@ -299,6 +299,7 @@ struct net *get_net_ns_by_id(const struct net *net, int id)

return peer;
}
EXPORT_SYMBOL_GPL(get_net_ns_by_id);

/*
* setup_net runs the initializers for the network namespace object.
Expand Down

0 comments on commit f52e281

Please sign in to comment.