Merge branch 'bpf: Avoid unnecessary deadlock detection and failure in task storage'

Martin KaFai Lau says:

====================

From: Martin KaFai Lau <martin.lau@kernel.org>

The commit bc235cd ("bpf: Prevent deadlock from recursive bpf_task_storage_[get|delete]")
added deadlock detection to prevent a tracing program from recursing
into the bpf_task_storage_{get,delete}() helpers.  These helpers acquire
a spin lock, and recursion would lead to a deadlock.

This detection is unnecessary for bpf_lsm and bpf_iter programs, which
cannot recur.  The situation is the same as for the existing
bpf_pid_task_storage_{lookup,delete}_elem(), which are used in the
syscall path and also have no deadlock detection.

This set adds new bpf_task_storage_{get,delete}() helper protos
without the deadlock detection.  It also removes the prog->active
check from the bpf_lsm and bpf_iter programs.  Please see the
individual patches for details.
====================

Signed-off-by: Alexei Starovoitov <ast@kernel.org>
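
For context, a minimal bpf_lsm program using task storage looks like the
sketch below (illustration only, not part of this commit; the map and
program names are made up).  After this set, such non-recursive programs
get the proto that takes the task-storage lock unconditionally, so the
helper no longer fails with a spurious deadlock-detection busy error:

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} exec_cnt SEC(".maps");

SEC("lsm/bprm_committed_creds")
int BPF_PROG(count_exec, struct linux_binprm *bprm)
{
	long *cnt;

	/* bpf_lsm cannot recur into this helper, so no trylock is needed */
	cnt = bpf_task_storage_get(&exec_cnt, bpf_get_current_task_btf(), 0,
				   BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	return 0;
}

char _license[] SEC("license") = "GPL";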
Alexei Starovoitov committed Oct 26, 2022
2 parents f3c51fe + 387b532 commit fdf4578
Showing 12 changed files with 431 additions and 84 deletions.
9 changes: 2 additions & 7 deletions arch/arm64/net/bpf_jit_comp.c
@@ -1649,13 +1649,8 @@ static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
 	struct bpf_prog *p = l->link.prog;
 	int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
 
-	if (p->aux->sleepable) {
-		enter_prog = (u64)__bpf_prog_enter_sleepable;
-		exit_prog = (u64)__bpf_prog_exit_sleepable;
-	} else {
-		enter_prog = (u64)__bpf_prog_enter;
-		exit_prog = (u64)__bpf_prog_exit;
-	}
+	enter_prog = (u64)bpf_trampoline_enter(p);
+	exit_prog = (u64)bpf_trampoline_exit(p);
 
 	if (l->cookie == 0) {
 		/* if cookie is zero, one instruction is enough to store it */
19 changes: 2 additions & 17 deletions arch/x86/net/bpf_jit_comp.c
@@ -1894,10 +1894,6 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 			   struct bpf_tramp_link *l, int stack_size,
 			   int run_ctx_off, bool save_ret)
 {
-	void (*exit)(struct bpf_prog *prog, u64 start,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
-	u64 (*enter)(struct bpf_prog *prog,
-		     struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
 	u8 *prog = *pprog;
 	u8 *jmp_insn;
 	int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
@@ -1916,23 +1912,12 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	 */
 	emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
 
-	if (p->aux->sleepable) {
-		enter = __bpf_prog_enter_sleepable;
-		exit = __bpf_prog_exit_sleepable;
-	} else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
-		enter = __bpf_prog_enter_struct_ops;
-		exit = __bpf_prog_exit_struct_ops;
-	} else if (p->expected_attach_type == BPF_LSM_CGROUP) {
-		enter = __bpf_prog_enter_lsm_cgroup;
-		exit = __bpf_prog_exit_lsm_cgroup;
-	}
-
 	/* arg1: mov rdi, progs[i] */
 	emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
 	/* arg2: lea rsi, [rbp - ctx_cookie_off] */
 	EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
 
-	if (emit_call(&prog, enter, prog))
+	if (emit_call(&prog, bpf_trampoline_enter(p), prog))
 		return -EINVAL;
 	/* remember prog start time returned by __bpf_prog_enter */
 	emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
@@ -1977,7 +1962,7 @@ static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
 	emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
 	/* arg3: lea rdx, [rbp - run_ctx_off] */
 	EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
-	if (emit_call(&prog, exit, prog))
+	if (emit_call(&prog, bpf_trampoline_exit(p), prog))
 		return -EINVAL;
 
 	*pprog = prog;
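
The bpf_trampoline_enter()/bpf_trampoline_exit() selectors that both JITs now
call live in kernel/bpf/trampoline.c, which is among the 12 changed files but
not shown on this page.  A sketch of the enter-side dispatch (assumed shape;
handler names other than __bpf_prog_enter_sleepable_recur, which is declared
in bpf.h below, are assumptions rather than verbatim kernel code):

/* sketch: pick the per-prog enter handler once, at trampoline-generation
 * time, instead of open-coding an if/else chain in every JIT
 */
bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog)
{
	bool sleepable = prog->aux->sleepable;

	/* recursion-prone program types keep the prog->active check */
	if (bpf_prog_check_recur(prog))
		return sleepable ? __bpf_prog_enter_sleepable_recur :
				   __bpf_prog_enter_recur;

	if (resolve_prog_type(prog) == BPF_PROG_TYPE_LSM &&
	    prog->expected_attach_type == BPF_LSM_CGROUP)
		return __bpf_prog_enter_lsm_cgroup;

	return sleepable ? __bpf_prog_enter_sleepable : __bpf_prog_enter;
}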
26 changes: 12 additions & 14 deletions include/linux/bpf.h
@@ -854,22 +854,18 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *tr, void *image, void *i
 				const struct btf_func_model *m, u32 flags,
 				struct bpf_tramp_links *tlinks,
 				void *orig_call);
-/* these two functions are called from generated trampoline */
-u64 notrace __bpf_prog_enter(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start, struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_sleepable(struct bpf_prog *prog, struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_sleepable(struct bpf_prog *prog, u64 start,
-				       struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_lsm_cgroup(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_lsm_cgroup(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx);
-u64 notrace __bpf_prog_enter_struct_ops(struct bpf_prog *prog,
-					struct bpf_tramp_run_ctx *run_ctx);
-void notrace __bpf_prog_exit_struct_ops(struct bpf_prog *prog, u64 start,
-					struct bpf_tramp_run_ctx *run_ctx);
+u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
+					     struct bpf_tramp_run_ctx *run_ctx);
+void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start,
+					     struct bpf_tramp_run_ctx *run_ctx);
 void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr);
 void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr);
+typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog,
+				      struct bpf_tramp_run_ctx *run_ctx);
+typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start,
+				      struct bpf_tramp_run_ctx *run_ctx);
+bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog);
+bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog);
 
 struct bpf_ksym {
 	unsigned long start;
@@ -2523,7 +2519,9 @@ extern const struct bpf_func_proto bpf_this_cpu_ptr_proto;
 extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto;
 extern const struct bpf_func_proto bpf_sock_from_file_proto;
 extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto;
+extern const struct bpf_func_proto bpf_task_storage_get_recur_proto;
 extern const struct bpf_func_proto bpf_task_storage_get_proto;
+extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto;
 extern const struct bpf_func_proto bpf_task_storage_delete_proto;
 extern const struct bpf_func_proto bpf_for_each_map_elem_proto;
 extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto;
15 changes: 14 additions & 1 deletion include/linux/bpf_verifier.h
@@ -642,10 +642,23 @@ static inline u32 type_flag(u32 type)
 }
 
 /* only use after check_attach_btf_id() */
-static inline enum bpf_prog_type resolve_prog_type(struct bpf_prog *prog)
+static inline enum bpf_prog_type resolve_prog_type(const struct bpf_prog *prog)
 {
 	return prog->type == BPF_PROG_TYPE_EXT ?
 		prog->aux->dst_prog->type : prog->type;
 }
 
+static inline bool bpf_prog_check_recur(const struct bpf_prog *prog)
+{
+	switch (resolve_prog_type(prog)) {
+	case BPF_PROG_TYPE_TRACING:
+		return prog->expected_attach_type != BPF_TRACE_ITER;
+	case BPF_PROG_TYPE_STRUCT_OPS:
+	case BPF_PROG_TYPE_LSM:
+		return false;
+	default:
+		return true;
+	}
+}
+
 #endif /* _LINUX_BPF_VERIFIER_H */
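
bpf_prog_check_recur() is what lets the verifier hand recursion-prone program
types the trylock-based "_recur" helper protos, while bpf_lsm, bpf_iter
(BPF_TRACE_ITER) and struct_ops programs get the plain lock-based ones.  A
sketch of that selection (assumed shape; the real switch lives in
kernel/trace/bpf_trace.c, one of the changed files not shown on this page):

static const struct bpf_func_proto *
task_storage_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog)
{
	switch (func_id) {
	case BPF_FUNC_task_storage_get:
		return bpf_prog_check_recur(prog) ?
		       &bpf_task_storage_get_recur_proto :
		       &bpf_task_storage_get_proto;
	case BPF_FUNC_task_storage_delete:
		return bpf_prog_check_recur(prog) ?
		       &bpf_task_storage_delete_recur_proto :
		       &bpf_task_storage_delete_proto;
	default:
		return NULL;
	}
}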
1 change: 1 addition & 0 deletions kernel/bpf/bpf_local_storage.c
@@ -242,6 +242,7 @@ void bpf_selem_unlink(struct bpf_local_storage_elem *selem, bool use_trace_rcu)
 	__bpf_selem_unlink_storage(selem, use_trace_rcu);
 }
 
+/* If cacheit_lockit is false, this lookup function is lockless */
 struct bpf_local_storage_data *
 bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
 			 struct bpf_local_storage_map *smap,
119 changes: 94 additions & 25 deletions kernel/bpf/bpf_task_storage.c
@@ -184,14 +184,18 @@ static int bpf_pid_task_storage_update_elem(struct bpf_map *map, void *key,
 	return err;
 }
 
-static int task_storage_delete(struct task_struct *task, struct bpf_map *map)
+static int task_storage_delete(struct task_struct *task, struct bpf_map *map,
+			       bool nobusy)
 {
 	struct bpf_local_storage_data *sdata;
 
 	sdata = task_storage_lookup(task, map, false);
 	if (!sdata)
 		return -ENOENT;
 
+	if (!nobusy)
+		return -EBUSY;
+
 	bpf_selem_unlink(SELEM(sdata), true);
 
 	return 0;
@@ -220,63 +224,108 @@ static int bpf_pid_task_storage_delete_elem(struct bpf_map *map, void *key)
 	}
 
 	bpf_task_storage_lock();
-	err = task_storage_delete(task, map);
+	err = task_storage_delete(task, map, true);
 	bpf_task_storage_unlock();
 out:
 	put_pid(pid);
 	return err;
 }
 
-/* *gfp_flags* is a hidden argument provided by the verifier */
-BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
-	   task, void *, value, u64, flags, gfp_t, gfp_flags)
+/* Called by bpf_task_storage_get*() helpers */
+static void *__bpf_task_storage_get(struct bpf_map *map,
+				    struct task_struct *task, void *value,
+				    u64 flags, gfp_t gfp_flags, bool nobusy)
 {
 	struct bpf_local_storage_data *sdata;
 
-	WARN_ON_ONCE(!bpf_rcu_lock_held());
-	if (flags & ~(BPF_LOCAL_STORAGE_GET_F_CREATE))
-		return (unsigned long)NULL;
-
-	if (!task)
-		return (unsigned long)NULL;
-
-	if (!bpf_task_storage_trylock())
-		return (unsigned long)NULL;
-
-	sdata = task_storage_lookup(task, map, true);
+	sdata = task_storage_lookup(task, map, nobusy);
 	if (sdata)
-		goto unlock;
+		return sdata->data;
 
 	/* only allocate new storage, when the task is refcounted */
 	if (refcount_read(&task->usage) &&
-	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE))
+	    (flags & BPF_LOCAL_STORAGE_GET_F_CREATE) && nobusy) {
 		sdata = bpf_local_storage_update(
 			task, (struct bpf_local_storage_map *)map, value,
 			BPF_NOEXIST, gfp_flags);
+		return IS_ERR(sdata) ? NULL : sdata->data;
+	}
+
+	return NULL;
+}
 
-unlock:
+/* *gfp_flags* is a hidden argument provided by the verifier */
+BPF_CALL_5(bpf_task_storage_get_recur, struct bpf_map *, map, struct task_struct *,
+	   task, void *, value, u64, flags, gfp_t, gfp_flags)
+{
+	bool nobusy;
+	void *data;
+
+	WARN_ON_ONCE(!bpf_rcu_lock_held());
+	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
+		return (unsigned long)NULL;
+
+	nobusy = bpf_task_storage_trylock();
+	data = __bpf_task_storage_get(map, task, value, flags,
+				      gfp_flags, nobusy);
+	if (nobusy)
+		bpf_task_storage_unlock();
+	return (unsigned long)data;
+}
+
+/* *gfp_flags* is a hidden argument provided by the verifier */
+BPF_CALL_5(bpf_task_storage_get, struct bpf_map *, map, struct task_struct *,
+	   task, void *, value, u64, flags, gfp_t, gfp_flags)
+{
+	void *data;
+
+	WARN_ON_ONCE(!bpf_rcu_lock_held());
+	if (flags & ~BPF_LOCAL_STORAGE_GET_F_CREATE || !task)
+		return (unsigned long)NULL;
+
+	bpf_task_storage_lock();
+	data = __bpf_task_storage_get(map, task, value, flags,
+				      gfp_flags, true);
 	bpf_task_storage_unlock();
-	return IS_ERR_OR_NULL(sdata) ? (unsigned long)NULL :
-		(unsigned long)sdata->data;
+	return (unsigned long)data;
 }
 
-BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
+BPF_CALL_2(bpf_task_storage_delete_recur, struct bpf_map *, map, struct task_struct *,
 	   task)
 {
+	bool nobusy;
 	int ret;
 
 	WARN_ON_ONCE(!bpf_rcu_lock_held());
 	if (!task)
 		return -EINVAL;
 
-	if (!bpf_task_storage_trylock())
-		return -EBUSY;
+	nobusy = bpf_task_storage_trylock();
+	/* This helper must only be called from places where the lifetime of the task
+	 * is guaranteed. Either by being refcounted or by being protected
+	 * by an RCU read-side critical section.
+	 */
+	ret = task_storage_delete(task, map, nobusy);
+	if (nobusy)
+		bpf_task_storage_unlock();
+	return ret;
+}
 
+BPF_CALL_2(bpf_task_storage_delete, struct bpf_map *, map, struct task_struct *,
+	   task)
+{
+	int ret;
+
+	WARN_ON_ONCE(!bpf_rcu_lock_held());
+	if (!task)
+		return -EINVAL;
+
+	bpf_task_storage_lock();
 	/* This helper must only be called from places where the lifetime of the task
 	 * is guaranteed. Either by being refcounted or by being protected
 	 * by an RCU read-side critical section.
	 */
-	ret = task_storage_delete(task, map);
+	ret = task_storage_delete(task, map, true);
 	bpf_task_storage_unlock();
 	return ret;
 }
@@ -322,6 +371,17 @@ const struct bpf_map_ops task_storage_map_ops = {
 	.map_owner_storage_ptr = task_storage_ptr,
 };
 
+const struct bpf_func_proto bpf_task_storage_get_recur_proto = {
+	.func = bpf_task_storage_get_recur,
+	.gpl_only = false,
+	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg1_type = ARG_CONST_MAP_PTR,
+	.arg2_type = ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
+	.arg4_type = ARG_ANYTHING,
+};
+
 const struct bpf_func_proto bpf_task_storage_get_proto = {
 	.func = bpf_task_storage_get,
 	.gpl_only = false,
@@ -333,6 +393,15 @@ const struct bpf_func_proto bpf_task_storage_get_proto = {
 	.arg4_type = ARG_ANYTHING,
 };
 
+const struct bpf_func_proto bpf_task_storage_delete_recur_proto = {
+	.func = bpf_task_storage_delete_recur,
+	.gpl_only = false,
+	.ret_type = RET_INTEGER,
+	.arg1_type = ARG_CONST_MAP_PTR,
+	.arg2_type = ARG_PTR_TO_BTF_ID,
+	.arg2_btf_id = &btf_tracing_ids[BTF_TRACING_TYPE_TASK],
+};
+
 const struct bpf_func_proto bpf_task_storage_delete_proto = {
 	.func = bpf_task_storage_delete,
 	.gpl_only = false,
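
Tracing programs keep the "_recur" behavior, so they must still tolerate the
busy case.  A sketch of a tracing program that can observe it (illustration
only; modeled on the selftests in this series, map and program names are
made up):

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, long);
} seen SEC(".maps");

SEC("fentry/bpf_local_storage_lookup")
int BPF_PROG(on_lookup)
{
	struct task_struct *task = bpf_get_current_task_btf();
	long err;

	/* If this invocation interrupted a holder of the task-storage
	 * lock, the trylock in the _recur proto fails and the delete is
	 * rejected with -EBUSY instead of deadlocking.
	 */
	err = bpf_task_storage_delete(&seen, task);
	if (err == -EBUSY)
		bpf_printk("deadlock avoided");
	return 0;
}

char _license[] SEC("license") = "GPL";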
5 changes: 3 additions & 2 deletions kernel/bpf/syscall.c
@@ -5133,13 +5133,14 @@ int kern_sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
 
 		run_ctx.bpf_cookie = 0;
 		run_ctx.saved_run_ctx = NULL;
-		if (!__bpf_prog_enter_sleepable(prog, &run_ctx)) {
+		if (!__bpf_prog_enter_sleepable_recur(prog, &run_ctx)) {
 			/* recursion detected */
 			bpf_prog_put(prog);
 			return -EBUSY;
 		}
 		attr->test.retval = bpf_prog_run(prog, (void *) (long) attr->test.ctx_in);
-		__bpf_prog_exit_sleepable(prog, 0 /* bpf_prog_run does runtime stats */, &run_ctx);
+		__bpf_prog_exit_sleepable_recur(prog, 0 /* bpf_prog_run does runtime stats */,
+						&run_ctx);
 		bpf_prog_put(prog);
 		return 0;
 #endif
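
The renamed __bpf_prog_enter_sleepable_recur() keeps the per-cpu prog->active
recursion counter that the non-recur variants drop.  A sketch of what the
"_recur" enter plausibly does (assumed body based on the pre-existing
recursion protection; the real code is in kernel/bpf/trampoline.c):

u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog,
					     struct bpf_tramp_run_ctx *run_ctx)
{
	rcu_read_lock_trace();
	migrate_disable();
	might_fault();

	/* returning 0 tells the trampoline to skip the program */
	if (unlikely(this_cpu_inc_return(*(prog->active)) != 1)) {
		bpf_prog_inc_misses_counter(prog);
		return 0;
	}

	run_ctx->saved_run_ctx = bpf_set_run_ctx(&run_ctx->run_ctx);
	return bpf_prog_start_time();
}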
