Improvements for tracking scalars in the BPF verifier #6182

Closed
2 changes: 1 addition & 1 deletion include/linux/bpf_verifier.h
@@ -453,7 +453,7 @@ struct bpf_verifier_state {

#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
((1 << frame->stack[slot].slot_type[0]) & (mask))) \
((1 << frame->stack[slot].slot_type[BPF_REG_SIZE - 1]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
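
The macro now tests slot_type[BPF_REG_SIZE - 1] instead of slot_type[0], so bpf_for_each_spilled_reg() also visits registers that were spilled with a store narrower than eight bytes. Below is a standalone sketch (not kernel code) of the slot-tagging convention this relies on, assuming save_register_state() keeps its behaviour of marking the last `size` bytes of a slot as STACK_SPILL and the rest as STACK_MISC:

#include <stdio.h>

#define BPF_REG_SIZE 8
enum { STACK_MISC = 1, STACK_SPILL = 2 };

/* Model of how save_register_state() tags an 8-byte stack slot. */
static void mark_spill(unsigned char slot_type[BPF_REG_SIZE], int size)
{
	int i;

	for (i = BPF_REG_SIZE; i > BPF_REG_SIZE - size; i--)
		slot_type[i - 1] = STACK_SPILL;
	for (; i > 0; i--)
		slot_type[i - 1] = STACK_MISC;
}

int main(void)
{
	unsigned char full[BPF_REG_SIZE], narrow[BPF_REG_SIZE];

	mark_spill(full, 8);	/* *(u64 *)(r10 - 8) = rX */
	mark_spill(narrow, 4);	/* *(u32 *)(r10 - 8) = rX */

	/* Old check: a narrow spill is invisible. */
	printf("slot_type[0]:              full=%d narrow=%d\n",
	       full[0] == STACK_SPILL, narrow[0] == STACK_SPILL);
	/* New check: full and narrow spills are both found. */
	printf("slot_type[BPF_REG_SIZE-1]: full=%d narrow=%d\n",
	       full[BPF_REG_SIZE - 1] == STACK_SPILL,
	       narrow[BPF_REG_SIZE - 1] == STACK_SPILL);
	return 0;
}

For a full 64-bit spill both byte 0 and byte 7 read STACK_SPILL, but for a 32-bit spill only the last four slot_type bytes do, which is why the old slot_type[0] test skipped narrow spills.
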
160 changes: 133 additions & 27 deletions kernel/bpf/verifier.c
@@ -1168,6 +1168,12 @@ static void mark_stack_slot_misc(struct bpf_verifier_env *env, u8 *stype)
*stype = STACK_MISC;
}

static bool is_spilled_scalar_reg64(const struct bpf_stack_state *stack)
{
return stack->slot_type[0] == STACK_SPILL &&
stack->spilled_ptr.type == SCALAR_VALUE;
}

static void scrub_spilled_slot(u8 *stype)
{
if (*stype != STACK_INVALID)
@@ -4387,11 +4393,6 @@ static bool __is_scalar_unbounded(struct bpf_reg_state *reg)
reg->u32_min_value == 0 && reg->u32_max_value == U32_MAX;
}

static bool register_is_bounded(struct bpf_reg_state *reg)
{
return reg->type == SCALAR_VALUE && !__is_scalar_unbounded(reg);
}

static bool __is_pointer_value(bool allow_ptr_leaks,
const struct bpf_reg_state *reg)
{
@@ -4401,6 +4402,18 @@ static bool __is_pointer_value(bool allow_ptr_leaks,
return reg->type != SCALAR_VALUE;
}

static void assign_scalar_id_before_mov(struct bpf_verifier_env *env,
struct bpf_reg_state *src_reg)
{
if (src_reg->type == SCALAR_VALUE && !src_reg->id &&
!tnum_is_const(src_reg->var_off))
/* Ensure that src_reg has a valid ID that will be copied to
* dst_reg and then will be used by find_equal_scalars() to
* propagate min/max range.
*/
src_reg->id = ++env->id_gen;
}

/* Copy src state preserving dst->parent and dst->live fields */
static void copy_register_state(struct bpf_reg_state *dst, const struct bpf_reg_state *src)
{
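
assign_scalar_id_before_mov() factors out the ID assignment that check_alu_op() used to open-code for register-to-register moves, so the same rule (give a non-constant scalar an ID if it has none yet) can be reused on spills further down. The ID is what lets find_equal_scalars() refine every copy of a value at once, as in this rough selftest-style sketch; the section, function name and labels are only illustrative, and the usual selftest headers (bpf_misc.h and friends) are assumed:

SEC("socket")
__success
__naked void shared_id_propagates_bounds(void)
{
	asm volatile ("						\
	call %[bpf_get_prandom_u32];				\
	r6 = r0;	/* r0 and r6 now share one scalar ID */	\
	if r0 > 7 goto l0_%=;					\
	r1 = r6;	/* r6 is also known to be <= 7 here, */	\
			/* thanks to find_equal_scalars()    */	\
l0_%=:	r0 = 0;							\
	exit;							\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
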
@@ -4436,6 +4449,11 @@ static bool is_bpf_st_mem(struct bpf_insn *insn)
return BPF_CLASS(insn->code) == BPF_ST && BPF_MODE(insn->code) == BPF_MEM;
}

static int get_reg_width(struct bpf_reg_state *reg)
{
return fls64(reg->umax_value);
}

/* check_stack_{read,write}_fixed_off functions track spill/fill of registers,
* stack boundary and alignment are checked in check_mem_access()
*/
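
get_reg_width() reports how many bits are needed to represent any value the register may currently hold, derived from its umax bound; the spill and mov paths compare it against the width of the store or move. A standalone sketch of the values it produces, using a portable stand-in for the kernel's fls64() (1-based index of the most significant set bit, 0 for 0):

#include <stdio.h>
#include <stdint.h>

/* Portable stand-in for the kernel's fls64(). */
static int fls64(uint64_t x)
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	/* umax_value -> get_reg_width() */
	printf("%d\n", fls64(0));		/*  0: known zero, fits any store      */
	printf("%d\n", fls64(255));		/*  8: fits even an 8-bit spill        */
	printf("%d\n", fls64(UINT32_MAX));	/* 32: exactly fits a 32-bit store     */
	printf("%d\n", fls64(1ULL << 32));	/* 33: a 32-bit store would truncate   */
	return 0;
}

In check_alu_op() below, get_reg_width(src_reg) <= 32 is therefore the same condition as the old umax_value <= U32_MAX test.
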
@@ -4485,10 +4503,16 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
return err;

mark_stack_slot_scratched(env, spi);
if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) && env->bpf_capable) {
if (reg && !(off % BPF_REG_SIZE) && reg->type == SCALAR_VALUE && env->bpf_capable) {
bool reg_value_fits;

reg_value_fits = get_reg_width(reg) <= BITS_PER_BYTE * size;
/* Make sure that reg had an ID to build a relation on spill. */
if (reg_value_fits)
assign_scalar_id_before_mov(env, reg);
save_register_state(env, state, spi, reg, size);
/* Break the relation on a narrowing spill. */
if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
if (!reg_value_fits)
state->stack[spi].spilled_ptr.id = 0;
} else if (!reg && !(off % BPF_REG_SIZE) && is_bpf_st_mem(insn) &&
insn->imm != 0 && env->bpf_capable) {
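
Two changes on the write path: dropping register_is_bounded() means unbounded scalars are now tracked across spills too, and a scalar whose value fits the store width is given an ID before being saved, so the live register and the stack slot stay related. A narrowing spill still clears the slot's ID because the stored value is truncated. A rough selftest-style sketch (illustrative only, usual selftest headers assumed):

SEC("socket")
__success
__naked void spill_links_reg_and_slot(void)
{
	asm volatile ("						\
	call %[bpf_get_prandom_u32];				\
	*(u64 *)(r10 - 8) = r0;	 /* full-width spill: r0 and the  */ \
				 /* slot now share a scalar ID    */ \
	*(u32 *)(r10 - 16) = r0; /* narrowing spill: the value may */ \
				 /* not fit, so this slot gets no */ \
				 /* ID and stays unrelated to r0  */ \
	if r0 > 15 goto l0_%=;					\
	r1 = *(u64 *)(r10 - 8);	 /* the r0 <= 15 bound was also   */ \
				 /* propagated to the spilled     */ \
				 /* slot, so r1 is <= 15 too      */ \
l0_%=:	r0 = 0;							\
	exit;							\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
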
@@ -4754,14 +4778,26 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
if (dst_regno < 0)
return 0;

if (!(off % BPF_REG_SIZE) && size == spill_size) {
if (size <= spill_size &&
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
!(off % BPF_REG_SIZE)
#else
!((off + size - spill_size) % BPF_REG_SIZE)
#endif
) {
/* The earlier check_reg_arg() has decided the
* subreg_def for this insn. Save it first.
*/
s32 subreg_def = state->regs[dst_regno].subreg_def;

copy_register_state(&state->regs[dst_regno], reg);
state->regs[dst_regno].subreg_def = subreg_def;

/* Break the relation on a narrowing fill.
* coerce_reg_to_size will adjust the boundaries.
*/
if (get_reg_width(reg) > size * BITS_PER_BYTE)
state->regs[dst_regno].id = 0;
} else {
int spill_cnt = 0, zero_cnt = 0;
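
The read path now also accepts fills that are narrower than the original spill; the __BYTE_ORDER__ split moves the offset check to the most significant bytes of the slot on big-endian. The spilled bounds are carried over and later clamped by coerce_reg_to_size(), and the ID is dropped only when the narrow read actually truncates the value. A rough little-endian sketch in the selftests' style (illustrative only, usual headers assumed):

SEC("socket")
__success
__naked void narrow_fill_keeps_bounds(void)
{
	asm volatile ("						\
	call %[bpf_get_prandom_u32];				\
	r6 = r0;						\
	r6 &= 0xff;		 /* r6 in [0, 255]                   */ \
	*(u64 *)(r10 - 8) = r6;	 /* full-width spill                 */ \
	r1 = *(u32 *)(r10 - 8);	 /* narrow fill, value fits 32 bits: */ \
				 /* bounds and scalar ID survive     */ \
	*(u64 *)(r10 - 16) = r0; /* full-width spill of unknown u64  */ \
	r2 = *(u32 *)(r10 - 16); /* narrow fill truncates: bounds    */ \
				 /* clamped to 32 bits, ID dropped   */ \
	r0 = 0;							\
	exit;							\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}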

@@ -6037,10 +6073,10 @@ static void coerce_reg_to_size(struct bpf_reg_state *reg, int size)
* values are also truncated so we push 64-bit bounds into
* 32-bit bounds. Above were truncated < 32-bits already.
*/
if (size < 4) {
if (size < 4)
__mark_reg32_unbounded(reg);
reg_bounds_sync(reg);
}

reg_bounds_sync(reg);
}

static void set_sext64_default_val(struct bpf_reg_state *reg, int size)
@@ -13884,20 +13920,13 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
if (BPF_SRC(insn->code) == BPF_X) {
struct bpf_reg_state *src_reg = regs + insn->src_reg;
struct bpf_reg_state *dst_reg = regs + insn->dst_reg;
bool need_id = src_reg->type == SCALAR_VALUE && !src_reg->id &&
!tnum_is_const(src_reg->var_off);

if (BPF_CLASS(insn->code) == BPF_ALU64) {
if (insn->off == 0) {
/* case: R1 = R2
* copy register state to dest reg
*/
if (need_id)
/* Assign src and dst registers the same ID
* that will be used by find_equal_scalars()
* to propagate min/max range.
*/
src_reg->id = ++env->id_gen;
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
dst_reg->live |= REG_LIVE_WRITTEN;
dst_reg->subreg_def = DEF_NOT_SUBREG;
@@ -13912,8 +13941,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
bool no_sext;

no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));
if (no_sext && need_id)
src_reg->id = ++env->id_gen;
if (no_sext)
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
@@ -13933,10 +13962,10 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
return -EACCES;
} else if (src_reg->type == SCALAR_VALUE) {
if (insn->off == 0) {
bool is_src_reg_u32 = src_reg->umax_value <= U32_MAX;
bool is_src_reg_u32 = get_reg_width(src_reg) <= 32;

if (is_src_reg_u32 && need_id)
src_reg->id = ++env->id_gen;
if (is_src_reg_u32)
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
/* Make sure ID is cleared if src_reg is not in u32
* range otherwise dst_reg min/max could be incorrectly
@@ -13950,8 +13979,8 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
/* case: W1 = (s8, s16)W2 */
bool no_sext = src_reg->umax_value < (1ULL << (insn->off - 1));

if (no_sext && need_id)
src_reg->id = ++env->id_gen;
if (no_sext)
assign_scalar_id_before_mov(env, src_reg);
copy_register_state(dst_reg, src_reg);
if (!no_sext)
dst_reg->id = 0;
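
Throughout check_alu_op() the open-coded need_id logic is replaced by assign_scalar_id_before_mov(), and the 32-bit mov test now uses get_reg_width(). The resulting rule: the destination shares an ID with the source only when the move preserves the value exactly; a truncating 32-bit mov or a possibly sign-extending (s8/s16/s32) mov clears the ID on the copy. A rough sketch (illustrative only; the (s8) form assumes a BPF ISA v4 capable toolchain and the usual selftest headers):

SEC("socket")
__success
__naked void mov_id_rules(void)
{
	asm volatile ("						\
	call %[bpf_get_prandom_u32];				\
	r1 = r0;	/* 64-bit mov: exact copy, r0 and r1 linked */ \
	w2 = w0;	/* 32-bit mov of an unknown u64: truncates, */ \
			/* so the destination ID is cleared         */ \
	r0 &= 0xffff;						\
	w3 = w0;	/* get_reg_width(r0) is now <= 32: linked   */ \
	r4 = (s8)r0;	/* r0 may exceed the s8 range, so the       */ \
			/* sign-extended copy is not linked         */ \
	r0 = 0;							\
	exit;							\
"	:
	: __imm(bpf_get_prandom_u32)
	: __clobber_all);
}
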
@@ -16424,11 +16453,45 @@ static bool regsafe(struct bpf_verifier_env *env, struct bpf_reg_state *rold,
}
}

static bool is_stack_zero64(struct bpf_stack_state *stack)
{
u32 i;

for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
if (stack->slot_type[i] != STACK_ZERO)
return false;
return true;
}

static bool is_stack_unbound_slot64(struct bpf_verifier_env *env,
struct bpf_stack_state *stack)
{
u32 i;

for (i = 0; i < ARRAY_SIZE(stack->slot_type); ++i)
if (stack->slot_type[i] != STACK_ZERO &&
stack->slot_type[i] != STACK_MISC &&
(!env->allow_uninit_stack || stack->slot_type[i] != STACK_INVALID))
return false;
return true;
}

static bool is_spilled_unbound_scalar_reg64(struct bpf_stack_state *stack)
{
return is_spilled_scalar_reg64(stack) && __is_scalar_unbounded(&stack->spilled_ptr);
}

static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
struct bpf_func_state *cur, struct bpf_idmap *idmap, bool exact)
{
struct bpf_reg_state unbound_reg = {};
struct bpf_reg_state zero_reg = {};
int i, spi;

__mark_reg_unknown(env, &unbound_reg);
__mark_reg_const_zero(env, &zero_reg);
zero_reg.precise = true;

/* walk slots of the explored stack and ignore any additional
* slots in the current stack, since explored(safe) state
* didn't use them
@@ -16449,6 +16512,49 @@ static bool stacksafe(struct bpf_verifier_env *env, struct bpf_func_state *old,
continue;
}

/* load of stack value with all MISC and ZERO slots produces unbounded
* scalar value, call regsafe to ensure scalar ids are compared.
*/
if (is_spilled_unbound_scalar_reg64(&old->stack[spi]) &&
is_stack_unbound_slot64(env, &cur->stack[spi])) {
i += BPF_REG_SIZE - 1;
if (!regsafe(env, &old->stack[spi].spilled_ptr, &unbound_reg,
idmap, exact))
return false;
continue;
}

if (is_stack_unbound_slot64(env, &old->stack[spi]) &&
is_spilled_unbound_scalar_reg64(&cur->stack[spi])) {
i += BPF_REG_SIZE - 1;
if (!regsafe(env, &unbound_reg, &cur->stack[spi].spilled_ptr,
idmap, exact))
return false;
continue;
}

/* load of stack value with all ZERO slots produces scalar value 0,
* call regsafe to ensure scalar ids are compared and precision
* flags are taken into account.
*/
if (is_spilled_scalar_reg64(&old->stack[spi]) &&
is_stack_zero64(&cur->stack[spi])) {
if (!regsafe(env, &old->stack[spi].spilled_ptr, &zero_reg,
idmap, exact))
return false;
i += BPF_REG_SIZE - 1;
continue;
}

if (is_stack_zero64(&old->stack[spi]) &&
is_spilled_scalar_reg64(&cur->stack[spi])) {
if (!regsafe(env, &zero_reg, &cur->stack[spi].spilled_ptr,
idmap, exact))
return false;
i += BPF_REG_SIZE - 1;
continue;
}

if (old->stack[spi].slot_type[i % BPF_REG_SIZE] == STACK_INVALID)
continue;
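
stacksafe() used to demand byte-for-byte identical slot_type contents. It can now match a spilled scalar in one state against a slot that would read back as the same kind of value in the other: an all-STACK_ZERO slot acts like a spilled constant zero, and a slot made of MISC/ZERO bytes (or INVALID ones when allow_uninit_stack is set) acts like a spilled unbounded scalar. Delegating the comparison to regsafe() keeps scalar IDs and precision flags in play. A standalone sketch of what the two new slot predicates accept, with slot_type values modelled after the verifier's:

#include <stdbool.h>
#include <stdio.h>

#define BPF_REG_SIZE 8
enum { STACK_INVALID, STACK_SPILL, STACK_MISC, STACK_ZERO };

/* Stand-in for is_stack_zero64(). */
static bool all_zero(const unsigned char t[BPF_REG_SIZE])
{
	for (int i = 0; i < BPF_REG_SIZE; i++)
		if (t[i] != STACK_ZERO)
			return false;
	return true;
}

/* Stand-in for is_stack_unbound_slot64(). */
static bool unbound_slot(const unsigned char t[BPF_REG_SIZE], bool allow_uninit)
{
	for (int i = 0; i < BPF_REG_SIZE; i++)
		if (t[i] != STACK_ZERO && t[i] != STACK_MISC &&
		    (!allow_uninit || t[i] != STACK_INVALID))
			return false;
	return true;
}

int main(void)
{
	const unsigned char zeros[BPF_REG_SIZE] = {
		STACK_ZERO, STACK_ZERO, STACK_ZERO, STACK_ZERO,
		STACK_ZERO, STACK_ZERO, STACK_ZERO, STACK_ZERO,
	};
	const unsigned char mixed[BPF_REG_SIZE] = {
		STACK_MISC, STACK_MISC, STACK_ZERO, STACK_ZERO,
		STACK_MISC, STACK_ZERO, STACK_MISC, STACK_ZERO,
	};

	/* Reads back as constant 0: compared against zero_reg. */
	printf("zeros: zero64=%d unbound64=%d\n",
	       all_zero(zeros), unbound_slot(zeros, false));	/* 1 1 */
	/* Reads back as an unbounded scalar: compared against unbound_reg. */
	printf("mixed: zero64=%d unbound64=%d\n",
	       all_zero(mixed), unbound_slot(mixed, false));	/* 0 1 */
	return 0;
}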

@@ -17006,7 +17112,7 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
}
/* attempt to detect infinite loop to avoid unnecessary doomed work */
if (states_maybe_looping(&sl->state, cur) &&
states_equal(env, &sl->state, cur, false) &&
states_equal(env, &sl->state, cur, true) &&
!iter_active_depths_differ(&sl->state, cur) &&
sl->state.callback_unroll_depth == cur->callback_unroll_depth) {
verbose_linfo(env, insn_idx, "; ");
@@ -568,7 +568,7 @@ l0_%=: r0 = 0; \

SEC("tc")
__description("direct packet access: test23 (x += pkt_ptr, 4)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=2,off=0,r=0)")
__failure __msg("invalid access to packet, off=0 size=8, R5(id=3,off=0,r=0)")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void test23_x_pkt_ptr_4(void)
{
24 changes: 24 additions & 0 deletions tools/testing/selftests/bpf/progs/verifier_loops1.c
@@ -259,4 +259,28 @@ l0_%=: r2 += r1; \
" ::: __clobber_all);
}

SEC("xdp")
__success
__naked void not_an_inifinite_loop(void)
{
asm volatile (" \
call %[bpf_get_prandom_u32]; \
r0 &= 0xff; \
*(u64 *)(r10 - 8) = r0; \
r0 = 0; \
loop_%=: \
r0 = *(u64 *)(r10 - 8); \
if r0 > 10 goto exit_%=; \
r0 += 1; \
*(u64 *)(r10 - 8) = r0; \
r0 = 0; \
goto loop_%=; \
exit_%=: \
r0 = 0; \
exit; \
" :
: __imm(bpf_get_prandom_u32)
: __clobber_all);
}

char _license[] SEC("license") = "GPL";