
Commit aced132

liu-song-6 (Song Liu) authored and Alexei Starovoitov committed
bpf: Add range tracking for BPF_NEG
Add range tracking for instruction BPF_NEG. Without this logic, a trivial
program like the following will fail

    volatile bool found_value_b;

    SEC("lsm.s/socket_connect")
    int BPF_PROG(test_socket_connect)
    {
            if (!found_value_b)
                    return -1;
            return 0;
    }

with verifier log:

  "At program exit the register R0 has smin=0 smax=4294967295 should have
   been in [-4095, 0]".

This is because range information is lost in BPF_NEG:

  0: R1=ctx() R10=fp0
  ; if (!found_value_b) @ xxxx.c:24
  0: (18) r1 = 0xffa00000011e7048   ; R1_w=map_value(...)
  2: (71) r0 = *(u8 *)(r1 +0)       ; R0_w=scalar(smin32=0,smax=255)
  3: (a4) w0 ^= 1                   ; R0_w=scalar(smin32=0,smax=255)
  4: (84) w0 = -w0                  ; R0_w=scalar(range info lost)

Note that the log above is manually modified to highlight the relevant bits.

Fix this by maintaining proper range information with BPF_NEG, so that the
verifier will know:

  4: (84) w0 = -w0                  ; R0_w=scalar(smin32=-255,smax=0)

Also update the selftests based on the expected behavior.

Signed-off-by: Song Liu <song@kernel.org>
Acked-by: Eduard Zingerman <eddyz87@gmail.com>
Link: https://lore.kernel.org/r/20250625164025.3310203-2-song@kernel.org
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
1 parent: d69bafe
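To make the exit-range check quoted above concrete, here is a minimal userspace sketch (plain C; the helper name retval_range_ok is hypothetical, not verifier code) of why the program is rejected before the fix and accepted after it: an lsm.s program's return value must lie in [-4095, 0], which the tracked range [-255, 0] satisfies, while the range-info-lost [0, 4294967295] does not.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper mirroring the exit check quoted in the commit
 * message: an lsm.s program's return value must lie within [-4095, 0]. */
static bool retval_range_ok(int64_t smin, int64_t smax)
{
	return smin >= -4095 && smax <= 0;
}

int main(void)
{
	/* Without the fix: negation loses range info, so R0 is only known
	 * to be somewhere in [0, 4294967295] -- rejected. */
	assert(!retval_range_ok(0, 4294967295LL));

	/* With the fix: w0 in [0, 255] negated gives [-255, 0] -- accepted. */
	assert(retval_range_ok(-255, 0));
	return 0;
}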

5 files changed, 46 additions(+), 11 deletions(-)


include/linux/tnum.h

Lines changed: 2 additions & 0 deletions

@@ -40,6 +40,8 @@ struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
 struct tnum tnum_add(struct tnum a, struct tnum b);
 /* Subtract two tnums, return @a - @b */
 struct tnum tnum_sub(struct tnum a, struct tnum b);
+/* Neg of a tnum, return 0 - @a */
+struct tnum tnum_neg(struct tnum a);
 /* Bitwise-AND, return @a & @b */
 struct tnum tnum_and(struct tnum a, struct tnum b);
 /* Bitwise-OR, return @a | @b */

kernel/bpf/tnum.c

Lines changed: 5 additions & 0 deletions

@@ -83,6 +83,11 @@ struct tnum tnum_sub(struct tnum a, struct tnum b)
 	return TNUM(dv & ~mu, mu);
 }
 
+struct tnum tnum_neg(struct tnum a)
+{
+	return tnum_sub(TNUM(0, 0), a);
+}
+
 struct tnum tnum_and(struct tnum a, struct tnum b)
 {
 	u64 alpha, beta, v;
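The new helper leans entirely on the existing subtraction: over tnums, negation is literally 0 - a. As a rough, self-contained illustration, here is a userspace sketch of the idea; the tnum_sub() body below is written in the same spirit as the kernel's but is an assumption for demonstration, not a verbatim copy of kernel/bpf/tnum.c.

#include <assert.h>
#include <stdint.h>

/* A tnum tracks which bits of a value are known: "value" holds the known
 * bits, "mask" has a 1 for every unknown bit. */
struct tnum { uint64_t value; uint64_t mask; };
#define TNUM(v, m) ((struct tnum){ .value = (v), .mask = (m) })

/* Subtraction over tnums: propagate uncertainty through the borrow chain.
 * Illustrative body, assumed for this sketch. */
static struct tnum tnum_sub(struct tnum a, struct tnum b)
{
	uint64_t dv = a.value - b.value;
	uint64_t alpha = dv + a.mask;
	uint64_t beta = dv - b.mask;
	uint64_t chi = alpha ^ beta;
	uint64_t mu = chi | a.mask | b.mask;

	return TNUM(dv & ~mu, mu);
}

/* Negation is just 0 - a, exactly as in the patch. */
static struct tnum tnum_neg(struct tnum a)
{
	return tnum_sub(TNUM(0, 0), a);
}

int main(void)
{
	/* A fully known 5 negates to a fully known -5 (two's complement). */
	struct tnum five = TNUM(5, 0);
	struct tnum neg = tnum_neg(five);

	assert(neg.mask == 0 && neg.value == (uint64_t)-5);
	return 0;
}

A fully known constant stays fully known through negation; unknown bits in the input propagate through the borrow chain and come out unknown in the result.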

kernel/bpf/verifier.c

Lines changed: 16 additions & 1 deletion

@@ -15182,6 +15182,7 @@ static bool is_safe_to_compute_dst_reg_range(struct bpf_insn *insn,
 	switch (BPF_OP(insn->code)) {
 	case BPF_ADD:
 	case BPF_SUB:
+	case BPF_NEG:
 	case BPF_AND:
 	case BPF_XOR:
 	case BPF_OR:
@@ -15250,6 +15251,13 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
 		scalar_min_max_sub(dst_reg, &src_reg);
 		dst_reg->var_off = tnum_sub(dst_reg->var_off, src_reg.var_off);
 		break;
+	case BPF_NEG:
+		env->fake_reg[0] = *dst_reg;
+		__mark_reg_known(dst_reg, 0);
+		scalar32_min_max_sub(dst_reg, &env->fake_reg[0]);
+		scalar_min_max_sub(dst_reg, &env->fake_reg[0]);
+		dst_reg->var_off = tnum_neg(env->fake_reg[0].var_off);
+		break;
 	case BPF_MUL:
 		dst_reg->var_off = tnum_mul(dst_reg->var_off, src_reg.var_off);
 		scalar32_min_max_mul(dst_reg, &src_reg);
@@ -15473,7 +15481,14 @@ static int check_alu_op(struct bpf_verifier_env *env, struct bpf_insn *insn)
 	}
 
 	/* check dest operand */
-	err = check_reg_arg(env, insn->dst_reg, DST_OP);
+	if (opcode == BPF_NEG) {
+		err = check_reg_arg(env, insn->dst_reg, DST_OP_NO_MARK);
+		err = err ?: adjust_scalar_min_max_vals(env, insn,
+							&regs[insn->dst_reg],
+							regs[insn->dst_reg]);
+	} else {
+		err = check_reg_arg(env, insn->dst_reg, DST_OP);
+	}
 	if (err)
 		return err;
 
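The adjust_scalar_min_max_vals() hunk above avoids writing new bounds logic: it stashes the old destination state in a scratch register, marks the destination as the known constant 0, and reuses the existing subtraction trackers to compute 0 - old. Below is a simplified sketch of that flow with made-up types and only 64-bit signed bounds; the real verifier also tracks u64/s32/u32 bounds and the tnum, and handles overflow corner cases this sketch ignores.

#include <assert.h>
#include <stdint.h>

/* Hypothetical, pared-down register state: only 64-bit signed bounds. */
struct reg_state { int64_t smin; int64_t smax; };

/* Simplified stand-in for scalar_min_max_sub(dst, src): dst := dst - src.
 * Ignores the overflow handling the real verifier needs. */
static void range_sub(struct reg_state *dst, const struct reg_state *src)
{
	int64_t smin = dst->smin - src->smax;
	int64_t smax = dst->smax - src->smin;

	dst->smin = smin;
	dst->smax = smax;
}

/* BPF_NEG modeled the same way as the patch: save dst, set dst to the
 * known constant 0, then compute 0 - saved. */
static void range_neg(struct reg_state *dst)
{
	struct reg_state saved = *dst;	/* like env->fake_reg[0] */

	dst->smin = 0;			/* like __mark_reg_known(dst, 0) */
	dst->smax = 0;
	range_sub(dst, &saved);
}

int main(void)
{
	struct reg_state r0 = { .smin = 0, .smax = 255 };

	range_neg(&r0);
	assert(r0.smin == -255 && r0.smax == 0);
	return 0;
}

Reusing the subtraction path keeps BPF_NEG consistent with BPF_SUB and avoids duplicating its corner-case handling; the check_alu_op() hunk then only has to stop marking the destination unknown (DST_OP_NO_MARK) and call into the same routine.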

tools/testing/selftests/bpf/progs/verifier_bounds_deduction.c

Lines changed: 7 additions & 4 deletions

@@ -159,13 +159,16 @@ __failure_unpriv
 __naked void deducing_bounds_from_const_10(void)
 {
 	asm volatile ("			\
+	r6 = r1;			\
 	r0 = 0;				\
 	if r0 s<= 0 goto l0_%=;		\
-l0_%=:	/* Marks reg as unknown. */	\
-	r0 = -r0;			\
-	r0 -= r1;			\
+l0_%=:	/* Marks r0 as unknown. */	\
+	call %[bpf_get_prandom_u32];	\
+	r0 -= r6;			\
 	exit;				\
-"	::: __clobber_all);
+"	:
+	: __imm(bpf_get_prandom_u32)
+	: __clobber_all);
 }
 
 char _license[] SEC("license") = "GPL";

tools/testing/selftests/bpf/progs/verifier_value_ptr_arith.c

Lines changed: 16 additions & 6 deletions

@@ -231,6 +231,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_lt(void)
 {
 	asm volatile ("				\
+	r8 = r1;				\
+	call %[bpf_get_prandom_u32];		\
+	r9 = r0;				\
+	r1 = r8;				\
 	r0 = *(u32*)(r1 + %[__sk_buff_len]);	\
 	r1 = 0;					\
 	*(u64*)(r10 - 8) = r1;			\
@@ -245,11 +249,11 @@ l1_%=: call %[bpf_map_lookup_elem];		\
 	r4 = *(u8*)(r0 + 0);			\
 	if r4 == 1 goto l3_%=;			\
 	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x3;				\
 	goto l4_%=;				\
 l3_%=:	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x7;				\
 l4_%=:	r1 += r0;				\
 	r0 = *(u8*)(r1 + 0);			\
@@ -259,7 +263,8 @@ l2_%=: r0 = 1;				\
 	: __imm(bpf_map_lookup_elem),
 	  __imm_addr(map_array_48b),
 	  __imm_addr(map_hash_16b),
-	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+	  __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
 
@@ -271,6 +276,10 @@ __retval(1)
 __naked void ptr_unknown_vs_unknown_gt(void)
 {
 	asm volatile ("				\
+	r8 = r1;				\
+	call %[bpf_get_prandom_u32];		\
+	r9 = r0;				\
+	r1 = r8;				\
 	r0 = *(u32*)(r1 + %[__sk_buff_len]);	\
 	r1 = 0;					\
 	*(u64*)(r10 - 8) = r1;			\
@@ -285,11 +294,11 @@ l1_%=: call %[bpf_map_lookup_elem];		\
 	r4 = *(u8*)(r0 + 0);			\
 	if r4 == 1 goto l3_%=;			\
 	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x7;				\
 	goto l4_%=;				\
 l3_%=:	r1 = 6;					\
-	r1 = -r1;				\
+	r1 = r9;				\
 	r1 &= 0x3;				\
 l4_%=:	r1 += r0;				\
 	r0 = *(u8*)(r1 + 0);			\
@@ -299,7 +308,8 @@ l2_%=: r0 = 1;				\
 	: __imm(bpf_map_lookup_elem),
 	  __imm_addr(map_array_48b),
 	  __imm_addr(map_hash_16b),
-	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len))
+	  __imm_const(__sk_buff_len, offsetof(struct __sk_buff, len)),
+	  __imm(bpf_get_prandom_u32)
 	: __clobber_all);
 }
