@@ -451,10 +451,24 @@ static bool reg_type_not_null(enum bpf_reg_type type)
 		type == PTR_TO_SOCK_COMMON;
 }
 
+static struct btf_record *reg_btf_record(const struct bpf_reg_state *reg)
+{
+	struct btf_record *rec = NULL;
+	struct btf_struct_meta *meta;
+
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		rec = reg->map_ptr->record;
+	} else if (reg->type == (PTR_TO_BTF_ID | MEM_ALLOC)) {
+		meta = btf_find_struct_meta(reg->btf, reg->btf_id);
+		if (meta)
+			rec = meta->record;
+	}
+	return rec;
+}
+
 static bool reg_may_point_to_spin_lock(const struct bpf_reg_state *reg)
 {
-	return reg->type == PTR_TO_MAP_VALUE &&
-	       btf_record_has_field(reg->map_ptr->record, BPF_SPIN_LOCK);
+	return btf_record_has_field(reg_btf_record(reg), BPF_SPIN_LOCK);
 }
 
 static bool type_is_rdonly_mem(u32 type)
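The rewritten reg_may_point_to_spin_lock() passes whatever reg_btf_record() returns, including NULL, straight into btf_record_has_field(), so the simplification only works because that helper tolerates a missing record. A minimal sketch of the helper as assumed here (the authoritative definition lives in include/linux/bpf.h and may differ in detail):

```c
/* Sketch, assuming the btf_record layout used elsewhere in this series:
 * a register with no BTF-described special fields yields a NULL (or
 * error-encoded) record, which simply means "no such field".
 */
static inline bool btf_record_has_field(const struct btf_record *rec,
					enum btf_field_type type)
{
	if (IS_ERR_OR_NULL(rec))
		return false;
	return rec->field_mask & type;
}
```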
@@ -5564,52 +5578,65 @@ int check_kfunc_mem_size_reg(struct bpf_verifier_env *env, struct bpf_reg_state
 }
 
 /* Implementation details:
- * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL
+ * bpf_map_lookup returns PTR_TO_MAP_VALUE_OR_NULL.
+ * bpf_obj_new returns PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL.
  * Two bpf_map_lookups (even with the same key) will have different reg->id.
- * For traditional PTR_TO_MAP_VALUE the verifier clears reg->id after
- * value_or_null->value transition, since the verifier only cares about
- * the range of access to valid map value pointer and doesn't care about actual
- * address of the map element.
+ * Two separate bpf_obj_new will also have different reg->id.
+ * For traditional PTR_TO_MAP_VALUE or PTR_TO_BTF_ID | MEM_ALLOC, the verifier
+ * clears reg->id after value_or_null->value transition, since the verifier only
+ * cares about the range of access to valid map value pointer and doesn't care
+ * about actual address of the map element.
  * For maps with 'struct bpf_spin_lock' inside map value the verifier keeps
  * reg->id > 0 after value_or_null->value transition. By doing so
  * two bpf_map_lookups will be considered two different pointers that
- * point to different bpf_spin_locks.
+ * point to different bpf_spin_locks. Likewise for pointers to allocated objects
+ * returned from bpf_obj_new.
  * The verifier allows taking only one bpf_spin_lock at a time to avoid
  * dead-locks.
  * Since only one bpf_spin_lock is allowed the checks are simpler than
  * reg_is_refcounted() logic. The verifier needs to remember only
  * one spin_lock instead of array of acquired_refs.
- * cur_state->active_spin_lock remembers which map value element got locked
- * and clears it after bpf_spin_unlock.
+ * cur_state->active_spin_lock remembers which map value element or allocated
+ * object got locked and clears it after bpf_spin_unlock.
  */
 static int process_spin_lock(struct bpf_verifier_env *env, int regno,
			      bool is_lock)
 {
 	struct bpf_reg_state *regs = cur_regs(env), *reg = &regs[regno];
 	struct bpf_verifier_state *cur = env->cur_state;
 	bool is_const = tnum_is_const(reg->var_off);
-	struct bpf_map *map = reg->map_ptr;
 	u64 val = reg->var_off.value;
+	struct bpf_map *map = NULL;
+	struct btf *btf = NULL;
+	struct btf_record *rec;
 
 	if (!is_const) {
 		verbose(env,
 			"R%d doesn't have constant offset. bpf_spin_lock has to be at the constant offset\n",
 			regno);
 		return -EINVAL;
 	}
-	if (!map->btf) {
-		verbose(env,
-			"map '%s' has to have BTF in order to use bpf_spin_lock\n",
-			map->name);
-		return -EINVAL;
+	if (reg->type == PTR_TO_MAP_VALUE) {
+		map = reg->map_ptr;
+		if (!map->btf) {
+			verbose(env,
+				"map '%s' has to have BTF in order to use bpf_spin_lock\n",
+				map->name);
+			return -EINVAL;
+		}
+	} else {
+		btf = reg->btf;
 	}
-	if (!btf_record_has_field(map->record, BPF_SPIN_LOCK)) {
-		verbose(env, "map '%s' has no valid bpf_spin_lock\n", map->name);
+
+	rec = reg_btf_record(reg);
+	if (!btf_record_has_field(rec, BPF_SPIN_LOCK)) {
+		verbose(env, "%s '%s' has no valid bpf_spin_lock\n", map ? "map" : "local",
+			map ? map->name : "kptr");
 		return -EINVAL;
 	}
-	if (map->record->spin_lock_off != val + reg->off) {
+	if (rec->spin_lock_off != val + reg->off) {
 		verbose(env, "off %lld doesn't point to 'struct bpf_spin_lock' that is at %d\n",
-			val + reg->off, map->record->spin_lock_off);
+			val + reg->off, rec->spin_lock_off);
 		return -EINVAL;
 	}
 	if (is_lock) {
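For context, this is the kind of program the relaxed process_spin_lock() is meant to accept: a bpf_spin_lock embedded in an object returned by bpf_obj_new(). The snippet below is an illustration only, not part of this patch; bpf_obj_new()/bpf_obj_drop() and the struct/program names are assumed, roughly in the style of the selftests' bpf_experimental.h that the rest of the series targets.

```c
/* Illustration only: locking a bpf_spin_lock inside an allocated object.
 * Assumes bpf_obj_new()/bpf_obj_drop() from the selftests' bpf_experimental.h.
 */
#include <vmlinux.h>
#include <bpf/bpf_helpers.h>
#include "bpf_experimental.h"

struct node_data {
	struct bpf_spin_lock lock;	/* rec->spin_lock_off points here */
	int value;
};

SEC("tc")
int lock_local_kptr(void *ctx)
{
	struct node_data *n;

	/* PTR_TO_BTF_ID | MEM_ALLOC | PTR_MAYBE_NULL until the NULL check */
	n = bpf_obj_new(typeof(*n));
	if (!n)
		return 0;

	bpf_spin_lock(&n->lock);	/* R1: PTR_TO_BTF_ID | MEM_ALLOC at the lock's offset */
	n->value = 42;
	bpf_spin_unlock(&n->lock);

	bpf_obj_drop(n);
	return 0;
}

char _license[] SEC("license") = "GPL";
```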
@@ -5815,13 +5842,19 @@ static const struct bpf_reg_types int_ptr_types = {
 	},
 };
 
+static const struct bpf_reg_types spin_lock_types = {
+	.types = {
+		PTR_TO_MAP_VALUE,
+		PTR_TO_BTF_ID | MEM_ALLOC,
+	}
+};
+
 static const struct bpf_reg_types fullsock_types = { .types = { PTR_TO_SOCKET } };
 static const struct bpf_reg_types scalar_types = { .types = { SCALAR_VALUE } };
 static const struct bpf_reg_types context_types = { .types = { PTR_TO_CTX } };
 static const struct bpf_reg_types ringbuf_mem_types = { .types = { PTR_TO_MEM | MEM_RINGBUF } };
 static const struct bpf_reg_types const_map_ptr_types = { .types = { CONST_PTR_TO_MAP } };
 static const struct bpf_reg_types btf_ptr_types = { .types = { PTR_TO_BTF_ID } };
-static const struct bpf_reg_types spin_lock_types = { .types = { PTR_TO_MAP_VALUE } };
 static const struct bpf_reg_types percpu_btf_ptr_types = { .types = { PTR_TO_BTF_ID | MEM_PERCPU } };
 static const struct bpf_reg_types func_ptr_types = { .types = { PTR_TO_FUNC } };
 static const struct bpf_reg_types stack_ptr_types = { .types = { PTR_TO_STACK } };
@@ -5946,6 +5979,11 @@ static int check_reg_type(struct bpf_verifier_env *env, u32 regno,
 				return -EACCES;
 			}
 		}
+	} else if (type_is_alloc(reg->type)) {
+		if (meta->func_id != BPF_FUNC_spin_lock && meta->func_id != BPF_FUNC_spin_unlock) {
+			verbose(env, "verifier internal error: unimplemented handling of MEM_ALLOC\n");
+			return -EFAULT;
+		}
 	}
 
 	return 0;
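The new branch keys off type_is_alloc(); a sketch of that predicate under the assumption that it simply tests the MEM_ALLOC flag carried by bpf_obj_new() return values (its actual location and exact form in the tree may differ):

```c
/* Sketch: assumed shape of type_is_alloc() - true for any register type
 * tagged with the MEM_ALLOC flag, i.e. a pointer to an allocated object.
 */
static inline bool type_is_alloc(u32 type)
{
	return type & MEM_ALLOC;
}
```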
@@ -6062,7 +6100,8 @@ static int check_func_arg(struct bpf_verifier_env *env, u32 arg,
 		goto skip_type_check;
 
 	/* arg_btf_id and arg_size are in a union. */
-	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID)
+	if (base_type(arg_type) == ARG_PTR_TO_BTF_ID ||
+	    base_type(arg_type) == ARG_PTR_TO_SPIN_LOCK)
 		arg_btf_id = fn->arg_btf_id[arg];
 
 	err = check_reg_type(env, regno, arg_type, arg_btf_id, meta);
@@ -6680,9 +6719,10 @@ static bool check_btf_id_ok(const struct bpf_func_proto *fn)
 	int i;
 
 	for (i = 0; i < ARRAY_SIZE(fn->arg_type); i++) {
-		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID && !fn->arg_btf_id[i])
-			return false;
-
+		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_BTF_ID)
+			return !!fn->arg_btf_id[i];
+		if (base_type(fn->arg_type[i]) == ARG_PTR_TO_SPIN_LOCK)
+			return fn->arg_btf_id[i] == BPF_PTR_POISON;
 		if (base_type(fn->arg_type[i]) != ARG_PTR_TO_BTF_ID && fn->arg_btf_id[i] &&
 		    /* arg_btf_id and arg_size are in a union. */
 		    (base_type(fn->arg_type[i]) != ARG_PTR_TO_MEM ||
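Requiring arg_btf_id[i] == BPF_PTR_POISON for ARG_PTR_TO_SPIN_LOCK means a helper proto that takes a spin lock is expected to carry a poisoned placeholder rather than a real BTF ID; the lock field itself is validated against the register's own btf_record in process_spin_lock(). A hedged sketch of how such a proto would be declared under this rule (the real definitions live in kernel/bpf/helpers.c and are not part of this hunk):

```c
/* Sketch of a proto satisfying the new check_btf_id_ok() rule: the
 * ARG_PTR_TO_SPIN_LOCK argument carries BPF_PTR_POISON as a placeholder
 * BTF ID that must never be dereferenced as a real type.
 */
const struct bpf_func_proto bpf_spin_lock_proto = {
	.func		= bpf_spin_lock,
	.gpl_only	= false,
	.ret_type	= RET_VOID,
	.arg1_type	= ARG_PTR_TO_SPIN_LOCK,
	.arg1_btf_id	= BPF_PTR_POISON,
};
```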