@@ -922,9 +922,10 @@ __bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
  * @cpus_allowed: cpumask of allowed CPUs
  * @flags: %SCX_PICK_IDLE* flags
  *
- * Can only be called from ops.select_cpu() or ops.enqueue() if the
- * built-in CPU selection is enabled: ops.update_idle() is missing or
- * %SCX_OPS_KEEP_BUILTIN_IDLE is set.
+ * Can be called from ops.select_cpu(), ops.enqueue(), or from an unlocked
+ * context such as a BPF test_run() call, as long as built-in CPU selection
+ * is enabled: ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE
+ * is set.
  *
  * @p, @prev_cpu and @wake_flags match ops.select_cpu().
  *
@@ -936,6 +937,7 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64
                                        const struct cpumask *cpus_allowed, u64 flags)
 {
         struct rq *rq;
+        struct rq_flags rf;
         s32 cpu;
 
         if (!kf_cpu_valid(prev_cpu, NULL))
@@ -944,15 +946,26 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64
         if (!check_builtin_idle_enabled())
                 return -EBUSY;
 
-        if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
-                return -EPERM;
+        /*
+         * If called from an unlocked context, acquire the task's rq lock,
+         * so that we can safely access p->cpus_ptr and p->nr_cpus_allowed.
+         *
+         * Otherwise, allow using this kfunc only from ops.select_cpu()
+         * and ops.enqueue().
+         */
+        if (scx_kf_allowed_if_unlocked()) {
+                rq = task_rq_lock(p, &rf);
+        } else {
+                if (!scx_kf_allowed(SCX_KF_SELECT_CPU | SCX_KF_ENQUEUE))
+                        return -EPERM;
+                rq = scx_locked_rq();
+        }
 
         /*
          * Validate locking correctness to access p->cpus_ptr and
          * p->nr_cpus_allowed: if we're holding an rq lock, we're safe;
          * otherwise, assert that p->pi_lock is held.
          */
-        rq = scx_locked_rq();
         if (!rq)
                 lockdep_assert_held(&p->pi_lock);
 
@@ -966,13 +979,17 @@ __bpf_kfunc s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64
         if (p->nr_cpus_allowed == 1) {
                 if (cpumask_test_cpu(prev_cpu, cpus_allowed) &&
                     scx_idle_test_and_clear_cpu(prev_cpu))
-                        return prev_cpu;
-                return -EBUSY;
+                        cpu = prev_cpu;
+                else
+                        cpu = -EBUSY;
+        } else {
+                cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, cpus_allowed, flags);
         }
-        cpu = scx_select_cpu_dfl(p, prev_cpu, wake_flags, cpus_allowed, flags);
 #else
         cpu = -EBUSY;
 #endif
+        if (scx_kf_allowed_if_unlocked())
+                task_rq_unlock(rq, p, &rf);
 
         return cpu;
 }
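
For context, a minimal sketch of the now-permitted ops.select_cpu() caller is shown below. The kfunc prototype matches this patch; the op name, the BPF_STRUCT_OPS macro, and the SCX_PICK_IDLE_CORE / scx_bpf_dsq_insert() usage are common sched_ext scheduler conventions assumed for illustration, not part of this change.

s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
                           const struct cpumask *cpus_allowed, u64 flags) __ksym;

s32 BPF_STRUCT_OPS(example_select_cpu, struct task_struct *p,
                   s32 prev_cpu, u64 wake_flags)
{
        s32 cpu;

        /* Prefer a fully idle core among the task's allowed CPUs. */
        cpu = scx_bpf_select_cpu_and(p, prev_cpu, wake_flags,
                                     p->cpus_ptr, SCX_PICK_IDLE_CORE);
        if (cpu >= 0) {
                /* Idle CPU found: dispatch directly to its local DSQ. */
                scx_bpf_dsq_insert(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);
                return cpu;
        }

        /* No idle CPU matched: keep the task on its previous CPU. */
        return prev_cpu;
}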
@@ -1276,6 +1293,7 @@ BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu_node, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu_node, KF_RCU)
 BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
+BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
 BTF_KFUNCS_END(scx_kfunc_ids_idle)
 
 static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
@@ -1285,7 +1303,6 @@ static const struct btf_kfunc_id_set scx_kfunc_set_idle = {
 
 BTF_KFUNCS_START(scx_kfunc_ids_select_cpu)
 BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
-BTF_ID_FLAGS(func, scx_bpf_select_cpu_and, KF_RCU)
 BTF_KFUNCS_END(scx_kfunc_ids_select_cpu)
 
 static const struct btf_kfunc_id_set scx_kfunc_set_select_cpu = {
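
The BTF ID move above is what makes the unlocked path reachable: scx_kfunc_ids_select_cpu is only exposed to the select_cpu operation, while the idle kfunc set is also registered for other program types (at this point in the series, tracing and syscall programs). A hypothetical unlocked caller, exercising the task_rq_lock() branch via BPF_PROG_TEST_RUN, might look like the sketch below; the pick_args layout, the program name, and the zero wake_flags/idle flags are invented for illustration, while bpf_task_from_pid()/bpf_task_release() are existing task kfuncs.

struct task_struct *bpf_task_from_pid(s32 pid) __ksym;
void bpf_task_release(struct task_struct *p) __ksym;
s32 scx_bpf_select_cpu_and(struct task_struct *p, s32 prev_cpu, u64 wake_flags,
                           const struct cpumask *cpus_allowed, u64 flags) __ksym;

/* Hypothetical in/out block passed as ctx via bpf_prog_test_run_opts(). */
struct pick_args {
        s32 pid;
        s32 prev_cpu;
        s32 cpu;
};

SEC("syscall")
int pick_idle_cpu(struct pick_args *args)
{
        struct task_struct *p;

        p = bpf_task_from_pid(args->pid);
        if (!p)
                return -1;

        /*
         * No rq lock is held here; the kfunc detects the unlocked context
         * and takes task_rq_lock() itself before touching p->cpus_ptr.
         */
        args->cpu = scx_bpf_select_cpu_and(p, args->prev_cpu, 0,
                                           p->cpus_ptr, 0);

        bpf_task_release(p);
        return 0;
}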