
Merge pull request #174 from sched-ext/htejun/misc
scx: Cosmetic changes to kfuncs
htejun committed Apr 5, 2024
2 parents e74f696 + 5e32ef4 commit 779538f
Showing 1 changed file with 113 additions and 87 deletions.
kernel/sched/ext.c
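Editor's note: the theme of this diff is that each group of kfunc definitions is now bracketed by __bpf_kfunc_start_defs()/__bpf_kfunc_end_defs() immediately around the functions, with the matching BTF ID set and id-set struct following the closing bracket. A minimal sketch of the resulting pattern (the kfunc and set names here are hypothetical):

__bpf_kfunc_start_defs();

__bpf_kfunc void scx_bpf_example(void)		/* hypothetical kfunc */
{
	/* callable from BPF once the id set below is registered */
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_example)
BTF_ID_FLAGS(func, scx_bpf_example)
BTF_KFUNCS_END(scx_kfunc_ids_example)

static const struct btf_kfunc_id_set scx_kfunc_set_example = {
	.owner	= THIS_MODULE,
	.set	= &scx_kfunc_ids_example,
};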
@@ -2141,21 +2141,6 @@ static s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
return cpu;
}

__bpf_kfunc_start_defs();

__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
u64 wake_flags, bool *found)
{
if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
*found = false;
return prev_cpu;
}

return scx_select_cpu_dfl(p, prev_cpu, wake_flags, found);
}

__bpf_kfunc_end_defs();

static int select_task_rq_scx(struct task_struct *p, int prev_cpu, int wake_flags)
{
/*
@@ -4535,6 +4520,8 @@ __bpf_kfunc s32 scx_bpf_create_dsq(u64 dsq_id, s32 node)
return PTR_ERR_OR_ZERO(create_dsq(dsq_id, node));
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_sleepable)
BTF_ID_FLAGS(func, scx_bpf_create_dsq, KF_SLEEPABLE)
BTF_KFUNCS_END(scx_kfunc_ids_sleepable)
@@ -4544,8 +4531,6 @@ static const struct btf_kfunc_id_set scx_kfunc_set_sleepable = {
.set = &scx_kfunc_ids_sleepable,
};

__bpf_kfunc_end_defs();

static bool scx_dispatch_preamble(struct task_struct *p, u64 enq_flags)
{
if (!scx_kf_allowed(SCX_KF_ENQUEUE | SCX_KF_DISPATCH))
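Editor's note: an id set like scx_kfunc_set_sleepable above only takes effect once registered. A sketch of the registration as it would appear in scx_init(); the call site is outside this diff, so treat the exact shape as assumed:

static int __init scx_init(void)
{
	int ret;

	/* expose the sleepable kfuncs to struct_ops BPF schedulers */
	ret = register_btf_kfunc_id_set(BPF_PROG_TYPE_STRUCT_OPS,
					&scx_kfunc_set_sleepable);
	if (ret) {
		pr_err("sched_ext: failed to register sleepable kfuncs (%d)\n", ret);
		return ret;
	}

	return 0;
}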
@@ -4677,6 +4662,8 @@ __bpf_kfunc void scx_bpf_dispatch_vtime(struct task_struct *p, u64 dsq_id,
scx_dispatch_commit(p, dsq_id, enq_flags | SCX_ENQ_DSQ_PRIQ);
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_enqueue_dispatch)
BTF_ID_FLAGS(func, scx_bpf_dispatch, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_dispatch_vtime, KF_RCU)
@@ -4687,6 +4674,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_enqueue_dispatch = {
.set = &scx_kfunc_ids_enqueue_dispatch,
};
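Editor's note: the KF_RCU-tagged dispatch kfuncs above are typically invoked from a scheduler's ops.enqueue(). A minimal sketch in the style of the scx_simple example scheduler; SHARED_DSQ is an assumed DSQ id:

void BPF_STRUCT_OPS(simple_enqueue, struct task_struct *p, u64 enq_flags)
{
	/* queue every task on one shared DSQ with the default slice */
	scx_bpf_dispatch(p, SHARED_DSQ, SCX_SLICE_DFL, enq_flags);
}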

__bpf_kfunc_start_defs();

/**
* scx_bpf_dispatch_nr_slots - Return the number of remaining dispatch slots
*
@@ -4764,6 +4753,8 @@ __bpf_kfunc bool scx_bpf_consume(u64 dsq_id)
}
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_dispatch)
BTF_ID_FLAGS(func, scx_bpf_dispatch_nr_slots)
BTF_ID_FLAGS(func, scx_bpf_dispatch_cancel)
@@ -4775,6 +4766,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_dispatch = {
.set = &scx_kfunc_ids_dispatch,
};
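Editor's note: scx_bpf_consume() above is the dispatch-path counterpart. A sketch of an ops.dispatch() callback refilling the local DSQ from a shared queue (names assumed as before):

void BPF_STRUCT_OPS(simple_dispatch, s32 cpu, struct task_struct *prev)
{
	/* move one task from the shared DSQ onto this CPU's local DSQ */
	scx_bpf_consume(SHARED_DSQ);
}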

__bpf_kfunc_start_defs();

/**
* scx_bpf_reenqueue_local - Re-enqueue tasks on a local DSQ
*
@@ -4817,6 +4810,8 @@ __bpf_kfunc u32 scx_bpf_reenqueue_local(void)
return nr_enqueued;
}

__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_cpu_release)
BTF_ID_FLAGS(func, scx_bpf_reenqueue_local)
BTF_KFUNCS_END(scx_kfunc_ids_cpu_release)
@@ -4826,6 +4821,8 @@ static const struct btf_kfunc_id_set scx_kfunc_set_cpu_release = {
.set = &scx_kfunc_ids_cpu_release,
};
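Editor's note: scx_bpf_reenqueue_local() exists for ops.cpu_release(). A sketch of such a callback, modeled loosely on the scx example schedulers, so treat the specifics as assumed:

void BPF_STRUCT_OPS(sched_cpu_release, s32 cpu, struct scx_cpu_release_args *args)
{
	/*
	 * A higher-priority sched class is taking the CPU. Push tasks
	 * sitting on its local DSQ back through ops.enqueue() so they
	 * can be re-placed on CPUs still owned by the BPF scheduler.
	 */
	scx_bpf_reenqueue_local();
}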

__bpf_kfunc_start_defs();

/**
* scx_bpf_kick_cpu - Trigger reschedule on a CPU
* @cpu: cpu to kick
@@ -5006,63 +5003,60 @@ __bpf_kfunc s32 scx_bpf_pick_any_cpu(const struct cpumask *cpus_allowed,
}

/**
* scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
* per-CPU cpumask.
* scx_bpf_destroy_dsq - Destroy a custom DSQ
* @dsq_id: DSQ to destroy
*
* Returns NULL if idle tracking is not enabled, or running on a UP kernel.
* Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
* scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
* empty and no further tasks are dispatched to it. Ignored if called on a DSQ
which doesn't exist. Can be called from any online scx_ops operation.
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
{
if (!static_branch_likely(&scx_builtin_idle_enabled)) {
scx_ops_error("built-in idle tracking is disabled");
return cpu_none_mask;
}

#ifdef CONFIG_SMP
return idle_masks.cpu;
#else
return cpu_none_mask;
#endif
destroy_dsq(dsq_id);
}
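Editor's note: scx_bpf_destroy_dsq(), relocated here, pairs with scx_bpf_create_dsq() from the sleepable set. A sketch of a scheduler creating its DSQ at init and tearing it down explicitly at exit; the DSQ id and callback names are assumed:

s32 BPF_STRUCT_OPS_SLEEPABLE(sched_init)
{
	/* -1 node: no NUMA preference for the DSQ's backing memory */
	return scx_bpf_create_dsq(SHARED_DSQ, -1);
}

void BPF_STRUCT_OPS(sched_exit, struct scx_exit_info *ei)
{
	scx_bpf_destroy_dsq(SHARED_DSQ);
}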

/**
* scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
* per-physical-core cpumask. Can be used to determine if an entire physical
* core is free.
* scx_bpf_select_cpu_dfl - The default implementation of ops.select_cpu()
* @p: task_struct to select a CPU for
* @prev_cpu: CPU @p was on previously
* @wake_flags: %SCX_WAKE_* flags
* @is_idle: out parameter indicating whether the returned CPU is idle
*
* Returns NULL if idle tracking is not enabled, or running on a UP kernel.
* Can only be called from ops.select_cpu() if the built-in CPU selection is
* enabled - ops.update_idle() is missing or %SCX_OPS_KEEP_BUILTIN_IDLE is set.
* @p, @prev_cpu and @wake_flags match ops.select_cpu().
*
* Returns the picked CPU with *@is_idle indicating whether the picked CPU is
* currently idle and thus a good candidate for direct dispatching.
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
__bpf_kfunc s32 scx_bpf_select_cpu_dfl(struct task_struct *p, s32 prev_cpu,
u64 wake_flags, bool *is_idle)
{
if (!static_branch_likely(&scx_builtin_idle_enabled)) {
scx_ops_error("built-in idle tracking is disabled");
return cpu_none_mask;
if (!scx_kf_allowed(SCX_KF_SELECT_CPU)) {
*is_idle = false;
return prev_cpu;
}

#ifdef CONFIG_SMP
if (sched_smt_active())
return idle_masks.smt;
else
return idle_masks.cpu;
#else
return cpu_none_mask;
#endif
return scx_select_cpu_dfl(p, prev_cpu, wake_flags, is_idle);
}

/**
* scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
* either the percpu, or SMT idle-tracking cpumask.
*/
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
/*
* Empty function body because we aren't actually acquiring or
* releasing a reference to a global idle cpumask, which is read-only
* in the caller and is never released. The acquire / release semantics
* here are just used to make the cpumask a trusted pointer in the
* caller.
*/
}
__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_ops_only)
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_ops_only)

static const struct btf_kfunc_id_set scx_kfunc_set_ops_only = {
.owner = THIS_MODULE,
.set = &scx_kfunc_ids_ops_only,
};
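Editor's note: the renamed out parameter (@found to @is_idle) makes the intended call pattern from ops.select_cpu() clearer. A minimal sketch in the style of scx_simple:

s32 BPF_STRUCT_OPS(simple_select_cpu, struct task_struct *p, s32 prev_cpu,
		   u64 wake_flags)
{
	bool is_idle = false;
	s32 cpu;

	cpu = scx_bpf_select_cpu_dfl(p, prev_cpu, wake_flags, &is_idle);
	if (is_idle)
		/* idle CPU found: dispatch directly, skipping ops.enqueue() */
		scx_bpf_dispatch(p, SCX_DSQ_LOCAL, SCX_SLICE_DFL, 0);

	return cpu;
}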

struct scx_bpf_error_bstr_bufs {
u64 data[MAX_BPRINTF_VARARGS];
@@ -5118,6 +5112,8 @@ static void bpf_exit_bstr_common(enum scx_exit_kind kind, s64 exit_code,

}

__bpf_kfunc_start_defs();

/**
* scx_bpf_exit_bstr - Gracefully exit the BPF scheduler.
* @exit_code: Exit value to pass to user space via struct scx_exit_info.
@@ -5153,17 +5149,62 @@ __bpf_kfunc void scx_bpf_error_bstr(char *fmt, unsigned long long *data,
}
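Editor's note: BPF scheduler code does not usually call scx_bpf_error_bstr()/scx_bpf_exit_bstr() directly; the scx headers wrap them in printf-style macros. A sketch of typical use, with the check itself purely illustrative:

static void check_queue_depth(u32 nr_queued)
{
	/* abort the BPF scheduler with a formatted message on a bad state */
	if (nr_queued > 8192)
		scx_bpf_error("runaway queue depth %u", nr_queued);
}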

/**
* scx_bpf_destroy_dsq - Destroy a custom DSQ
* @dsq_id: DSQ to destroy
* scx_bpf_get_idle_cpumask - Get a referenced kptr to the idle-tracking
* per-CPU cpumask.
*
* Destroy the custom DSQ identified by @dsq_id. Only DSQs created with
* scx_bpf_create_dsq() can be destroyed. The caller must ensure that the DSQ is
* empty and no further tasks are dispatched to it. Ignored if called on a DSQ
which doesn't exist. Can be called from any online scx_ops operation.
* Returns NULL if idle tracking is not enabled, or running on a UP kernel.
*/
__bpf_kfunc void scx_bpf_destroy_dsq(u64 dsq_id)
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_cpumask(void)
{
destroy_dsq(dsq_id);
if (!static_branch_likely(&scx_builtin_idle_enabled)) {
scx_ops_error("built-in idle tracking is disabled");
return cpu_none_mask;
}

#ifdef CONFIG_SMP
return idle_masks.cpu;
#else
return cpu_none_mask;
#endif
}

/**
* scx_bpf_get_idle_smtmask - Get a referenced kptr to the idle-tracking,
* per-physical-core cpumask. Can be used to determine if an entire physical
* core is free.
*
* Returns NULL if idle tracking is not enabled, or running on a UP kernel.
*/
__bpf_kfunc const struct cpumask *scx_bpf_get_idle_smtmask(void)
{
if (!static_branch_likely(&scx_builtin_idle_enabled)) {
scx_ops_error("built-in idle tracking is disabled");
return cpu_none_mask;
}

#ifdef CONFIG_SMP
if (sched_smt_active())
return idle_masks.smt;
else
return idle_masks.cpu;
#else
return cpu_none_mask;
#endif
}

/**
* scx_bpf_put_idle_cpumask - Release a previously acquired referenced kptr to
* either the percpu, or SMT idle-tracking cpumask.
*/
__bpf_kfunc void scx_bpf_put_idle_cpumask(const struct cpumask *idle_mask)
{
/*
* Empty function body because we aren't actually acquiring or
* releasing a reference to a global idle cpumask, which is read-only
* in the caller and is never released. The acquire / release semantics
* here are just used to make the cpumask a trusted pointer in the
* caller.
*/
}
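Editor's note: a sketch of the acquire/release discipline the kptr semantics above enforce, using one of the bpf_cpumask_* kfuncs from kernel/bpf/cpumask.c for the read; the helper name and use are assumed:

static bool any_fully_idle_core(void)
{
	const struct cpumask *idle;
	bool found;

	idle = scx_bpf_get_idle_smtmask();
	/* safe even when idle tracking is off: we get cpu_none_mask */
	found = !bpf_cpumask_empty(idle);
	scx_bpf_put_idle_cpumask(idle);	/* release the trusted kptr */

	return found;
}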

/**
@@ -5219,27 +5260,14 @@ __bpf_kfunc struct cgroup *scx_bpf_task_cgroup(struct task_struct *p)
}
#endif

BTF_KFUNCS_START(scx_kfunc_ids_ops_only)
BTF_ID_FLAGS(func, scx_bpf_kick_cpu)
BTF_ID_FLAGS(func, scx_bpf_dsq_nr_queued)
BTF_ID_FLAGS(func, scx_bpf_test_and_clear_cpu_idle)
BTF_ID_FLAGS(func, scx_bpf_pick_idle_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_pick_any_cpu, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_destroy_dsq)
BTF_ID_FLAGS(func, scx_bpf_select_cpu_dfl, KF_RCU)
BTF_KFUNCS_END(scx_kfunc_ids_ops_only)

static const struct btf_kfunc_id_set scx_kfunc_set_ops_only = {
.owner = THIS_MODULE,
.set = &scx_kfunc_ids_ops_only,
};
__bpf_kfunc_end_defs();

BTF_KFUNCS_START(scx_kfunc_ids_any)
BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_get_idle_cpumask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_get_idle_smtmask, KF_ACQUIRE)
BTF_ID_FLAGS(func, scx_bpf_put_idle_cpumask, KF_RELEASE)
BTF_ID_FLAGS(func, scx_bpf_error_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_exit_bstr, KF_TRUSTED_ARGS)
BTF_ID_FLAGS(func, scx_bpf_task_running, KF_RCU)
BTF_ID_FLAGS(func, scx_bpf_task_cpu, KF_RCU)
#ifdef CONFIG_CGROUP_SCHED
@@ -5252,8 +5280,6 @@ static const struct btf_kfunc_id_set scx_kfunc_set_any = {
.set = &scx_kfunc_ids_any,
};

__bpf_kfunc_end_defs();

static int __init scx_init(void)
{
int ret;
