Commit d73249f

arighi authored and htejun committed
sched_ext: idle: Make idle static keys private
Make all the static keys used by the idle CPU selection policy private to
ext_idle.c. This avoids unnecessary exposure in headers and improves code
encapsulation.

Cc: Yury Norov <yury.norov@gmail.com>
Signed-off-by: Andrea Righi <arighi@nvidia.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
1 parent f09177c commit d73249f
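For context, the change follows the usual jump-label encapsulation pattern: the static key stays file-local, and other compilation units toggle or test it only through small helpers declared in the header. A minimal sketch of that pattern, using made-up names (demo_idle_enabled and the demo_* helpers are illustrative assumptions, not part of this patch):

/* demo.c -- hypothetical illustration of the encapsulation pattern */
#include <linux/jump_label.h>
#include <linux/types.h>

/* The key is 'static': no other file can flip it directly. */
static DEFINE_STATIC_KEY_FALSE(demo_idle_enabled);

void demo_idle_enable(void)
{
	static_branch_enable(&demo_idle_enabled);
}

void demo_idle_disable(void)
{
	static_branch_disable(&demo_idle_enabled);
}

bool demo_idle_is_enabled(void)
{
	/* Hot-path check: compiles down to a patched branch. */
	return static_branch_likely(&demo_idle_enabled);
}

/* demo.h -- only the accessors are exposed, never the key itself. */
void demo_idle_enable(void);
void demo_idle_disable(void);
bool demo_idle_is_enabled(void);

Call sites elsewhere in the scheduler then go through the accessors, exactly as ext.c below now calls scx_idle_enable()/scx_idle_disable() instead of touching scx_builtin_idle_enabled.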

File tree

3 files changed, 32 insertions(+), 28 deletions(-)

kernel/sched/ext.c

Lines changed: 2 additions & 7 deletions
@@ -4765,7 +4765,7 @@ static void scx_ops_disable_workfn(struct kthread_work *work)
 	static_branch_disable(&scx_ops_enq_exiting);
 	static_branch_disable(&scx_ops_enq_migration_disabled);
 	static_branch_disable(&scx_ops_cpu_preempt);
-	static_branch_disable(&scx_builtin_idle_enabled);
+	scx_idle_disable();
 	synchronize_rcu();
 
 	if (ei->kind >= SCX_EXIT_ERROR) {
@@ -5403,12 +5403,7 @@ static int scx_ops_enable(struct sched_ext_ops *ops, struct bpf_link *link)
 	if (scx_ops.cpu_acquire || scx_ops.cpu_release)
 		static_branch_enable(&scx_ops_cpu_preempt);
 
-	if (!ops->update_idle || (ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
-		scx_idle_reset_masks();
-		static_branch_enable(&scx_builtin_idle_enabled);
-	} else {
-		static_branch_disable(&scx_builtin_idle_enabled);
-	}
+	scx_idle_enable(ops);
 
 	/*
 	 * Lock out forks, cgroup on/offlining and moves before opening the

kernel/sched/ext_idle.c

Lines changed: 26 additions & 13 deletions
@@ -12,7 +12,7 @@
 #include "ext_idle.h"
 
 /* Enable/disable built-in idle CPU selection policy */
-DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
+static DEFINE_STATIC_KEY_FALSE(scx_builtin_idle_enabled);
 
 #ifdef CONFIG_SMP
 #ifdef CONFIG_CPUMASK_OFFSTACK
@@ -22,10 +22,10 @@
 #endif
 
 /* Enable/disable LLC aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_llc);
 
 /* Enable/disable NUMA aware optimizations */
-DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
+static DEFINE_STATIC_KEY_FALSE(scx_selcpu_topo_numa);
 
 static struct {
 	cpumask_var_t cpu;
@@ -441,16 +441,6 @@ s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *found)
 	return cpu;
 }
 
-void scx_idle_reset_masks(void)
-{
-	/*
-	 * Consider all online cpus idle. Should converge to the actual state
-	 * quickly.
-	 */
-	cpumask_copy(idle_masks.cpu, cpu_online_mask);
-	cpumask_copy(idle_masks.smt, cpu_online_mask);
-}
-
 void scx_idle_init_masks(void)
 {
 	BUG_ON(!alloc_cpumask_var(&idle_masks.cpu, GFP_KERNEL));
@@ -532,6 +522,29 @@ void __scx_update_idle(struct rq *rq, bool idle, bool do_notify)
 }
 #endif /* CONFIG_SMP */
 
+void scx_idle_enable(struct sched_ext_ops *ops)
+{
+	if (ops->update_idle && !(ops->flags & SCX_OPS_KEEP_BUILTIN_IDLE)) {
+		static_branch_disable(&scx_builtin_idle_enabled);
+		return;
+	}
+	static_branch_enable(&scx_builtin_idle_enabled);
+
+#ifdef CONFIG_SMP
+	/*
+	 * Consider all online cpus idle. Should converge to the actual state
+	 * quickly.
+	 */
+	cpumask_copy(idle_masks.cpu, cpu_online_mask);
+	cpumask_copy(idle_masks.smt, cpu_online_mask);
+#endif
+}
+
+void scx_idle_disable(void)
+{
+	static_branch_disable(&scx_builtin_idle_enabled);
+}
+
 /********************************************************************************
  * Helpers that can be called from the BPF scheduler.
  */
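The new scx_idle_enable() preserves the earlier semantics: built-in idle tracking stays on unless the loaded scheduler implements ops.update_idle() without also setting SCX_OPS_KEEP_BUILTIN_IDLE. As an illustration only (the names and the BPF_STRUCT_OPS/SCX_OPS_DEFINE helper macros follow the tools/sched_ext example headers and are an assumption here, not part of this commit), a BPF scheduler that wants both its own idle callback and the kernel-maintained idle cpumasks would look roughly like:

/* Hypothetical BPF-side fragment, not included in this commit. */
#include <scx/common.bpf.h>

void BPF_STRUCT_OPS(demo_update_idle, s32 cpu, bool idle)
{
	/*
	 * Custom idle bookkeeping goes here; the built-in idle masks keep
	 * being updated because SCX_OPS_KEEP_BUILTIN_IDLE is set below.
	 */
}

SCX_OPS_DEFINE(demo_ops,
	       .update_idle	= (void *)demo_update_idle,
	       .flags		= SCX_OPS_KEEP_BUILTIN_IDLE,
	       .name		= "demo");

With that flag set, scx_idle_enable(ops) takes the enable path above and re-seeds the idle masks; without it, an ops.update_idle() implementation switches the static key off and the scheduler is expected to track idle CPUs itself.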

kernel/sched/ext_idle.h

Lines changed: 4 additions & 8 deletions
@@ -10,20 +10,15 @@
 #ifndef _KERNEL_SCHED_EXT_IDLE_H
 #define _KERNEL_SCHED_EXT_IDLE_H
 
-extern struct static_key_false scx_builtin_idle_enabled;
+struct sched_ext_ops;
 
 #ifdef CONFIG_SMP
-extern struct static_key_false scx_selcpu_topo_llc;
-extern struct static_key_false scx_selcpu_topo_numa;
-
 void scx_idle_update_selcpu_topology(void);
-void scx_idle_reset_masks(void);
 void scx_idle_init_masks(void);
 bool scx_idle_test_and_clear_cpu(int cpu);
 s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags);
 #else /* !CONFIG_SMP */
 static inline void scx_idle_update_selcpu_topology(void) {}
-static inline void scx_idle_reset_masks(void) {}
 static inline void scx_idle_init_masks(void) {}
 static inline bool scx_idle_test_and_clear_cpu(int cpu) { return false; }
 static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
@@ -33,7 +28,8 @@ static inline s32 scx_pick_idle_cpu(const struct cpumask *cpus_allowed, u64 flags)
 #endif /* CONFIG_SMP */
 
 s32 scx_select_cpu_dfl(struct task_struct *p, s32 prev_cpu, u64 wake_flags, bool *found);
-
-extern int scx_idle_init(void);
+void scx_idle_enable(struct sched_ext_ops *ops);
+void scx_idle_disable(void);
+int scx_idle_init(void);
 
 #endif /* _KERNEL_SCHED_EXT_IDLE_H */
