Skip to content

Commit 857b158

Browse files
author
Peter Zijlstra
committed
sched/eevdf: Use sched_attr::sched_runtime to set request/slice suggestion
Allow applications to directly set a suggested request/slice length using sched_attr::sched_runtime. The implementation clamps the value to: 0.1[ms] <= slice <= 100[ms] which is 1/10 the size of HZ=1000 and 10 times the size of HZ=100. Applications should strive to use their periodic runtime at a high confidence interval (95%+) as the target slice. Using a smaller slice will introduce undue preemptions, while using a larger value will increase latency. For all the following examples assume a scheduling quantum of 8, and for consistency all examples have W=4: {A,B,C,D}(w=1,r=8): ABCD... +---+---+---+--- t=0, V=1.5 t=1, V=3.5 A |------< A |------< B |------< B |------< C |------< C |------< D |------< D |------< ---+*------+-------+--- ---+--*----+-------+--- t=2, V=5.5 t=3, V=7.5 A |------< A |------< B |------< B |------< C |------< C |------< D |------< D |------< ---+----*--+-------+--- ---+------*+-------+--- Note: 4 identical tasks in FIFO order ~~~ {A,B}(w=1,r=16) C(w=2,r=16) AACCBBCC... +---+---+---+--- t=0, V=1.25 t=2, V=5.25 A |--------------< A |--------------< B |--------------< B |--------------< C |------< C |------< ---+*------+-------+--- ---+----*--+-------+--- t=4, V=8.25 t=6, V=12.25 A |--------------< A |--------------< B |--------------< B |--------------< C |------< C |------< ---+-------*-------+--- ---+-------+---*---+--- Note: 1 heavy task -- because q=8, double r such that the deadline of the w=2 task doesn't go below q. Note: observe the full schedule becomes: W*max(r_i/w_i) = 4*2q = 8q in length. Note: the period of the heavy task is half the full period at: W*(r_i/w_i) = 4*(2q/2) = 4q ~~~ {A,C,D}(w=1,r=16) B(w=1,r=8): BAACCBDD... 
+---+---+---+--- t=0, V=1.5 t=1, V=3.5 A |--------------< A |---------------< B |------< B |------< C |--------------< C |--------------< D |--------------< D |--------------< ---+*------+-------+--- ---+--*----+-------+--- t=3, V=7.5 t=5, V=11.5 A |---------------< A |---------------< B |------< B |------< C |--------------< C |--------------< D |--------------< D |--------------< ---+------*+-------+--- ---+-------+--*----+--- t=6, V=13.5 A |---------------< B |------< C |--------------< D |--------------< ---+-------+----*--+--- Note: 1 short task -- again double r so that the deadline of the short task won't be below q. Made B short because it's not the leftmost task, but is eligible with the 0,1,2,3 spread. Note: like with the heavy task, the period of the short task observes: W*(r_i/w_i) = 4*(1q/1) = 4q ~~~ A(w=1,r=16) B(w=1,r=8) C(w=2,r=16) BCCAABCC... +---+---+---+--- t=0, V=1.25 t=1, V=3.25 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+*------+-------+--- ---+--*----+-------+--- t=3, V=7.25 t=5, V=11.25 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+------*+-------+--- ---+-------+--*----+--- t=6, V=13.25 A |--------------< B |------< C |------< ---+-------+----*--+--- Note: 1 heavy and 1 short task -- combine them all. Note: both the short and heavy task end up with a period of 4q ~~~ A(w=1,r=16) B(w=2,r=16) C(w=1,r=8) BBCAABBC... 
+---+---+---+--- t=0, V=1 t=2, V=5 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+*------+-------+--- ---+----*--+-------+--- t=3, V=7 t=5, V=11 A |--------------< A |--------------< B |------< B |------< C |------< C |------< ---+------*+-------+--- ---+-------+--*----+--- t=7, V=15 A |--------------< B |------< C |------< ---+-------+------*+--- Note: as before but permuted ~~~ From all this it can be deduced that, for the steady state: - the total period (P) of a schedule is: W*max(r_i/w_i) - the average period of a task is: W*(r_i/w_i) - each task obtains the fair share: w_i/W of each full period P Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Tested-by: Valentin Schneider <vschneid@redhat.com> Link: https://lkml.kernel.org/r/20240727105030.842834421@infradead.org
1 parent 85e511d commit 857b158

File tree

5 files changed

+33
-10
lines changed

5 files changed

+33
-10
lines changed

include/linux/sched.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -547,6 +547,7 @@ struct sched_entity {
547547
unsigned char on_rq;
548548
unsigned char sched_delayed;
549549
unsigned char rel_deadline;
550+
unsigned char custom_slice;
550551
/* hole */
551552

552553
u64 exec_start;

kernel/sched/core.c

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4390,7 +4390,6 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
43904390
p->se.nr_migrations = 0;
43914391
p->se.vruntime = 0;
43924392
p->se.vlag = 0;
4393-
p->se.slice = sysctl_sched_base_slice;
43944393
INIT_LIST_HEAD(&p->se.group_node);
43954394

43964395
/* A delayed task cannot be in clone(). */
@@ -4643,6 +4642,8 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
46434642

46444643
p->prio = p->normal_prio = p->static_prio;
46454644
set_load_weight(p, false);
4645+
p->se.custom_slice = 0;
4646+
p->se.slice = sysctl_sched_base_slice;
46464647

46474648
/*
46484649
* We don't need the reset flag anymore after the fork. It has
@@ -8412,6 +8413,7 @@ void __init sched_init(void)
84128413
}
84138414

84148415
set_load_weight(&init_task, false);
8416+
init_task.se.slice = sysctl_sched_base_slice,
84158417

84168418
/*
84178419
* The boot idle thread does lazy MMU switching as well:

kernel/sched/debug.c

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -739,11 +739,12 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
739739
else
740740
SEQ_printf(m, " %c", task_state_to_char(p));
741741

742-
SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
742+
SEQ_printf(m, "%15s %5d %9Ld.%06ld %c %9Ld.%06ld %c %9Ld.%06ld %9Ld.%06ld %9Ld %5d ",
743743
p->comm, task_pid_nr(p),
744744
SPLIT_NS(p->se.vruntime),
745745
entity_eligible(cfs_rq_of(&p->se), &p->se) ? 'E' : 'N',
746746
SPLIT_NS(p->se.deadline),
747+
p->se.custom_slice ? 'S' : ' ',
747748
SPLIT_NS(p->se.slice),
748749
SPLIT_NS(p->se.sum_exec_runtime),
749750
(long long)(p->nvcsw + p->nivcsw),

kernel/sched/fair.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -983,7 +983,8 @@ static bool update_deadline(struct cfs_rq *cfs_rq, struct sched_entity *se)
983983
* nice) while the request time r_i is determined by
984984
* sysctl_sched_base_slice.
985985
*/
986-
se->slice = sysctl_sched_base_slice;
986+
if (!se->custom_slice)
987+
se->slice = sysctl_sched_base_slice;
987988

988989
/*
989990
* EEVDF: vd_i = ve_i + r_i / w_i
@@ -5227,7 +5228,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
52275228
u64 vslice, vruntime = avg_vruntime(cfs_rq);
52285229
s64 lag = 0;
52295230

5230-
se->slice = sysctl_sched_base_slice;
5231+
if (!se->custom_slice)
5232+
se->slice = sysctl_sched_base_slice;
52315233
vslice = calc_delta_fair(se->slice, se);
52325234

52335235
/*

kernel/sched/syscalls.c

Lines changed: 23 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -401,10 +401,20 @@ static void __setscheduler_params(struct task_struct *p,
401401

402402
p->policy = policy;
403403

404-
if (dl_policy(policy))
404+
if (dl_policy(policy)) {
405405
__setparam_dl(p, attr);
406-
else if (fair_policy(policy))
406+
} else if (fair_policy(policy)) {
407407
p->static_prio = NICE_TO_PRIO(attr->sched_nice);
408+
if (attr->sched_runtime) {
409+
p->se.custom_slice = 1;
410+
p->se.slice = clamp_t(u64, attr->sched_runtime,
411+
NSEC_PER_MSEC/10, /* HZ=1000 * 10 */
412+
NSEC_PER_MSEC*100); /* HZ=100 / 10 */
413+
} else {
414+
p->se.custom_slice = 0;
415+
p->se.slice = sysctl_sched_base_slice;
416+
}
417+
}
408418

409419
/*
410420
* __sched_setscheduler() ensures attr->sched_priority == 0 when
@@ -700,7 +710,9 @@ int __sched_setscheduler(struct task_struct *p,
700710
* but store a possible modification of reset_on_fork.
701711
*/
702712
if (unlikely(policy == p->policy)) {
703-
if (fair_policy(policy) && attr->sched_nice != task_nice(p))
713+
if (fair_policy(policy) &&
714+
(attr->sched_nice != task_nice(p) ||
715+
(attr->sched_runtime != p->se.slice)))
704716
goto change;
705717
if (rt_policy(policy) && attr->sched_priority != p->rt_priority)
706718
goto change;
@@ -846,6 +858,9 @@ static int _sched_setscheduler(struct task_struct *p, int policy,
846858
.sched_nice = PRIO_TO_NICE(p->static_prio),
847859
};
848860

861+
if (p->se.custom_slice)
862+
attr.sched_runtime = p->se.slice;
863+
849864
/* Fixup the legacy SCHED_RESET_ON_FORK hack. */
850865
if ((policy != SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) {
851866
attr.sched_flags |= SCHED_FLAG_RESET_ON_FORK;
@@ -1012,12 +1027,14 @@ static int sched_copy_attr(struct sched_attr __user *uattr, struct sched_attr *a
10121027

10131028
static void get_params(struct task_struct *p, struct sched_attr *attr)
10141029
{
1015-
if (task_has_dl_policy(p))
1030+
if (task_has_dl_policy(p)) {
10161031
__getparam_dl(p, attr);
1017-
else if (task_has_rt_policy(p))
1032+
} else if (task_has_rt_policy(p)) {
10181033
attr->sched_priority = p->rt_priority;
1019-
else
1034+
} else {
10201035
attr->sched_nice = task_nice(p);
1036+
attr->sched_runtime = p->se.slice;
1037+
}
10211038
}
10221039

10231040
/**

0 commit comments

Comments
 (0)