
Commit 5f6bd38

Author: Peter Zijlstra
sched/rt: Remove default bandwidth control
Now that fair_server exists, we no longer need RT bandwidth control
unless RT_GROUP_SCHED.

Enable fair_server with parameters equivalent to RT throttling.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: "Peter Zijlstra (Intel)" <peterz@infradead.org>
Signed-off-by: Daniel Bristot de Oliveira <bristot@kernel.org>
Signed-off-by: "Vineeth Pillai (Google)" <vineeth@bitbyteword.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Juri Lelli <juri.lelli@redhat.com>
Link: https://lore.kernel.org/r/14d562db55df5c3c780d91940743acb166895ef7.1716811044.git.bristot@kernel.org
Parent: c8a8539
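
For context, "parameters equivalent to RT throttling" refers to the default sysctls sched_rt_period_us=1000000 and sched_rt_runtime_us=950000, which allow RT tasks to consume at most 950ms of every 1s and leave 50ms per second for everything else. The fair server defaults introduced in kernel/sched/deadline.c below reserve that same 50ms/1000ms slice for fair tasks. A back-of-the-envelope sketch of the arithmetic (a standalone userspace program, not kernel code; the macro names here are illustrative only):

#include <stdio.h>

/* Default RT throttling knobs, in microseconds. */
#define SCHED_RT_PERIOD_US	1000000		/* 1 second      */
#define SCHED_RT_RUNTIME_US	 950000		/* 950 ms for RT */

int main(void)
{
	/* Per period, RT throttling reserved the remainder for non-RT work. */
	unsigned long long fair_us = SCHED_RT_PERIOD_US - SCHED_RT_RUNTIME_US;

	/*
	 * Prints "fair server default: 50 ms every 1000 ms", matching the
	 * runtime = 50 * NSEC_PER_MSEC, period = 1000 * NSEC_PER_MSEC
	 * defaults applied in dl_server_start() below.
	 */
	printf("fair server default: %llu ms every %d ms\n",
	       fair_us / 1000, SCHED_RT_PERIOD_US / 1000);
	return 0;
}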

5 files changed, 120 additions and 142 deletions

kernel/sched/core.c

Lines changed: 6 additions & 3 deletions
@@ -8266,8 +8266,6 @@ void __init sched_init(void)
 #endif /* CONFIG_RT_GROUP_SCHED */
 	}
 
-	init_rt_bandwidth(&def_rt_bandwidth, global_rt_period(), global_rt_runtime());
-
 #ifdef CONFIG_SMP
 	init_defrootdomain();
 #endif
@@ -8322,8 +8320,13 @@ void __init sched_init(void)
 		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
-		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
+		/*
+		 * This is required for init cpu because rt.c:__enable_runtime()
+		 * starts working after scheduler_running, which is not the case
+		 * yet.
+		 */
+		rq->rt.rt_runtime = global_rt_runtime();
 		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 #ifdef CONFIG_SMP
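
The rq->rt.rt_runtime assignment added above reuses the existing global_rt_runtime() helper instead of def_rt_bandwidth, whose initialization the first hunk drops. For reference, those helpers look roughly like the following in kernel/sched; this is a sketch of pre-existing code for context, not part of this diff, and it only compiles in kernel context:

/*
 * sysctl_sched_rt_period defaults to 1000000us and sysctl_sched_rt_runtime
 * to 950000us; a runtime of -1 means "no limit".
 */
static inline u64 global_rt_period(void)
{
	return (u64)sysctl_sched_rt_period * NSEC_PER_USEC;
}

static inline u64 global_rt_runtime(void)
{
	if (sysctl_sched_rt_runtime < 0)
		return RUNTIME_INF;

	return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}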

kernel/sched/deadline.c

Lines changed: 3 additions & 2 deletions
@@ -1554,6 +1554,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 	if (dl_se == &rq->fair_server)
 		return;
 
+#ifdef CONFIG_RT_GROUP_SCHED
 	/*
 	 * Because -- for now -- we share the rt bandwidth, we need to
 	 * account our runtime there too, otherwise actual rt tasks
@@ -1578,6 +1579,7 @@ static void update_curr_dl_se(struct rq *rq, struct sched_dl_entity *dl_se, s64
 			rt_rq->rt_time += delta_exec;
 		raw_spin_unlock(&rt_rq->rt_runtime_lock);
 	}
+#endif
 }
 
 /*
@@ -1632,8 +1634,7 @@ void dl_server_start(struct sched_dl_entity *dl_se)
 	 * this before getting generic.
 	 */
 	if (!dl_server(dl_se)) {
-		/* Disabled */
-		u64 runtime = 0;
+		u64 runtime =  50 * NSEC_PER_MSEC;
 		u64 period = 1000 * NSEC_PER_MSEC;
 
 		dl_server_apply_params(dl_se, runtime, period, 1);

kernel/sched/debug.c

Lines changed: 3 additions & 0 deletions
@@ -885,9 +885,12 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
 	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
 
 	PU(rt_nr_running);
+
+#ifdef CONFIG_RT_GROUP_SCHED
 	P(rt_throttled);
 	PN(rt_time);
 	PN(rt_runtime);
+#endif
 
 #undef PN
 #undef PU
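
For context, P(), PU() and PN() are SEQ_printf() wrapper macros defined locally in print_rt_rq() just above this hunk, approximately as below (reproduced from the surrounding debug.c code as a sketch; only the PN() body is visible as a context line in the diff itself):

#define P(x) \
	SEQ_printf(m, "  .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
#define PU(x) \
	SEQ_printf(m, "  .%-30s: %lu\n", #x, (unsigned long)(rt_rq->x))
#define PN(x) \
	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))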
