Skip to content

Commit 4fbe2b5

Browse files
Ingo Molnar authored and Sasha Levin committed
sched/fair: Rename cfs_rq::avg_load to cfs_rq::sum_weight
[ Upstream commit 4ff674f ]

The ::avg_load field is a long-standing misnomer: it says it's an 'average load', but in reality it's the momentary sum of the load of all currently runnable tasks. We'd have to also perform a division by nr_running (or use time-decay) to arrive at any sort of average value.

This is clear from comments about the math of fair scheduling:

    \Sum w_i := cfs_rq->avg_load

The sum of all weights is ... the sum of all weights, not the average of all weights. To make it doubly confusing, there's also an ::avg_load in the load-balancing struct sg_lb_stats, which *is* a true average.

The second part of the field's name is a minor misnomer as well: it says 'load', and it is indeed a load_weight structure as it shares code with the load-balancer - but it's only in an SMP load-balancing context where load = weight; in the fair scheduling context the primary purpose is the weighting of different nice levels.

So rename the field to ::sum_weight instead, which makes the terminology of the EEVDF math match up with our implementation of it:

    \Sum w_i := cfs_rq->sum_weight

Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://patch.msgid.link/20251201064647.1851919-6-mingo@kernel.org
Stable-dep-of: b3d99f4 ("sched/fair: Fix zero_vruntime tracking")
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent bca6d30 commit 4fbe2b5

File tree

2 files changed

+9
-9
lines changed

2 files changed

+9
-9
lines changed

kernel/sched/fair.c

Lines changed: 8 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,7 @@ static inline s64 entity_key(struct cfs_rq *cfs_rq, struct sched_entity *se)
608608
*
609609
* v0 := cfs_rq->zero_vruntime
610610
* \Sum (v_i - v0) * w_i := cfs_rq->avg_vruntime
611-
* \Sum w_i := cfs_rq->avg_load
611+
* \Sum w_i := cfs_rq->sum_weight
612612
*
613613
* Since zero_vruntime closely tracks the per-task service, these
614614
* deltas: (v_i - v), will be in the order of the maximal (virtual) lag
@@ -625,7 +625,7 @@ avg_vruntime_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
625625
s64 key = entity_key(cfs_rq, se);
626626

627627
cfs_rq->avg_vruntime += key * weight;
628-
cfs_rq->avg_load += weight;
628+
cfs_rq->sum_weight += weight;
629629
}
630630

631631
static void
@@ -635,16 +635,16 @@ avg_vruntime_sub(struct cfs_rq *cfs_rq, struct sched_entity *se)
635635
s64 key = entity_key(cfs_rq, se);
636636

637637
cfs_rq->avg_vruntime -= key * weight;
638-
cfs_rq->avg_load -= weight;
638+
cfs_rq->sum_weight -= weight;
639639
}
640640

641641
static inline
642642
void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
643643
{
644644
/*
645-
* v' = v + d ==> avg_vruntime' = avg_runtime - d*avg_load
645+
* v' = v + d ==> avg_vruntime' = avg_runtime - d*sum_weight
646646
*/
647-
cfs_rq->avg_vruntime -= cfs_rq->avg_load * delta;
647+
cfs_rq->avg_vruntime -= cfs_rq->sum_weight * delta;
648648
}
649649

650650
/*
@@ -655,7 +655,7 @@ u64 avg_vruntime(struct cfs_rq *cfs_rq)
655655
{
656656
struct sched_entity *curr = cfs_rq->curr;
657657
s64 avg = cfs_rq->avg_vruntime;
658-
long load = cfs_rq->avg_load;
658+
long load = cfs_rq->sum_weight;
659659

660660
if (curr && curr->on_rq) {
661661
unsigned long weight = scale_load_down(curr->load.weight);
@@ -723,7 +723,7 @@ static int vruntime_eligible(struct cfs_rq *cfs_rq, u64 vruntime)
723723
{
724724
struct sched_entity *curr = cfs_rq->curr;
725725
s64 avg = cfs_rq->avg_vruntime;
726-
long load = cfs_rq->avg_load;
726+
long load = cfs_rq->sum_weight;
727727

728728
if (curr && curr->on_rq) {
729729
unsigned long weight = scale_load_down(curr->load.weight);
@@ -5175,7 +5175,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
51755175
*
51765176
* vl_i = (W + w_i)*vl'_i / W
51775177
*/
5178-
load = cfs_rq->avg_load;
5178+
load = cfs_rq->sum_weight;
51795179
if (curr && curr->on_rq)
51805180
load += scale_load_down(curr->load.weight);
51815181

kernel/sched/sched.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -679,7 +679,7 @@ struct cfs_rq {
679679
unsigned int h_nr_idle; /* SCHED_IDLE */
680680

681681
s64 avg_vruntime;
682-
u64 avg_load;
682+
u64 sum_weight;
683683

684684
u64 zero_vruntime;
685685
#ifdef CONFIG_SCHED_CORE

0 commit comments

Comments (0)