@@ -5849,10 +5849,10 @@ static bool throttle_cfs_rq(struct cfs_rq *cfs_rq)
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, task_delta);
 
-done:
 	/* Stop the fair server if throttling resulted in no runnable tasks */
 	if (rq_h_nr_running && !rq->cfs.h_nr_running)
 		dl_server_stop(&rq->fair_server);
+done:
 	/*
 	 * Note: distribution will already see us throttled via the
 	 * throttled-list. rq->lock protects completion.
@@ -5940,16 +5940,16 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
 			goto unthrottle_throttle;
 	}
 
+	/* Start the fair server if un-throttling resulted in new runnable tasks */
+	if (!rq_h_nr_running && rq->cfs.h_nr_running)
+		dl_server_start(&rq->fair_server);
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, task_delta);
 
 unthrottle_throttle:
 	assert_list_leaf_cfs_rq(rq);
 
-	/* Start the fair server if un-throttling resulted in new runnable tasks */
-	if (!rq_h_nr_running && rq->cfs.h_nr_running)
-		dl_server_start(&rq->fair_server);
-
 	/* Determine whether we need to wake up potentially idle CPU: */
 	if (rq->curr == rq->idle && rq->cfs.nr_running)
 		resched_curr(rq);
@@ -6771,6 +6771,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	struct sched_entity *se = &p->se;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	int task_new = !(flags & ENQUEUE_WAKEUP);
+	int rq_h_nr_running = rq->cfs.h_nr_running;
 
 	/*
 	 * The code below (indirectly) updates schedutil which looks at
@@ -6780,13 +6781,6 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	 */
 	util_est_enqueue(&rq->cfs, p);
 
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running) {
-		/* Account for idle runtime */
-		if (!rq->nr_running)
-			dl_server_update_idle_time(rq, rq->curr);
-		dl_server_start(&rq->fair_server);
-	}
-
 	/*
 	 * If in_iowait is set, the code below may not trigger any cpufreq
 	 * utilization updates, so do it here explicitly with the IOWAIT flag
@@ -6832,6 +6826,13 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 			goto enqueue_throttle;
 	}
 
+	if (!rq_h_nr_running && rq->cfs.h_nr_running) {
+		/* Account for idle runtime */
+		if (!rq->nr_running)
+			dl_server_update_idle_time(rq, rq->curr);
+		dl_server_start(&rq->fair_server);
+	}
+
 	/* At this point se is NULL and we are at root level*/
 	add_nr_running(rq, 1);
 
@@ -6872,6 +6873,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	int task_sleep = flags & DEQUEUE_SLEEP;
 	int idle_h_nr_running = task_has_idle_policy(p);
 	bool was_sched_idle = sched_idle_rq(rq);
+	int rq_h_nr_running = rq->cfs.h_nr_running;
 
 	util_est_dequeue(&rq->cfs, p);
 
@@ -6926,14 +6928,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	/* At this point se is NULL and we are at root level*/
 	sub_nr_running(rq, 1);
 
+	if (rq_h_nr_running && !rq->cfs.h_nr_running)
+		dl_server_stop(&rq->fair_server);
+
 	/* balance early to pull high priority tasks */
 	if (unlikely(!was_sched_idle && sched_idle_rq(rq)))
 		rq->next_balance = jiffies;
 
 dequeue_throttle:
-	if (!throttled_hierarchy(task_cfs_rq(p)) && !rq->cfs.h_nr_running)
-		dl_server_stop(&rq->fair_server);
-
 	util_est_update(&rq->cfs, p, task_sleep);
 	hrtick_update(rq);
 }