sched/rt: Fix call to cpufreq_update_util()

With commit:

  8f111bc ("cpufreq/schedutil: Rewrite CPUFREQ_RT support")

the schedutil governor uses rq->rt.rt_nr_running to detect whether an
RT task is currently running on the CPU, and sets the frequency to the
maximum if one is.

cpufreq_update_util() is called in enqueue/dequeue_top_rt_rq(), but
rq->rt.rt_nr_running has not been updated yet when dequeue_top_rt_rq()
runs, so schedutil still considers that an RT task is running when the
last one is dequeued. rq->rt.rt_nr_running is only updated later, in
dequeue_rt_stack().
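
To make the broken ordering concrete, here is a minimal userspace
sketch of the pre-patch sequence (plain C, not kernel code; the names
only mirror the scheduler functions involved):

  #include <stdio.h>

  static int rt_nr_running = 1;        /* one RT task still queued */

  /* Stand-in for schedutil's hook: any RT task means max frequency. */
  static void cpufreq_update_util(void)
  {
          printf("governor sees rt_nr_running=%d -> %s\n",
                 rt_nr_running, rt_nr_running ? "max freq" : "util-based freq");
  }

  static void dequeue_top_rt_rq(void)
  {
          cpufreq_update_util();       /* pre-patch: hook runs here... */
  }

  static void dequeue_rt_stack(void)
  {
          dequeue_top_rt_rq();
          rt_nr_running--;             /* ...but the count only drops here */
  }

  int main(void)
  {
          /* Prints "max freq" even though the last RT task is leaving. */
          dequeue_rt_stack();
          return 0;
  }

The governor can thus be left requesting the maximum frequency after
the last RT task has gone, until some later event triggers another
update.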

In fact, we can take advantage of the fact that rt entities are
dequeued and then re-enqueued whenever an RT task is enqueued or
dequeued. As a result, enqueue_top_rt_rq() is always called when a
task is enqueued or dequeued, and also when groups are throttled or
unthrottled, so the cpufreq_update_util() call can be moved from
dequeue_top_rt_rq() into enqueue_top_rt_rq(). The only path that does
not go through enqueue_top_rt_rq() is the throttling of the root
rt_rq, which is covered by calling cpufreq_update_util() directly in
sched_rt_rq_dequeue().
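
For reference, the dequeue-then-re-enqueue sequence this relies on
looks roughly like the following (abridged and paraphrased from
kernel/sched/rt.c of this era; not the verbatim source):

  static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
  {
          struct rq *rq = rq_of_rt_se(rt_se);

          /* Dequeue the whole stack; rt_nr_running is updated here. */
          dequeue_rt_stack(rt_se, flags);

          /* Re-enqueue the entities that still have runnable tasks. */
          for_each_sched_rt_entity(rt_se) {
                  struct rt_rq *rt_rq = group_rt_rq(rt_se);

                  if (rt_rq && rt_rq->rt_nr_running)
                          __enqueue_rt_entity(rt_se, flags);
          }

          /* Runs last, with rt_nr_running up to date: a safe place
           * to kick cpufreq. */
          enqueue_top_rt_rq(&rq->rt);
  }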

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: efault@gmx.de
Cc: juri.lelli@redhat.com
Cc: patrick.bellasi@arm.com
Cc: viresh.kumar@linaro.org
Fixes: 8f111bc ('cpufreq/schedutil: Rewrite CPUFREQ_RT support')
Link: http://lkml.kernel.org/r/1530021202-21695-1-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
vingu-linaro authored and Ingo Molnar committed Jul 3, 2018
1 parent d9c0ffc commit 296b2ff
Showing 3 changed files with 16 additions and 7 deletions.
2 changes: 1 addition & 1 deletion kernel/sched/cpufreq_schedutil.c
@@ -192,7 +192,7 @@ static unsigned long sugov_aggregate_util(struct sugov_cpu *sg_cpu)
 {
         struct rq *rq = cpu_rq(sg_cpu->cpu);
 
-        if (rq->rt.rt_nr_running)
+        if (rt_rq_is_runnable(&rq->rt))
                 return sg_cpu->max;
 
         /*
16 changes: 10 additions & 6 deletions kernel/sched/rt.c
@@ -508,8 +508,11 @@ static void sched_rt_rq_dequeue(struct rt_rq *rt_rq)
 
         rt_se = rt_rq->tg->rt_se[cpu];
 
-        if (!rt_se)
+        if (!rt_se) {
                 dequeue_top_rt_rq(rt_rq);
+                /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
+                cpufreq_update_util(rq_of_rt_rq(rt_rq), 0);
+        }
         else if (on_rt_rq(rt_se))
                 dequeue_rt_entity(rt_se, 0);
 }
@@ -1001,8 +1004,6 @@ dequeue_top_rt_rq(struct rt_rq *rt_rq)
         sub_nr_running(rq, rt_rq->rt_nr_running);
         rt_rq->rt_queued = 0;
 
-        /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
-        cpufreq_update_util(rq, 0);
 }
 
 static void
@@ -1014,11 +1015,14 @@ enqueue_top_rt_rq(struct rt_rq *rt_rq)
 
         if (rt_rq->rt_queued)
                 return;
-        if (rt_rq_throttled(rt_rq) || !rt_rq->rt_nr_running)
+
+        if (rt_rq_throttled(rt_rq))
                 return;
 
-        add_nr_running(rq, rt_rq->rt_nr_running);
-        rt_rq->rt_queued = 1;
+        if (rt_rq->rt_nr_running) {
+                add_nr_running(rq, rt_rq->rt_nr_running);
+                rt_rq->rt_queued = 1;
+        }
 
         /* Kick cpufreq (see the comment in kernel/sched/sched.h). */
         cpufreq_update_util(rq, 0);
5 changes: 5 additions & 0 deletions kernel/sched/sched.h
@@ -609,6 +609,11 @@ struct rt_rq {
 #endif
 };
 
+static inline bool rt_rq_is_runnable(struct rt_rq *rt_rq)
+{
+        return rt_rq->rt_queued && rt_rq->rt_nr_running;
+}
+
 /* Deadline class' related fields in a runqueue */
 struct dl_rq {
         /* runqueue is an rbtree, ordered by deadline */