Skip to content

Commit

Permalink
scheduler: Re-compute time-average nr_running on read
Browse files Browse the repository at this point in the history
Re-compute the time-averaged nr_running when it is read. This
prevents reading a stale average value if there were no run-queue
changes for a long time. The new average value is returned to the
reader, but not stored back, to avoid concurrent writes. Lightweight
sequence-counter synchronization is used to ensure data consistency
when re-computing the average.

Change-Id: I8e4ea1b28ea00b3ddaf6ef7cdcd27866f87d360b
Signed-off-by: Alex Frid <afrid@nvidia.com>
(cherry picked from commit 527a759d9b40bf57958eb002edd2bb82014dab99)
Reviewed-on: http://git-master/r/111637
Reviewed-by: Sai Gurrappadi <sgurrappadi@nvidia.com>
Tested-by: Sai Gurrappadi <sgurrappadi@nvidia.com>
Reviewed-by: Automatic_Commit_Validation_User
Reviewed-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
Reviewed-by: Yu-Huan Hsu <yhsu@nvidia.com>

forward ported to Linux 3.4 for use on Mako

Signed-off-by: faux123 <reioux@gmail.com>
Signed-off-by: Seongmin Park <r_data@naver.com>
  • Loading branch information
faux123 authored and Seongmin Park committed Mar 29, 2013
1 parent a17e04f commit d732a3d
Show file tree
Hide file tree
Showing 2 changed files with 35 additions and 9 deletions.
21 changes: 19 additions & 2 deletions kernel/sched/core.c
Expand Up @@ -2153,9 +2153,26 @@ unsigned long nr_iowait(void)
/*
 * avg_nr_running - system-wide time-averaged run-queue depth.
 *
 * Sums the per-cpu time-average of nr_running over all online CPUs.
 * Each per-cpu average is freshly re-computed on read (rather than
 * just loading rq->ave_nr_running) so the value does not go stale on
 * an idle run-queue; the result is NOT written back, readers stay
 * read-only with respect to the rq.
 */
unsigned long avg_nr_running(void)
{
	unsigned long i, sum = 0;
	unsigned int seqcnt, ave_nr_running;

	for_each_online_cpu(i) {
		struct rq *q = cpu_rq(i);

		/*
		 * Update average to avoid reading stalled value if there were
		 * no run-queue changes for a long time. On the other hand if
		 * the changes are happening right now, just read current value
		 * directly: a writer raced with us, so the stored
		 * rq->ave_nr_running is already fresh. The second
		 * read_seqcount_begin() waits out an in-flight writer
		 * before we load it.
		 */
		seqcnt = read_seqcount_begin(&q->ave_seqcnt);
		ave_nr_running = do_avg_nr_running(q);
		if (read_seqcount_retry(&q->ave_seqcnt, seqcnt)) {
			read_seqcount_begin(&q->ave_seqcnt);
			ave_nr_running = q->ave_nr_running;
		}

		sum += ave_nr_running;
	}

	return sum;
}
Expand Down
23 changes: 16 additions & 7 deletions kernel/sched/sched.h
Expand Up @@ -366,6 +366,7 @@ struct rq {
/* time-based average load */
u64 nr_last_stamp;
unsigned int ave_nr_running;
seqcount_t ave_seqcnt;

/* capture load from *all* tasks on this cpu: */
struct load_weight load;
Expand Down Expand Up @@ -925,31 +926,39 @@ static inline void cpuacct_charge(struct task_struct *tsk, u64 cputime) {}
/* Averaging window, 2^NR_AVE_PERIOD_EXP clock_task units; divide by shift. */
#define NR_AVE_PERIOD (1 << NR_AVE_PERIOD_EXP)
#define NR_AVE_DIV_PERIOD(x) ((x) >> NR_AVE_PERIOD_EXP)

static inline void do_avg_nr_running(struct rq *rq)
static inline unsigned int do_avg_nr_running(struct rq *rq)
{
s64 nr, deltax;
unsigned int ave_nr_running = rq->ave_nr_running;

deltax = rq->clock_task - rq->nr_last_stamp;
rq->nr_last_stamp = rq->clock_task;
nr = NR_AVE_SCALE(rq->nr_running);

if (deltax > NR_AVE_PERIOD)
rq->ave_nr_running = nr;
ave_nr_running = nr;
else
rq->ave_nr_running +=
NR_AVE_DIV_PERIOD(deltax * (nr - rq->ave_nr_running));
ave_nr_running +=
NR_AVE_DIV_PERIOD(deltax * (nr - ave_nr_running));

return ave_nr_running;
}

/*
 * inc_nr_running - account a task entering the run-queue.
 *
 * Folds the elapsed interval into ave_nr_running, stamps the update
 * time, and bumps nr_running. The whole update is wrapped in an
 * ave_seqcnt write section so lockless readers (avg_nr_running) can
 * detect a concurrent update and fall back to the stored average.
 */
static inline void inc_nr_running(struct rq *rq)
{
	write_seqcount_begin(&rq->ave_seqcnt);
	rq->ave_nr_running = do_avg_nr_running(rq);
	rq->nr_last_stamp = rq->clock_task;
	rq->nr_running++;
	write_seqcount_end(&rq->ave_seqcnt);
}

/*
 * dec_nr_running - account a task leaving the run-queue.
 *
 * Mirror of inc_nr_running(): update the average and the timestamp,
 * then decrement nr_running, all inside the ave_seqcnt write section
 * for consistency with lockless readers.
 */
static inline void dec_nr_running(struct rq *rq)
{
	write_seqcount_begin(&rq->ave_seqcnt);
	rq->ave_nr_running = do_avg_nr_running(rq);
	rq->nr_last_stamp = rq->clock_task;
	rq->nr_running--;
	write_seqcount_end(&rq->ave_seqcnt);
}

extern void update_rq_clock(struct rq *rq);
Expand Down

0 comments on commit d732a3d

Please sign in to comment.