Permalink
Browse files

nr_pull_none and nr_push_max_tries added

Count how many times a pull operation doesn't find
anyone to pull and how many times we quit trying
to find a rq to lock for a push because
DL(RT)_MAX_TRIES has been reached.
  • Loading branch information...
1 parent afcfe44 commit 6c06def8ec41a063852aa6fd43f9e746545c2136 @jlelli committed Jan 28, 2013
Showing with 16 additions and 2 deletions.
  1. +4 −0 kernel/sched/deadline.c
  2. +4 −0 kernel/sched/debug.c
  3. +4 −0 kernel/sched/rt.c
  4. +4 −2 kernel/sched/sched.h
@@ -1309,6 +1309,8 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
/* Otherwise we try again. */
double_unlock_balance(rq, later_rq);
+ if (tries == 2)
+ schedstat_inc(&rq->dl, nr_push_max_tries);
later_rq = NULL;
}
@@ -1500,6 +1502,8 @@ static int pull_dl_task(struct rq *this_rq)
out:
schedstat_add(&this_rq->dl, pull_cycles, get_cycles() - x);
schedstat_inc(&this_rq->dl, nr_pull);
+ if (ret == 0)
+ schedstat_inc(&this_rq->dl, nr_pull_none);
return ret;
}
@@ -246,6 +246,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
#ifdef CONFIG_SMP
P(nr_pushed_away);
P(nr_retry_push);
+ P(nr_push_max_tries);
P(nr_pulled_here);
P(push_cycles);
P(nr_push);
@@ -255,6 +256,7 @@ void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
P(nr_push_set);
P(pull_cycles);
P(nr_pull);
+ P(nr_pull_none);
P(pull_find_cycles);
P(nr_pull_find);
P(pull_set_cycles);
@@ -307,6 +309,7 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
#ifdef CONFIG_SMP
P(nr_pushed_away);
P(nr_retry_push);
+ P(nr_push_max_tries);
P(nr_pulled_here);
P(push_cycles);
P(nr_push);
@@ -316,6 +319,7 @@ void print_dl_rq(struct seq_file *m, int cpu, struct dl_rq *dl_rq)
P(nr_push_set);
P(pull_cycles);
P(nr_pull);
+ P(nr_pull_none);
P(pull_find_cycles);
P(nr_pull_find);
P(pull_set_cycles);
View
@@ -1627,6 +1627,8 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
/* try again */
double_unlock_balance(rq, lowest_rq);
+ if (tries == 2)
+ schedstat_inc(&rq->rt, nr_push_max_tries);
lowest_rq = NULL;
}
@@ -1835,6 +1837,8 @@ static int pull_rt_task(struct rq *this_rq)
out:
schedstat_add(&this_rq->rt, pull_cycles, get_cycles() - x);
schedstat_inc(&this_rq->rt, nr_pull);
+ if (ret == 0)
+ schedstat_inc(&this_rq->rt, nr_pull_none);
return ret;
}
@@ -383,6 +383,7 @@ struct rt_rq {
raw_spinlock_t rt_runtime_lock;
#ifdef CONFIG_SCHEDSTATS
unsigned long nr_retry_push;
+ unsigned long nr_push_max_tries;
unsigned long nr_pushed_away;
unsigned long nr_pulled_here;
@@ -391,7 +392,7 @@ struct rt_rq {
u64 push_cycles, pull_cycles, push_find_cycles, push_set_cycles,
pull_find_cycles, pull_set_cycles;
unsigned long nr_push, nr_pull, nr_push_find, nr_push_set,
- nr_pull_find, nr_pull_set;
+ nr_pull_find, nr_pull_set, nr_pull_none;
#endif
#ifdef CONFIG_RT_GROUP_SCHED
@@ -414,6 +415,7 @@ struct dl_rq {
u64 exec_clock;
#ifdef CONFIG_SCHEDSTATS
unsigned long nr_retry_push;
+ unsigned long nr_push_max_tries;
unsigned long nr_pushed_away;
unsigned long nr_pulled_here;
@@ -422,7 +424,7 @@ struct dl_rq {
u64 push_cycles, pull_cycles, push_find_cycles, push_set_cycles,
pull_find_cycles, pull_set_cycles;
unsigned long nr_push, nr_pull, nr_push_find, nr_push_set,
- nr_pull_find, nr_pull_set;
+ nr_pull_find, nr_pull_set, nr_pull_none;
#endif
#ifdef CONFIG_SMP

0 comments on commit 6c06def

Please sign in to comment.