Merge branch 'test' into weekly
Seongmin Park committed Apr 14, 2013
2 parents 47ee3e6 + a515441
commit 7a5ad58
Showing 5 changed files with 30 additions and 106 deletions.
54 changes: 0 additions & 54 deletions arch/arm/kernel/smp.c
@@ -24,7 +24,6 @@
 #include <linux/percpu.h>
 #include <linux/clockchips.h>
 #include <linux/completion.h>
-#include <linux/cpufreq.h>
 
 #include <linux/atomic.h>
 #include <asm/smp.h>
@@ -717,56 +716,3 @@ int setup_profiling_timer(unsigned int multiplier)
 {
 	return -EINVAL;
 }
-
-#ifdef CONFIG_CPU_FREQ
-
-static DEFINE_PER_CPU(unsigned long, l_p_j_ref);
-static DEFINE_PER_CPU(unsigned long, l_p_j_ref_freq);
-static unsigned long global_l_p_j_ref;
-static unsigned long global_l_p_j_ref_freq;
-
-static int cpufreq_callback(struct notifier_block *nb,
-					unsigned long val, void *data)
-{
-	struct cpufreq_freqs *freq = data;
-	int cpu = freq->cpu;
-
-	if (freq->flags & CPUFREQ_CONST_LOOPS)
-		return NOTIFY_OK;
-
-	if (!per_cpu(l_p_j_ref, cpu)) {
-		per_cpu(l_p_j_ref, cpu) =
-			per_cpu(cpu_data, cpu).loops_per_jiffy;
-		per_cpu(l_p_j_ref_freq, cpu) = freq->old;
-		if (!global_l_p_j_ref) {
-			global_l_p_j_ref = loops_per_jiffy;
-			global_l_p_j_ref_freq = freq->old;
-		}
-	}
-
-	if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
-	    (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
-	    (val == CPUFREQ_RESUMECHANGE || val == CPUFREQ_SUSPENDCHANGE)) {
-		loops_per_jiffy = cpufreq_scale(global_l_p_j_ref,
-						global_l_p_j_ref_freq,
-						freq->new);
-		per_cpu(cpu_data, cpu).loops_per_jiffy =
-			cpufreq_scale(per_cpu(l_p_j_ref, cpu),
-					per_cpu(l_p_j_ref_freq, cpu),
-					freq->new);
-	}
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpufreq_notifier = {
-	.notifier_call = cpufreq_callback,
-};
-
-static int __init register_cpufreq_notifier(void)
-{
-	return cpufreq_register_notifier(&cpufreq_notifier,
-						CPUFREQ_TRANSITION_NOTIFIER);
-}
-core_initcall(register_cpufreq_notifier);
-
-#endif
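
Aside: the notifier deleted above keeps the ARM delay-loop calibration value loops_per_jiffy in step with CPU frequency changes, so udelay() stays roughly accurate after a transition. Below is a minimal standalone sketch of that scaling arithmetic, assuming made-up calibration numbers; scale_lpj is a hypothetical helper mirroring what the kernel's cpufreq_scale() computes (reference lpj * new frequency / reference frequency).

#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's cpufreq_scale(): scale a
 * reference loops_per_jiffy by new_freq / ref_freq, using a 64-bit
 * intermediate so the multiply cannot overflow.
 */
static unsigned long scale_lpj(unsigned long ref_lpj,
			       unsigned long ref_freq_khz,
			       unsigned long new_freq_khz)
{
	return (unsigned long)(((unsigned long long)ref_lpj *
				new_freq_khz) / ref_freq_khz);
}

int main(void)
{
	/* Assumed example: calibrated at 1 GHz (frequencies in kHz). */
	unsigned long ref_lpj = 4997120, ref_freq = 1000000;

	/* Halving the clock halves loops_per_jiffy, and so on. */
	printf("lpj @  500 MHz: %lu\n", scale_lpj(ref_lpj, ref_freq, 500000));
	printf("lpj @ 1200 MHz: %lu\n", scale_lpj(ref_lpj, ref_freq, 1200000));
	return 0;
}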
20 changes: 1 addition & 19 deletions include/linux/sched.h
@@ -271,28 +271,10 @@ extern void init_idle_bootup_task(struct task_struct *idle);
 
 extern int runqueue_is_locked(int cpu);
 
-#ifdef CONFIG_SMP
-extern int sched_select_non_idle_cpu(void);
-#else
-static inline int sched_select_non_idle_cpu(void)
-{
-	return smp_processor_id();
-}
-#endif
-
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ)
 extern void select_nohz_load_balancer(int stop_tick);
 extern void set_cpu_sd_state_idle(void);
-
-/*
- * In the semi idle case, use the nearest busy cpu for migrating timers
- * from an idle cpu. This is good for power-savings.
- *
- * We don't do similar optimization for completely idle system, as
- * selecting an idle cpu will add more delays to the timers than intended
- * (as that cpu's timer base may not be uptodate wrt jiffies etc).
- */
-#define get_nohz_timer_target() sched_select_non_idle_cpu()
+extern int get_nohz_timer_target(void);
 #else
 static inline void select_nohz_load_balancer(int stop_tick) { }
 static inline void set_cpu_sd_state_idle(void) { }
2 changes: 1 addition & 1 deletion kernel/hrtimer.c
@@ -160,7 +160,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 static int hrtimer_get_target(int this_cpu, int pinned)
 {
 #ifdef CONFIG_NO_HZ
-	if (!pinned && get_sysctl_timer_migration())
+	if (!pinned && get_sysctl_timer_migration() && idle_cpu(this_cpu))
 		return get_nohz_timer_target();
 #endif
 	return this_cpu;
58 changes: 27 additions & 31 deletions kernel/sched/core.c
@@ -542,6 +542,33 @@ void resched_cpu(int cpu)
 }
 
 #ifdef CONFIG_NO_HZ
+/*
+ * In the semi idle case, use the nearest busy cpu for migrating timers
+ * from an idle cpu. This is good for power-savings.
+ *
+ * We don't do similar optimization for completely idle system, as
+ * selecting an idle cpu will add more delays to the timers than intended
+ * (as that cpu's timer base may not be uptodate wrt jiffies etc).
+ */
+int get_nohz_timer_target(void)
+{
+	int cpu = smp_processor_id();
+	int i;
+	struct sched_domain *sd;
+
+	rcu_read_lock();
+	for_each_domain(cpu, sd) {
+		for_each_cpu(i, sched_domain_span(sd)) {
+			if (!idle_cpu(i)) {
+				cpu = i;
+				goto unlock;
+			}
+		}
+	}
+unlock:
+	rcu_read_unlock();
+	return cpu;
+}
 /*
  * When add_timer_on() enqueues a timer into the timer wheel of an
  * idle CPU then this timer might expire before the next timer event
@@ -613,37 +640,6 @@ void sched_avg_update(struct rq *rq)
 	}
 }
 
-/*
- * This routine returns the cpu which is non-idle. If the local CPU isn't idle
- * OR all cpus are idle, local cpu is returned back. If local cpu is idle, then
- * we must look for another CPU which isn't idle.
- */
-int sched_select_non_idle_cpu(void)
-{
-	struct sched_domain *sd;
-	int cpu = smp_processor_id();
-	int i;
-
-	/* If Current cpu isn't idle, don't migrate anything */
-	if (!idle_cpu(cpu))
-		return cpu;
-
-	rcu_read_lock();
-	for_each_domain(cpu, sd) {
-		for_each_cpu(i, sched_domain_span(sd)) {
-			if (i == cpu)
-				continue;
-			if (!idle_cpu(i)) {
-				cpu = i;
-				goto unlock;
-			}
-		}
-	}
-unlock:
-	rcu_read_unlock();
-	return cpu;
-}
-
 #else /* !CONFIG_SMP */
 void resched_task(struct task_struct *p)
 {
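
Aside: both the removed sched_select_non_idle_cpu() and the restored get_nohz_timer_target() walk the scheduler domains outward from the local CPU (smallest span first), returning the first non-idle CPU found and falling back to the local CPU when everything is idle. Below is a simplified userspace model of that search, with hypothetical data standing in for the kernel's sched_domain hierarchy.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 4

/* Assumed idle states: only CPU 2 is busy. */
static bool cpu_idle[NR_CPUS] = { true, true, false, true };

/*
 * Hypothetical domain spans for CPU 0, smallest first, standing in
 * for the kernel's for_each_domain()/sched_domain_span() walk.
 * -1 terminates a span.
 */
static const int spans[][NR_CPUS] = {
	{ 0, 1, -1, -1 },	/* e.g. a shared-cache sibling pair */
	{ 0, 1, 2, 3 },		/* the whole package */
};

static int nohz_timer_target(int cpu)
{
	for (size_t d = 0; d < sizeof(spans) / sizeof(spans[0]); d++)
		for (int s = 0; s < NR_CPUS && spans[d][s] >= 0; s++)
			if (!cpu_idle[spans[d][s]])
				return spans[d][s];	/* nearest busy CPU */
	return cpu;			/* everything idle: stay local */
}

int main(void)
{
	printf("timer target for CPU 0: %d\n", nohz_timer_target(0));	/* 2 */
	return 0;
}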
2 changes: 1 addition & 1 deletion kernel/timer.c
@@ -731,7 +731,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 	cpu = smp_processor_id();
 
 #if defined(CONFIG_NO_HZ) && defined(CONFIG_SMP)
-	if (!pinned && get_sysctl_timer_migration())
+	if (!pinned && get_sysctl_timer_migration() && idle_cpu(cpu))
 		cpu = get_nohz_timer_target();
 #endif
 	new_base = per_cpu(tvec_bases, cpu);
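
Aside: both call sites above (kernel/hrtimer.c and kernel/timer.c) now test idle_cpu() themselves before calling get_nohz_timer_target(); in the removed sched_select_non_idle_cpu() that early bail-out lived inside the callee. A minimal sketch of the caller-side gate, with should_migrate_timer as a hypothetical name for the open-coded condition:

#include <stdbool.h>

/*
 * Hypothetical helper mirroring the condition now open-coded in
 * __mod_timer() and hrtimer_get_target(): migrate a timer off the
 * local CPU only when all three conditions hold.
 */
static bool should_migrate_timer(bool pinned, bool migration_sysctl_on,
				 bool local_cpu_idle)
{
	/*
	 * A pinned timer must stay put; the timer_migration sysctl can
	 * disable the optimization; and if the local CPU is busy, its
	 * own timer base is already the cheapest place for the timer.
	 */
	return !pinned && migration_sysctl_on && local_cpu_idle;
}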
