Permalink
Browse files

cpufreq: interactive: handle speed up and down in the realtime task

It is not useful to have a separate, non-realtime workqueue for speed-down
events, and handling both paths in one realtime task avoids priority
inversion for speed-up events.

Change-Id: Iddcd05545245c847aa1bbe0b8790092914c813d2
Signed-off-by: Todd Poynor <toddpoynor@google.com>

Conflicts:

	drivers/cpufreq/cpufreq_interactive.c
  • Loading branch information...
1 parent 5e2d035 commit 4b88bd7891124e85eab88fa512f1183550799656 @toddpoynor toddpoynor committed on Jul 17, 2012
Showing with 45 additions and 112 deletions.
  1. +44 −105 drivers/cpufreq/cpufreq_interactive.c
  2. +1 −7 include/trace/events/cpufreq_interactive.h
@@ -56,15 +56,10 @@ struct cpufreq_interactive_cpuinfo {
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
-/* Workqueues handle frequency scaling */
-static struct task_struct *up_task;
-static struct workqueue_struct *down_wq;
-static struct work_struct freq_scale_down_work;
-static cpumask_t up_cpumask;
-static spinlock_t up_cpumask_lock;
-static cpumask_t down_cpumask;
-static spinlock_t down_cpumask_lock;
-static struct mutex set_speed_lock;
+/* realtime thread handles frequency scaling */
+static struct task_struct *speedchange_task;
+static cpumask_t speedchange_cpumask;
+static spinlock_t speedchange_cpumask_lock;
/* Hi speed to bump to from lo speed when load burst (default max) */
static u64 hispeed_freq;
@@ -103,7 +98,7 @@ struct cpufreq_interactive_inputopen {
struct work_struct inputopen_work;
};
-static struct cpufreq_interactive_inputopen inputopen;
+static struct workqueue_struct *inputopen_wq;
/*
* Non-zero means longer-term speed boost active.
@@ -259,19 +254,11 @@ static void cpufreq_interactive_timer(unsigned long data)
pcpu->target_set_time_in_idle = now_idle;
pcpu->target_set_time = pcpu->timer_run_time;
- if (new_freq < pcpu->target_freq) {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&down_cpumask_lock, flags);
- cpumask_set_cpu(data, &down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
- queue_work(down_wq, &freq_scale_down_work);
- } else {
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&up_cpumask_lock, flags);
- cpumask_set_cpu(data, &up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
- wake_up_process(up_task);
- }
+ pcpu->target_freq = new_freq;
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
+ cpumask_set_cpu(data, &speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
+ wake_up_process(speedchange_task);
rearm_if_notmax:
/*
@@ -394,7 +381,7 @@ static void cpufreq_interactive_idle_end(void)
}
-static int cpufreq_interactive_up_task(void *data)
+static int cpufreq_interactive_speedchange_task(void *data)
{
unsigned int cpu;
cpumask_t tmp_mask;
@@ -403,22 +390,23 @@ static int cpufreq_interactive_up_task(void *data)
while (1) {
set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- if (cpumask_empty(&up_cpumask)) {
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ if (cpumask_empty(&speedchange_cpumask)) {
+ spin_unlock_irqrestore(&speedchange_cpumask_lock,
+ flags);
schedule();
if (kthread_should_stop())
break;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
}
set_current_state(TASK_RUNNING);
- tmp_mask = up_cpumask;
- cpumask_clear(&up_cpumask);
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ tmp_mask = speedchange_cpumask;
+ cpumask_clear(&speedchange_cpumask);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
for_each_cpu(cpu, &tmp_mask) {
unsigned int j;
@@ -430,8 +418,6 @@ static int cpufreq_interactive_up_task(void *data)
if (!pcpu->governor_enabled)
continue;
- mutex_lock(&set_speed_lock);
-
for_each_cpu(j, pcpu->policy->cpus) {
struct cpufreq_interactive_cpuinfo *pjcpu =
&per_cpu(cpuinfo, j);
@@ -444,72 +430,30 @@ static int cpufreq_interactive_up_task(void *data)
__cpufreq_driver_target(pcpu->policy,
max_freq,
CPUFREQ_RELATION_H);
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
+ trace_cpufreq_interactive_setspeed(cpu,
+ pcpu->target_freq,
pcpu->policy->cur);
}
}
return 0;
}
-static void cpufreq_interactive_freq_down(struct work_struct *work)
-{
- unsigned int cpu;
- cpumask_t tmp_mask;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
-
- spin_lock_irqsave(&down_cpumask_lock, flags);
- tmp_mask = down_cpumask;
- cpumask_clear(&down_cpumask);
- spin_unlock_irqrestore(&down_cpumask_lock, flags);
-
- for_each_cpu(cpu, &tmp_mask) {
- unsigned int j;
- unsigned int max_freq = 0;
-
- pcpu = &per_cpu(cpuinfo, cpu);
- smp_rmb();
-
- if (!pcpu->governor_enabled)
- continue;
-
- mutex_lock(&set_speed_lock);
-
- for_each_cpu(j, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, j);
-
- if (pjcpu->target_freq > max_freq)
- max_freq = pjcpu->target_freq;
- }
-
- if (max_freq != pcpu->policy->cur)
- __cpufreq_driver_target(pcpu->policy, max_freq,
- CPUFREQ_RELATION_H);
-
- mutex_unlock(&set_speed_lock);
- trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
- pcpu->policy->cur);
- }
-}
-
static void cpufreq_interactive_boost(void)
{
int i;
int anyboost = 0;
unsigned long flags;
struct cpufreq_interactive_cpuinfo *pcpu;
- spin_lock_irqsave(&up_cpumask_lock, flags);
+ spin_lock_irqsave(&speedchange_cpumask_lock, flags);
for_each_online_cpu(i) {
pcpu = &per_cpu(cpuinfo, i);
if (pcpu->target_freq < hispeed_freq) {
pcpu->target_freq = hispeed_freq;
- cpumask_set_cpu(i, &up_cpumask);
+ cpumask_set_cpu(i, &speedchange_cpumask);
pcpu->target_set_time_in_idle =
get_cpu_idle_time_us(i, &pcpu->target_set_time);
pcpu->hispeed_validate_time = pcpu->target_set_time;
@@ -525,10 +469,10 @@ static void cpufreq_interactive_boost(void)
pcpu->floor_validate_time = ktime_to_us(ktime_get());
}
- spin_unlock_irqrestore(&up_cpumask_lock, flags);
+ spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
if (anyboost)
- wake_up_process(up_task);
+ wake_up_process(speedchange_task);
}
/*
@@ -580,7 +524,7 @@ static int cpufreq_interactive_input_connect(struct input_handler *handler,
goto err;
inputopen.handle = handle;
- queue_work(down_wq, &inputopen.inputopen_work);
+ queue_work(inputopen_wq, &inputopen.inputopen_work);
return 0;
err:
kfree(handle);
@@ -911,7 +855,7 @@ static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
pcpu->idle_exit_time = 0;
}
- flush_work(&freq_scale_down_work);
+ flush_work(&inputopen.inputopen_work);
if (atomic_dec_return(&active_count) > 0)
return 0;
@@ -953,35 +897,30 @@ static int __init cpufreq_interactive_init(void)
pcpu->cpu_timer.data = i;
}
- spin_lock_init(&up_cpumask_lock);
- spin_lock_init(&down_cpumask_lock);
- mutex_init(&set_speed_lock);
-
- up_task = kthread_create(cpufreq_interactive_up_task, NULL,
- "kinteractiveup");
- if (IS_ERR(up_task))
- return PTR_ERR(up_task);
+ spin_lock_init(&speedchange_cpumask_lock);
+ speedchange_task =
+ kthread_create(cpufreq_interactive_speedchange_task, NULL,
+ "cfinteractive");
+ if (IS_ERR(speedchange_task))
+ return PTR_ERR(speedchange_task);
- sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
- get_task_struct(up_task);
+ sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
+ get_task_struct(speedchange_task);
- /* No rescuer thread, bind to CPU queuing the work for possibly
- warm cache (probably doesn't matter much). */
- down_wq = create_workqueue("knteractive_down");
+ inputopen_wq = create_workqueue("cfinteractive");
- if (!down_wq)
- goto err_freeuptask;
+ if (!inputopen_wq)
+ goto err_freetask;
- INIT_WORK(&freq_scale_down_work, cpufreq_interactive_freq_down);
INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
/* NB: wake up so the thread does not look hung to the freezer */
- wake_up_process(up_task);
+ wake_up_process(speedchange_task);
return cpufreq_register_governor(&cpufreq_gov_interactive);
-err_freeuptask:
- put_task_struct(up_task);
+err_freetask:
+ put_task_struct(speedchange_task);
return -ENOMEM;
}
@@ -994,9 +933,9 @@ module_init(cpufreq_interactive_init);
static void __exit cpufreq_interactive_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_interactive);
- kthread_stop(up_task);
- put_task_struct(up_task);
- destroy_workqueue(down_wq);
+ kthread_stop(speedchange_task);
+ put_task_struct(speedchange_task);
+ destroy_workqueue(inputopen_wq);
}
module_exit(cpufreq_interactive_exit);
@@ -28,13 +28,7 @@ DECLARE_EVENT_CLASS(set,
__entry->actualfreq)
);
-DEFINE_EVENT(set, cpufreq_interactive_up,
- TP_PROTO(u32 cpu_id, unsigned long targfreq,
- unsigned long actualfreq),
- TP_ARGS(cpu_id, targfreq, actualfreq)
-);
-
-DEFINE_EVENT(set, cpufreq_interactive_down,
+DEFINE_EVENT(set, cpufreq_interactive_setspeed,
TP_PROTO(u32 cpu_id, unsigned long targfreq,
unsigned long actualfreq),
TP_ARGS(cpu_id, targfreq, actualfreq)

0 comments on commit 4b88bd7

Please sign in to comment.