add cpufreq governors

commit 8b81de9ed1b0dbab0ec5db99035d399f09c10ed0 (1 parent: 3376972)
omegamoon authored
60 drivers/cpufreq/Kconfig
@@ -109,6 +109,34 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
+config CPU_FREQ_DEFAULT_GOV_SMARTASS2
+ bool "smartass2"
+ select CPU_FREQ_GOV_SMARTASS2
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'smartassV2' as default.
+
+config CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+ bool "interactivex"
+ select CPU_FREQ_GOV_INTERACTIVEX
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'interactivex' as default.
+
+config CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN
+ bool "savagedzen"
+ select CPU_FREQ_GOV_SAVAGEDZEN
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'savagedzen' as default.
+
+config CPU_FREQ_DEFAULT_GOV_LULZACTIVE
+ bool "lulzactive"
+ select CPU_FREQ_GOV_LULZACTIVE
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'lulzactive' as default.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -206,6 +234,38 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
+config CPU_FREQ_GOV_SMARTASS2
+ tristate "'smartassV2' cpufreq governor"
+ depends on CPU_FREQ
+ help
+ 'smartassV2' - a "smart" governor
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_INTERACTIVEX
+ tristate "'interactiveX' cpufreq policy governor"
+ depends on CPU_FREQ
+ help
+ 'InteractiveX' - Modified version of interactive with sleep+wake code.
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_SAVAGEDZEN
+ tristate "'savagedzen' cpufreq governor"
+ depends on CPU_FREQ
+ help
+	  'Savaged-Zen' - a "smartass"-based governor
+
+ If in doubt, say N.
+
+config CPU_FREQ_GOV_LULZACTIVE
+ tristate "'lulzactive' cpufreq governor"
+ depends on CPU_FREQ
+ help
+ 'lulzactive' - a new interactive governor by Tegrak!
+
+ If in doubt, say N.
+
menu "x86 CPU frequency scaling drivers"
depends on X86
source "drivers/cpufreq/Kconfig.x86"
4 drivers/cpufreq/Makefile
@@ -10,6 +10,10 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
+obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o
+obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVEX) += cpufreq_interactivex.o
+obj-$(CONFIG_CPU_FREQ_GOV_SAVAGEDZEN) += cpufreq_savagedzen.o
+obj-$(CONFIG_CPU_FREQ_GOV_LULZACTIVE) += cpufreq_lulzactive.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
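
Since the governor options are tristate, each obj-$(CONFIG_...) line builds its file into the kernel when the option is y, or as a loadable module when it is m (e.g. CONFIG_CPU_FREQ_GOV_INTERACTIVEX=m yields cpufreq_interactivex.ko).
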
378 drivers/cpufreq/cpufreq_interactivex.c
@@ -0,0 +1,378 @@
+/*
+ * drivers/cpufreq/cpufreq_interactivex.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com) - modified for suspend/wake by imoseyon
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/earlysuspend.h>
+
+#include <asm/cputime.h>
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+static DEFINE_PER_CPU(struct timer_list, cpu_timer);
+
+static DEFINE_PER_CPU(u64, time_in_idle);
+static DEFINE_PER_CPU(u64, idle_exit_time);
+
+static struct cpufreq_policy *policy;
+static unsigned int target_freq;
+
+/* Workqueues handle frequency scaling */
+static struct workqueue_struct *up_wq;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_work;
+
+static u64 freq_change_time;
+static u64 freq_change_time_in_idle;
+
+static cpumask_t work_cpumask;
+
+static unsigned int suspended = 0;
+static unsigned int enabled = 0;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down;
+ * the default is 50 ms (the value below is in microseconds).
+ */
+#define DEFAULT_MIN_SAMPLE_TIME 50000
+static unsigned long min_sample_time;
+
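+/* Frequencies are in kHz (cpufreq convention): 604800 kHz = 604.8 MHz, a device-specific value. */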
+#define FREQ_THRESHOLD 604800
+#define RESUME_SPEED 604800
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+static
+#endif
+struct cpufreq_governor cpufreq_gov_interactivex = {
+ .name = "InteractiveX",
+ .governor = cpufreq_governor_interactivex,
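+	/* in ns: cpufreq will not pick this governor for drivers with slower transitions (10 ms limit) */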
+ .max_transition_latency = 10000000,
+ .owner = THIS_MODULE,
+};
+
+static void cpufreq_interactivex_timer(unsigned long data)
+{
+ u64 delta_idle;
+ u64 update_time;
+ u64 *cpu_time_in_idle;
+ u64 *cpu_idle_exit_time;
+ struct timer_list *t;
+
+ u64 now_idle = get_cpu_idle_time_us(data,
+ &update_time);
+
+ cpu_time_in_idle = &per_cpu(time_in_idle, data);
+ cpu_idle_exit_time = &per_cpu(idle_exit_time, data);
+
+ if (update_time == *cpu_idle_exit_time)
+ return;
+
+ delta_idle = cputime64_sub(now_idle, *cpu_time_in_idle);
+
+ /* Scale up if there were no idle cycles since coming out of idle */
+ if (delta_idle == 0) {
+ if (policy->cur == policy->max)
+ return;
+
+ if (nr_running() < 1)
+ return;
+
+ target_freq = policy->max;
+
+ cpumask_set_cpu(data, &work_cpumask);
+ queue_work(up_wq, &freq_scale_work);
+ return;
+ }
+
+ /*
+	 * There is a window where the cpu utilization can go from low to high
+	 * between timer expirations: delta_idle will be > 0 while the cpu is
+	 * 100% busy, preventing idle from running and this timer from firing.
+	 * So set up another timer to check cpu utilization. Do not set up the
+	 * timer if there is no scheduled work.
+ */
+ t = &per_cpu(cpu_timer, data);
+ if (!timer_pending(t) && nr_running() > 0) {
+ *cpu_time_in_idle = get_cpu_idle_time_us(
+ data, cpu_idle_exit_time);
+ mod_timer(t, jiffies + 2);
+ }
+
+ if (policy->cur == policy->min)
+ return;
+
+ /*
+ * Do not scale down unless we have been at this frequency for the
+ * minimum sample time.
+ */
+ if (cputime64_sub(update_time, freq_change_time) < min_sample_time)
+ return;
+
+ target_freq = policy->min;
+ cpumask_set_cpu(data, &work_cpumask);
+ queue_work(down_wq, &freq_scale_work);
+}
+
+static void cpufreq_idle(void)
+{
+ struct timer_list *t;
+ u64 *cpu_time_in_idle;
+ u64 *cpu_idle_exit_time;
+
+ pm_idle_old();
+
+ if (!cpumask_test_cpu(smp_processor_id(), policy->cpus))
+ return;
+
+	/* Timer to fire in 1-2 ticks, jiffy aligned. */
+ t = &per_cpu(cpu_timer, smp_processor_id());
+ cpu_idle_exit_time = &per_cpu(idle_exit_time, smp_processor_id());
+ cpu_time_in_idle = &per_cpu(time_in_idle, smp_processor_id());
+
+ if (timer_pending(t) == 0) {
+ *cpu_time_in_idle = get_cpu_idle_time_us(
+ smp_processor_id(), cpu_idle_exit_time);
+ mod_timer(t, jiffies + 2);
+ }
+}
+
+/*
+ * Choose the cpu frequency based on the load. For now choose the minimum
+ * frequency that will satisfy the load, which is not always the lowest power.
+ */
+static unsigned int cpufreq_interactivex_calc_freq(unsigned int cpu)
+{
+ unsigned int delta_time;
+ unsigned int idle_time;
+ unsigned int cpu_load;
+ unsigned int newfreq;
+ u64 current_wall_time;
+	u64 current_idle_time;
+
+ current_idle_time = get_cpu_idle_time_us(cpu, &current_wall_time);
+
+ idle_time = (unsigned int) current_idle_time - freq_change_time_in_idle;
+ delta_time = (unsigned int) current_wall_time - freq_change_time;
+
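+	/* cpu_load: busy percentage of wall time since the last frequency change */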
+ cpu_load = 100 * (delta_time - idle_time) / delta_time;
+
+ if (cpu_load > 98) newfreq = policy->max;
+ else newfreq = policy->cur * cpu_load / 100;
+
+ return newfreq;
+}
+
+
+/* We use the same work function to scale up and down */
+static void cpufreq_interactivex_freq_change_time_work(struct work_struct *work)
+{
+ unsigned int cpu;
+ unsigned int newtarget;
+ cpumask_t tmp_mask = work_cpumask;
+ newtarget = FREQ_THRESHOLD;
+
+ for_each_cpu(cpu, tmp_mask) {
+ if (!suspended) {
+ if (target_freq == policy->max) {
+ if (nr_running() == 1) {
+ cpumask_clear_cpu(cpu, &work_cpumask);
+ return;
+ }
+// __cpufreq_driver_target(policy, target_freq, CPUFREQ_RELATION_H);
+ __cpufreq_driver_target(policy, newtarget, CPUFREQ_RELATION_H);
+ } else {
+ target_freq = cpufreq_interactivex_calc_freq(cpu);
+ __cpufreq_driver_target(policy, target_freq,
+ CPUFREQ_RELATION_L);
+ }
+ }
+ freq_change_time_in_idle = get_cpu_idle_time_us(cpu, &freq_change_time);
+ cpumask_clear_cpu(cpu, &work_cpumask);
+ }
+
+}
+
+static ssize_t show_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_sample_time);
+}
+
+static ssize_t store_min_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ return strict_strtoul(buf, 0, &min_sample_time);
+}
+
+static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
+ show_min_sample_time, store_min_sample_time);
+
+static struct attribute *interactivex_attributes[] = {
+ &min_sample_time_attr.attr,
+ NULL,
+};
+
+static struct attribute_group interactivex_attr_group = {
+ .attrs = interactivex_attributes,
+ .name = "InteractiveX",
+};
+
+static void interactivex_suspend(int suspend)
+{
+ unsigned int max_speed;
+
+ max_speed = RESUME_SPEED;
+
+ if (!enabled) return;
+ if (!suspend) { // resume at max speed:
+ suspended = 0;
+ __cpufreq_driver_target(policy, max_speed, CPUFREQ_RELATION_L);
+ pr_info("[imoseyon] interactiveX awake at %d\n", policy->cur);
+ } else {
+ suspended = 1;
+ __cpufreq_driver_target(policy, policy->min, CPUFREQ_RELATION_L);
+ pr_info("[imoseyon] interactiveX suspended at %d\n", policy->cur);
+ }
+}
+
+static void interactivex_early_suspend(struct early_suspend *handler) {
+ interactivex_suspend(1);
+}
+
+static void interactivex_late_resume(struct early_suspend *handler) {
+ interactivex_suspend(0);
+}
+
+static struct early_suspend interactivex_power_suspend = {
+ .suspend = interactivex_early_suspend,
+ .resume = interactivex_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static int cpufreq_governor_interactivex(struct cpufreq_policy *new_policy,
+ unsigned int event)
+{
+ int rc;
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (!cpu_online(new_policy->cpu))
+ return -EINVAL;
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (atomic_inc_return(&active_count) > 1)
+ return 0;
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &interactivex_attr_group);
+ if (rc)
+ return rc;
+
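+		/* Hook the idle loop: each idle entry/exit drives the per-cpu sampling timer. */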
+ pm_idle_old = pm_idle;
+ pm_idle = cpufreq_idle;
+ policy = new_policy;
+ enabled = 1;
+ register_early_suspend(&interactivex_power_suspend);
+ pr_info("[imoseyon] interactiveX active\n");
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ if (atomic_dec_return(&active_count) > 1)
+ return 0;
+
+ sysfs_remove_group(cpufreq_global_kobject,
+ &interactivex_attr_group);
+
+ pm_idle = pm_idle_old;
+ del_timer(&per_cpu(cpu_timer, new_policy->cpu));
+ enabled = 0;
+ unregister_early_suspend(&interactivex_power_suspend);
+ pr_info("[imoseyon] interactiveX inactive\n");
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (new_policy->max < new_policy->cur)
+ __cpufreq_driver_target(new_policy,
+ new_policy->max, CPUFREQ_RELATION_H);
+ else if (new_policy->min > new_policy->cur)
+ __cpufreq_driver_target(new_policy,
+ new_policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static int __init cpufreq_interactivex_init(void)
+{
+ unsigned int i;
+ struct timer_list *t;
+ min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
+
+	/* Initialize per-cpu timers */
+ for_each_possible_cpu(i) {
+ t = &per_cpu(cpu_timer, i);
+ init_timer_deferrable(t);
+ t->function = cpufreq_interactivex_timer;
+ t->data = i;
+ }
+
+ /* Scale up is high priority */
+ up_wq = create_workqueue("kinteractive_up");
+	down_wq = create_workqueue("kinteractive_down");
+
+ INIT_WORK(&freq_scale_work, cpufreq_interactivex_freq_change_time_work);
+
+ pr_info("[imoseyon] interactiveX enter\n");
+ return cpufreq_register_governor(&cpufreq_gov_interactivex);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX
+fs_initcall(cpufreq_interactivex_init);
+#else
+module_init(cpufreq_interactivex_init);
+#endif
+
+static void __exit cpufreq_interactivex_exit(void)
+{
+ pr_info("[imoseyon] interactiveX exit\n");
+ cpufreq_unregister_governor(&cpufreq_gov_interactivex);
+ destroy_workqueue(up_wq);
+ destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_interactivex_exit);
+
+MODULE_AUTHOR("Mike Chan <mike@android.com>");
+MODULE_DESCRIPTION("'cpufreq_interactiveX' - A cpufreq governor for "
+	"latency-sensitive workloads");
+MODULE_LICENSE("GPL");
+
1,174 drivers/cpufreq/cpufreq_lulzactive.c
@@ -0,0 +1,1174 @@
+/*
+ * drivers/cpufreq/cpufreq_lulzactive.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Mike Chan (mike@android.com)
+ * Edited: Tegrak (luciferanna@gmail.com)
+ *
+ * Driver values in /sys/devices/system/cpu/cpufreq/lulzactive
+ *
+ */
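+
+/*
+ * The writable tunables below (inc_cpu_load, up_sample_time, down_sample_time,
+ * pump_up_step, pump_down_step, screen_off_min_step, debug_mode) show up as
+ * sysfs attributes there, e.g.:
+ *   echo 70 > /sys/devices/system/cpu/cpufreq/lulzactive/inc_cpu_load
+ */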
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/kthread.h>
+#include <linux/earlysuspend.h>
+#include <asm/cputime.h>
+#include <linux/suspend.h>
+
+#define LULZACTIVE_VERSION (2)
+#define LULZACTIVE_AUTHOR "tegrak"
+
+// if you change some code for optimization, just write your name here.
+#define LULZACTIVE_TUNER ""
+
+#define LOGI(fmt...) printk(KERN_INFO "[lulzactive] " fmt)
+#define LOGW(fmt...) printk(KERN_WARNING "[lulzactive] " fmt)
+#define LOGD(fmt...) printk(KERN_DEBUG "[lulzactive] " fmt)
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct cpufreq_lulzactive_cpuinfo {
+ struct timer_list cpu_timer;
+ int timer_idlecancel;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ u64 timer_run_time;
+ int idling;
+ u64 freq_change_time;
+ u64 freq_change_time_in_idle;
+ struct cpufreq_policy *policy;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int freq_table_size;
+ unsigned int target_freq;
+ int governor_enabled;
+};
+
+static DEFINE_PER_CPU(struct cpufreq_lulzactive_cpuinfo, cpuinfo);
+
+/* Workqueues handle frequency scaling */
+static struct task_struct *up_task;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_down_work;
+static cpumask_t up_cpumask;
+static spinlock_t up_cpumask_lock;
+static cpumask_t down_cpumask;
+static spinlock_t down_cpumask_lock;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can step up.
+ */
+#define DEFAULT_UP_SAMPLE_TIME 24000
+static unsigned long up_sample_time;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can step down.
+ */
+#define DEFAULT_DOWN_SAMPLE_TIME 49000
+static unsigned long down_sample_time;
+
+/*
+ * DEBUG print flags
+ */
+static unsigned long debug_mode;
+enum {
+ LULZACTIVE_DEBUG_EARLY_SUSPEND=1,
+ LULZACTIVE_DEBUG_START_STOP=2,
+ LULZACTIVE_DEBUG_LOAD=4,
+ LULZACTIVE_DEBUG_SUSPEND=8,
+};
+#define DEFAULT_DEBUG_MODE (LULZACTIVE_DEBUG_EARLY_SUSPEND | LULZACTIVE_DEBUG_START_STOP | LULZACTIVE_DEBUG_SUSPEND)
+
+/*
+ * CPU freq will be increased if measured load > inc_cpu_load;
+ */
+#define DEFAULT_INC_CPU_LOAD 60
+static unsigned long inc_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < dec_cpu_load;
+ * not implemented yet.
+ */
+#define DEFAULT_DEC_CPU_LOAD 30
+static unsigned long dec_cpu_load;
+
+/*
+ * Frequency-table steps to jump when increasing speed.
+ * Zero disables stepping and always jumps straight to the max frequency.
+ */
+#define DEFAULT_PUMP_UP_STEP 1
+static unsigned long pump_up_step;
+
+/*
+ * Frequency-table steps to jump when decreasing speed.
+ * Zero disables stepping and calculates the frequency from the load heuristic.
+ */
+#define DEFAULT_PUMP_DOWN_STEP 1
+static unsigned long pump_down_step;
+
+/*
+ * Use minimum frequency while suspended.
+ */
+static unsigned int suspending;
+static unsigned int early_suspended;
+
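+/* Sentinel default: fix_screen_off_min_step() remaps it to the next-to-last table index (a near-lowest frequency step). */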
+#define SCREEN_OFF_LOWEST_STEP (0xffffffff)
+#define DEFAULT_SCREEN_OFF_MIN_STEP (SCREEN_OFF_LOWEST_STEP)
+static unsigned long screen_off_min_step;
+
+#define DEBUG 0
+#define BUFSZ 128
+
+#if DEBUG
+#include <linux/proc_fs.h>
+
+struct dbgln {
+ int cpu;
+ unsigned long jiffy;
+ unsigned long run;
+ char buf[BUFSZ];
+};
+
+#define NDBGLNS 256
+
+static struct dbgln dbgbuf[NDBGLNS];
+static int dbgbufs;
+static int dbgbufe;
+static struct proc_dir_entry *dbg_proc;
+static spinlock_t dbgpr_lock;
+
+static u64 up_request_time;
+static unsigned int up_max_latency;
+
+static void dbgpr(char *fmt, ...)
+{
+ va_list args;
+ int n;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dbgpr_lock, flags);
+ n = dbgbufe;
+ va_start(args, fmt);
+ vsnprintf(dbgbuf[n].buf, BUFSZ, fmt, args);
+ va_end(args);
+ dbgbuf[n].cpu = smp_processor_id();
+ dbgbuf[n].run = nr_running();
+ dbgbuf[n].jiffy = jiffies;
+
+ if (++dbgbufe >= NDBGLNS)
+ dbgbufe = 0;
+
+ if (dbgbufe == dbgbufs)
+ if (++dbgbufs >= NDBGLNS)
+ dbgbufs = 0;
+
+ spin_unlock_irqrestore(&dbgpr_lock, flags);
+}
+
+static void dbgdump(void)
+{
+ int i, j;
+ unsigned long flags;
+ static struct dbgln prbuf[NDBGLNS];
+
+ spin_lock_irqsave(&dbgpr_lock, flags);
+ i = dbgbufs;
+ j = dbgbufe;
+ memcpy(prbuf, dbgbuf, sizeof(dbgbuf));
+ dbgbufs = 0;
+ dbgbufe = 0;
+ spin_unlock_irqrestore(&dbgpr_lock, flags);
+
+ while (i != j)
+ {
+ printk("%lu %d %lu %s",
+ prbuf[i].jiffy, prbuf[i].cpu, prbuf[i].run,
+ prbuf[i].buf);
+ if (++i == NDBGLNS)
+ i = 0;
+ }
+}
+
+static int dbg_proc_read(char *buffer, char **start, off_t offset,
+ int count, int *peof, void *dat)
+{
+ printk("max up_task latency=%uus\n", up_max_latency);
+ dbgdump();
+ *peof = 1;
+ return 0;
+}
+
+#else
+#define dbgpr(...) do {} while (0)
+#endif
+
+static int cpufreq_governor_lulzactive(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_lulzactive = {
+ .name = "lulzactive",
+ .governor = cpufreq_governor_lulzactive,
+ .max_transition_latency = 9000000,
+ .owner = THIS_MODULE,
+};
+
+static unsigned int get_freq_table_size(struct cpufreq_frequency_table *freq_table) {
+ unsigned int size = 0;
+ while (freq_table[++size].frequency != CPUFREQ_TABLE_END);
+ return size;
+}
+
+static inline void fix_screen_off_min_step(struct cpufreq_lulzactive_cpuinfo *pcpu) {
+ if (pcpu->freq_table_size <= 0) {
+ screen_off_min_step = 0;
+ return;
+ }
+
+ if (DEFAULT_SCREEN_OFF_MIN_STEP == screen_off_min_step)
+ screen_off_min_step = pcpu->freq_table_size - 2;
+
+ if (screen_off_min_step >= pcpu->freq_table_size)
+ screen_off_min_step = pcpu->freq_table_size - 1;
+}
+
+static inline unsigned int adjust_screen_off_freq(
+ struct cpufreq_lulzactive_cpuinfo *pcpu, unsigned int freq) {
+
+ if (early_suspended && freq > pcpu->freq_table[screen_off_min_step].frequency) {
+ freq = pcpu->freq_table[screen_off_min_step].frequency;
+ pcpu->target_freq = pcpu->policy->cur;
+
+ if (freq > pcpu->policy->max)
+ freq = pcpu->policy->max;
+ if (freq < pcpu->policy->min)
+ freq = pcpu->policy->min;
+ }
+
+ return freq;
+}
+
+static void cpufreq_lulzactive_timer(unsigned long data)
+{
+	// do not step down if up-scaling was stalled by a short sampling time (by tegrak)
+ static unsigned int stuck_on_sampling = 0;
+
+ unsigned int delta_idle;
+ unsigned int delta_time;
+ int cpu_load;
+ int load_since_change;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ struct cpufreq_lulzactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, data);
+ u64 now_idle;
+ unsigned int new_freq;
+ int index;
+ int ret;
+
+ /*
+ * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
+ * this lets idle exit know the current idle time sample has
+ * been processed, and idle exit can generate a new sample and
+ * re-arm the timer. This prevents a concurrent idle
+ * exit on that CPU from writing a new set of info at the same time
+ * the timer function runs (the timer function can't use that info
+ * until more time passes).
+ */
+ time_in_idle = pcpu->time_in_idle;
+ idle_exit_time = pcpu->idle_exit_time;
+ now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
+ smp_wmb();
+
+ /* If we raced with cancelling a timer, skip. */
+ if (!idle_exit_time) {
+ dbgpr("timer %d: no valid idle exit sample\n", (int) data);
+ goto exit;
+ }
+
+	/* leave it be while the s5pv310 controls suspending (by tegrak) */
+ //if (suspending) {
+ // goto rearm;
+ //}
+
+#if DEBUG
+ if ((int) jiffies - (int) pcpu->cpu_timer.expires >= 10)
+ dbgpr("timer %d: late by %d ticks\n",
+ (int) data, jiffies - pcpu->cpu_timer.expires);
+#endif
+
+ delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
+ delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+ idle_exit_time);
+
+ /*
+ * If timer ran less than 1ms after short-term sample started, retry.
+ */
+ if (delta_time < 1000) {
+ dbgpr("timer %d: time delta %u too short exit=%llu now=%llu\n", (int) data,
+ delta_time, idle_exit_time, pcpu->timer_run_time);
+ goto rearm;
+ }
+
+ if (delta_idle > delta_time)
+ cpu_load = 0;
+ else
+ cpu_load = 100 * (delta_time - delta_idle) / delta_time;
+
+ delta_idle = (unsigned int) cputime64_sub(now_idle,
+ pcpu->freq_change_time_in_idle);
+ delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
+ pcpu->freq_change_time);
+
+ if (delta_idle > delta_time)
+ load_since_change = 0;
+ else
+ load_since_change =
+ 100 * (delta_time - delta_idle) / delta_time;
+
+ /*
+ * Choose greater of short-term load (since last idle timer
+ * started or timer function re-armed itself) or long-term load
+ * (since last frequency change).
+ */
+ if (load_since_change > cpu_load)
+ cpu_load = load_since_change;
+
+ /*
+ * START lulzactive algorithm section
+ */
+ /*
+ if (early_suspended) {
+ new_freq = pcpu->policy->min;
+ pcpu->target_freq = pcpu->policy->cur;
+ }
+ else */if (cpu_load >= inc_cpu_load) {
+ if (pump_up_step && pcpu->policy->cur < pcpu->policy->max) {
+ ret = cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ pcpu->policy->cur, CPUFREQ_RELATION_H,
+ &index);
+ if (ret < 0) {
+ goto rearm;
+ }
+
+ // apply pump_up_step by tegrak
+ index -= pump_up_step;
+ if (index < 0)
+ index = 0;
+
+ new_freq = pcpu->freq_table[index].frequency;
+ }
+ else {
+ new_freq = pcpu->policy->max;
+ }
+ }
+ /*
+ else if (cpu_load <= dec_cpu_load) {
+ ret = cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ pcpu->policy->cur, CPUFREQ_RELATION_H,
+ &index);
+ if (ret < 0) {
+ goto rearm;
+ }
+ if (ramp_down_step) {
+ //set next low frequency of table
+ new_freq = pcpu->freq_table[index + 1].frequency;
+ }
+ else if (ramp_down_step) {
+ //new_freq = pcpu->policy->max * cpu_load / 100;
+ new_freq = pcpu->policy->min;
+ }
+ }
+ */
+ else if (stuck_on_sampling) {
+ new_freq = pcpu->policy->cur;
+ }
+ else {
+ if (pump_down_step) {
+ ret = cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ pcpu->policy->cur, CPUFREQ_RELATION_H,
+ &index);
+ if (ret < 0) {
+ goto rearm;
+ }
+
+ // apply pump_down_step by tegrak
+ index += pump_down_step;
+ if (index >= pcpu->freq_table_size) {
+ index = pcpu->freq_table_size - 1;
+ }
+
+ new_freq = (pcpu->policy->cur > pcpu->policy->min) ?
+ (pcpu->freq_table[index].frequency) :
+ (pcpu->policy->min);
+ }
+ else {
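+			/* e.g. max=1200000 kHz at 50% load targets 600000, then RELATION_H snaps to the highest table step <= the target */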
+ new_freq = pcpu->policy->max * cpu_load / 100;
+ ret = cpufreq_frequency_table_target(
+ pcpu->policy, pcpu->freq_table,
+ new_freq, CPUFREQ_RELATION_H,
+ &index);
+ if (ret < 0) {
+ goto rearm;
+ }
+ new_freq = pcpu->freq_table[index].frequency;
+ }
+ }
+
+ // adjust freq when screen off
+ new_freq = adjust_screen_off_freq(pcpu, new_freq);
+
+ if (pcpu->target_freq == new_freq)
+ {
+ dbgpr("timer %d: load=%d, already at %d\n", (int) data, cpu_load, new_freq);
+ stuck_on_sampling = 0;
+ goto rearm_if_notmax;
+ }
+
+ /*
+ * Do not scale down unless we have been at this frequency for the
+ * minimum sample time.
+ */
+ if (new_freq < pcpu->target_freq) {
+ if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
+ down_sample_time) {
+ dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+ goto rearm;
+ }
+ }
+ else {
+ if (cputime64_sub(pcpu->timer_run_time, pcpu->freq_change_time) <
+ up_sample_time) {
+ dbgpr("timer %d: load=%d cur=%d tgt=%d not yet\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+ /* don't reset timer */
+ stuck_on_sampling = 1;
+ goto rearm;
+ }
+ }
+
+ if (suspending && debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+		LOGI("suspending: cpu_load=%d%% new_freq=%u pcpu->policy->cur=%u\n",
+ cpu_load, new_freq, pcpu->policy->cur);
+ }
+ //if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) {
+ if (early_suspended && !suspending && debug_mode & LULZACTIVE_DEBUG_LOAD) {
+		LOGI("early_suspended: cpu_load=%d%% new_freq=%u pcpu->policy->cur=%u\n",
+ cpu_load, new_freq, pcpu->policy->cur);
+ //LOGI("lock @%uMHz!\n", new_freq/1000);
+ }
+ if (debug_mode & LULZACTIVE_DEBUG_LOAD && !early_suspended && !suspending) {
+ LOGI("cpu_load=%d%% new_freq=%u pcpu->target_freq=%u pcpu->policy->cur=%u\n",
+ cpu_load, new_freq, pcpu->target_freq, pcpu->policy->cur);
+ }
+
+ dbgpr("timer %d: load=%d cur=%d tgt=%d queue\n", (int) data, cpu_load, pcpu->target_freq, new_freq);
+
+ stuck_on_sampling = 0;
+
+ if (new_freq < pcpu->target_freq) {
+ pcpu->target_freq = new_freq;
+ spin_lock(&down_cpumask_lock);
+ cpumask_set_cpu(data, &down_cpumask);
+ spin_unlock(&down_cpumask_lock);
+ queue_work(down_wq, &freq_scale_down_work);
+ } else {
+ pcpu->target_freq = new_freq;
+#if DEBUG
+ up_request_time = ktime_to_us(ktime_get());
+#endif
+ spin_lock(&up_cpumask_lock);
+ cpumask_set_cpu(data, &up_cpumask);
+ spin_unlock(&up_cpumask_lock);
+ wake_up_process(up_task);
+ }
+
+rearm_if_notmax:
+ /*
+ * Already set max speed and don't see a need to change that,
+ * wait until next idle to re-evaluate, don't need timer.
+ */
+ if (pcpu->target_freq == pcpu->policy->max)
+ goto exit;
+
+rearm:
+ if (!timer_pending(&pcpu->cpu_timer)) {
+ /*
+ * If already at min: if that CPU is idle, don't set timer.
+ * Else cancel the timer if that CPU goes idle. We don't
+ * need to re-evaluate speed until the next idle exit.
+ */
+ if (pcpu->target_freq == pcpu->policy->min) {
+ smp_rmb();
+
+ if (pcpu->idling) {
+ dbgpr("timer %d: cpu idle, don't re-arm\n", (int) data);
+ goto exit;
+ }
+
+ pcpu->timer_idlecancel = 1;
+ }
+
+ pcpu->time_in_idle = get_cpu_idle_time_us(
+ data, &pcpu->idle_exit_time);
+ mod_timer(&pcpu->cpu_timer, jiffies + 2);
+ dbgpr("timer %d: set timer for %lu exit=%llu\n", (int) data, pcpu->cpu_timer.expires, pcpu->idle_exit_time);
+ }
+
+exit:
+ return;
+}
+
+static void cpufreq_lulzactive_idle(void)
+{
+ struct cpufreq_lulzactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, smp_processor_id());
+ int pending;
+
+ if (!pcpu->governor_enabled) {
+ pm_idle_old();
+ return;
+ }
+
+ pcpu->idling = 1;
+ smp_wmb();
+ pending = timer_pending(&pcpu->cpu_timer);
+
+ if (pcpu->target_freq != pcpu->policy->min) {
+#ifdef CONFIG_SMP
+ /*
+ * Entering idle while not at lowest speed. On some
+ * platforms this can hold the other CPU(s) at that speed
+ * even though the CPU is idle. Set a timer to re-evaluate
+ * speed so this idle CPU doesn't hold the other CPUs above
+ * min indefinitely. This should probably be a quirk of
+ * the CPUFreq driver.
+ */
+ if (!pending) {
+ pcpu->time_in_idle = get_cpu_idle_time_us(
+ smp_processor_id(), &pcpu->idle_exit_time);
+ pcpu->timer_idlecancel = 0;
+ mod_timer(&pcpu->cpu_timer, jiffies + 2);
+ dbgpr("idle: enter at %d, set timer for %lu exit=%llu\n",
+ pcpu->target_freq, pcpu->cpu_timer.expires,
+ pcpu->idle_exit_time);
+ }
+#endif
+ } else {
+ /*
+ * If at min speed and entering idle after load has
+ * already been evaluated, and a timer has been set just in
+ * case the CPU suddenly goes busy, cancel that timer. The
+ * CPU didn't go busy; we'll recheck things upon idle exit.
+ */
+ if (pending && pcpu->timer_idlecancel) {
+ dbgpr("idle: cancel timer for %lu\n", pcpu->cpu_timer.expires);
+ del_timer(&pcpu->cpu_timer);
+ /*
+ * Ensure last timer run time is after current idle
+ * sample start time, so next idle exit will always
+ * start a new idle sampling period.
+ */
+ pcpu->idle_exit_time = 0;
+ pcpu->timer_idlecancel = 0;
+ }
+ }
+
+ pm_idle_old();
+ pcpu->idling = 0;
+ smp_wmb();
+
+ /*
+ * Arm the timer for 1-2 ticks later if not already, and if the timer
+ * function has already processed the previous load sampling
+ * interval. (If the timer is not pending but has not processed
+ * the previous interval, it is probably racing with us on another
+ * CPU. Let it compute load based on the previous sample and then
+ * re-arm the timer for another interval when it's done, rather
+ * than updating the interval start time to be "now", which doesn't
+ * give the timer function enough time to make a decision on this
+ * run.)
+ */
+ if (timer_pending(&pcpu->cpu_timer) == 0 &&
+ pcpu->timer_run_time >= pcpu->idle_exit_time) {
+ pcpu->time_in_idle =
+ get_cpu_idle_time_us(smp_processor_id(),
+ &pcpu->idle_exit_time);
+ pcpu->timer_idlecancel = 0;
+ mod_timer(&pcpu->cpu_timer, jiffies + 2);
+ dbgpr("idle: exit, set timer for %lu exit=%llu\n", pcpu->cpu_timer.expires, pcpu->idle_exit_time);
+#if DEBUG
+ } else if (timer_pending(&pcpu->cpu_timer) == 0 &&
+ pcpu->timer_run_time < pcpu->idle_exit_time) {
+ dbgpr("idle: timer not run yet: exit=%llu tmrrun=%llu\n",
+ pcpu->idle_exit_time, pcpu->timer_run_time);
+#endif
+ }
+
+}
+
+static int cpufreq_lulzactive_up_task(void *data)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+
+#if DEBUG
+ u64 now;
+ u64 then;
+ unsigned int lat;
+#endif
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+ spin_lock(&up_cpumask_lock);
+
+ if (cpumask_empty(&up_cpumask)) {
+ spin_unlock(&up_cpumask_lock);
+ schedule();
+
+ if (kthread_should_stop())
+ break;
+
+ spin_lock(&up_cpumask_lock);
+ }
+
+ set_current_state(TASK_RUNNING);
+
+#if DEBUG
+ then = up_request_time;
+ now = ktime_to_us(ktime_get());
+
+ if (now > then) {
+ lat = ktime_to_us(ktime_get()) - then;
+
+ if (lat > up_max_latency)
+ up_max_latency = lat;
+ }
+#endif
+
+ tmp_mask = up_cpumask;
+ cpumask_clear(&up_cpumask);
+ spin_unlock(&up_cpumask_lock);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ pcpu = &per_cpu(cpuinfo, cpu);
+
+ if (nr_running() == 1) {
+ dbgpr("up %d: tgt=%d nothing else running\n", cpu,
+ pcpu->target_freq);
+ }
+
+ __cpufreq_driver_target(pcpu->policy,
+ pcpu->target_freq,
+ CPUFREQ_RELATION_H);
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,
+ &pcpu->freq_change_time);
+ dbgpr("up %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
+ }
+ }
+
+ return 0;
+}
+
+static void cpufreq_lulzactive_freq_down(struct work_struct *work)
+{
+ unsigned int cpu;
+ cpumask_t tmp_mask;
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+
+ spin_lock(&down_cpumask_lock);
+ tmp_mask = down_cpumask;
+ cpumask_clear(&down_cpumask);
+ spin_unlock(&down_cpumask_lock);
+
+ for_each_cpu(cpu, &tmp_mask) {
+ pcpu = &per_cpu(cpuinfo, cpu);
+ __cpufreq_driver_target(pcpu->policy,
+ pcpu->target_freq,
+ CPUFREQ_RELATION_H);
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,
+ &pcpu->freq_change_time);
+ dbgpr("down %d: set tgt=%d (actual=%d)\n", cpu, pcpu->target_freq, pcpu->policy->cur);
+ }
+}
+
+// inc_cpu_load
+static ssize_t show_inc_cpu_load(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", inc_cpu_load);
+}
+
+static ssize_t store_inc_cpu_load(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret;
+ ret = strict_strtoul(buf, 0, &inc_cpu_load);
+
+ if (inc_cpu_load > 100) {
+ inc_cpu_load = 100;
+ }
+ else if (inc_cpu_load < 10) {
+ inc_cpu_load = 10;
+ }
+ return ret;
+}
+
+static struct global_attr inc_cpu_load_attr = __ATTR(inc_cpu_load, 0666,
+ show_inc_cpu_load, store_inc_cpu_load);
+
+// down_sample_time
+static ssize_t show_down_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", down_sample_time);
+}
+
+static ssize_t store_down_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ return strict_strtoul(buf, 0, &down_sample_time);
+}
+
+static struct global_attr down_sample_time_attr = __ATTR(down_sample_time, 0666,
+ show_down_sample_time, store_down_sample_time);
+
+// up_sample_time
+static ssize_t show_up_sample_time(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", up_sample_time);
+}
+
+static ssize_t store_up_sample_time(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ return strict_strtoul(buf, 0, &up_sample_time);
+}
+
+static struct global_attr up_sample_time_attr = __ATTR(up_sample_time, 0666,
+ show_up_sample_time, store_up_sample_time);
+
+// debug_mode
+static ssize_t show_debug_mode(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", debug_mode);
+}
+
+static ssize_t store_debug_mode(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ return strict_strtoul(buf, 0, &debug_mode);
+}
+
+static struct global_attr debug_mode_attr = __ATTR(debug_mode, 0666,
+ show_debug_mode, store_debug_mode);
+
+// pump_up_step
+static ssize_t show_pump_up_step(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", pump_up_step);
+}
+
+static ssize_t store_pump_up_step(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ return strict_strtoul(buf, 0, &pump_up_step);
+}
+
+static struct global_attr pump_up_step_attr = __ATTR(pump_up_step, 0666,
+ show_pump_up_step, store_pump_up_step);
+
+// pump_down_step
+static ssize_t show_pump_down_step(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", pump_down_step);
+}
+
+static ssize_t store_pump_down_step(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t ret;
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+
+ ret = strict_strtoul(buf, 0, &pump_down_step);
+
+ pcpu = &per_cpu(cpuinfo, 0);
+ // fix out of bound
+ if (pcpu->freq_table_size <= pump_down_step) {
+ pump_down_step = pcpu->freq_table_size - 1;
+ }
+ return ret;
+}
+
+static struct global_attr pump_down_step_attr = __ATTR(pump_down_step, 0666,
+ show_pump_down_step, store_pump_down_step);
+
+// screen_off_min_step
+static ssize_t show_screen_off_min_step(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+
+ pcpu = &per_cpu(cpuinfo, 0);
+ fix_screen_off_min_step(pcpu);
+
+ return sprintf(buf, "%lu\n", screen_off_min_step);
+}
+
+static ssize_t store_screen_off_min_step(struct kobject *kobj,
+ struct attribute *attr, const char *buf, size_t count)
+{
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+ ssize_t ret;
+
+ ret = strict_strtoul(buf, 0, &screen_off_min_step);
+
+ pcpu = &per_cpu(cpuinfo, 0);
+ fix_screen_off_min_step(pcpu);
+
+ return ret;
+}
+
+static struct global_attr screen_off_min_step_attr = __ATTR(screen_off_min_step, 0666,
+ show_screen_off_min_step, store_screen_off_min_step);
+
+// author
+static ssize_t show_author(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", LULZACTIVE_AUTHOR);
+}
+
+static struct global_attr author_attr = __ATTR(author, 0444,
+ show_author, NULL);
+
+// tuner
+static ssize_t show_tuner(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", LULZACTIVE_TUNER);
+}
+
+static struct global_attr tuner_attr = __ATTR(tuner, 0444,
+ show_tuner, NULL);
+
+// version
+static ssize_t show_version(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", LULZACTIVE_VERSION);
+}
+
+static struct global_attr version_attr = __ATTR(version, 0444,
+ show_version, NULL);
+
+// freq_table
+static ssize_t show_freq_table(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+ char temp[64];
+ int i;
+
+ pcpu = &per_cpu(cpuinfo, 0);
+
+ for (i = 0; i < pcpu->freq_table_size; i++) {
+ sprintf(temp, "%u\n", pcpu->freq_table[i].frequency);
+ strcat(buf, temp);
+ }
+
+ return strlen(buf);
+}
+
+static struct global_attr freq_table_attr = __ATTR(freq_table, 0444,
+ show_freq_table, NULL);
+
+static struct attribute *lulzactive_attributes[] = {
+ &inc_cpu_load_attr.attr,
+ &up_sample_time_attr.attr,
+ &down_sample_time_attr.attr,
+ &pump_up_step_attr.attr,
+ &pump_down_step_attr.attr,
+ &screen_off_min_step_attr.attr,
+ &debug_mode_attr.attr,
+ &author_attr.attr,
+ &tuner_attr.attr,
+ &version_attr.attr,
+ &freq_table_attr.attr,
+ NULL,
+};
+
+static struct attribute_group lulzactive_attr_group = {
+ .attrs = lulzactive_attributes,
+ .name = "lulzactive",
+};
+
+static int cpufreq_governor_lulzactive(struct cpufreq_policy *new_policy,
+ unsigned int event)
+{
+ int rc;
+ struct cpufreq_lulzactive_cpuinfo *pcpu =
+ &per_cpu(cpuinfo, new_policy->cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if (debug_mode & LULZACTIVE_DEBUG_START_STOP) {
+ LOGI("CPUFREQ_GOV_START\n");
+ }
+ if (!cpu_online(new_policy->cpu))
+ return -EINVAL;
+
+ pcpu->policy = new_policy;
+ pcpu->freq_table = cpufreq_frequency_get_table(new_policy->cpu);
+ pcpu->target_freq = new_policy->cur;
+ pcpu->freq_change_time_in_idle =
+ get_cpu_idle_time_us(new_policy->cpu,
+ &pcpu->freq_change_time);
+ pcpu->governor_enabled = 1;
+ pcpu->freq_table_size = get_freq_table_size(pcpu->freq_table);
+
+ // fix invalid screen_off_min_step
+ fix_screen_off_min_step(pcpu);
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (atomic_inc_return(&active_count) > 1)
+ return 0;
+
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &lulzactive_attr_group);
+ if (rc)
+ return rc;
+
+ pm_idle_old = pm_idle;
+ pm_idle = cpufreq_lulzactive_idle;
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ if (debug_mode & LULZACTIVE_DEBUG_START_STOP) {
+ LOGI("CPUFREQ_GOV_STOP\n");
+ }
+ pcpu->governor_enabled = 0;
+
+ if (atomic_dec_return(&active_count) > 0)
+ return 0;
+
+ sysfs_remove_group(cpufreq_global_kobject,
+ &lulzactive_attr_group);
+
+ pm_idle = pm_idle_old;
+ del_timer(&pcpu->cpu_timer);
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ if (new_policy->max < new_policy->cur)
+ __cpufreq_driver_target(new_policy,
+ new_policy->max, CPUFREQ_RELATION_H);
+ else if (new_policy->min > new_policy->cur)
+ __cpufreq_driver_target(new_policy,
+ new_policy->min, CPUFREQ_RELATION_L);
+ break;
+ }
+ return 0;
+}
+
+static void lulzactive_early_suspend(struct early_suspend *handler) {
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+ unsigned int min_freq, max_freq;
+
+ early_suspended = 1;
+
+ if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) {
+ LOGI("%s\n", __func__);
+
+ pcpu = &per_cpu(cpuinfo, 0);
+
+ min_freq = pcpu->policy->min;
+
+ max_freq = min(pcpu->policy->max, pcpu->freq_table[screen_off_min_step].frequency);
+ max_freq = max(max_freq, min_freq);
+
+ LOGI("lock @%u~@%uMHz\n", min_freq / 1000, max_freq / 1000);
+ }
+}
+
+static void lulzactive_late_resume(struct early_suspend *handler) {
+ early_suspended = 0;
+ if (debug_mode & LULZACTIVE_DEBUG_EARLY_SUSPEND) {
+ LOGI("%s\n", __func__);
+ }
+}
+
+static struct early_suspend lulzactive_power_suspend = {
+ .suspend = lulzactive_early_suspend,
+ .resume = lulzactive_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static int lulzactive_pm_notifier_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct cpufreq_policy* policy;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ suspending = 1;
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_SUSPEND_PREPARE");
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+				LOGI("PM_SUSPEND_PREPARE using @%ukHz\n", policy->cur);
+				cpufreq_cpu_put(policy);
+			}
+ }
+ break;
+ case PM_POST_SUSPEND:
+ suspending = 0;
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_POST_SUSPEND");
+ policy = cpufreq_cpu_get(0);
+ if (policy) {
+				LOGI("PM_POST_SUSPEND using @%ukHz\n", policy->cur);
+				cpufreq_cpu_put(policy);
+			}
+ }
+ break;
+ case PM_RESTORE_PREPARE:
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_RESTORE_PREPARE");
+ }
+ break;
+ case PM_POST_RESTORE:
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_POST_RESTORE");
+ }
+ break;
+ case PM_HIBERNATION_PREPARE:
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_HIBERNATION_PREPARE");
+ }
+ break;
+ case PM_POST_HIBERNATION:
+ if (debug_mode & LULZACTIVE_DEBUG_SUSPEND) {
+ LOGI("PM_POST_HIBERNATION");
+ }
+ break;
+ }
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block lulzactive_pm_notifier = {
+ .notifier_call = lulzactive_pm_notifier_event,
+};
+
+static int __init cpufreq_lulzactive_init(void)
+{
+ unsigned int i;
+ struct cpufreq_lulzactive_cpuinfo *pcpu;
+ struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+
+ up_sample_time = DEFAULT_UP_SAMPLE_TIME;
+ down_sample_time = DEFAULT_DOWN_SAMPLE_TIME;
+ debug_mode = DEFAULT_DEBUG_MODE;
+ inc_cpu_load = DEFAULT_INC_CPU_LOAD;
+ dec_cpu_load = DEFAULT_DEC_CPU_LOAD;
+ pump_up_step = DEFAULT_PUMP_UP_STEP;
+ pump_down_step = DEFAULT_PUMP_DOWN_STEP;
+ early_suspended = 0;
+ suspending = 0;
+ screen_off_min_step = DEFAULT_SCREEN_OFF_MIN_STEP;
+
+	/* Initialize per-cpu timers */
+ for_each_possible_cpu(i) {
+ pcpu = &per_cpu(cpuinfo, i);
+ init_timer(&pcpu->cpu_timer);
+ pcpu->cpu_timer.function = cpufreq_lulzactive_timer;
+ pcpu->cpu_timer.data = i;
+ }
+
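+	/* Ramp-up runs in a SCHED_FIFO kthread so frequency increases aren't starved when the cpu is busy. */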
+ up_task = kthread_create(cpufreq_lulzactive_up_task, NULL,
+ "klulzactiveup");
+ if (IS_ERR(up_task))
+ return PTR_ERR(up_task);
+
+ sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
+ get_task_struct(up_task);
+
+ /* No rescuer thread, bind to CPU queuing the work for possibly
+ warm cache (probably doesn't matter much). */
+ down_wq = create_workqueue("klulzactive_down");
+
+	if (!down_wq)
+ goto err_freeuptask;
+
+ INIT_WORK(&freq_scale_down_work,
+ cpufreq_lulzactive_freq_down);
+
+#if DEBUG
+ spin_lock_init(&dbgpr_lock);
+ dbg_proc = create_proc_entry("igov", S_IWUSR | S_IRUGO, NULL);
+ dbg_proc->read_proc = dbg_proc_read;
+#endif
+ spin_lock_init(&down_cpumask_lock);
+ spin_lock_init(&up_cpumask_lock);
+
+ register_pm_notifier(&lulzactive_pm_notifier);
+ register_early_suspend(&lulzactive_power_suspend);
+
+ return cpufreq_register_governor(&cpufreq_gov_lulzactive);
+
+err_freeuptask:
+ put_task_struct(up_task);
+ return -ENOMEM;
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LULZACTIVE
+fs_initcall(cpufreq_lulzactive_init);
+#else
+module_init(cpufreq_lulzactive_init);
+#endif
+
+static void __exit cpufreq_lulzactive_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_lulzactive);
+ unregister_early_suspend(&lulzactive_power_suspend);
+ unregister_pm_notifier(&lulzactive_pm_notifier);
+ kthread_stop(up_task);
+ put_task_struct(up_task);
+ destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_lulzactive_exit);
+
+MODULE_AUTHOR("Tegrak <luciferanna@gmail.com>");
+MODULE_DESCRIPTION("'lulzactive' - improved interactive governor inspired by smartass");
+MODULE_LICENSE("GPL");
751 drivers/cpufreq/cpufreq_savagedzen.c
@@ -0,0 +1,751 @@
+/*
+ * drivers/cpufreq/cpufreq_savagedzen.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Joshua Seidel
+ *
+ * Based on the smartass governor by Erasmux
+ *
+ * Based on the interactive governor By Mike Chan (mike@android.com)
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * requires adding
+ * EXPORT_SYMBOL_GPL(nr_running);
+ * at the end of kernel/sched.c
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/moduleparam.h>
+#include <asm/cputime.h>
+#include <linux/earlysuspend.h>
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct savagedzen_info_s {
+ struct cpufreq_policy *cur_policy;
+ struct timer_list timer;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ u64 freq_change_time;
+ u64 freq_change_time_in_idle;
+ int cur_cpu_load;
+ unsigned int force_ramp_up;
+ unsigned int enable;
+ int max_speed;
+ int min_speed;
+};
+static DEFINE_PER_CPU(struct savagedzen_info_s, savagedzen_info);
+
+/* Workqueues handle frequency scaling */
+static struct workqueue_struct *up_wq;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_work;
+
+static cpumask_t work_cpumask;
+static unsigned int suspended;
+
+enum {
+ SAVAGEDZEN_DEBUG_JUMPS=1,
+ SAVAGEDZEN_DEBUG_LOAD=2
+};
+
+/*
+ * Combination of the above debug flags.
+ */
+static unsigned long debug_mask;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ */
+#define DEFAULT_UP_RATE_US 12000
+static unsigned long up_rate_us;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ */
+#define DEFAULT_DOWN_RATE_US 24000
+static unsigned long down_rate_us;
+
+/*
+ * When ramping up with no idle cycles, jump to at least this frequency.
+ * Zero disables. Set a very high value to jump to the policy max frequency.
+ */
+#define DEFAULT_UP_MIN_FREQ 0
+static unsigned int up_min_freq;
+
+/*
+ * When sleep_max_freq>0 the frequency when suspended will be capped
+ * by this frequency. Also will wake up at max frequency of policy
+ * to minimize wakeup issues.
+ * Set sleep_max_freq=0 to disable this behavior.
+ */
+#define DEFAULT_SLEEP_MAX_FREQ 384000
+static unsigned int sleep_max_freq;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_max_freq=0 this will have no effect.
+ */
+#define DEFAULT_SLEEP_WAKEUP_FREQ 604800
+static unsigned int sleep_wakeup_freq;
+
+/*
+ * When awake_min_freq>0 the frequency when not suspended will not
+ * go below this frequency.
+ * Set awake_min_freq=0 to disable this behavior.
+ */
+#define DEFAULT_AWAKE_MIN_FREQ 0
+static unsigned int awake_min_freq;
+
+/*
+ * Sampling rate; I highly recommend leaving it at 2.
+ */
+#define DEFAULT_SAMPLE_RATE_JIFFIES 2
+static unsigned int sample_rate_jiffies;
+
+/*
+ * Frequency delta when ramping up.
+ * Zero disables stepping and always jumps straight to the max frequency.
+ */
+#define DEFAULT_RAMP_UP_STEP 245000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down.
+ * Zero disables stepping and calculates the ramp-down from the load heuristic.
+ */
+#define DEFAULT_RAMP_DOWN_STEP 0
+static unsigned int ramp_down_step;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load;
+ */
+#define DEFAULT_MAX_CPU_LOAD 65
+static unsigned long max_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load;
+ */
+#define DEFAULT_MIN_CPU_LOAD 50
+static unsigned long min_cpu_load;
+
+static int cpufreq_governor_savagedzen(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN
+static
+#endif
+struct cpufreq_governor cpufreq_gov_savagedzen = {
+ .name = "SavagedZen",
+ .governor = cpufreq_governor_savagedzen,
+ .max_transition_latency = 9000000,
+ .owner = THIS_MODULE,
+};
+
+static void savagedzen_update_min_max(struct savagedzen_info_s *this_savagedzen, struct cpufreq_policy *policy, int suspend) {
+ if (suspend) {
+ this_savagedzen->min_speed = policy->min;
+ this_savagedzen->max_speed = // sleep_max_freq; but make sure it obeys the policy min/max
+ policy->max > sleep_max_freq ? (sleep_max_freq > policy->min ? sleep_max_freq : policy->min) : policy->max;
+ } else {
+ this_savagedzen->min_speed = // awake_min_freq; but make sure it obeys the policy min/max
+ policy->min < awake_min_freq ? (awake_min_freq < policy->max ? awake_min_freq : policy->max) : policy->min;
+ this_savagedzen->max_speed = policy->max;
+ }
+}
+
+static inline unsigned int validate_freq(struct savagedzen_info_s *this_savagedzen, int freq) {
+ if (freq > this_savagedzen->max_speed)
+ return this_savagedzen->max_speed;
+ if (freq < this_savagedzen->min_speed)
+ return this_savagedzen->min_speed;
+ return freq;
+}
+
+static void reset_timer(unsigned long cpu, struct savagedzen_info_s *this_savagedzen) {
+ this_savagedzen->time_in_idle = get_cpu_idle_time_us(cpu, &this_savagedzen->idle_exit_time);
+ mod_timer(&this_savagedzen->timer, jiffies + sample_rate_jiffies);
+}
+
+static void cpufreq_savagedzen_timer(unsigned long data)
+{
+ u64 delta_idle;
+ u64 delta_time;
+ int cpu_load;
+ u64 update_time;
+ u64 now_idle;
+ struct savagedzen_info_s *this_savagedzen = &per_cpu(savagedzen_info, data);
+ struct cpufreq_policy *policy = this_savagedzen->cur_policy;
+
+ now_idle = get_cpu_idle_time_us(data, &update_time);
+
+ if (this_savagedzen->idle_exit_time == 0 || update_time == this_savagedzen->idle_exit_time)
+ return;
+
+ delta_idle = cputime64_sub(now_idle, this_savagedzen->time_in_idle);
+ delta_time = cputime64_sub(update_time, this_savagedzen->idle_exit_time);
+ //printk(KERN_INFO "savagedzenT: t=%llu i=%llu\n",cputime64_sub(update_time,this_savagedzen->idle_exit_time),delta_idle);
+
+ // If timer ran less than 1ms after short-term sample started, retry.
+ if (delta_time < 1000) {
+ if (!timer_pending(&this_savagedzen->timer))
+ reset_timer(data,this_savagedzen);
+ return;
+ }
+
+ if (delta_idle > delta_time)
+ cpu_load = 0;
+ else
+ cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
+
+ if (debug_mask & SAVAGEDZEN_DEBUG_LOAD)
+ printk(KERN_INFO "savagedzenT @ %d: load %d (delta_time %llu)\n",policy->cur,cpu_load,delta_time);
+
+ this_savagedzen->cur_cpu_load = cpu_load;
+
+	// Scale up if load is above max or if there were no idle cycles since coming out of idle.
+ if (cpu_load > max_cpu_load || delta_idle == 0) {
+ if (policy->cur == policy->max)
+ return;
+
+ if (nr_running() < 1)
+ return;
+
+ if (cputime64_sub(update_time, this_savagedzen->freq_change_time) < up_rate_us)
+ return;
+
+ this_savagedzen->force_ramp_up = 1;
+ cpumask_set_cpu(data, &work_cpumask);
+ queue_work(up_wq, &freq_scale_work);
+ return;
+ }
+
+ /*
+	 * There is a window where the cpu utilization can go from low to high
+	 * between timer expirations: delta_idle will be > 0 while the cpu is
+	 * 100% busy, preventing idle from running and this timer from firing.
+	 * So set up another timer to check cpu utilization. Do not set up the
+	 * timer if there is no scheduled work or if already at max speed.
+ */
+ if (policy->cur < this_savagedzen->max_speed && !timer_pending(&this_savagedzen->timer) && nr_running() > 0)
+ reset_timer(data,this_savagedzen);
+
+ if (policy->cur == policy->min)
+ return;
+
+ /*
+ * Do not scale down unless we have been at this frequency for the
+ * minimum sample time.
+ */
+ if (cputime64_sub(update_time, this_savagedzen->freq_change_time) < down_rate_us)
+ return;
+
+ cpumask_set_cpu(data, &work_cpumask);
+ queue_work(down_wq, &freq_scale_work);
+}
+
+static void cpufreq_idle(void)
+{
+ struct savagedzen_info_s *this_savagedzen = &per_cpu(savagedzen_info, smp_processor_id());
+ struct cpufreq_policy *policy = this_savagedzen->cur_policy;
+
+ if (!this_savagedzen->enable) {
+ pm_idle_old();
+ return;
+ }
+
+ if (policy->cur == this_savagedzen->min_speed && timer_pending(&this_savagedzen->timer))
+ del_timer(&this_savagedzen->timer);
+
+ pm_idle_old();
+
+ if (!timer_pending(&this_savagedzen->timer))
+ reset_timer(smp_processor_id(), this_savagedzen);
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_savagedzen_freq_change_time_work(struct work_struct *work)
+{
+ unsigned int cpu;
+ int new_freq;
+ unsigned int force_ramp_up;
+ int cpu_load;
+ struct savagedzen_info_s *this_savagedzen;
+ struct cpufreq_policy *policy;
+ unsigned int relation = CPUFREQ_RELATION_L;
+ cpumask_t tmp_mask = work_cpumask;
+ for_each_cpu(cpu, tmp_mask) {
+ this_savagedzen = &per_cpu(savagedzen_info, cpu);
+ policy = this_savagedzen->cur_policy;
+ cpu_load = this_savagedzen->cur_cpu_load;
+ force_ramp_up = this_savagedzen->force_ramp_up && nr_running() > 1;
+ this_savagedzen->force_ramp_up = 0;
+
+ if (force_ramp_up || cpu_load > max_cpu_load) {
+ if (force_ramp_up && up_min_freq) {
+ new_freq = up_min_freq;
+ relation = CPUFREQ_RELATION_L;
+ } else if (ramp_up_step) {
+ new_freq = policy->cur + ramp_up_step;
+ relation = CPUFREQ_RELATION_H;
+ } else {
+ new_freq = this_savagedzen->max_speed;
+ relation = CPUFREQ_RELATION_H;
+ }
+ }
+ else if (cpu_load < min_cpu_load) {
+ if (ramp_down_step)
+ new_freq = policy->cur - ramp_down_step;
+ else {
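+				/* dummy load shifts the scale: with max_cpu_load=65, a load of 40 becomes 75, targeting 75% of the current frequency */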
+ cpu_load += 100 - max_cpu_load; // dummy load.
+ new_freq = policy->cur * cpu_load / 100;
+ }
+ relation = CPUFREQ_RELATION_L;
+ }
+ else new_freq = policy->cur;
+
+ new_freq = validate_freq(this_savagedzen,new_freq);
+
+ if (new_freq != policy->cur) {
+ if (debug_mask & SAVAGEDZEN_DEBUG_JUMPS)
+ printk(KERN_INFO "savagedzenQ: jumping from %d to %d\n",policy->cur,new_freq);
+
+ __cpufreq_driver_target(policy, new_freq, relation);
+
+ this_savagedzen->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,&this_savagedzen->freq_change_time);
+ }
+
+ cpumask_clear_cpu(cpu, &work_cpumask);
+ }
+}
+
+static ssize_t show_debug_mask(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%lu\n", debug_mask);
+}
+
+static ssize_t store_debug_mask(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ debug_mask = input;
+ return res;
+}
+
+static struct freq_attr debug_mask_attr = __ATTR(debug_mask, 0644,
+ show_debug_mask, store_debug_mask);
+
+static ssize_t show_up_rate_us(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%lu\n", up_rate_us);
+}
+
+static ssize_t store_up_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0 && input <= 100000000)
+ up_rate_us = input;
+ return res;
+}
+
+static struct freq_attr up_rate_us_attr = __ATTR(up_rate_us, 0644,
+ show_up_rate_us, store_up_rate_us);
+
+static ssize_t show_down_rate_us(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%lu\n", down_rate_us);
+}
+
+static ssize_t store_down_rate_us(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0 && input <= 100000000)
+ down_rate_us = input;
+ return res;
+}
+
+static struct freq_attr down_rate_us_attr = __ATTR(down_rate_us, 0644,
+ show_down_rate_us, store_down_rate_us);
+
+static ssize_t show_up_min_freq(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", up_min_freq);
+}
+
+static ssize_t store_up_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0)
+ up_min_freq = input;
+ return res;
+}
+
+static struct freq_attr up_min_freq_attr = __ATTR(up_min_freq, 0644,
+ show_up_min_freq, store_up_min_freq);
+
+static ssize_t show_sleep_max_freq(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", sleep_max_freq);
+}
+
+static ssize_t store_sleep_max_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ sleep_max_freq = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr sleep_max_freq_attr = __ATTR(sleep_max_freq, 0644,
+ show_sleep_max_freq, store_sleep_max_freq);
+
+static ssize_t show_sleep_wakeup_freq(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", sleep_wakeup_freq);
+}
+
+static ssize_t store_sleep_wakeup_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ sleep_wakeup_freq = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr sleep_wakeup_freq_attr = __ATTR(sleep_wakeup_freq, 0644,
+ show_sleep_wakeup_freq, store_sleep_wakeup_freq);
+
+static ssize_t show_awake_min_freq(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", awake_min_freq);
+}
+
+static ssize_t store_awake_min_freq(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ awake_min_freq = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr awake_min_freq_attr = __ATTR(awake_min_freq, 0644,
+ show_awake_min_freq, store_awake_min_freq);
+
+static ssize_t show_sample_rate_jiffies(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", sample_rate_jiffies);
+}
+
+static ssize_t store_sample_rate_jiffies(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input <= 1000)
+ sample_rate_jiffies = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr sample_rate_jiffies_attr = __ATTR(sample_rate_jiffies, 0644,
+ show_sample_rate_jiffies, store_sample_rate_jiffies);
+
+static ssize_t show_ramp_up_step(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", ramp_up_step);
+}
+
+static ssize_t store_ramp_up_step(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ ramp_up_step = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr ramp_up_step_attr = __ATTR(ramp_up_step, 0644,
+ show_ramp_up_step, store_ramp_up_step);
+
+static ssize_t show_ramp_down_step(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%u\n", ramp_down_step);
+}
+
+static ssize_t store_ramp_down_step(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ ramp_down_step = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr ramp_down_step_attr = __ATTR(ramp_down_step, 0644,
+ show_ramp_down_step, store_ramp_down_step);
+
+static ssize_t show_max_cpu_load(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%lu\n", max_cpu_load);
+}
+
+static ssize_t store_max_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input <= 100)
+ max_cpu_load = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr max_cpu_load_attr = __ATTR(max_cpu_load, 0644,
+ show_max_cpu_load, store_max_cpu_load);
+
+static ssize_t show_min_cpu_load(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_cpu_load);
+}
+
+static ssize_t store_min_cpu_load(struct cpufreq_policy *policy, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input < 100)
+ min_cpu_load = input;
+ return res < 0 ? res : count;
+}
+
+static struct freq_attr min_cpu_load_attr = __ATTR(min_cpu_load, 0644,
+ show_min_cpu_load, store_min_cpu_load);
+
+static struct attribute * savagedzen_attributes[] = {
+ &debug_mask_attr.attr,
+ &up_rate_us_attr.attr,
+ &down_rate_us_attr.attr,
+ &up_min_freq_attr.attr,
+ &sleep_max_freq_attr.attr,
+ &sleep_wakeup_freq_attr.attr,
+ &awake_min_freq_attr.attr,
+ &sample_rate_jiffies_attr.attr,
+ &ramp_up_step_attr.attr,
+ &ramp_down_step_attr.attr,
+ &max_cpu_load_attr.attr,
+ &min_cpu_load_attr.attr,
+ NULL,
+};
+
+static struct attribute_group savagedzen_attr_group = {
+ .attrs = savagedzen_attributes,
+ .name = "savagedzen",
+};
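+/*
+ * With the group name above these tunables typically appear under the
+ * policy directory, e.g. (the exact path depends on the tree's cpufreq
+ * sysfs layout):
+ *
+ * echo 90 > /sys/devices/system/cpu/cpu0/cpufreq/savagedzen/max_cpu_load
+ */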
+
+static int cpufreq_governor_savagedzen(struct cpufreq_policy *new_policy,
+ unsigned int event)
+{
+ unsigned int cpu = new_policy->cpu;
+ int rc;
+ struct savagedzen_info_s *this_savagedzen = &per_cpu(savagedzen_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!new_policy->cur))
+ return -EINVAL;
+
+ /*
+ * Do not register the idle hook and create sysfs
+ * entries if we have already done so.
+ */
+ if (atomic_inc_return(&active_count) <= 1) {
+ rc = sysfs_create_group(&new_policy->kobj, &savagedzen_attr_group);
+ if (rc)
+ return rc;
+ pm_idle_old = pm_idle;
+ pm_idle = cpufreq_idle;
+ }
+
+ this_savagedzen->cur_policy = new_policy;
+ this_savagedzen->enable = 1;
+
+ // deliberate fallthrough into CPUFREQ_GOV_LIMITS (no break)
+
+ case CPUFREQ_GOV_LIMITS:
+ savagedzen_update_min_max(this_savagedzen,new_policy,suspended);
+ if (this_savagedzen->cur_policy->cur != this_savagedzen->max_speed) {
+ if (debug_mask & SAVAGEDZEN_DEBUG_JUMPS)
+ printk(KERN_INFO "savagedzenI: initializing to %d\n",this_savagedzen->max_speed);
+ __cpufreq_driver_target(new_policy, this_savagedzen->max_speed, CPUFREQ_RELATION_H);
+ }
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ del_timer(&this_savagedzen->timer);
+ this_savagedzen->enable = 0;
+
+ if (atomic_dec_return(&active_count) > 0) /* others still active */
+ return 0;
+ sysfs_remove_group(&new_policy->kobj,
+ &savagedzen_attr_group);
+
+ pm_idle = pm_idle_old;
+ break;
+ }
+
+ return 0;
+}
+
+static void savagedzen_suspend(int cpu, int suspend)
+{
+ struct savagedzen_info_s *this_savagedzen = &per_cpu(savagedzen_info, cpu);
+ struct cpufreq_policy *policy = this_savagedzen->cur_policy;
+ unsigned int new_freq;
+
+ if (!this_savagedzen->enable || sleep_max_freq==0) // disable behavior for sleep_max_freq==0
+ return;
+
+ savagedzen_update_min_max(this_savagedzen,policy,suspend);
+ if (suspend) {
+ if (policy->cur > this_savagedzen->max_speed) {
+ new_freq = this_savagedzen->max_speed;
+
+ if (debug_mask & SAVAGEDZEN_DEBUG_JUMPS)
+ printk(KERN_INFO "savagedzenS: suspending at %d\n",new_freq);
+
+ __cpufreq_driver_target(policy, new_freq,
+ CPUFREQ_RELATION_H);
+ }
+ } else { // resume at sleep_wakeup_freq:
+ new_freq = validate_freq(this_savagedzen,sleep_wakeup_freq);
+
+ if (debug_mask & SAVAGEDZEN_DEBUG_JUMPS)
+ printk(KERN_INFO "savagedzenS: awaking at %d\n",new_freq);
+
+ __cpufreq_driver_target(policy, new_freq,
+ CPUFREQ_RELATION_L);
+ }
+}
+
+static void savagedzen_early_suspend(struct early_suspend *handler) {
+ int i;
+ suspended = 1;
+ for_each_online_cpu(i)
+ savagedzen_suspend(i,1);
+}
+
+static void savagedzen_late_resume(struct early_suspend *handler) {
+ int i;
+ suspended = 0;
+ for_each_online_cpu(i)
+ savagedzen_suspend(i,0);
+}
+
+static struct early_suspend savagedzen_power_suspend = {
+ .suspend = savagedzen_early_suspend,
+ .resume = savagedzen_late_resume,
+};
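+/*
+ * Note: early_suspend is the Android-specific notifier that fires when
+ * the display turns off/on, not at full system suspend/resume; this is
+ * what lets the governor cap the frequency while the screen is dark.
+ */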
+
+static int __init cpufreq_savagedzen_init(void)
+{
+ unsigned int i;
+ struct savagedzen_info_s *this_savagedzen;
+ debug_mask = 0;
+ up_rate_us = DEFAULT_UP_RATE_US;
+ down_rate_us = DEFAULT_DOWN_RATE_US;
+ up_min_freq = DEFAULT_UP_MIN_FREQ;
+ sleep_max_freq = DEFAULT_SLEEP_MAX_FREQ;
+ sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ;
+ awake_min_freq = DEFAULT_AWAKE_MIN_FREQ;
+ sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES;
+ ramp_up_step = DEFAULT_RAMP_UP_STEP;
+ ramp_down_step = DEFAULT_RAMP_DOWN_STEP;
+ max_cpu_load = DEFAULT_MAX_CPU_LOAD;
+ min_cpu_load = DEFAULT_MIN_CPU_LOAD;
+
+ suspended = 0;
+
+ /* Initialize per-cpu data: */
+ for_each_possible_cpu(i) {
+ this_savagedzen = &per_cpu(savagedzen_info, i);
+ this_savagedzen->enable = 0;
+ this_savagedzen->cur_policy = NULL;
+ this_savagedzen->force_ramp_up = 0;
+ this_savagedzen->max_speed = DEFAULT_SLEEP_WAKEUP_FREQ;
+ this_savagedzen->min_speed = DEFAULT_AWAKE_MIN_FREQ;
+ this_savagedzen->time_in_idle = 0;
+ this_savagedzen->idle_exit_time = 0;
+ this_savagedzen->freq_change_time = 0;
+ this_savagedzen->freq_change_time_in_idle = 0;
+ this_savagedzen->cur_cpu_load = 0;
+ // initialize timer:
+ init_timer_deferrable(&this_savagedzen->timer);
+ this_savagedzen->timer.function = cpufreq_savagedzen_timer;
+ this_savagedzen->timer.data = i;
+ }
+
+ /* Scale up is intended to be high priority (both queues are created as plain workqueues here) */
+ up_wq = create_workqueue("ksavagedzen_up");
+ down_wq = create_workqueue("ksavagedzen_down");
+
+ INIT_WORK(&freq_scale_work, cpufreq_savagedzen_freq_change_time_work);
+
+ register_early_suspend(&savagedzen_power_suspend);
+
+ return cpufreq_register_governor(&cpufreq_gov_savagedzen);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SAVAGEDZEN
+pure_initcall(cpufreq_savagedzen_init);
+#else
+module_init(cpufreq_savagedzen_init);
+#endif
+
+static void __exit cpufreq_savagedzen_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_savagedzen);
+ destroy_workqueue(up_wq);
+ destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_savagedzen_exit);
+
+MODULE_AUTHOR("jsseidel");
+MODULE_DESCRIPTION("'cpufreq_savagedzen' - A badass cpufreq governor! Based on Smartass");
+MODULE_LICENSE("GPL");
+
868 drivers/cpufreq/cpufreq_smartass2.c
View
@@ -0,0 +1,868 @@
+/*
+ * drivers/cpufreq/cpufreq_smartass2.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Erasmux
+ *
+ * Based on the interactive governor By Mike Chan (mike@android.com)
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of smartassV2 see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/moduleparam.h>
+#include <asm/cputime.h>
+#include <linux/earlysuspend.h>
+
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than
+ * lowering it further below the ideal.
+ */
+#define DEFAULT_AWAKE_IDEAL_FREQ 768000
+static unsigned int awake_ideal_freq;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * even while suspended).
+ */
+#define DEFAULT_SLEEP_IDEAL_FREQ 245000
+static unsigned int sleep_ideal_freq;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables stepping and jumps straight to the max frequency.
+ * When below the ideal frequency we always ramp up to the ideal freq.
+ */
+#define DEFAULT_RAMP_UP_STEP 256000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables stepping and the ramp down is calculated from the load heuristic.
+ * When above the ideal frequency we always ramp down to the ideal freq.
+ */
+#define DEFAULT_RAMP_DOWN_STEP 256000
+static unsigned int ramp_down_step;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load;
+ */
+#define DEFAULT_MAX_CPU_LOAD 50
+static unsigned long max_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load;
+ */
+#define DEFAULT_MIN_CPU_LOAD 25
+static unsigned long min_cpu_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ * Notice we ignore this when we are below the ideal frequency.
+ */
+#define DEFAULT_UP_RATE_US 48000
+static unsigned long up_rate_us;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ * Notice we ignore this when we are above the ideal frequency.
+ */
+#define DEFAULT_DOWN_RATE_US 99000
+static unsigned long down_rate_us;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_ideal_freq=0 this will have no effect.
+ */
+#define DEFAULT_SLEEP_WAKEUP_FREQ 1024000
+static unsigned int sleep_wakeup_freq;
+
+/*
+ * Sampling rate; I highly recommend leaving it at 2.
+ */
+#define DEFAULT_SAMPLE_RATE_JIFFIES 2
+static unsigned int sample_rate_jiffies;
+
+
+/*************** End of tunables ***************/
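+/*
+ * Worked example with the defaults above (kHz): awake and loaded (>50%)
+ * at 512000, the governor ramps straight to the ideal 768000; at or
+ * above 768000 it steps by ramp_up_step to 1024000, provided at least
+ * up_rate_us was spent at the current speed. Ramping down mirrors this
+ * around the ideal frequency.
+ */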
+
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct smartass_info_s {
+ struct cpufreq_policy *cur_policy;
+ struct cpufreq_frequency_table *freq_table;
+ struct timer_list timer;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ u64 freq_change_time;
+ u64 freq_change_time_in_idle;
+ int cur_cpu_load;
+ int old_freq;
+ int ramp_dir;
+ unsigned int enable;
+ int ideal_speed;
+};
+static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);
+
+/* Workqueues handle frequency scaling */
+static struct workqueue_struct *up_wq;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_work;
+
+static cpumask_t work_cpumask;
+static DEFINE_SPINLOCK(cpumask_lock);
+
+static unsigned int suspended;
+
+#define dprintk(flag,msg...) do { \
+ if (debug_mask & flag) printk(KERN_DEBUG msg); \
+ } while (0)
+
+enum {
+ SMARTASS_DEBUG_JUMPS=1,
+ SMARTASS_DEBUG_LOAD=2,
+ SMARTASS_DEBUG_ALG=4
+};
+
+/*
+ * Combination of the above debug flags.
+ */
+static unsigned long debug_mask;
+
+static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
+static
+#endif
+struct cpufreq_governor cpufreq_gov_smartass2 = {
+ .name = "smartassV2",
+ .governor = cpufreq_governor_smartass,
+ .max_transition_latency = 6000000,
+ .owner = THIS_MODULE,
+};
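+/*
+ * Once built in (or loaded as a module) the governor is selected per
+ * cpu through the standard cpufreq interface, e.g.:
+ * echo smartassV2 > /sys/devices/system/cpu/cpu0/cpufreq/scaling_governor
+ */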
+
+inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) {
+ if (suspend) {
+ this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
+ policy->max > sleep_ideal_freq ?
+ (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
+ } else {
+ this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
+ policy->min < awake_ideal_freq ?
+ (awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
+ }
+}
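+/* Example: suspended with sleep_ideal_freq=245000 and a 122000..1024000
+ * policy, ideal_speed becomes 245000; were policy->max capped below
+ * 245000, ideal_speed would be clamped down to policy->max. */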
+
+inline static void smartass_update_min_max_allcpus(void) {
+ unsigned int i;
+ for_each_online_cpu(i) {
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
+ if (this_smartass->enable)
+ smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
+ }
+}
+
+inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) {
+ if (freq > (int)policy->max)
+ return policy->max;
+ if (freq < (int)policy->min)
+ return policy->min;
+ return freq;
+}
+
+inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) {
+ this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
+ mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
+}
+
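+/*
+ * work_cpumask is shared between the per-cpu sampling timers and the
+ * freq_scale_work worker; the spinlock keeps cpumask_set_cpu and
+ * cpumask_test_and_clear_cpu atomic with respect to each other.
+ */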
+inline static void work_cpumask_set(unsigned long cpu) {
+ unsigned long flags;
+ spin_lock_irqsave(&cpumask_lock, flags);
+ cpumask_set_cpu(cpu, &work_cpumask);
+ spin_unlock_irqrestore(&cpumask_lock, flags);
+}
+
+inline static int work_cpumask_test_and_clear(unsigned long cpu) {
+ unsigned long flags;
+ int res = 0;
+ spin_lock_irqsave(&cpumask_lock, flags);
+ res = cpumask_test_and_clear_cpu(cpu, &work_cpumask);
+ spin_unlock_irqrestore(&cpumask_lock, flags);
+ return res;
+}
+
+inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
+ int new_freq, int old_freq, int prefered_relation) {
+ int index, target;
+ struct cpufreq_frequency_table *table = this_smartass->freq_table;
+
+ if (new_freq == old_freq)
+ return 0;
+ new_freq = validate_freq(policy,new_freq);
+ if (new_freq == old_freq)
+ return 0;
+
+ if (table &&
+ !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
+ {
+ target = table[index].frequency;
+ if (target == old_freq) {
+ // if for example we are ramping up to *at most* current + ramp_up_step
+ // but there is no such frequency higher than the current, try also
+ // to ramp up to *at least* current + ramp_up_step.
+ if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
+ && !cpufreq_frequency_table_target(policy,table,new_freq,
+ CPUFREQ_RELATION_L,&index))
+ target = table[index].frequency;
+ // similarly for ramping down:
+ else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
+ && !cpufreq_frequency_table_target(policy,table,new_freq,
+ CPUFREQ_RELATION_H,&index))
+ target = table[index].frequency;
+ }
+
+ if (target == old_freq) {
+ // We should not get here:
+ // If we got here we tried to change to a validated new_freq which is different
+ // from old_freq, so there is no reason for us to remain at same frequency.
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
+ old_freq,new_freq,target);
+ return 0;
+ }
+ }
+ else target = new_freq;
+
+ __cpufreq_driver_target(policy, target, prefered_relation);
+
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
+ old_freq,new_freq,target,policy->cur);
+
+ return target;
+}
+
+static void cpufreq_smartass_timer(unsigned long cpu)
+{
+ u64 delta_idle;
+ u64 delta_time;
+ int cpu_load;
+ int old_freq;
+ u64 update_time;
+ u64 now_idle;
+ int queued_work = 0;
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
+ struct cpufreq_policy *policy = this_smartass->cur_policy;
+
+ now_idle = get_cpu_idle_time_us(cpu, &update_time);
+ old_freq = policy->cur;
+
+ if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
+ return;
+
+ delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
+ delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);
+
+ // If timer ran less than 1ms after short-term sample started, retry.
+ if (delta_time < 1000) {
+ if (!timer_pending(&this_smartass->timer))
+ reset_timer(cpu,this_smartass);
+ return;
+ }
+
+ if (delta_idle > delta_time)
+ cpu_load = 0;
+ else
+ cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
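+ // e.g. delta_time = 10000us with delta_idle = 2500us gives
+ // cpu_load = 100 * 7500 / 10000 = 75 (percent busy)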
+
+ dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
+ old_freq,cpu_load,delta_time);
+
+ this_smartass->cur_cpu_load = cpu_load;
+ this_smartass->old_freq = old_freq;
+
+ // Scale up if load is above max or if there were no idle cycles since coming out of idle;
+ // additionally, if we are at or above the ideal_speed, verify we have been at this frequency
+ // for at least up_rate_us:
+ if (cpu_load > max_cpu_load || delta_idle == 0)
+ {
+ if (old_freq < policy->max &&
+ (old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
+ cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
+ {
+ dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
+ old_freq,cpu_load,delta_idle);
+ this_smartass->ramp_dir = 1;
+ work_cpumask_set(cpu);
+ queue_work(up_wq, &freq_scale_work);
+ queued_work = 1;
+ }
+ else this_smartass->ramp_dir = 0;
+ }
+ // Similarly for scale down: load should be below min and if we are at or below ideal
+ // frequency we require that we have been at this frequency for at least down_rate_us:
+ else if (cpu_load < min_cpu_load && old_freq > policy->min &&
+ (old_freq > this_smartass->ideal_speed ||
+ cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
+ {
+ dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
+ old_freq,cpu_load,delta_idle);
+ this_smartass->ramp_dir = -1;
+ work_cpumask_set(cpu);
+ queue_work(down_wq, &freq_scale_work);
+ queued_work = 1;
+ }
+ else this_smartass->ramp_dir = 0;
+
+ // To avoid unnecessary load when the CPU is already at high load, we don't
+ // reset ourselves if we are at max speed. If and when there are idle cycles,
+ // the idle loop will activate the timer.
+ // Additionally, if we queued some work, the work task will reset the timer
+ // after it has done its adjustments.
+ if (!queued_work && old_freq < policy->max)
+ reset_timer(cpu,this_smartass);
+}
+
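+/*
+ * Replacement pm_idle hook: chained in front of the original so the
+ * sampling timer can be dropped while idling at the minimum frequency
+ * and re-armed as soon as the cpu leaves idle.
+ */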
+static void cpufreq_idle(void)
+{
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
+ struct cpufreq_policy *policy = this_smartass->cur_policy;
+
+ if (!this_smartass->enable) {
+ pm_idle_old();
+ return;
+ }
+
+ if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
+ del_timer(&this_smartass->timer);
+
+ pm_idle_old();
+
+ if (!timer_pending(&this_smartass->timer))
+ reset_timer(smp_processor_id(), this_smartass);
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
+{
+ unsigned int cpu;
+ int new_freq;
+ int old_freq;
+ int ramp_dir;
+ struct smartass_info_s *this_smartass;
+ struct cpufreq_policy *policy;
+ unsigned int relation = CPUFREQ_RELATION_L;
+ for_each_possible_cpu(cpu) {
+ this_smartass = &per_cpu(smartass_info, cpu);
+ if (!work_cpumask_test_and_clear(cpu))
+ continue;
+
+ ramp_dir = this_smartass->ramp_dir;
+ this_smartass->ramp_dir = 0;
+
+ old_freq = this_smartass->old_freq;
+ policy = this_smartass->cur_policy;
+
+ if (old_freq != policy->cur) {
+ // frequency was changed by someone else?
+ printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
+ old_freq,policy->cur);
+ new_freq = old_freq;