ermmm

commit e72f7c2fa16c6e086ef9a7395602d85a328aa79c (parent: 1dccc69)
Author: @playfulgod
arch/arm/configs/plague_defconfig (26 lines changed)
@@ -35,7 +35,7 @@ CONFIG_BROKEN_ON_SMP=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_CROSS_COMPILE=""
-CONFIG_LOCALVERSION="$(KERNEL_LOCAL_VERSION)-Plague"
+CONFIG_LOCALVERSION="$(KERNEL_LOCAL_VERSION)"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_LZMA=y
@@ -43,7 +43,7 @@ CONFIG_HAVE_KERNEL_LZO=y
CONFIG_KERNEL_GZIP=y
# CONFIG_KERNEL_LZMA is not set
# CONFIG_KERNEL_LZO is not set
-# CONFIG_SWAP is not set
+CONFIG_SWAP=y
CONFIG_SYSVIPC=y
CONFIG_SYSVIPC_SYSCTL=y
# CONFIG_POSIX_MQUEUE is not set
@@ -174,11 +174,14 @@ CONFIG_LBDAF=y
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
-# CONFIG_IOSCHED_DEADLINE is not set
+# CONFIG_IOSCHED_DEADLINE=Y
CONFIG_IOSCHED_CFQ=y
-CONFIG_DEFAULT_CFQ=y
+CONFIG_IOSCHED_SIO=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ=y
# CONFIG_DEFAULT_NOOP is not set
-CONFIG_DEFAULT_IOSCHED="cfq"
+# CONFIG_DEFAULT_IOSCHED="cfq"
+CONFIG_DEFAULT_IOSCHED="sio"
# CONFIG_INLINE_SPIN_TRYLOCK is not set
# CONFIG_INLINE_SPIN_TRYLOCK_BH is not set
# CONFIG_INLINE_SPIN_LOCK is not set
@@ -399,7 +402,6 @@ CONFIG_MSM_CPU_FREQ_MIN=245760
# CONFIG_MSM_RMT_STORAGE_CLIENT_STATS is not set
CONFIG_MSM_DALRPC=y
# CONFIG_MSM_DALRPC_TEST is not set
-# CONFIG_MSM_CPU_FREQ_SET_MIN_MAX is not set
# CONFIG_MSM_AVS_HW is not set
# CONFIG_MSM_HW3D is not set
CONFIG_MSM_ADSP=y
@@ -562,19 +564,23 @@ CONFIG_CPU_FREQ_TABLE=y
CONFIG_CPU_FREQ_DEBUG=y
CONFIG_CPU_FREQ_STAT=y
CONFIG_CPU_FREQ_STAT_DETAILS=y
-CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE is not set
CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-# CONFIG_CPU_IDLE is not set
+CONFIG_CPU_FREQ_GOV_SMARTASS2=y
+CONFIG_CPU_FREQ_GOV_LAGFREE=y
+CONFIG_CPU_IDLE=y
CONFIG_CPU_FREQ_MSM=y
#
@@ -1101,8 +1107,8 @@ CONFIG_NETDEV_1000=y
CONFIG_WLAN=y
# CONFIG_USB_ZD1201 is not set
CONFIG_LIBRA_SDIOIF=m
-# CONFIG_BCM4329 is not set
-# CONFIG_HOSTAP is not set
+# CONFIG_BCM4329=m
+# CONFIG_HOSTAP=m
#
# Enable WiMAX (Networking options) to see the WiMAX drivers
arch/arm/mach-msm/acpuclock-7201.c (9 lines changed)
@@ -396,6 +396,15 @@ static struct clkctl_acpu_speed pll0_960_pll1_589_pll2_1200_pll4_800[] = {
{ 1, 934400, ACPU_PLL_4, 6, 0, 120000, 3, 7, 200000 },
{ 1, 988800, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
{ 1, 1008000, ACPU_PLL_4, 6, 0, 126000, 3, 7, 200000},
+ { 1, 1100000, ACPU_PLL_4, 6, 0, 128000, 3, 7, 200000},
+ { 1, 1150000, ACPU_PLL_4, 6, 0, 128000, 3, 7, 200000},
+ { 1, 1200000, ACPU_PLL_4, 6, 0, 130000, 3, 7, 200000},
+ { 1, 1250000, ACPU_PLL_4, 6, 0, 130000, 3, 7, 200000},
+ { 1, 1300000, ACPU_PLL_4, 6, 0, 132000, 3, 7, 200000},
+ { 1, 1350000, ACPU_PLL_4, 6, 0, 132000, 3, 7, 200000},
+ { 1, 1400000, ACPU_PLL_4, 6, 0, 134000, 3, 7, 200000},
+ { 1, 1450000, ACPU_PLL_4, 6, 0, 134000, 3, 7, 200000},
+ { 1, 1500000, ACPU_PLL_4, 6, 0, 136000, 3, 7, 200000},
{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, {0, 0, 0, 0}, {0, 0, 0, 0} }
};
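Note: the nine added rows extend the PLL4 speed table past the stock 1,008 MHz top step, adding overclock steps in 50 MHz increments up to 1.5 GHz, with the bus-related figures in each row following the pattern of the existing entries. A trivial standalone C listing of just the new CPU clock steps (values copied from the hunk above; the table layout itself is left to acpuclock-7201.c):

#include <stdio.h>

/* The nine CPU clock steps added above, in kHz. */
static const unsigned int new_acpu_steps_khz[] = {
	1100000, 1150000, 1200000, 1250000, 1300000,
	1350000, 1400000, 1450000, 1500000,
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(new_acpu_steps_khz) / sizeof(new_acpu_steps_khz[0]); i++)
		printf("%u MHz\n", new_acpu_steps_khz[i] / 1000);
	return 0;
}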
block/Kconfig.iosched (5 lines changed)
@@ -59,12 +59,17 @@ choice
config DEFAULT_NOOP
bool "No-op"
+ config DEFAULT_SIO
+ 	bool "SIO" if IOSCHED_SIO=y
+
endchoice
config DEFAULT_IOSCHED
string
default "deadline" if DEFAULT_DEADLINE
default "cfq" if DEFAULT_CFQ
+ default "sio" if DEFAULT_SIO
default "noop" if DEFAULT_NOOP
endmenu
block/Makefile (1 line changed)
@@ -13,6 +13,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING) += blk-throttle.o
obj-$(CONFIG_IOSCHED_NOOP) += noop-iosched.o
obj-$(CONFIG_IOSCHED_DEADLINE) += deadline-iosched.o
obj-$(CONFIG_IOSCHED_CFQ) += cfq-iosched.o
+obj-$(CONFIG_IOSCHED_SIO) += sio-iosched.o
obj-$(CONFIG_BLOCK_COMPAT) += compat_ioctl.o
obj-$(CONFIG_BLK_DEV_INTEGRITY) += blk-integrity.o
block/sio-iosched.c (348 lines changed)
@@ -0,0 +1,348 @@
+
+/*
+ * Simple IO scheduler
+ * Based on Noop, Deadline and V(R) IO schedulers.
+ *
+ * Copyright (C) 2010 Miguel Boton <mboton@gmail.com>
+ *
+ *
+ * This algorithm does not do any kind of sorting, as it is aimed at
+ * random-access devices, but it does some basic merging. We try to
+ * keep overhead to a minimum to achieve low latency.
+ *
+ * Asynchronous and synchronous requests are not treated separately, but
+ * we rely on deadlines to ensure fairness.
+ *
+ */
+#include <linux/blkdev.h>
+#include <linux/elevator.h>
+#include <linux/bio.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+
+enum {
+ ASYNC,
+ SYNC,
+};
+
+/* Tunables */
+static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */
+static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! */
+static const int fifo_batch = 1; /* # of sequential requests treated as one
+ by the above parameters. For throughput. */
+
+/* Elevator data */
+struct sio_data {
+ /* Request queues */
+ struct list_head fifo_list[2];
+
+ /* Attributes */
+ unsigned int batched;
+
+ /* Settings */
+ int fifo_expire[2];
+ int fifo_batch;
+};
+
+static void
+sio_merged_requests(struct request_queue *q, struct request *rq,
+ struct request *next)
+{
+ /*
+ * If next expires before rq, assign its expire time to rq
+ * and move into next position (next will be deleted) in fifo.
+ */
+ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) {
+ if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) {
+ list_move(&rq->queuelist, &next->queuelist);
+ rq_set_fifo_time(rq, rq_fifo_time(next));
+ }
+ }
+
+ /* Delete next request */
+ rq_fifo_clear(next);
+}
+
+static void
+sio_add_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+
+ /*
+ * Add request to the proper fifo list and set its
+ * expire time.
+ */
+ rq_set_fifo_time(rq, jiffies + sd->fifo_expire[sync]);
+ list_add_tail(&rq->queuelist, &sd->fifo_list[sync]);
+}
+
+static int
+sio_queue_empty(struct request_queue *q)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+
+ /* Check if fifo lists are empty */
+ return list_empty(&sd->fifo_list[SYNC]) &&
+ list_empty(&sd->fifo_list[ASYNC]);
+}
+
+static struct request *
+sio_expired_request(struct sio_data *sd, int sync)
+{
+ struct request *rq;
+
+ if (list_empty(&sd->fifo_list[sync]))
+ return NULL;
+
+ /* Retrieve request */
+ rq = rq_entry_fifo(sd->fifo_list[sync].next);
+
+ /* Request has expired */
+ if (time_after(jiffies, rq_fifo_time(rq)))
+ return rq;
+
+ return NULL;
+}
+
+static struct request *
+sio_choose_expired_request(struct sio_data *sd)
+{
+ struct request *sync = sio_expired_request(sd, SYNC);
+ struct request *async = sio_expired_request(sd, ASYNC);
+
+ /*
+ * Check expired requests. Asynchronous requests have
+ * priority over synchronous.
+ */
+ if (sync && async)
+ return async;
+ if (sync)
+ return sync;
+
+ return async;
+
+}
+
+static struct request *
+sio_choose_request(struct sio_data *sd)
+{
+ /*
+ * Retrieve request from available fifo list.
+ * Synchronous requests have priority over asynchronous.
+ */
+ if (!list_empty(&sd->fifo_list[SYNC]))
+ return rq_entry_fifo(sd->fifo_list[SYNC].next);
+
+ if (!list_empty(&sd->fifo_list[ASYNC]))
+ return rq_entry_fifo(sd->fifo_list[ASYNC].next);
+
+ return NULL;
+}
+
+static inline void
+sio_dispatch_request(struct sio_data *sd, struct request *rq)
+{
+ /*
+ * Remove the request from the fifo list
+ * and dispatch it.
+ */
+ rq_fifo_clear(rq);
+ elv_dispatch_add_tail(rq->q, rq);
+
+ sd->batched++;
+}
+
+static int
+sio_dispatch_requests(struct request_queue *q, int force)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ struct request *rq = NULL;
+
+ /*
+ * Retrieve any expired request after a batch of
+ * sequential requests.
+ */
+ if (sd->batched > sd->fifo_batch) {
+ sd->batched = 0;
+ rq = sio_choose_expired_request(sd);
+ }
+
+ /* Retrieve request */
+ if (!rq) {
+ rq = sio_choose_request(sd);
+ if (!rq)
+ return 0;
+ }
+
+ /* Dispatch request */
+ sio_dispatch_request(sd, rq);
+
+ return 1;
+}
+
+static struct request *
+sio_former_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+
+ if (rq->queuelist.prev == &sd->fifo_list[sync])
+ return NULL;
+
+ /* Return former request */
+ return list_entry(rq->queuelist.prev, struct request, queuelist);
+}
+
+static struct request *
+sio_latter_request(struct request_queue *q, struct request *rq)
+{
+ struct sio_data *sd = q->elevator->elevator_data;
+ const int sync = rq_is_sync(rq);
+
+ if (rq->queuelist.next == &sd->fifo_list[sync])
+ return NULL;
+
+ /* Return latter request */
+ return list_entry(rq->queuelist.next, struct request, queuelist);
+}
+
+static void *
+sio_init_queue(struct request_queue *q)
+{
+ struct sio_data *sd;
+
+ /* Allocate structure */
+ sd = kmalloc_node(sizeof(*sd), GFP_KERNEL, q->node);
+ if (!sd)
+ return NULL;
+
+ /* Initialize fifo lists */
+ INIT_LIST_HEAD(&sd->fifo_list[SYNC]);
+ INIT_LIST_HEAD(&sd->fifo_list[ASYNC]);
+
+ /* Initialize data */
+ sd->batched = 0;
+ sd->fifo_expire[SYNC] = sync_expire;
+ sd->fifo_expire[ASYNC] = async_expire;
+ sd->fifo_batch = fifo_batch;
+
+ return sd;
+}
+
+static void
+sio_exit_queue(struct elevator_queue *e)
+{
+ struct sio_data *sd = e->elevator_data;
+
+ BUG_ON(!list_empty(&sd->fifo_list[SYNC]));
+ BUG_ON(!list_empty(&sd->fifo_list[ASYNC]));
+
+ /* Free structure */
+ kfree(sd);
+}
+
+/*
+ * sysfs code
+ */
+
+static ssize_t
+sio_var_show(int var, char *page)
+{
+ return sprintf(page, "%d\n", var);
+}
+
+static ssize_t
+sio_var_store(int *var, const char *page, size_t count)
+{
+ char *p = (char *) page;
+
+ *var = simple_strtol(p, &p, 10);
+ return count;
+}
+
+#define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, char *page) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data = __VAR; \
+ if (__CONV) \
+ __data = jiffies_to_msecs(__data); \
+ return sio_var_show(__data, (page)); \
+}
+SHOW_FUNCTION(sio_sync_expire_show, sd->fifo_expire[SYNC], 1);
+SHOW_FUNCTION(sio_async_expire_show, sd->fifo_expire[ASYNC], 1);
+SHOW_FUNCTION(sio_fifo_batch_show, sd->fifo_batch, 0);
+#undef SHOW_FUNCTION
+
+#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
+static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \
+{ \
+ struct sio_data *sd = e->elevator_data; \
+ int __data; \
+ int ret = sio_var_store(&__data, (page), count); \
+ if (__data < (MIN)) \
+ __data = (MIN); \
+ else if (__data > (MAX)) \
+ __data = (MAX); \
+ if (__CONV) \
+ *(__PTR) = msecs_to_jiffies(__data); \
+ else \
+ *(__PTR) = __data; \
+ return ret; \
+}
+STORE_FUNCTION(sio_sync_expire_store, &sd->fifo_expire[SYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_async_expire_store, &sd->fifo_expire[ASYNC], 0, INT_MAX, 1);
+STORE_FUNCTION(sio_fifo_batch_store, &sd->fifo_batch, 0, INT_MAX, 0);
+#undef STORE_FUNCTION
+
+#define DD_ATTR(name) \
+ __ATTR(name, S_IRUGO|S_IWUSR, sio_##name##_show, \
+ sio_##name##_store)
+
+static struct elv_fs_entry sio_attrs[] = {
+ DD_ATTR(sync_expire),
+ DD_ATTR(async_expire),
+ DD_ATTR(fifo_batch),
+ __ATTR_NULL
+};
+
+static struct elevator_type iosched_sio = {
+ .ops = {
+ .elevator_merge_req_fn = sio_merged_requests,
+ .elevator_dispatch_fn = sio_dispatch_requests,
+ .elevator_add_req_fn = sio_add_request,
+ .elevator_queue_empty_fn = sio_queue_empty,
+ .elevator_former_req_fn = sio_former_request,
+ .elevator_latter_req_fn = sio_latter_request,
+ .elevator_init_fn = sio_init_queue,
+ .elevator_exit_fn = sio_exit_queue,
+ },
+
+ .elevator_attrs = sio_attrs,
+ .elevator_name = "sio",
+ .elevator_owner = THIS_MODULE,
+};
+
+static int __init sio_init(void)
+{
+ /* Register elevator */
+ elv_register(&iosched_sio);
+
+ return 0;
+}
+
+static void __exit sio_exit(void)
+{
+ /* Unregister elevator */
+ elv_unregister(&iosched_sio);
+}
+
+module_init(sio_init);
+module_exit(sio_exit);
+
+MODULE_AUTHOR("Miguel Boton");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Simple IO scheduler");
+
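Note: once "sio" is the active elevator, the three tunables registered above (sync_expire, async_expire, fifo_batch) show up under the per-queue iosched directory in sysfs. A minimal userspace sketch for reading them, assuming the usual /sys/block/<dev>/queue/iosched/ layout and an eMMC device named mmcblk0 (both assumptions, not part of this commit):

#include <stdio.h>

int main(void)
{
	static const char *const names[] = { "sync_expire", "async_expire", "fifo_batch" };
	char path[128], buf[64];
	unsigned int i;

	for (i = 0; i < sizeof(names) / sizeof(names[0]); i++) {
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/block/mmcblk0/queue/iosched/%s", names[i]);
		f = fopen(path, "r");
		if (!f)
			continue;	/* scheduler not active or device name differs */
		if (fgets(buf, sizeof(buf), f))
			printf("%s = %s", names[i], buf);
		fclose(f);
	}
	return 0;
}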
drivers/cpufreq/Kconfig (64 lines changed)
@@ -120,6 +120,24 @@ config CPU_FREQ_DEFAULT_GOV_INTERACTIVE
loading your cpufreq low-level hardware driver, using the
'interactive' governor for latency-sensitive workloads.
+config CPU_FREQ_DEFAULT_GOV_SMARTASS2
+ bool "smartass2"
+ select CPU_FREQ_GOV_SMARTASS2
+ help
+ Use the CPUFreq governor 'smartassV2' as default.
+
+config CPU_FREQ_DEFAULT_GOV_LAGFREE
+ bool "lagfree"
+ select CPU_FREQ_GOV_LAGFREE
+ select CPU_FREQ_GOV_PERFORMANCE
+ help
+ Use the CPUFreq governor 'lagfree' as default. This allows
+ you to get a full dynamic frequency capable system by simply
+ loading your cpufreq low-level hardware driver.
+ Be aware that not all cpufreq drivers support the lagfree
+ governor. If unsure have a look at the help section of the
+ driver. Fallback governor will be the performance governor.
+
endchoice
config CPU_FREQ_GOV_PERFORMANCE
@@ -206,4 +224,50 @@ config CPU_FREQ_GOV_CONSERVATIVE
If in doubt, say N.
+config CPU_FREQ_GOV_SMARTASS2
+ tristate "'smartassV2' cpufreq governor"
+ depends on CPU_FREQ
+ help
+ 'smartassV2' - a "smart" optimized governor for the hero!
+
+config CPU_FREQ_GOV_LAGFREE
+ tristate "'lagfree' cpufreq governor"
+ depends on CPU_FREQ
+ help
+ 'lagfree' - this governor is rather similar to 'ondemand', both in
+ its source code and in its purpose; the difference is that it is
+ optimised for battery-powered environments. The frequency is
+ increased and decreased gradually rather than jumping to 100% when
+ speed is required.
+
+ To compile this driver as a module, choose M here: the
+ module will be called cpufreq_lagfree.
+
+ For details, take a look at linux/Documentation/cpu-freq.
+
+ If in doubt, say N.
+
+config CPU_FREQ_MIN_TICKS
+ int "Ticks between governor polling interval."
+ default 10
+ help
+ Minimum number of ticks between polling interval for governors.
+
+ If in doubt, keep the default.
+
+config CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER
+ int "Sampling rate multiplier for governors."
+ default 1000
+ help
+ Sampling latency rate multiplied by the cpu switch latency.
+ Affects governor polling.
+
+config VDD_USERSPACE
+ bool "VDD sysfs interface"
+ default n
+ depends on CPU_FREQ_STAT
+ help
+ exposes the VDD table to userspace
+ allows users to adjust voltages on the fly
+
endif # CPU_FREQ
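Note: CPU_FREQ_MIN_TICKS and CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER feed the sampling-rate math in cpufreq_lagfree.c further down in this commit (def_sampling_rate = 10 * latency * multiplier, floored at 2 * jiffies_to_usecs(min_ticks)). A small standalone sketch of that arithmetic using the Kconfig defaults; HZ and the 50 us transition latency below are assumptions chosen purely for illustration:

#include <stdio.h>

#define HZ						100	/* assumed tick rate */
#define CONFIG_CPU_FREQ_MIN_TICKS			10	/* Kconfig default */
#define CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER	1000	/* Kconfig default */
#define MIN_SAMPLING_RATE_RATIO				2

static unsigned int jiffies_to_usecs(unsigned int j)
{
	return j * (1000000 / HZ);
}

int main(void)
{
	unsigned int latency_us = 50;	/* assumed cpufreq transition latency */
	unsigned int min_stat = MIN_SAMPLING_RATE_RATIO *
				jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS);
	unsigned int def_rate = 10 * latency_us *
				CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER;

	if (def_rate < min_stat)
		def_rate = min_stat;
	printf("MIN_STAT_SAMPLING_RATE = %u us\n", min_stat);	/* 200000 us */
	printf("def_sampling_rate      = %u us\n", def_rate);	/* 500000 us */
	return 0;
}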
drivers/cpufreq/Makefile (2 lines changed)
@@ -10,6 +10,8 @@ obj-$(CONFIG_CPU_FREQ_GOV_USERSPACE) += cpufreq_userspace.o
obj-$(CONFIG_CPU_FREQ_GOV_ONDEMAND) += cpufreq_ondemand.o
obj-$(CONFIG_CPU_FREQ_GOV_CONSERVATIVE) += cpufreq_conservative.o
obj-$(CONFIG_CPU_FREQ_GOV_INTERACTIVE) += cpufreq_interactive.o
+obj-$(CONFIG_CPU_FREQ_GOV_LAGFREE) += cpufreq_lagfree.o
+obj-$(CONFIG_CPU_FREQ_GOV_SMARTASS2) += cpufreq_smartass2.o
# CPUfreq cross-arch helpers
obj-$(CONFIG_CPU_FREQ_TABLE) += freq_table.o
drivers/cpufreq/cpufreq_lagfree.c (664 lines changed)
@@ -0,0 +1,664 @@
+
+/*
+ * drivers/cpufreq/cpufreq_lagfree.c
+ *
+ * Copyright (C) 2001 Russell King
+ * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
+ * Jun Nakajima <jun.nakajima@intel.com>
+ * (C) 2004 Alexander Clouter <alex-kernel@digriz.org.uk>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/smp.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/ctype.h>
+#include <linux/cpufreq.h>
+#include <linux/sysctl.h>
+#include <linux/types.h>
+#include <linux/fs.h>
+#include <linux/sysfs.h>
+#include <linux/cpu.h>
+#include <linux/kmod.h>
+#include <linux/workqueue.h>
+#include <linux/jiffies.h>
+#include <linux/kernel_stat.h>
+#include <linux/percpu.h>
+#include <linux/mutex.h>
+#include <linux/earlysuspend.h>
+/*
+ * dbs is used in this file as a short form for demand-based switching.
+ * It helps keep variable names smaller and simpler.
+ */
+
+#define DEF_FREQUENCY_UP_THRESHOLD (50)
+#define DEF_FREQUENCY_DOWN_THRESHOLD (15)
+#define FREQ_STEP_DOWN (160000)
+#define FREQ_SLEEP_MAX (320000)
+#define FREQ_AWAKE_MIN (480000)
+#define FREQ_STEP_UP_SLEEP_PERCENT (20)
+
+/*
+ * The polling frequency of this governor depends on the capability of
+ * the processor. Default polling frequency is 1000 times the transition
+ * latency of the processor. The governor will work on any processor with
+ * transition latency <= 10mS, using appropriate sampling
+ * rate.
+ * For CPUs with transition latency > 10mS (mostly drivers
+ * with CPUFREQ_ETERNAL), this governor will not work.
+ * All times here are in uS.
+ */
+static unsigned int def_sampling_rate;
+unsigned int suspended = 0;
+#define MIN_SAMPLING_RATE_RATIO (2)
+/* for correct statistics, we need at least 10 ticks between each measure */
+#define MIN_STAT_SAMPLING_RATE \
+ (MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(CONFIG_CPU_FREQ_MIN_TICKS))
+#define MIN_SAMPLING_RATE \
+ (def_sampling_rate / MIN_SAMPLING_RATE_RATIO)
+#define MAX_SAMPLING_RATE (500 * def_sampling_rate)
+#define DEF_SAMPLING_DOWN_FACTOR (4)
+#define MAX_SAMPLING_DOWN_FACTOR (10)
+#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
+
+static void do_dbs_timer(struct work_struct *work);
+
+struct cpu_dbs_info_s {
+ struct cpufreq_policy *cur_policy;
+ unsigned int prev_cpu_idle_up;
+ unsigned int prev_cpu_idle_down;
+ unsigned int enable;
+ unsigned int down_skip;
+ unsigned int requested_freq;
+};
+static DEFINE_PER_CPU(struct cpu_dbs_info_s, cpu_dbs_info);
+
+static unsigned int dbs_enable; /* number of CPUs using this policy */
+
+/*
+ * DEADLOCK ALERT! There is a ordering requirement between cpu_hotplug
+ * lock and dbs_mutex. cpu_hotplug lock should always be held before
+ * dbs_mutex. If any function that can potentially take cpu_hotplug lock
+ * (like __cpufreq_driver_target()) is being called with dbs_mutex taken, then
+ * cpu_hotplug lock should be taken before that. Note that cpu_hotplug lock
+ * is recursive for the same process. -Venki
+ */
+static DEFINE_MUTEX (dbs_mutex);
+static DECLARE_DELAYED_WORK(dbs_work, do_dbs_timer);
+
+struct dbs_tuners {
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+ unsigned int down_threshold;
+ unsigned int ignore_nice;
+ //unsigned int freq_step;
+};
+
+static struct dbs_tuners dbs_tuners_ins = {
+ .up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
+ .down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
+ .sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
+ .ignore_nice = 1,
+ //.freq_step = 5,
+};
+
+static inline unsigned int get_cpu_idle_time(unsigned int cpu)
+{
+ unsigned int add_nice = 0, ret;
+
+ if (dbs_tuners_ins.ignore_nice)
+ add_nice = kstat_cpu(cpu).cpustat.nice;
+
+ ret = kstat_cpu(cpu).cpustat.idle +
+ kstat_cpu(cpu).cpustat.iowait +
+ add_nice;
+
+ return ret;
+}
+
+/* keep track of frequency transitions */
+static int
+dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
+ void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info,
+ freq->cpu);
+
+ if (!this_dbs_info->enable)
+ return 0;
+
+ this_dbs_info->requested_freq = freq->new;
+
+ return 0;
+}
+
+static struct notifier_block dbs_cpufreq_notifier_block = {
+ .notifier_call = dbs_cpufreq_notifier
+};
+
+/************************** sysfs interface ************************/
+static ssize_t show_sampling_rate_max(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MAX_SAMPLING_RATE);
+}
+
+static ssize_t show_sampling_rate_min(struct cpufreq_policy *policy, char *buf)
+{
+ return sprintf (buf, "%u\n", MIN_SAMPLING_RATE);
+}
+
+#define define_one_ro(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0444, show_##_name, NULL)
+
+define_one_ro(sampling_rate_max);
+define_one_ro(sampling_rate_min);
+
+/* cpufreq_lagfree Governor Tunables */
+#define show_one(file_name, object) \
+static ssize_t show_##file_name \
+(struct cpufreq_policy *unused, char *buf) \
+{ \
+ return sprintf(buf, "%u\n", dbs_tuners_ins.object); \
+}
+show_one(sampling_rate, sampling_rate);
+show_one(sampling_down_factor, sampling_down_factor);
+show_one(up_threshold, up_threshold);
+show_one(down_threshold, down_threshold);
+show_one(ignore_nice_load, ignore_nice);
+//show_one(freq_step, freq_step);
+
+static ssize_t store_sampling_down_factor(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+ if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
+ return -EINVAL;
+
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.sampling_down_factor = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_sampling_rate(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ mutex_lock(&dbs_mutex);
+ if (ret != 1 || input > MAX_SAMPLING_RATE || input < MIN_SAMPLING_RATE) {
+ mutex_unlock(&dbs_mutex);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.sampling_rate = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_up_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ mutex_lock(&dbs_mutex);
+ if (ret != 1 || input > 100 || input <= dbs_tuners_ins.down_threshold) {
+ mutex_unlock(&dbs_mutex);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.up_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_down_threshold(struct cpufreq_policy *unused,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+ ret = sscanf (buf, "%u", &input);
+
+ mutex_lock(&dbs_mutex);
+ if (ret != 1 || input > 100 || input >= dbs_tuners_ins.up_threshold) {
+ mutex_unlock(&dbs_mutex);
+ return -EINVAL;
+ }
+
+ dbs_tuners_ins.down_threshold = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+static ssize_t store_ignore_nice_load(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ unsigned int j;
+
+ ret = sscanf(buf, "%u", &input);
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 1)
+ input = 1;
+
+ mutex_lock(&dbs_mutex);
+ if (input == dbs_tuners_ins.ignore_nice) { /* nothing to do */
+ mutex_unlock(&dbs_mutex);
+ return count;
+ }
+ dbs_tuners_ins.ignore_nice = input;
+
+ /* we need to re-evaluate prev_cpu_idle_up and prev_cpu_idle_down */
+ for_each_online_cpu(j) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(j);
+ j_dbs_info->prev_cpu_idle_down = j_dbs_info->prev_cpu_idle_up;
+ }
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}
+
+/*static ssize_t store_freq_step(struct cpufreq_policy *policy,
+ const char *buf, size_t count)
+{
+ unsigned int input;
+ int ret;
+
+ ret = sscanf(buf, "%u", &input);
+
+ if (ret != 1)
+ return -EINVAL;
+
+ if (input > 100)
+ input = 100;
+
+ / * no need to test here if freq_step is zero as the user might actually
+ * want this, they would be crazy though :) * /
+ mutex_lock(&dbs_mutex);
+ dbs_tuners_ins.freq_step = input;
+ mutex_unlock(&dbs_mutex);
+
+ return count;
+}*/
+
+#define define_one_rw(_name) \
+static struct freq_attr _name = \
+__ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_one_rw(sampling_rate);
+define_one_rw(sampling_down_factor);
+define_one_rw(up_threshold);
+define_one_rw(down_threshold);
+define_one_rw(ignore_nice_load);
+//define_one_rw(freq_step);
+
+static struct attribute * dbs_attributes[] = {
+ &sampling_rate_max.attr,
+ &sampling_rate_min.attr,
+ &sampling_rate.attr,
+ &sampling_down_factor.attr,
+ &up_threshold.attr,
+ &down_threshold.attr,
+ &ignore_nice_load.attr,
+ //&freq_step.attr,
+ NULL
+};
+
+static struct attribute_group dbs_attr_group = {
+ .attrs = dbs_attributes,
+ .name = "lagfree",
+};
+
+/************************** sysfs end ************************/
+
+static void dbs_check_cpu(int cpu)
+{
+ unsigned int idle_ticks, up_idle_ticks, down_idle_ticks;
+ unsigned int tmp_idle_ticks, total_idle_ticks;
+ unsigned int freq_target;
+ unsigned int freq_down_sampling_rate;
+ struct cpu_dbs_info_s *this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+ struct cpufreq_policy *policy;
+
+ if (!this_dbs_info->enable)
+ return;
+
+ policy = this_dbs_info->cur_policy;
+
+ /*
+ * The default safe range is 20% to 80%
+ * Every sampling_rate, we check
+ * - If current idle time is less than 20%, then we try to
+ * increase frequency
+ * Every sampling_rate*sampling_down_factor, we check
+ * - If current idle time is more than 80%, then we try to
+ * decrease frequency
+ *
+ * Any frequency increase takes it to the maximum frequency.
+ * Frequency reduction happens at minimum steps of
+ * 5% (default) of max_frequency
+ */
+
+ /* Check for frequency increase */
+ idle_ticks = UINT_MAX;
+
+ /* Check for frequency increase */
+ total_idle_ticks = get_cpu_idle_time(cpu);
+ tmp_idle_ticks = total_idle_ticks -
+ this_dbs_info->prev_cpu_idle_up;
+ this_dbs_info->prev_cpu_idle_up = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ up_idle_ticks = (100 - dbs_tuners_ins.up_threshold) *
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate);
+
+ if (idle_ticks < up_idle_ticks) {
+ this_dbs_info->down_skip = 0;
+ this_dbs_info->prev_cpu_idle_down =
+ this_dbs_info->prev_cpu_idle_up;
+
+ /* if we are already at full speed then break out early */
+ if (this_dbs_info->requested_freq == policy->max && !suspended)
+ return;
+
+ //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
+ if (suspended)
+ freq_target = (FREQ_STEP_UP_SLEEP_PERCENT * policy->max) / 100;
+ else
+ freq_target = policy->max;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_target == 0))
+ freq_target = 5;
+
+ this_dbs_info->requested_freq += freq_target;
+ if (this_dbs_info->requested_freq > policy->max)
+ this_dbs_info->requested_freq = policy->max;
+
+ //Screen off mode
+ if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX)
+ this_dbs_info->requested_freq = FREQ_SLEEP_MAX;
+
+ //Screen off mode
+ if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN)
+ this_dbs_info->requested_freq = FREQ_AWAKE_MIN;
+
+ __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
+
+ /* Check for frequency decrease */
+ this_dbs_info->down_skip++;
+ if (this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
+ return;
+
+ /* Check for frequency decrease */
+ total_idle_ticks = this_dbs_info->prev_cpu_idle_up;
+ tmp_idle_ticks = total_idle_ticks -
+ this_dbs_info->prev_cpu_idle_down;
+ this_dbs_info->prev_cpu_idle_down = total_idle_ticks;
+
+ if (tmp_idle_ticks < idle_ticks)
+ idle_ticks = tmp_idle_ticks;
+
+ /* Scale idle ticks by 100 and compare with up and down ticks */
+ idle_ticks *= 100;
+ this_dbs_info->down_skip = 0;
+
+ freq_down_sampling_rate = dbs_tuners_ins.sampling_rate *
+ dbs_tuners_ins.sampling_down_factor;
+ down_idle_ticks = (100 - dbs_tuners_ins.down_threshold) *
+ usecs_to_jiffies(freq_down_sampling_rate);
+
+ if (idle_ticks > down_idle_ticks) {
+ /*
+ * if we are already at the lowest speed then break out early
+ * or if we 'cannot' reduce the speed as the user might want
+ * freq_target to be zero
+ */
+ if (this_dbs_info->requested_freq == policy->min && suspended
+ /*|| dbs_tuners_ins.freq_step == 0*/)
+ return;
+
+ //freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;
+ freq_target = FREQ_STEP_DOWN; //policy->max;
+
+ /* max freq cannot be less than 100. But who knows.... */
+ if (unlikely(freq_target == 0))
+ freq_target = 5;
+
+ // prevent going under 0
+ if(freq_target > this_dbs_info->requested_freq)
+ this_dbs_info->requested_freq = policy->min;
+ else
+ this_dbs_info->requested_freq -= freq_target;
+
+ if (this_dbs_info->requested_freq < policy->min)
+ this_dbs_info->requested_freq = policy->min;
+
+ //Screen on mode
+ if (!suspended && this_dbs_info->requested_freq < FREQ_AWAKE_MIN)
+ this_dbs_info->requested_freq = FREQ_AWAKE_MIN;
+
+ //Screen off mode
+ if (suspended && this_dbs_info->requested_freq > FREQ_SLEEP_MAX)
+ this_dbs_info->requested_freq = FREQ_SLEEP_MAX;
+
+ __cpufreq_driver_target(policy, this_dbs_info->requested_freq,
+ CPUFREQ_RELATION_H);
+ return;
+ }
+}
+
+static void do_dbs_timer(struct work_struct *work)
+{
+ int i;
+ mutex_lock(&dbs_mutex);
+ for_each_online_cpu(i)
+ dbs_check_cpu(i);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ mutex_unlock(&dbs_mutex);
+}
+
+static inline void dbs_timer_init(void)
+{
+ init_timer_deferrable(&dbs_work.timer);
+ schedule_delayed_work(&dbs_work,
+ usecs_to_jiffies(dbs_tuners_ins.sampling_rate));
+ return;
+}
+
+static inline void dbs_timer_exit(void)
+{
+ cancel_delayed_work(&dbs_work);
+ return;
+}
+
+static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
+ unsigned int event)
+{
+ unsigned int cpu = policy->cpu;
+ struct cpu_dbs_info_s *this_dbs_info;
+ unsigned int j;
+ int rc;
+
+ this_dbs_info = &per_cpu(cpu_dbs_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!policy->cur))
+ return -EINVAL;
+
+ if (this_dbs_info->enable) /* Already enabled */
+ break;
+
+ mutex_lock(&dbs_mutex);
+
+ rc = sysfs_create_group(&policy->kobj, &dbs_attr_group);
+ if (rc) {
+ mutex_unlock(&dbs_mutex);
+ return rc;
+ }
+
+ for_each_cpu(j, policy->cpus) {
+ struct cpu_dbs_info_s *j_dbs_info;
+ j_dbs_info = &per_cpu(cpu_dbs_info, j);
+ j_dbs_info->cur_policy = policy;
+
+ j_dbs_info->prev_cpu_idle_up = get_cpu_idle_time(cpu);
+ j_dbs_info->prev_cpu_idle_down
+ = j_dbs_info->prev_cpu_idle_up;
+ }
+ this_dbs_info->enable = 1;
+ this_dbs_info->down_skip = 0;
+ this_dbs_info->requested_freq = policy->cur;
+
+ dbs_enable++;
+ /*
+ * Start the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (dbs_enable == 1) {
+ unsigned int latency;
+ /* policy latency is in nS. Convert it to uS first */
+ latency = policy->cpuinfo.transition_latency / 1000;
+ if (latency == 0)
+ latency = 1;
+
+ def_sampling_rate = 10 * latency *
+ CONFIG_CPU_FREQ_SAMPLING_LATENCY_MULTIPLIER;
+
+ if (def_sampling_rate < MIN_STAT_SAMPLING_RATE)
+ def_sampling_rate = MIN_STAT_SAMPLING_RATE;
+
+ dbs_tuners_ins.sampling_rate = def_sampling_rate;
+
+ dbs_timer_init();
+ cpufreq_register_notifier(
+ &dbs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ mutex_unlock(&dbs_mutex);
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ mutex_lock(&dbs_mutex);
+ this_dbs_info->enable = 0;
+ sysfs_remove_group(&policy->kobj, &dbs_attr_group);
+ dbs_enable--;
+ /*
+ * Stop the timerschedule work, when this governor
+ * is used for first time
+ */
+ if (dbs_enable == 0) {
+ dbs_timer_exit();
+ cpufreq_unregister_notifier(
+ &dbs_cpufreq_notifier_block,
+ CPUFREQ_TRANSITION_NOTIFIER);
+ }
+
+ mutex_unlock(&dbs_mutex);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ mutex_lock(&dbs_mutex);
+ if (policy->max < this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->max, CPUFREQ_RELATION_H);
+ else if (policy->min > this_dbs_info->cur_policy->cur)
+ __cpufreq_driver_target(
+ this_dbs_info->cur_policy,
+ policy->min, CPUFREQ_RELATION_L);
+ mutex_unlock(&dbs_mutex);
+ break;
+ }
+ return 0;
+}
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE
+static
+#endif
+struct cpufreq_governor cpufreq_gov_lagfree = {
+ .name = "lagfree",
+ .governor = cpufreq_governor_dbs,
+ .max_transition_latency = TRANSITION_LATENCY_LIMIT,
+ .owner = THIS_MODULE,
+};
+
+static void lagfree_early_suspend(struct early_suspend *handler) {
+ suspended = 1;
+}
+
+static void lagfree_late_resume(struct early_suspend *handler) {
+ suspended = 0;
+}
+
+static struct early_suspend lagfree_power_suspend = {
+ .suspend = lagfree_early_suspend,
+ .resume = lagfree_late_resume,
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+};
+
+static int __init cpufreq_gov_dbs_init(void)
+{
+ register_early_suspend(&lagfree_power_suspend);
+ return cpufreq_register_governor(&cpufreq_gov_lagfree);
+}
+
+static void __exit cpufreq_gov_dbs_exit(void)
+{
+ /* Make sure that the scheduled work is indeed not running */
+ flush_scheduled_work();
+
+ unregister_early_suspend(&lagfree_power_suspend);
+ cpufreq_unregister_governor(&cpufreq_gov_lagfree);
+}
+
+
+MODULE_AUTHOR ("Emilio López <turl@tuxfamily.org>");
+MODULE_DESCRIPTION ("'cpufreq_lagfree' - A dynamic cpufreq governor for "
+ "Low Latency Frequency Transition capable processors "
+ "optimised for use in a battery environment"
+ "Based on conservative by Alexander Clouter");
+MODULE_LICENSE ("GPL");
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_LAGFREE
+fs_initcall(cpufreq_gov_dbs_init);
+#else
+module_init(cpufreq_gov_dbs_init);
+#endif
+module_exit(cpufreq_gov_dbs_exit);
+
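Note: the frequency steps in dbs_check_cpu() above boil down to a small amount of arithmetic: when busy, jump towards policy max (or a 20% step of max while the screen is off), then clamp to FREQ_SLEEP_MAX when suspended and floor at FREQ_AWAKE_MIN when awake. A standalone sketch of just that ramp-up decision (the 245760 and 1008000 kHz figures in main() are illustrative values taken from this SoC's frequency range, not governor tunables):

#include <stdio.h>

#define FREQ_STEP_DOWN			160000
#define FREQ_SLEEP_MAX			320000
#define FREQ_AWAKE_MIN			480000
#define FREQ_STEP_UP_SLEEP_PERCENT	20

static unsigned int ramp_up(unsigned int requested, unsigned int max, int suspended)
{
	unsigned int step = suspended ? (FREQ_STEP_UP_SLEEP_PERCENT * max) / 100 : max;

	requested += step;
	if (requested > max)
		requested = max;
	if (suspended && requested > FREQ_SLEEP_MAX)
		requested = FREQ_SLEEP_MAX;	/* screen-off ceiling */
	if (!suspended && requested < FREQ_AWAKE_MIN)
		requested = FREQ_AWAKE_MIN;	/* awake floor */
	return requested;
}

int main(void)
{
	printf("awake:  %u kHz\n", ramp_up(245760, 1008000, 0));	/* -> 1008000 */
	printf("asleep: %u kHz\n", ramp_up(245760, 1008000, 1));	/* -> 320000 */
	return 0;
}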
drivers/cpufreq/cpufreq_smartass2.c (870 lines changed)
@@ -0,0 +1,870 @@
+
+/*
+ * drivers/cpufreq/cpufreq_smartass2.c
+ *
+ * Copyright (C) 2010 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * Author: Erasmux
+ *
+ * Based on the interactive governor by Mike Chan (mike@android.com),
+ * which was adapted to the 2.6.29 kernel by Nadlabak (pavel@doshaska.net)
+ *
+ * SMP support based on mod by faux123
+ *
+ * For a general overview of smartassV2 see the relevant part in
+ * Documentation/cpu-freq/governors.txt
+ *
+ */
+
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/cpufreq.h>
+#include <linux/sched.h>
+#include <linux/tick.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/moduleparam.h>
+#include <asm/cputime.h>
+#include <linux/earlysuspend.h>
+
+
+/******************** Tunable parameters: ********************/
+
+/*
+ * The "ideal" frequency to use when awake. The governor will ramp up faster
+ * towards the ideal frequency and slower after it has passed it. Similarly,
+ * lowering the frequency towards the ideal frequency is faster than below it.
+ */
+#define DEFAULT_AWAKE_IDEAL_FREQ 518400
+static unsigned int awake_ideal_freq;
+
+/*
+ * The "ideal" frequency to use when suspended.
+ * When set to 0, the governor will not track the suspended state (meaning
+ * that practically when sleep_ideal_freq==0 the awake_ideal_freq is used
+ * also when suspended).
+ */
+#define DEFAULT_SLEEP_IDEAL_FREQ 352000
+static unsigned int sleep_ideal_freq;
+
+/*
+ * Frequency delta when ramping up above the ideal frequency.
+ * Zero disables this and causes the governor to jump straight to the max frequency.
+ * When below the ideal frequency we always ramp up to the ideal freq.
+ */
+#define DEFAULT_RAMP_UP_STEP 128000
+static unsigned int ramp_up_step;
+
+/*
+ * Frequency delta when ramping down below the ideal frequency.
+ * Zero disables this and ramps down according to the load heuristic instead.
+ * When above the ideal frequency we always ramp down to the ideal freq.
+ */
+#define DEFAULT_RAMP_DOWN_STEP 256000
+static unsigned int ramp_down_step;
+
+/*
+ * CPU freq will be increased if measured load > max_cpu_load;
+ */
+#define DEFAULT_MAX_CPU_LOAD 50
+static unsigned long max_cpu_load;
+
+/*
+ * CPU freq will be decreased if measured load < min_cpu_load;
+ */
+#define DEFAULT_MIN_CPU_LOAD 25
+static unsigned long min_cpu_load;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp up.
+ * Notice we ignore this when we are below the ideal frequency.
+ */
+#define DEFAULT_UP_RATE_US 48000;
+static unsigned long up_rate_us;
+
+/*
+ * The minimum amount of time to spend at a frequency before we can ramp down.
+ * Notice we ignore this when we are above the ideal frequency.
+ */
+#define DEFAULT_DOWN_RATE_US 99000;
+static unsigned long down_rate_us;
+
+/*
+ * The frequency to set when waking up from sleep.
+ * When sleep_ideal_freq=0 this will have no effect.
+ */
+#define DEFAULT_SLEEP_WAKEUP_FREQ 99999999
+static unsigned int sleep_wakeup_freq;
+
+/*
+ * Sampling rate; I highly recommend leaving it at 2.
+ */
+#define DEFAULT_SAMPLE_RATE_JIFFIES 2
+static unsigned int sample_rate_jiffies;
+
+
+/*************** End of tunables ***************/
+
+
+static void (*pm_idle_old)(void);
+static atomic_t active_count = ATOMIC_INIT(0);
+
+struct smartass_info_s {
+ struct cpufreq_policy *cur_policy;
+ struct cpufreq_frequency_table *freq_table;
+ struct timer_list timer;
+ u64 time_in_idle;
+ u64 idle_exit_time;
+ u64 freq_change_time;
+ u64 freq_change_time_in_idle;
+ int cur_cpu_load;
+ int old_freq;
+ int ramp_dir;
+ unsigned int enable;
+ int ideal_speed;
+};
+static DEFINE_PER_CPU(struct smartass_info_s, smartass_info);
+
+/* Workqueues handle frequency scaling */
+static struct workqueue_struct *up_wq;
+static struct workqueue_struct *down_wq;
+static struct work_struct freq_scale_work;
+
+static cpumask_t work_cpumask;
+static spinlock_t cpumask_lock;
+
+static unsigned int suspended;
+
+#define dprintk(flag,msg...) do { \
+ if (debug_mask & flag) printk(KERN_DEBUG msg); \
+ } while (0)
+
+enum {
+ SMARTASS_DEBUG_JUMPS=1,
+ SMARTASS_DEBUG_LOAD=2,
+ SMARTASS_DEBUG_ALG=4
+};
+
+/*
+ * Combination of the above debug flags.
+ */
+static unsigned long debug_mask;
+
+static int cpufreq_governor_smartass(struct cpufreq_policy *policy,
+ unsigned int event);
+
+#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
+static
+#endif
+struct cpufreq_governor cpufreq_gov_smartass2 = {
+ .name = "smartassV2",
+ .governor = cpufreq_governor_smartass,
+ .max_transition_latency = 9000000,
+ .owner = THIS_MODULE,
+};
+
+inline static void smartass_update_min_max(struct smartass_info_s *this_smartass, struct cpufreq_policy *policy, int suspend) {
+ if (suspend) {
+ this_smartass->ideal_speed = // sleep_ideal_freq; but make sure it obeys the policy min/max
+ policy->max > sleep_ideal_freq ?
+ (sleep_ideal_freq > policy->min ? sleep_ideal_freq : policy->min) : policy->max;
+ } else {
+ this_smartass->ideal_speed = // awake_ideal_freq; but make sure it obeys the policy min/max
+ policy->min < awake_ideal_freq ?
+ (awake_ideal_freq < policy->max ? awake_ideal_freq : policy->max) : policy->min;
+ }
+}
+
+inline static void smartass_update_min_max_allcpus(void) {
+ unsigned int i;
+ for_each_online_cpu(i) {
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, i);
+ if (this_smartass->enable)
+ smartass_update_min_max(this_smartass,this_smartass->cur_policy,suspended);
+ }
+}
+
+inline static unsigned int validate_freq(struct cpufreq_policy *policy, int freq) {
+ if (freq > (int)policy->max)
+ return policy->max;
+ if (freq < (int)policy->min)
+ return policy->min;
+ return freq;
+}
+
+inline static void reset_timer(unsigned long cpu, struct smartass_info_s *this_smartass) {
+ this_smartass->time_in_idle = get_cpu_idle_time_us(cpu, &this_smartass->idle_exit_time);
+ mod_timer(&this_smartass->timer, jiffies + sample_rate_jiffies);
+}
+
+inline static void work_cpumask_set(unsigned long cpu) {
+ unsigned long flags;
+ spin_lock_irqsave(&cpumask_lock, flags);
+ cpumask_set_cpu(cpu, &work_cpumask);
+ spin_unlock_irqrestore(&cpumask_lock, flags);
+}
+
+inline static int work_cpumask_test_and_clear(unsigned long cpu) {
+ unsigned long flags;
+ int res = 0;
+ spin_lock_irqsave(&cpumask_lock, flags);
+ res = cpumask_test_and_clear_cpu(cpu, &work_cpumask);
+ spin_unlock_irqrestore(&cpumask_lock, flags);
+ return res;
+}
+
+inline static int target_freq(struct cpufreq_policy *policy, struct smartass_info_s *this_smartass,
+ int new_freq, int old_freq, int prefered_relation) {
+ int index, target;
+ struct cpufreq_frequency_table *table = this_smartass->freq_table;
+
+ if (new_freq == old_freq)
+ return 0;
+ new_freq = validate_freq(policy,new_freq);
+ if (new_freq == old_freq)
+ return 0;
+
+ if (table &&
+ !cpufreq_frequency_table_target(policy,table,new_freq,prefered_relation,&index))
+ {
+ target = table[index].frequency;
+ if (target == old_freq) {
+ // if for example we are ramping up to *at most* current + ramp_up_step
+ // but there is no such frequency higher than the current, try also
+ // to ramp up to *at least* current + ramp_up_step.
+ if (new_freq > old_freq && prefered_relation==CPUFREQ_RELATION_H
+ && !cpufreq_frequency_table_target(policy,table,new_freq,
+ CPUFREQ_RELATION_L,&index))
+ target = table[index].frequency;
+ // similarly for ramping down:
+ else if (new_freq < old_freq && prefered_relation==CPUFREQ_RELATION_L
+ && !cpufreq_frequency_table_target(policy,table,new_freq,
+ CPUFREQ_RELATION_H,&index))
+ target = table[index].frequency;
+ }
+
+ if (target == old_freq) {
+ // We should not get here:
+ // If we got here we tried to change to a validated new_freq which is different
+ // from old_freq, so there is no reason for us to remain at same frequency.
+ printk(KERN_WARNING "Smartass: frequency change failed: %d to %d => %d\n",
+ old_freq,new_freq,target);
+ return 0;
+ }
+ }
+ else target = new_freq;
+
+ __cpufreq_driver_target(policy, target, prefered_relation);
+
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassQ: jumping from %d to %d => %d (%d)\n",
+ old_freq,new_freq,target,policy->cur);
+
+ return target;
+}
+
+static void cpufreq_smartass_timer(unsigned long cpu)
+{
+ u64 delta_idle;
+ u64 delta_time;
+ int cpu_load;
+ int old_freq;
+ u64 update_time;
+ u64 now_idle;
+ int queued_work = 0;
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
+ struct cpufreq_policy *policy = this_smartass->cur_policy;
+
+ now_idle = get_cpu_idle_time_us(cpu, &update_time);
+ old_freq = policy->cur;
+
+ if (this_smartass->idle_exit_time == 0 || update_time == this_smartass->idle_exit_time)
+ return;
+
+ delta_idle = cputime64_sub(now_idle, this_smartass->time_in_idle);
+ delta_time = cputime64_sub(update_time, this_smartass->idle_exit_time);
+
+ // If timer ran less than 1ms after short-term sample started, retry.
+ if (delta_time < 1000) {
+ if (!timer_pending(&this_smartass->timer))
+ reset_timer(cpu,this_smartass);
+ return;
+ }
+
+ if (delta_idle > delta_time)
+ cpu_load = 0;
+ else
+ cpu_load = 100 * (unsigned int)(delta_time - delta_idle) / (unsigned int)delta_time;
+
+ dprintk(SMARTASS_DEBUG_LOAD,"smartassT @ %d: load %d (delta_time %llu)\n",
+ old_freq,cpu_load,delta_time);
+
+ this_smartass->cur_cpu_load = cpu_load;
+ this_smartass->old_freq = old_freq;
+
+ // Scale up if load is above max or if there were no idle cycles since coming out of idle,
+ // additionally, if we are at or above the ideal_speed, verify we have been at this frequency
+ // for at least up_rate_us:
+ if (cpu_load > max_cpu_load || delta_idle == 0)
+ {
+ if (old_freq < policy->max &&
+ (old_freq < this_smartass->ideal_speed || delta_idle == 0 ||
+ cputime64_sub(update_time, this_smartass->freq_change_time) >= up_rate_us))
+ {
+ dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp up: load %d (delta_idle %llu)\n",
+ old_freq,cpu_load,delta_idle);
+ this_smartass->ramp_dir = 1;
+ work_cpumask_set(cpu);
+ queue_work(up_wq, &freq_scale_work);
+ queued_work = 1;
+ }
+ else this_smartass->ramp_dir = 0;
+ }
+ // Similarly for scale down: load should be below min and if we are at or below ideal
+ // frequency we require that we have been at this frequency for at least down_rate_us:
+ else if (cpu_load < min_cpu_load && old_freq > policy->min &&
+ (old_freq > this_smartass->ideal_speed ||
+ cputime64_sub(update_time, this_smartass->freq_change_time) >= down_rate_us))
+ {
+ dprintk(SMARTASS_DEBUG_ALG,"smartassT @ %d ramp down: load %d (delta_idle %llu)\n",
+ old_freq,cpu_load,delta_idle);
+ this_smartass->ramp_dir = -1;
+ work_cpumask_set(cpu);
+ queue_work(down_wq, &freq_scale_work);
+ queued_work = 1;
+ }
+ else this_smartass->ramp_dir = 0;
+
+ // To avoid unnecessary load when the CPU is already at high load, we don't
+ // reset ourselves if we are at max speed. If and when there are idle cycles,
+ // the idle loop will activate the timer.
+ // Additionally, if we queued some work, the work task will reset the timer
+ // after it has done its adjustments.
+ if (!queued_work && old_freq < policy->max)
+ reset_timer(cpu,this_smartass);
+}
+
+static void cpufreq_idle(void)
+{
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
+ struct cpufreq_policy *policy = this_smartass->cur_policy;
+
+ if (!this_smartass->enable) {
+ pm_idle_old();
+ return;
+ }
+
+ if (policy->cur == policy->min && timer_pending(&this_smartass->timer))
+ del_timer(&this_smartass->timer);
+
+ pm_idle_old();
+
+ if (!timer_pending(&this_smartass->timer))
+ reset_timer(smp_processor_id(), this_smartass);
+}
+
+/* We use the same work function to scale up and down */
+static void cpufreq_smartass_freq_change_time_work(struct work_struct *work)
+{
+ unsigned int cpu;
+ int new_freq;
+ int old_freq;
+ int ramp_dir;
+ struct smartass_info_s *this_smartass;
+ struct cpufreq_policy *policy;
+ unsigned int relation = CPUFREQ_RELATION_L;
+ for_each_possible_cpu(cpu) {
+ this_smartass = &per_cpu(smartass_info, cpu);
+ if (!work_cpumask_test_and_clear(cpu))
+ continue;
+
+ ramp_dir = this_smartass->ramp_dir;
+ this_smartass->ramp_dir = 0;
+
+ old_freq = this_smartass->old_freq;
+ policy = this_smartass->cur_policy;
+
+ if (old_freq != policy->cur) {
+ // frequency was changed by someone else?
+ printk(KERN_WARNING "Smartass: frequency changed by 3rd party: %d to %d\n",
+ old_freq,policy->cur);
+ new_freq = old_freq;
+ }
+ else if (ramp_dir > 0 && nr_running() > 1) {
+ // ramp up logic:
+ if (old_freq < this_smartass->ideal_speed)
+ new_freq = this_smartass->ideal_speed;
+ else if (ramp_up_step) {
+ new_freq = old_freq + ramp_up_step;
+ relation = CPUFREQ_RELATION_H;
+ }
+ else {
+ new_freq = policy->max;
+ relation = CPUFREQ_RELATION_H;
+ }
+ dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp up: ramp_dir=%d ideal=%d\n",
+ old_freq,ramp_dir,this_smartass->ideal_speed);
+ }
+ else if (ramp_dir < 0) {
+ // ramp down logic:
+ if (old_freq > this_smartass->ideal_speed) {
+ new_freq = this_smartass->ideal_speed;
+ relation = CPUFREQ_RELATION_H;
+ }
+ else if (ramp_down_step)
+ new_freq = old_freq - ramp_down_step;
+ else {
+ // Load heuristics: Adjust new_freq such that, assuming a linear
+ // scaling of load vs. frequency, the load in the new frequency
+ // will be max_cpu_load:
+ new_freq = old_freq * this_smartass->cur_cpu_load / max_cpu_load;
+ if (new_freq > old_freq) // min_cpu_load > max_cpu_load ?!
+ new_freq = old_freq -1;
+ }
+ dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d ramp down: ramp_dir=%d ideal=%d\n",
+ old_freq,ramp_dir,this_smartass->ideal_speed);
+ }
+ else { // ramp_dir==0 ?! Could the timer change its mind about a queued ramp up/down
+ // before the work task gets to run?
+ // This may also happen if we refused to ramp up because the nr_running()==1
+ new_freq = old_freq;
+ dprintk(SMARTASS_DEBUG_ALG,"smartassQ @ %d nothing: ramp_dir=%d nr_running=%lu\n",
+ old_freq,ramp_dir,nr_running());
+ }
+
+ // do actual ramp up (returns 0, if frequency change failed):
+ new_freq = target_freq(policy,this_smartass,new_freq,old_freq,relation);
+ if (new_freq)
+ this_smartass->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
+
+ // reset timer:
+ if (new_freq < policy->max)
+ reset_timer(cpu,this_smartass);
+ // if we are maxed out, it is pointless to use the timer
+ // (idle cycles wake up the timer when the time comes)
+ else if (timer_pending(&this_smartass->timer))
+ del_timer(&this_smartass->timer);
+ }
+}
+
+static ssize_t show_debug_mask(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", debug_mask);
+}
+
+static ssize_t store_debug_mask(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0)
+ debug_mask = input;
+ return res;
+}
+
+static ssize_t show_up_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", up_rate_us);
+}
+
+static ssize_t store_up_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0 && input <= 100000000)
+ up_rate_us = input;
+ return res;
+}
+
+static ssize_t show_down_rate_us(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", down_rate_us);
+}
+
+static ssize_t store_down_rate_us(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0 && input <= 100000000)
+ down_rate_us = input;
+ return res;
+}
+
+static ssize_t show_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", sleep_ideal_freq);
+}
+
+static ssize_t store_sleep_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0) {
+ sleep_ideal_freq = input;
+ if (suspended)
+ smartass_update_min_max_allcpus();
+ }
+ return res;
+}
+
+static ssize_t show_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", sleep_wakeup_freq);
+}
+
+static ssize_t store_sleep_wakeup_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0)
+ sleep_wakeup_freq = input;
+ return res;
+}
+
+static ssize_t show_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", awake_ideal_freq);
+}
+
+static ssize_t store_awake_ideal_freq(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0) {
+ awake_ideal_freq = input;
+ if (!suspended)
+ smartass_update_min_max_allcpus();
+ }
+ return res;
+}
+
+static ssize_t show_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", sample_rate_jiffies);
+}
+
+static ssize_t store_sample_rate_jiffies(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input <= 1000)
+ sample_rate_jiffies = input;
+ return res;
+}
+
+static ssize_t show_ramp_up_step(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ramp_up_step);
+}
+
+static ssize_t store_ramp_up_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0)
+ ramp_up_step = input;
+ return res;
+}
+
+static ssize_t show_ramp_down_step(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", ramp_down_step);
+}
+
+static ssize_t store_ramp_down_step(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input >= 0)
+ ramp_down_step = input;
+ return res;
+}
+
+static ssize_t show_max_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", max_cpu_load);
+}
+
+static ssize_t store_max_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input <= 100)
+ max_cpu_load = input;
+ return res;
+}
+
+static ssize_t show_min_cpu_load(struct kobject *kobj, struct attribute *attr, char *buf)
+{
+ return sprintf(buf, "%lu\n", min_cpu_load);
+}
+
+static ssize_t store_min_cpu_load(struct kobject *kobj, struct attribute *attr, const char *buf, size_t count)
+{
+ ssize_t res;
+ unsigned long input;
+ res = strict_strtoul(buf, 0, &input);
+ if (res >= 0 && input > 0 && input < 100)
+ min_cpu_load = input;
+ return res;
+}
+
+#define define_global_rw_attr(_name) \
+static struct global_attr _name##_attr = \
+ __ATTR(_name, 0644, show_##_name, store_##_name)
+
+define_global_rw_attr(debug_mask);
+define_global_rw_attr(up_rate_us);
+define_global_rw_attr(down_rate_us);
+define_global_rw_attr(sleep_ideal_freq);
+define_global_rw_attr(sleep_wakeup_freq);
+define_global_rw_attr(awake_ideal_freq);
+define_global_rw_attr(sample_rate_jiffies);
+define_global_rw_attr(ramp_up_step);
+define_global_rw_attr(ramp_down_step);
+define_global_rw_attr(max_cpu_load);
+define_global_rw_attr(min_cpu_load);
+
+static struct attribute * smartass_attributes[] = {
+ &debug_mask_attr.attr,
+ &up_rate_us_attr.attr,
+ &down_rate_us_attr.attr,
+ &sleep_ideal_freq_attr.attr,
+ &sleep_wakeup_freq_attr.attr,
+ &awake_ideal_freq_attr.attr,
+ &sample_rate_jiffies_attr.attr,
+ &ramp_up_step_attr.attr,
+ &ramp_down_step_attr.attr,
+ &max_cpu_load_attr.attr,
+ &min_cpu_load_attr.attr,
+ NULL,
+};
+
+static struct attribute_group smartass_attr_group = {
+ .attrs = smartass_attributes,
+ .name = "smartass",
+};
+
+static int cpufreq_governor_smartass(struct cpufreq_policy *new_policy,
+ unsigned int event)
+{
+ unsigned int cpu = new_policy->cpu;
+ int rc;
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, cpu);
+
+ switch (event) {
+ case CPUFREQ_GOV_START:
+ if ((!cpu_online(cpu)) || (!new_policy->cur))
+ return -EINVAL;
+
+ this_smartass->cur_policy = new_policy;
+
+ this_smartass->enable = 1;
+
+ smartass_update_min_max(this_smartass,new_policy,suspended);
+
+ this_smartass->freq_table = cpufreq_frequency_get_table(cpu);
+ if (!this_smartass->freq_table)
+ printk(KERN_WARNING "Smartass: no frequency table for cpu %d?!\n",cpu);
+
+ smp_wmb();
+
+ // Do not register the idle hook and create sysfs
+ // entries if we have already done so.
+ if (atomic_inc_return(&active_count) <= 1) {
+ rc = sysfs_create_group(cpufreq_global_kobject,
+ &smartass_attr_group);
+ if (rc)
+ return rc;
+
+ pm_idle_old = pm_idle;
+ pm_idle = cpufreq_idle;
+ }
+
+ if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
+ reset_timer(cpu,this_smartass);
+
+ break;
+
+ case CPUFREQ_GOV_LIMITS:
+ smartass_update_min_max(this_smartass,new_policy,suspended);
+
+ if (this_smartass->cur_policy->cur > new_policy->max) {
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new max freq: %d\n",new_policy->max);
+ __cpufreq_driver_target(this_smartass->cur_policy,
+ new_policy->max, CPUFREQ_RELATION_H);
+ }
+ else if (this_smartass->cur_policy->cur < new_policy->min) {
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassI: jumping to new min freq: %d\n",new_policy->min);
+ __cpufreq_driver_target(this_smartass->cur_policy,
+ new_policy->min, CPUFREQ_RELATION_L);
+ }
+
+ if (this_smartass->cur_policy->cur < new_policy->max && !timer_pending(&this_smartass->timer))
+ reset_timer(cpu,this_smartass);
+
+ break;
+
+ case CPUFREQ_GOV_STOP:
+ this_smartass->enable = 0;
+ smp_wmb();
+ del_timer(&this_smartass->timer);
+ flush_work(&freq_scale_work);
+ this_smartass->idle_exit_time = 0;
+
+ if (atomic_dec_return(&active_count) <= 1) {
+ sysfs_remove_group(cpufreq_global_kobject,
+ &smartass_attr_group);
+ pm_idle = pm_idle_old;
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static void smartass_suspend(int cpu, int suspend)
+{
+ struct smartass_info_s *this_smartass = &per_cpu(smartass_info, smp_processor_id());
+ struct cpufreq_policy *policy = this_smartass->cur_policy;
+ unsigned int new_freq;
+
+ if (!this_smartass->enable)
+ return;
+
+ smartass_update_min_max(this_smartass,policy,suspend);
+ if (!suspend) { // resume at max speed:
+ new_freq = validate_freq(policy,sleep_wakeup_freq);
+
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: awaking at %d\n",new_freq);
+
+ __cpufreq_driver_target(policy, new_freq,
+ CPUFREQ_RELATION_L);
+ } else {
+ // To avoid wakeup issues with quick sleep/wakeup cycles, don't change the actual
+ // frequency when entering sleep; give things some time to settle down. Instead we
+ // just reset our statistics (and reset the timer). Eventually, the timer will
+ // adjust the frequency if necessary.
+
+ this_smartass->freq_change_time_in_idle =
+ get_cpu_idle_time_us(cpu,&this_smartass->freq_change_time);
+
+ dprintk(SMARTASS_DEBUG_JUMPS,"SmartassS: suspending at %d\n",policy->cur);
+ }
+
+ reset_timer(smp_processor_id(),this_smartass);
+}
+
+static void smartass_early_suspend(struct early_suspend *handler) {
+ int i;
+ if (suspended || sleep_ideal_freq==0) // disable behavior for sleep_ideal_freq==0
+ return;
+ suspended = 1;
+ for_each_online_cpu(i)
+ smartass_suspend(i,1);
+}
+
+static void smartass_late_resume(struct early_suspend *handler) {
+ int i;
+ if (!suspended) // already not suspended so nothing to do
+ return;
+ suspended = 0;
+ for_each_online_cpu(i)
+ smartass_suspend(i,0);
+}
+
+static struct early_suspend smartass_power_suspend = {
+ .suspend = smartass_early_suspend,
+ .resume = smartass_late_resume,
+#ifdef CONFIG_MACH_HERO
+ .level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1,
+#endif
+};
+
+static int __init cpufreq_smartass_init(void)
+{
+ unsigned int i;
+ struct smartass_info_s *this_smartass;
+ debug_mask = 0;
+ up_rate_us = DEFAULT_UP_RATE_US;
+ down_rate_us = DEFAULT_DOWN_RATE_US;
+ sleep_ideal_freq = DEFAULT_SLEEP_IDEAL_FREQ;
+ sleep_wakeup_freq = DEFAULT_SLEEP_WAKEUP_FREQ;
+ awake_ideal_freq = DEFAULT_AWAKE_IDEAL_FREQ;
+ sample_rate_jiffies = DEFAULT_SAMPLE_RATE_JIFFIES;
+ ramp_up_step = DEFAULT_RAMP_UP_STEP;
+ ramp_down_step = DEFAULT_RAMP_DOWN_STEP;
+ max_cpu_load = DEFAULT_MAX_CPU_LOAD;
+ min_cpu_load = DEFAULT_MIN_CPU_LOAD;
+
+ spin_lock_init(&cpumask_lock);
+
+ suspended = 0;
+
+ /* Initialize per-CPU data: */
+ for_each_possible_cpu(i) {
+ this_smartass = &per_cpu(smartass_info, i);
+ this_smartass->enable = 0;
+ this_smartass->cur_policy = 0;
+ this_smartass->ramp_dir = 0;
+ this_smartass->time_in_idle = 0;
+ this_smartass->idle_exit_time = 0;
+ this_smartass->freq_change_time = 0;
+ this_smartass->freq_change_time_in_idle = 0;
+ this_smartass->cur_cpu_load = 0;
+ // initialize timer:
+ init_timer_deferrable(&this_smartass->timer);
+ this_smartass->timer.function = cpufreq_smartass_timer;
+ this_smartass->timer.data = i;
+ work_cpumask_test_and_clear(i);
+ }
+
+ // Scale up is high priority
+ up_wq = create_workqueue("ksmartass_up");
+ down_wq = create_workqueue("ksmartass_down");
+ if (!up_wq || !down_wq)
+ return -ENOMEM;
+
+ INIT_WORK(&freq_scale_work, cpufreq_smartass_freq_change_time_work);
+
+ register_early_suspend(&smartass_power_suspend);
+
+ return cpufreq_register_governor(&cpufreq_gov_smartass2);
+}
+
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2
+fs_initcall(cpufreq_smartass_init);
+#else
+module_init(cpufreq_smartass_init);
+#endif
+
+static void __exit cpufreq_smartass_exit(void)
+{
+ cpufreq_unregister_governor(&cpufreq_gov_smartass2);
+ destroy_workqueue(up_wq);
+ destroy_workqueue(down_wq);
+}
+
+module_exit(cpufreq_smartass_exit);
+
+MODULE_AUTHOR ("Erasmux");
+MODULE_DESCRIPTION ("'cpufreq_smartass2' - A smart cpufreq governor");
+MODULE_LICENSE ("GPL");
+
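Note: one detail worth pulling out of cpufreq_smartass_freq_change_time_work() above is the ramp-down load heuristic used when ramp_down_step is set to 0: the new frequency is chosen so that, assuming load scales linearly with frequency, the load at the new frequency would equal max_cpu_load. A standalone sketch of that calculation (the example numbers in main() are illustrative only):

#include <stdio.h>

/* Mirror of the ramp_down_step==0 branch above: pick new_freq so that the
 * estimated load at new_freq would be max_cpu_load. */
static int ramp_down_target(int old_freq, int cur_cpu_load, int max_cpu_load)
{
	int new_freq = old_freq * cur_cpu_load / max_cpu_load;

	if (new_freq > old_freq)	/* only possible if cur_cpu_load > max_cpu_load */
		new_freq = old_freq - 1;
	return new_freq;
}

int main(void)
{
	/* 30% load at 518400 kHz with max_cpu_load=50 -> 311040 kHz */
	printf("%d kHz\n", ramp_down_target(518400, 30, 50));
	return 0;
}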