cpufreq: conservative: Squash franco's code
hellsgod committed Jun 8, 2015
1 parent c374814 commit 6f9e865
151 changes: 103 additions & 48 deletions drivers/cpufreq/cpufreq_conservative.c
@@ -23,15 +23,15 @@
#include <linux/tick.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/touchboost.h>

/*
* dbs is used in this file as a shortform for demandbased switching
* It helps to keep variable names smaller, simpler
*/

#define DEF_FREQUENCY_UP_THRESHOLD (80)
#define DEF_FREQUENCY_DOWN_THRESHOLD (20)
#define DEF_FREQUENCY_STEP (5)
#define DEF_FREQUENCY_UP_THRESHOLD (98)
#define DEF_FREQUENCY_DOWN_THRESHOLD (30)

/*
* The polling frequency of this governor depends on the capability of
@@ -52,6 +52,9 @@ static unsigned int min_sampling_rate;
#define DEF_SAMPLING_DOWN_FACTOR (1)
#define MAX_SAMPLING_DOWN_FACTOR (10)
#define TRANSITION_LATENCY_LIMIT (10 * 1000 * 1000)
#define MICRO_FREQUENCY_MIN_SAMPLE_RATE (10000)
#define BOOST_DURATION_US (40000)
#define BOOST_FREQ_VAL (1497600)

static void do_dbs_timer(struct work_struct *work);

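Taken together with the raised thresholds (up 80 -> 98, down 20 -> 30), the new constants wire a simple touch boost into the governor: for BOOST_DURATION_US (40,000 us, i.e. 40 ms) after an input event, the governor requests at least BOOST_FREQ_VAL (1,497,600 kHz). A minimal sketch of the window test used later in dbs_check_cpu, assuming get_input_time() from <linux/touchboost.h> returns the last input-event timestamp in microseconds (which is what the comparison in the patch implies):

#include <linux/ktime.h>
#include <linux/touchboost.h>

/* "Boosted" means we are still inside the window that follows
 * the most recent touch event. */
static inline bool boost_window_active(u64 duration_us)
{
	u64 now = ktime_to_us(ktime_get());

	return now < get_input_time() + duration_us;
}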
@@ -81,19 +84,25 @@ static unsigned int dbs_enable; /* number of CPUs using this policy */
*/
static DEFINE_MUTEX(dbs_mutex);

static struct workqueue_struct *dbs_wq;

static struct dbs_tuners {
unsigned int sampling_rate;
unsigned int sampling_down_factor;
unsigned int up_threshold;
unsigned int down_threshold;
unsigned int ignore_nice;
unsigned int freq_step;
unsigned int input_boost_freq;
u64 input_boost_duration;
} dbs_tuners_ins = {
.up_threshold = DEF_FREQUENCY_UP_THRESHOLD,
.down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD,
.sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR,
.ignore_nice = 0,
.freq_step = DEF_FREQUENCY_STEP,
.freq_step = 5,
.input_boost_freq = BOOST_FREQ_VAL,
.input_boost_duration = BOOST_DURATION_US,
};

/* keep track of frequency transitions */
@@ -114,7 +123,7 @@ dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,

/*
* we only care if our internally tracked freq moves outside
* the 'valid' ranges of frequency available to us otherwise
* the 'valid' ranges of freqency available to us otherwise
* we do not change it
*/
if (this_dbs_info->requested_freq > policy->max
@@ -150,6 +159,8 @@ show_one(up_threshold, up_threshold);
show_one(down_threshold, down_threshold);
show_one(ignore_nice_load, ignore_nice);
show_one(freq_step, freq_step);
show_one(input_boost_freq, input_boost_freq);
show_one(input_boost_duration, input_boost_duration);

static ssize_t store_sampling_down_factor(struct kobject *a,
struct attribute *b,
@@ -236,7 +247,7 @@ static ssize_t store_ignore_nice_load(struct kobject *a, struct attribute *b,
struct cpu_dbs_info_s *dbs_info;
dbs_info = &per_cpu(cs_cpu_dbs_info, j);
dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&dbs_info->prev_cpu_wall, 0);
&dbs_info->prev_cpu_wall, true);
if (dbs_tuners_ins.ignore_nice)
dbs_info->prev_cpu_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
}
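Note that the last argument of get_cpu_idle_time() flips from 0 to true throughout the patch; in mainline cpufreq that parameter is io_busy, so iowait now counts as busy time rather than idle. Heavy IO therefore registers as load and tends to hold the frequency up.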
@@ -262,12 +273,48 @@ static ssize_t store_freq_step(struct kobject *a, struct attribute *b,
return count;
}

static ssize_t store_input_boost_freq(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);

if (ret != 1)
return -EINVAL;

dbs_tuners_ins.input_boost_freq = input;
return count;
}

static ssize_t store_input_boost_duration(struct kobject *a, struct attribute *b,
const char *buf, size_t count)
{
unsigned int input;
int ret;
ret = sscanf(buf, "%u", &input);

if (ret != 1)
return -EINVAL;

dbs_tuners_ins.input_boost_duration = input;
return count;
}
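A note on the two store handlers above: input is unsigned, and sscanf("%u") will quietly wrap a negative string such as "-1" into a large unsigned value rather than reject it. If stricter parsing were wanted, kstrtouint() fails cleanly on malformed or negative input; a hypothetical alternative sketch, not what this commit does:

static ssize_t store_input_boost_freq(struct kobject *a, struct attribute *b,
				      const char *buf, size_t count)
{
	unsigned int input;
	int ret;

	/* kstrtouint() returns -EINVAL or -ERANGE on bad input. */
	ret = kstrtouint(buf, 10, &input);
	if (ret)
		return ret;

	dbs_tuners_ins.input_boost_freq = input;
	return count;
}

Either way, define_one_global_rw() below exposes both tunables read-write, so they should surface next to the existing ones under /sys/devices/system/cpu/cpufreq/conservative/.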

define_one_global_rw(sampling_rate);
define_one_global_rw(sampling_down_factor);
define_one_global_rw(up_threshold);
define_one_global_rw(down_threshold);
define_one_global_rw(ignore_nice_load);
define_one_global_rw(freq_step);
define_one_global_rw(input_boost_freq);
define_one_global_rw(input_boost_duration);

static struct attribute *dbs_attributes[] = {
&sampling_rate_min.attr,
@@ -277,6 +324,8 @@ static struct attribute *dbs_attributes[] = {
&down_threshold.attr,
&ignore_nice_load.attr,
&freq_step.attr,
&input_boost_freq.attr,
&input_boost_duration.attr,
NULL
};

@@ -287,27 +336,19 @@ static struct attribute_group dbs_attr_group = {

/************************** sysfs end ************************/

static inline unsigned int get_freq_target(struct cpufreq_policy *policy)
{
unsigned int freq_target = (dbs_tuners_ins.freq_step * policy->max)
/ 100;

/* max freq cannot be less than 100. But who knows... */
if (unlikely(freq_target == 0))
freq_target = DEF_FREQUENCY_STEP;

return freq_target;
}

static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
{
unsigned int load = 0;
unsigned int max_load = 0;

unsigned int freq_target;
struct cpufreq_policy *policy;
unsigned int j;
bool boosted;
u64 now;

policy = this_dbs_info->cur_policy;
now = ktime_to_us(ktime_get());
boosted = now < (get_input_time() + dbs_tuners_ins.input_boost_duration);

/*
* Every sampling_rate, we check, if current idle time is less
@@ -328,7 +369,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)

j_dbs_info = &per_cpu(cs_cpu_dbs_info, j);

cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, 0);
cur_idle_time = get_cpu_idle_time(j, &cur_wall_time, true);

wall_time = (unsigned int)
(cur_wall_time - j_dbs_info->prev_cpu_wall);
@@ -365,6 +406,7 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
}

cpufreq_notify_utilization(policy, max_load);

/*
* break out if we 'cannot' reduce the speed as the user might
* want freq_step to be zero
@@ -380,7 +422,19 @@
if (this_dbs_info->requested_freq == policy->max)
return;

this_dbs_info->requested_freq += get_freq_target(policy);
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

/* max freq cannot be less than 100. But who knows.... */
if (unlikely(freq_target == 0))
freq_target = 5;

this_dbs_info->requested_freq += freq_target;

if (boosted)
this_dbs_info->requested_freq
= max(dbs_tuners_ins.input_boost_freq,
this_dbs_info->requested_freq);

if (this_dbs_info->requested_freq > policy->max)
this_dbs_info->requested_freq = policy->max;

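For a sense of scale (illustrative numbers, not from the patch): with the default freq_step of 5, each sample that crosses the 98% up-threshold bumps the request by 5% of policy->max, while an active boost jumps straight to the 1,497,600 kHz floor.

/* Worked numbers, assuming policy->max = 2,000,000 kHz and a current
 * request of 1,000,000 kHz:
 *
 *   freq_target = (5 * 2000000) / 100          =  100000 kHz
 *   requested   = 1000000 + 100000             = 1100000 kHz
 *   boosted     -> max(1497600, 1100000)       = 1497600 kHz
 *   final clamp -> min(requested, policy->max) = 1497600 kHz
 */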
@@ -389,28 +443,28 @@ static void dbs_check_cpu(struct cpu_dbs_info_s *this_dbs_info)
return;
}

/* if sampling_down_factor is active break out early */
if (++this_dbs_info->down_skip < dbs_tuners_ins.sampling_down_factor)
return;
this_dbs_info->down_skip = 0;
/*
* The optimal frequency is the frequency that is the lowest that
* can support the current CPU usage without triggering the up
* policy. To be safe, we focus 10 points under the threshold.
*/
if (max_load < (dbs_tuners_ins.down_threshold)) {
freq_target = (dbs_tuners_ins.freq_step * policy->max) / 100;

this_dbs_info->requested_freq -= freq_target;
if (this_dbs_info->requested_freq < policy->min)
this_dbs_info->requested_freq = policy->min;

/* Check for frequency decrease */
if (max_load < dbs_tuners_ins.down_threshold) {
unsigned int freq_target;
/*
* if we cannot reduce the frequency anymore, break out early
*/
if (policy->cur == policy->min)
return;

freq_target = get_freq_target(policy);
if (this_dbs_info->requested_freq > freq_target)
this_dbs_info->requested_freq -= freq_target;
else
this_dbs_info->requested_freq = policy->min;

if (this_dbs_info->requested_freq < policy->min)
this_dbs_info->requested_freq = policy->min;
if (boosted)
this_dbs_info->requested_freq
= max(dbs_tuners_ins.input_boost_freq,
this_dbs_info->requested_freq);

__cpufreq_driver_target(policy, this_dbs_info->requested_freq,
CPUFREQ_RELATION_H);
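One caveat on the rewritten decrease path: requested_freq is unsigned, so on a policy whose minimum lies below freq_target (i.e. below freq_step percent of the maximum) the bare subtraction can wrap to a huge value, and the < policy->min clamp that follows never fires; the request would then be driven high instead of settling at policy->min. The guard in the code being removed avoided exactly that, and re-adding it is cheap:

/* Safe decrement: never let the unsigned subtraction wrap. */
if (this_dbs_info->requested_freq > freq_target)
	this_dbs_info->requested_freq -= freq_target;
else
	this_dbs_info->requested_freq = policy->min;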
@@ -433,7 +487,7 @@ static void do_dbs_timer(struct work_struct *work)

dbs_check_cpu(dbs_info);

schedule_delayed_work_on(cpu, &dbs_info->work, delay);
queue_delayed_work_on(cpu, dbs_wq, &dbs_info->work, delay);
mutex_unlock(&dbs_info->timer_mutex);
}

@@ -445,7 +499,7 @@ static inline void dbs_timer_init(struct cpu_dbs_info_s *dbs_info)

dbs_info->enable = 1;
INIT_DELAYED_WORK_DEFERRABLE(&dbs_info->work, do_dbs_timer);
schedule_delayed_work_on(dbs_info->cpu, &dbs_info->work, delay);
queue_delayed_work_on(dbs_info->cpu, dbs_wq, &dbs_info->work, delay);
}

static inline void dbs_timer_exit(struct cpu_dbs_info_s *dbs_info)
@@ -477,7 +531,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
j_dbs_info->cur_policy = policy;

j_dbs_info->prev_cpu_idle = get_cpu_idle_time(j,
&j_dbs_info->prev_cpu_wall, 0);
&j_dbs_info->prev_cpu_wall, true);
if (dbs_tuners_ins.ignore_nice)
j_dbs_info->prev_cpu_nice =
kcpustat_cpu(j).cpustat[CPUTIME_NICE];
@@ -500,7 +554,7 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,

rc = sysfs_create_group(cpufreq_global_kobject,
&dbs_attr_group);
if (rc) {
if (rc && rc != -EEXIST) {
mutex_unlock(&dbs_mutex);
return rc;
}
@@ -512,11 +566,8 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
min_sampling_rate =
MIN_SAMPLING_RATE_RATIO * jiffies_to_usecs(10);
/* Bring kernel and HW constraints together */
min_sampling_rate = max(min_sampling_rate,
MIN_LATENCY_MULTIPLIER * latency);
dbs_tuners_ins.sampling_rate =
max(min_sampling_rate,
latency * LATENCY_MULTIPLIER);
min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
dbs_tuners_ins.sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;

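This also pins the governor's cadence: instead of deriving the floor and default from the driver's transition latency (the lines removed above), both are fixed at MICRO_FREQUENCY_MIN_SAMPLE_RATE, i.e. 10,000 us. At a 10 ms sampling period, the 40,000 us touch boost spans roughly four samples before it lapses.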
cpufreq_register_notifier(
&dbs_cpufreq_notifier_block,
@@ -545,9 +596,6 @@ static int cpufreq_governor_dbs(struct cpufreq_policy *policy,
CPUFREQ_TRANSITION_NOTIFIER);

mutex_unlock(&dbs_mutex);
if (!dbs_enable)
sysfs_remove_group(cpufreq_global_kobject,
&dbs_attr_group);

break;

@@ -580,12 +628,19 @@ struct cpufreq_governor cpufreq_gov_conservative = {

static int __init cpufreq_gov_dbs_init(void)
{
dbs_wq = alloc_workqueue("conservative_dbs_wq", WQ_HIGHPRI, 0);
if (!dbs_wq) {
printk(KERN_ERR "Failed to create conservative_dbs_wq workqueue\n");
return -ENOMEM;
}

return cpufreq_register_governor(&cpufreq_gov_conservative);
}

static void __exit cpufreq_gov_dbs_exit(void)
{
cpufreq_unregister_governor(&cpufreq_gov_conservative);
destroy_workqueue(dbs_wq);
}
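Beyond the governor logic itself, the squash moves all sampling work from the shared system workqueue onto a dedicated high-priority queue (alloc_workqueue("conservative_dbs_wq", WQ_HIGHPRI, 0)), with queue_delayed_work_on() replacing schedule_delayed_work_on() in both timer paths, so load sampling is less likely to be delayed behind unrelated system work. Note the teardown order in the exit hook above: the governor is unregistered before destroy_workqueue(), so no CPU can requeue work onto a dead queue.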


