soc: qcom: rq_stats: Add hotplug enable toggle
Signed-off-by: AudioGod <audiogod@sonic-developers.com>
neobuddy89 authored and AudioGod committed Dec 21, 2015
1 parent 1037d21 commit 1269030
Showing 2 changed files with 64 additions and 13 deletions.
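The change adds a writable hotplug_enable attribute next to the existing read-only hotplug_disable, gates the cpufreq transition, CPU hotplug, suspend and deferred-timer handlers on rq_info.hotplug_enabled, and changes the defaults (hotplug_enabled = 0, hotplug_disabled = 1) so the machinery stays off until userspace opts in. A minimal userspace sketch of flipping the toggle follows; the sysfs path is an assumption, since it depends on where init_rq_attribs() registers the module's kobject, which this diff does not show.

/*
 * Hypothetical toggle program. Per the new store handler, writing "1" sets
 * hotplug_enabled and clears hotplug_disabled; writing "0" does the reverse.
 * The path below is assumed, not taken from this commit.
 */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/devices/system/cpu/cpu0/rq-stats/hotplug_enable";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fputs("1\n", f);	/* enable run-queue-stats driven hotplugging */
	fclose(f);
	return 0;
}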
76 changes: 63 additions & 13 deletions drivers/soc/qcom/msm_rq_stats.c
@@ -54,14 +54,18 @@ struct cpu_load_data {

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);


static int update_average_load(unsigned int freq, unsigned int cpu)
{

struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
cputime64_t cur_wall_time, cur_idle_time;
int ret;
unsigned int idle_time, wall_time;
unsigned int cur_load, load_at_max_freq;
cputime64_t cur_wall_time, cur_idle_time;
struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
struct cpufreq_policy policy;

ret = cpufreq_get_policy(&policy, cpu);
if (ret)
return -EINVAL;

cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);

@@ -71,14 +75,13 @@ static int update_average_load(unsigned int freq, unsigned int cpu)
idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
pcpu->prev_cpu_idle = cur_idle_time;


if (unlikely(wall_time <= 0 || wall_time < idle_time))
return 0;

cur_load = 100 * (wall_time - idle_time) / wall_time;

/* Calculate the scaled load across CPU */
load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
load_at_max_freq = (cur_load * policy.cur) / policy.max;

if (!pcpu->avg_load_maxfreq) {
/* This is the first sample in this window*/
@@ -125,6 +128,9 @@ static int cpufreq_transition_handler(struct notifier_block *nb,
struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
int j;

if (!rq_info.hotplug_enabled)
return 0;

switch (val) {
case CPUFREQ_POSTCHANGE:
for_each_cpu(j, this_cpu->related_cpus) {
@@ -157,6 +163,9 @@ static int cpu_hotplug_handler(struct notifier_block *nb,
unsigned int cpu = (unsigned long)data;
struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);

if (!rq_info.hotplug_enabled)
return 0;

switch (val) {
case CPU_ONLINE:
if (!this_cpu->cur_freq)
@@ -172,6 +181,9 @@ static int cpu_hotplug_handler(struct notifier_block *nb,
static int system_suspend_handler(struct notifier_block *nb,
unsigned long val, void *data)
{
if (!rq_info.hotplug_enabled)
return 0;

switch (val) {
case PM_POST_HIBERNATION:
case PM_POST_SUSPEND:
@@ -208,13 +220,45 @@ static int freq_policy_handler(struct notifier_block *nb,
static ssize_t hotplug_disable_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
unsigned int val = 0;
val = rq_info.hotplug_disabled;
return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
return snprintf(buf, MAX_LONG_SIZE, "%d\n", rq_info.hotplug_disabled);
}

static ssize_t store_hotplug_enable(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int ret;
unsigned int val;
unsigned long flags = 0;

	ret = sscanf(buf, "%u", &val);
	if (ret != 1 || val > 1)
		return -EINVAL;

	/* Validate the input before taking rq_lock so the error path
	 * does not return with the lock held. */
	spin_lock_irqsave(&rq_lock, flags);
	rq_info.hotplug_enabled = val;
	rq_info.hotplug_disabled = !val;
	spin_unlock_irqrestore(&rq_lock, flags);

return count;
}

static ssize_t show_hotplug_enable(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, MAX_LONG_SIZE, "%d\n", rq_info.hotplug_enabled);
}

static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);

static struct kobj_attribute hotplug_enabled_attr =
__ATTR(hotplug_enable, S_IWUSR | S_IRUSR, show_hotplug_enable,
store_hotplug_enable);

#ifdef CONFIG_BRICKED_HOTPLUG
unsigned int get_rq_info(void)
{
@@ -235,6 +279,9 @@ static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);

static void def_work_fn(struct work_struct *work)
{
if (!rq_info.hotplug_enabled)
return;

/* Notify polling threads on change of value */
sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}
@@ -326,7 +373,8 @@ static struct kobj_attribute def_timer_ms_attr =
static ssize_t show_cpu_normalized_load(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq());
return snprintf(buf, MAX_LONG_SIZE, "%u\n",
rq_info.hotplug_enabled ? report_load_at_max_freq() : 0);
}

static struct kobj_attribute cpu_normalized_load_attr =
@@ -339,6 +387,7 @@ static struct attribute *rq_attrs[] = {
&run_queue_avg_attr.attr,
&run_queue_poll_ms_attr.attr,
&hotplug_disabled_attr.attr,
&hotplug_enabled_attr.attr,
NULL,
};

@@ -388,7 +437,8 @@ static int __init msm_rq_stats_init(void)
rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
rq_info.rq_poll_last_jiffy = 0;
rq_info.def_timer_last_jiffy = 0;
rq_info.hotplug_disabled = 0;
rq_info.hotplug_disabled = 1;
rq_info.hotplug_enabled = 0;
ret = init_rq_attribs();

rq_info.init = 1;
@@ -397,9 +447,9 @@ static int __init msm_rq_stats_init(void)
struct cpu_load_data *pcpu = &per_cpu(cpuload, i);
mutex_init(&pcpu->cpu_load_mutex);
cpufreq_get_policy(&cpu_policy, i);
pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
pcpu->policy_max = cpu_policy.max;
if (cpu_online(i))
pcpu->cur_freq = cpufreq_quick_get(i);
pcpu->cur_freq = cpu_policy.cur;
cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
}
freq_transition.notifier_call = cpufreq_transition_handler;
1 change: 1 addition & 0 deletions include/linux/rq_stats.h
@@ -19,6 +19,7 @@ struct rq_data {
unsigned long rq_poll_total_jiffies;
unsigned long def_timer_last_jiffy;
unsigned int hotplug_disabled;
unsigned int hotplug_enabled;
int64_t def_start_time;
struct attribute_group *attr_group;
struct kobject *kobj;
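The toggle only matters to whatever consumes these statistics. Within this file it silences the notifier handlers and zeroes cpu_normalized_load when disabled; the CONFIG_BRICKED_HOTPLUG block additionally exposes get_rq_info() to an external hotplug driver. A rough sketch of such a consumer is below; the extern declaration, the work function and the thresholds are assumptions, while get_rq_info() and the hotplug_disabled flag come from the patched code.

/*
 * Sketch of an out-of-tree consumer (not part of this commit). Assumes
 * rq_stats.h exports rq_info; the threshold values are invented.
 */
#include <linux/cpu.h>
#include <linux/workqueue.h>
#include <linux/rq_stats.h>

extern unsigned int get_rq_info(void);	/* provided under CONFIG_BRICKED_HOTPLUG */

static void hotplug_decision_fn(struct work_struct *work)
{
	unsigned int nr_run;

	if (rq_info.hotplug_disabled)		/* userspace left the toggle off */
		return;

	nr_run = get_rq_info();			/* averaged run-queue depth */

	if (nr_run > 2 && !cpu_online(1))	/* hypothetical thresholds */
		cpu_up(1);
	else if (nr_run <= 1 && cpu_online(1))
		cpu_down(1);
}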
