cpufreq: cache tunables for ondemand and conservative governors
This makes them work better with big.LITTLE setups.  Previously, all big cluster tunables were lost when a cluster went offline.

Signed-off-by: Luca Grifo <lg@linux.com>
flar2 authored and morogoku committed Mar 26, 2017
1 parent f7c6380 commit 4747f27
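The idea behind the change: a policy's tunables are cached in a per-CPU pointer keyed by the first CPU of the policy's related_cpus mask (or CPU 0 when tunables are global), so when the last CPU of a cluster hotplugs out and its governor instance is torn down, the values survive and are handed back on the next init. Below is a minimal userspace C sketch of that lifecycle; the names, the fixed CPU count, and the plain array standing in for the kernel's per-CPU machinery are all illustrative, not from the tree.

/*
 * Userspace model of the tunables cache this commit adds.
 * Not kernel code; names are illustrative.
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8

struct tuners {
	unsigned int up_threshold;
};

/* models the per-CPU cached_tuners variable */
static struct tuners *cached_tuners[NR_CPUS];

struct policy {
	int first_cpu;	/* stand-in for cpumask_first(policy->related_cpus) */
};

static struct tuners *alloc_tuners(struct policy *p)
{
	struct tuners *t = calloc(1, sizeof(*t));

	if (!t)
		return NULL;
	t->up_threshold = 80;			/* default value */
	cached_tuners[p->first_cpu] = t;	/* models save_tuners() */
	return t;
}

static struct tuners *governor_init(struct policy *p)
{
	/* models restore_tuners(): reuse the cached copy if one exists */
	struct tuners *t = cached_tuners[p->first_cpu];

	return t ? t : alloc_tuners(p);
}

/* governor exit no longer frees: the cache keeps the values alive */
static void governor_exit(struct tuners *t) { (void)t; }

int main(void)
{
	struct policy big = { .first_cpu = 4 };	/* big cluster: CPUs 4-7 */
	struct tuners *t = governor_init(&big);

	if (!t)
		return 1;
	t->up_threshold = 95;	/* user tweaks a tunable via sysfs */
	governor_exit(t);	/* whole cluster goes offline */

	t = governor_init(&big);	/* cluster comes back online */
	printf("up_threshold after hotplug: %u\n", t->up_threshold);

	free(t);
	return 0;
}

Compiled and run, the sketch prints up_threshold after hotplug: 95, i.e. the tweaked value survives the simulated offline/online cycle, which is exactly the behavior the diff below adds.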
Showing 4 changed files with 123 additions and 15 deletions.
56 changes: 53 additions & 3 deletions drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
 #define MAX_SAMPLING_DOWN_FACTOR	(10)
 
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
+static DEFINE_PER_CPU(struct cs_dbs_tuners *, cached_tuners);
 
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
@@ -317,14 +318,29 @@ static struct attribute_group cs_attr_group_gov_pol = {
 
 /************************** sysfs end ************************/
 
-static int cs_init(struct dbs_data *dbs_data)
+static void save_tuners(struct cpufreq_policy *policy,
+			struct cs_dbs_tuners *tuners)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	WARN_ON(per_cpu(cached_tuners, cpu) &&
+		per_cpu(cached_tuners, cpu) != tuners);
+	per_cpu(cached_tuners, cpu) = tuners;
+}
+
+static struct cs_dbs_tuners *alloc_tuners(struct cpufreq_policy *policy)
 {
 	struct cs_dbs_tuners *tuners;
 
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
@@ -333,6 +349,34 @@ static int cs_init(struct dbs_data *dbs_data)
 	tuners->ignore_nice_load = 0;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
 
+	save_tuners(policy, tuners);
+
+	return tuners;
+}
+
+static struct cs_dbs_tuners *restore_tuners(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	return per_cpu(cached_tuners, cpu);
+}
+
+static int cs_init(struct dbs_data *dbs_data, struct cpufreq_policy *policy)
+{
+	struct cs_dbs_tuners *tuners;
+
+	tuners = restore_tuners(policy);
+	if (!tuners) {
+		tuners = alloc_tuners(policy);
+		if (IS_ERR(tuners))
+			return PTR_ERR(tuners);
+	}
+
 	dbs_data->tuners = tuners;
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 		jiffies_to_usecs(10);
@@ -342,7 +386,7 @@ static int cs_init(struct dbs_data *dbs_data)
 
 static void cs_exit(struct dbs_data *dbs_data)
 {
-	kfree(dbs_data->tuners);
+	//nothing to do
 }
 
 define_get_cpu_dbs_routines(cs_cpu_dbs_info);
@@ -391,7 +435,13 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
+	for_each_possible_cpu(cpu) {
+		kfree(per_cpu(cached_tuners, cpu));
+		per_cpu(cached_tuners, cpu) = NULL;
+	}
 }
 
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
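Because alloc_tuners() returns a pointer, it can no longer hand back -ENOMEM directly; the diff switches to the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() helpers from <linux/err.h>, which smuggle a small negative errno through an invalid pointer value. A simplified userspace rendition of those helpers (the real ones also carry __must_check annotations and unlikely() hints):

/* Simplified sketch of the <linux/err.h> idiom used above. */
#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095	/* errnos live in the top 4095 pointer values */

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	/* what alloc_tuners() returns when kzalloc() fails */
	void *p = ERR_PTR(-ENOMEM);

	if (IS_ERR(p))
		printf("allocation failed: errno %ld\n", -PTR_ERR(p));
	return 0;
}

This is why cs_init() can test the returned pointer with IS_ERR() and convert it back to an integer error code with PTR_ERR().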
2 changes: 1 addition & 1 deletion drivers/cpufreq/cpufreq_governor.c
@@ -286,7 +286,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 
 		dbs_data->cdata = cdata;
 		dbs_data->usage_count = 1;
-		rc = cdata->init(dbs_data);
+		rc = cdata->init(dbs_data, policy);
 		if (rc) {
 			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
 			kfree(dbs_data);
2 changes: 1 addition & 1 deletion drivers/cpufreq/cpufreq_governor.h
@@ -208,7 +208,7 @@ struct common_dbs_data {
 	void *(*get_cpu_dbs_info_s)(int cpu);
 	void (*gov_dbs_timer)(struct work_struct *work);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
-	int (*init)(struct dbs_data *dbs_data);
+	int (*init)(struct dbs_data *dbs_data, struct cpufreq_policy *policy);
 	void (*exit)(struct dbs_data *dbs_data);
 
 	/* Governor specific ops, see below */
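The header change above and the call-site change in cpufreq_governor.c are two halves of one interface change: init() gains a policy argument so each governor can pick its cache slot. A small C sketch of the pattern with hypothetical names, showing how the function-pointer signature, its implementations, and the single call site move in lockstep:

/* Sketch of widening a callback signature; names are illustrative. */
#include <stdio.h>

struct policy { int first_cpu; };
struct dbs_data { void *tuners; };

struct common_ops {
	/* before: int (*init)(struct dbs_data *dbs_data); */
	int (*init)(struct dbs_data *dbs_data, struct policy *policy);
};

static int my_init(struct dbs_data *dbs_data, struct policy *policy)
{
	printf("init for cluster starting at CPU %d\n", policy->first_cpu);
	dbs_data->tuners = NULL;
	return 0;
}

static struct common_ops ops = { .init = my_init };

int main(void)
{
	struct policy p = { .first_cpu = 4 };
	struct dbs_data d;

	return ops.init(&d, &p);	/* mirrors cdata->init(dbs_data, policy) */
}

Any implementation that is not updated to the new two-argument form fails to compile against the struct, which is what makes this kind of API change safe to apply across both governors at once.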
78 changes: 68 additions & 10 deletions drivers/cpufreq/cpufreq_ondemand.c
@@ -32,6 +32,7 @@
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
+static DEFINE_PER_CPU(struct od_dbs_tuners *, cached_tuners);
 
 static struct od_ops od_ops;
 
@@ -479,7 +480,22 @@ static struct attribute_group od_attr_group_gov_pol = {
 
 /************************** sysfs end ************************/
 
-static int od_init(struct dbs_data *dbs_data)
+static void save_tuners(struct cpufreq_policy *policy,
+			struct od_dbs_tuners *tuners)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	WARN_ON(per_cpu(cached_tuners, cpu) &&
+		per_cpu(cached_tuners, cpu) != tuners);
+	per_cpu(cached_tuners, cpu) = tuners;
+}
+
+static struct od_dbs_tuners *alloc_tuners(struct cpufreq_policy *policy)
 {
 	struct od_dbs_tuners *tuners;
 	u64 idle_time;
@@ -488,7 +504,7 @@ static int od_init(struct dbs_data *dbs_data)
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	cpu = get_cpu();
@@ -497,33 +513,69 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+	} else {
+		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	}
+
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice_load = 0;
+	tuners->powersave_bias = default_powersave_bias;
+	tuners->io_is_busy = should_io_be_busy();
+
+	save_tuners(policy, tuners);
+
+	return tuners;
+}
+
+static struct od_dbs_tuners *restore_tuners(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	return per_cpu(cached_tuners, cpu);
+}
+
+static int od_init(struct dbs_data *dbs_data, struct cpufreq_policy *policy)
+{
+	struct od_dbs_tuners *tuners;
+	u64 idle_time;
+	int cpu;
+
+	tuners = restore_tuners(policy);
+	if (!tuners) {
+		tuners = alloc_tuners(policy);
+		if (IS_ERR(tuners))
+			return PTR_ERR(tuners);
+	}
+
+	cpu = get_cpu();
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
+	put_cpu();
+	if (idle_time != -1ULL) {
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low). The deferred
 		 * timer might skip some samples if idle/sleeping as needed.
 		 */
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
-		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 			jiffies_to_usecs(10);
 	}
 
-	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice_load = 0;
-	tuners->powersave_bias = default_powersave_bias;
-	tuners->io_is_busy = should_io_be_busy();
-
 	dbs_data->tuners = tuners;
 	mutex_init(&dbs_data->mutex);
 	return 0;
 }
 
 static void od_exit(struct dbs_data *dbs_data)
 {
-	kfree(dbs_data->tuners);
+	//nothing to do
 }
 
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
@@ -618,7 +670,13 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+	for_each_possible_cpu(cpu) {
+		kfree(per_cpu(cached_tuners, cpu));
+		per_cpu(cached_tuners, cpu) = NULL;
+	}
 }
 
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");
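With the governors' exit() callbacks no longer freeing tunables, the cached allocations live until module unload, where the loop above sweeps every possible CPU slot. kfree(NULL) is a no-op, so slots for clusters that never ran the governor need no special casing. A userspace model of the same sweep, using free(), which has the same NULL behavior; the names and CPU count are illustrative:

/* Model of the module-exit sweep: free and clear every cache slot. */
#include <stdlib.h>

#define NR_CPUS 8

static void *cached_tuners[NR_CPUS];

static void module_exit_cleanup(void)
{
	int cpu;

	for (cpu = 0; cpu < NR_CPUS; cpu++) {	/* for_each_possible_cpu() */
		free(cached_tuners[cpu]);	/* free(NULL) is harmless */
		cached_tuners[cpu] = NULL;
	}
}

int main(void)
{
	cached_tuners[4] = calloc(1, 16);	/* only the big cluster allocated */
	module_exit_cleanup();
	return 0;
}

Clearing the pointers after freeing keeps the cache consistent if anything were to consult it during teardown, at the cost of one extra store per slot.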
