diff --git a/drivers/cpufreq/cpufreq_conservative.c b/drivers/cpufreq/cpufreq_conservative.c
index 25a70d06c5bf..4bffc506ab82 100644
--- a/drivers/cpufreq/cpufreq_conservative.c
+++ b/drivers/cpufreq/cpufreq_conservative.c
@@ -22,6 +22,7 @@
 #define MAX_SAMPLING_DOWN_FACTOR	(10)
 
 static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);
+static DEFINE_PER_CPU(struct cs_dbs_tuners *, cached_tuners);
 
 static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
 					   struct cpufreq_policy *policy)
@@ -317,14 +318,29 @@ static struct attribute_group cs_attr_group_gov_pol = {
 
 /************************** sysfs end ************************/
 
-static int cs_init(struct dbs_data *dbs_data)
+static void save_tuners(struct cpufreq_policy *policy,
+			struct cs_dbs_tuners *tuners)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	WARN_ON(per_cpu(cached_tuners, cpu) &&
+		per_cpu(cached_tuners, cpu) != tuners);
+	per_cpu(cached_tuners, cpu) = tuners;
+}
+
+static struct cs_dbs_tuners *alloc_tuners(struct cpufreq_policy *policy)
 {
 	struct cs_dbs_tuners *tuners;
 
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
@@ -333,6 +349,34 @@ static int cs_init(struct dbs_data *dbs_data)
 	tuners->ignore_nice_load = 0;
 	tuners->freq_step = DEF_FREQUENCY_STEP;
 
+	save_tuners(policy, tuners);
+
+	return tuners;
+}
+
+static struct cs_dbs_tuners *restore_tuners(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	return per_cpu(cached_tuners, cpu);
+}
+
+static int cs_init(struct dbs_data *dbs_data, struct cpufreq_policy *policy)
+{
+	struct cs_dbs_tuners *tuners;
+
+	tuners = restore_tuners(policy);
+	if (!tuners) {
+		tuners = alloc_tuners(policy);
+		if (IS_ERR(tuners))
+			return PTR_ERR(tuners);
+	}
+
 	dbs_data->tuners = tuners;
 	dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 		jiffies_to_usecs(10);
@@ -342,7 +386,7 @@ static int cs_init(struct dbs_data *dbs_data)
 
 static void cs_exit(struct dbs_data *dbs_data)
 {
-	kfree(dbs_data->tuners);
+	/* Tuners are cached; freed in cpufreq_gov_dbs_exit(). */
 }
 
 define_get_cpu_dbs_routines(cs_cpu_dbs_info);
@@ -391,7 +435,13 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(&cpufreq_gov_conservative);
+	for_each_possible_cpu(cpu) {
+		kfree(per_cpu(cached_tuners, cpu));
+		per_cpu(cached_tuners, cpu) = NULL;
+	}
 }
 
 MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 0c0c1c3f6634..f71d1171794a 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -286,7 +286,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
 		dbs_data->cdata = cdata;
 		dbs_data->usage_count = 1;
 
-		rc = cdata->init(dbs_data);
+		rc = cdata->init(dbs_data, policy);
 		if (rc) {
 			pr_err("%s: POLICY_INIT: init() failed\n", __func__);
 			kfree(dbs_data);
diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
index cc401d147e72..85f0295ad149 100644
--- a/drivers/cpufreq/cpufreq_governor.h
+++ b/drivers/cpufreq/cpufreq_governor.h
@@ -208,7 +208,7 @@ struct common_dbs_data {
 	void *(*get_cpu_dbs_info_s)(int cpu);
 	void (*gov_dbs_timer)(struct work_struct *work);
 	void (*gov_check_cpu)(int cpu, unsigned int load);
-	int (*init)(struct dbs_data *dbs_data);
+	int (*init)(struct dbs_data *dbs_data, struct cpufreq_policy *policy);
 	void (*exit)(struct dbs_data *dbs_data);
 
 	/* Governor specific ops, see below */
diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
index 7a7cda2f163e..8a2eb4ad90ea 100644
--- a/drivers/cpufreq/cpufreq_ondemand.c
+++ b/drivers/cpufreq/cpufreq_ondemand.c
@@ -32,6 +32,7 @@
 #define MAX_FREQUENCY_UP_THRESHOLD		(100)
 
 static DEFINE_PER_CPU(struct od_cpu_dbs_info_s, od_cpu_dbs_info);
+static DEFINE_PER_CPU(struct od_dbs_tuners *, cached_tuners);
 
 static struct od_ops od_ops;
 
@@ -479,7 +480,22 @@ static struct attribute_group od_attr_group_gov_pol = {
 
 /************************** sysfs end ************************/
 
-static int od_init(struct dbs_data *dbs_data)
+static void save_tuners(struct cpufreq_policy *policy,
+			struct od_dbs_tuners *tuners)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	WARN_ON(per_cpu(cached_tuners, cpu) &&
+		per_cpu(cached_tuners, cpu) != tuners);
+	per_cpu(cached_tuners, cpu) = tuners;
+}
+
+static struct od_dbs_tuners *alloc_tuners(struct cpufreq_policy *policy)
 {
 	struct od_dbs_tuners *tuners;
 	u64 idle_time;
@@ -488,7 +504,7 @@ static int od_init(struct dbs_data *dbs_data)
 	tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
 	if (!tuners) {
 		pr_err("%s: kzalloc failed\n", __func__);
-		return -ENOMEM;
+		return ERR_PTR(-ENOMEM);
 	}
 
 	cpu = get_cpu();
@@ -497,6 +513,49 @@ static int od_init(struct dbs_data *dbs_data)
 	if (idle_time != -1ULL) {
 		/* Idle micro accounting is supported. Use finer thresholds */
 		tuners->up_threshold = MICRO_FREQUENCY_UP_THRESHOLD;
+	} else {
+		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+	}
+
+	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+	tuners->ignore_nice_load = 0;
+	tuners->powersave_bias = default_powersave_bias;
+	tuners->io_is_busy = should_io_be_busy();
+
+	save_tuners(policy, tuners);
+
+	return tuners;
+}
+
+static struct od_dbs_tuners *restore_tuners(struct cpufreq_policy *policy)
+{
+	int cpu;
+
+	if (have_governor_per_policy())
+		cpu = cpumask_first(policy->related_cpus);
+	else
+		cpu = 0;
+
+	return per_cpu(cached_tuners, cpu);
+}
+
+static int od_init(struct dbs_data *dbs_data, struct cpufreq_policy *policy)
+{
+	struct od_dbs_tuners *tuners;
+	u64 idle_time;
+	int cpu;
+
+	tuners = restore_tuners(policy);
+	if (!tuners) {
+		tuners = alloc_tuners(policy);
+		if (IS_ERR(tuners))
+			return PTR_ERR(tuners);
+	}
+
+	cpu = get_cpu();
+	idle_time = get_cpu_idle_time_us(cpu, NULL);
+	put_cpu();
+	if (idle_time != -1ULL) {
 		/*
 		 * In nohz/micro accounting case we set the minimum frequency
 		 * not depending on HZ, but fixed (very low).
@@ -504,18 +563,11 @@ static int od_init(struct dbs_data *dbs_data)
 		 */
 		dbs_data->min_sampling_rate = MICRO_FREQUENCY_MIN_SAMPLE_RATE;
 	} else {
-		tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
-
 		/* For correct statistics, we need 10 ticks for each measure */
 		dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
 			jiffies_to_usecs(10);
 	}
 
-	tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
-	tuners->ignore_nice_load = 0;
-	tuners->powersave_bias = default_powersave_bias;
-	tuners->io_is_busy = should_io_be_busy();
-
 	dbs_data->tuners = tuners;
 	mutex_init(&dbs_data->mutex);
 	return 0;
@@ -523,7 +575,7 @@ static int od_init(struct dbs_data *dbs_data)
 
 static void od_exit(struct dbs_data *dbs_data)
 {
-	kfree(dbs_data->tuners);
+	/* Tuners are cached; freed in cpufreq_gov_dbs_exit(). */
 }
 
 define_get_cpu_dbs_routines(od_cpu_dbs_info);
@@ -618,7 +670,13 @@ static int __init cpufreq_gov_dbs_init(void)
 
 static void __exit cpufreq_gov_dbs_exit(void)
 {
+	int cpu;
+
 	cpufreq_unregister_governor(&cpufreq_gov_ondemand);
+	for_each_possible_cpu(cpu) {
+		kfree(per_cpu(cached_tuners, cpu));
+		per_cpu(cached_tuners, cpu) = NULL;
+	}
 }
 
 MODULE_AUTHOR("Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>");