Skip to content

Commit 3e66c4b

Browse files
committed
Merge branch 'pm-cpufreq'
* pm-cpufreq:
  - intel_pstate: fix PCT_TO_HWP macro
  - intel_pstate: Fix user input of min/max to legal policy region
  - cpufreq-dt: add suspend frequency support
  - cpufreq: allow cpufreq_generic_suspend() to work without suspend frequency
  - cpufreq: Use __func__ to print function's name
  - cpufreq: staticize cpufreq_cpu_get_raw()
  - cpufreq: Add ARM_MT8173_CPUFREQ dependency on THERMAL
  - cpufreq: dt: Tolerance applies on both sides of target voltage
  - cpufreq: dt: Print error on failing to mark OPPs as shared
  - cpufreq: dt: Check OPP count before marking them shared
2 parents: 7c97666 + 74da56c — commit 3e66c4b

File tree

4 files changed

+59
-28
lines changed

4 files changed

+59
-28
lines changed

drivers/cpufreq/Kconfig.arm

Lines changed: 1 addition & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -133,6 +133,7 @@ config ARM_KIRKWOOD_CPUFREQ
133133
config ARM_MT8173_CPUFREQ
134134
bool "Mediatek MT8173 CPUFreq support"
135135
depends on ARCH_MEDIATEK && REGULATOR
136+
depends on !CPU_THERMAL || THERMAL=y
136137
select PM_OPP
137138
help
138139
This adds the CPUFreq driver support for Mediatek MT8173 SoC.

drivers/cpufreq/cpufreq-dt.c

Lines changed: 26 additions & 13 deletions
Original file line number | Diff line number | Diff line change
@@ -196,6 +196,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
196196
struct device *cpu_dev;
197197
struct regulator *cpu_reg;
198198
struct clk *cpu_clk;
199+
struct dev_pm_opp *suspend_opp;
199200
unsigned long min_uV = ~0, max_uV = 0;
200201
unsigned int transition_latency;
201202
bool need_update = false;
@@ -239,6 +240,17 @@ static int cpufreq_init(struct cpufreq_policy *policy)
239240
*/
240241
of_cpumask_init_opp_table(policy->cpus);
241242

243+
/*
244+
* But we need OPP table to function so if it is not there let's
245+
* give platform code chance to provide it for us.
246+
*/
247+
ret = dev_pm_opp_get_opp_count(cpu_dev);
248+
if (ret <= 0) {
249+
pr_debug("OPP table is not ready, deferring probe\n");
250+
ret = -EPROBE_DEFER;
251+
goto out_free_opp;
252+
}
253+
242254
if (need_update) {
243255
struct cpufreq_dt_platform_data *pd = cpufreq_get_driver_data();
244256

@@ -249,24 +261,16 @@ static int cpufreq_init(struct cpufreq_policy *policy)
249261
* OPP tables are initialized only for policy->cpu, do it for
250262
* others as well.
251263
*/
252-
set_cpus_sharing_opps(cpu_dev, policy->cpus);
264+
ret = set_cpus_sharing_opps(cpu_dev, policy->cpus);
265+
if (ret)
266+
dev_err(cpu_dev, "%s: failed to mark OPPs as shared: %d\n",
267+
__func__, ret);
253268

254269
of_property_read_u32(np, "clock-latency", &transition_latency);
255270
} else {
256271
transition_latency = dev_pm_opp_get_max_clock_latency(cpu_dev);
257272
}
258273

259-
/*
260-
* But we need OPP table to function so if it is not there let's
261-
* give platform code chance to provide it for us.
262-
*/
263-
ret = dev_pm_opp_get_opp_count(cpu_dev);
264-
if (ret <= 0) {
265-
pr_debug("OPP table is not ready, deferring probe\n");
266-
ret = -EPROBE_DEFER;
267-
goto out_free_opp;
268-
}
269-
270274
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
271275
if (!priv) {
272276
ret = -ENOMEM;
@@ -300,7 +304,8 @@ static int cpufreq_init(struct cpufreq_policy *policy)
300304
rcu_read_unlock();
301305

302306
tol_uV = opp_uV * priv->voltage_tolerance / 100;
303-
if (regulator_is_supported_voltage(cpu_reg, opp_uV,
307+
if (regulator_is_supported_voltage(cpu_reg,
308+
opp_uV - tol_uV,
304309
opp_uV + tol_uV)) {
305310
if (opp_uV < min_uV)
306311
min_uV = opp_uV;
@@ -329,6 +334,13 @@ static int cpufreq_init(struct cpufreq_policy *policy)
329334
policy->driver_data = priv;
330335

331336
policy->clk = cpu_clk;
337+
338+
rcu_read_lock();
339+
suspend_opp = dev_pm_opp_get_suspend_opp(cpu_dev);
340+
if (suspend_opp)
341+
policy->suspend_freq = dev_pm_opp_get_freq(suspend_opp) / 1000;
342+
rcu_read_unlock();
343+
332344
ret = cpufreq_table_validate_and_show(policy, freq_table);
333345
if (ret) {
334346
dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
@@ -419,6 +431,7 @@ static struct cpufreq_driver dt_cpufreq_driver = {
419431
.ready = cpufreq_ready,
420432
.name = "cpufreq-dt",
421433
.attr = cpufreq_dt_attr,
434+
.suspend = cpufreq_generic_suspend,
422435
};
423436

424437
static int dt_cpufreq_probe(struct platform_device *pdev)

drivers/cpufreq/cpufreq.c

Lines changed: 4 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -239,7 +239,7 @@ int cpufreq_generic_init(struct cpufreq_policy *policy,
239239
EXPORT_SYMBOL_GPL(cpufreq_generic_init);
240240

241241
/* Only for cpufreq core internal use */
242-
struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
242+
static struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
243243
{
244244
struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
245245

@@ -1626,8 +1626,8 @@ int cpufreq_generic_suspend(struct cpufreq_policy *policy)
16261626
int ret;
16271627

16281628
if (!policy->suspend_freq) {
1629-
pr_err("%s: suspend_freq can't be zero\n", __func__);
1630-
return -EINVAL;
1629+
pr_debug("%s: suspend_freq not defined\n", __func__);
1630+
return 0;
16311631
}
16321632

16331633
pr_debug("%s: Setting suspend-freq: %u\n", __func__,
@@ -2031,8 +2031,7 @@ static int __cpufreq_governor(struct cpufreq_policy *policy,
20312031
if (!try_module_get(policy->governor->owner))
20322032
return -EINVAL;
20332033

2034-
pr_debug("__cpufreq_governor for CPU %u, event %u\n",
2035-
policy->cpu, event);
2034+
pr_debug("%s: for CPU %u, event %u\n", __func__, policy->cpu, event);
20362035

20372036
mutex_lock(&cpufreq_governor_lock);
20382037
if ((policy->governor_enabled && event == CPUFREQ_GOV_START)

drivers/cpufreq/intel_pstate.c

Lines changed: 28 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -260,24 +260,31 @@ static inline void update_turbo_state(void)
260260
cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
261261
}
262262

263-
#define PCT_TO_HWP(x) (x * 255 / 100)
264263
static void intel_pstate_hwp_set(void)
265264
{
266-
int min, max, cpu;
267-
u64 value, freq;
265+
int min, hw_min, max, hw_max, cpu, range, adj_range;
266+
u64 value, cap;
267+
268+
rdmsrl(MSR_HWP_CAPABILITIES, cap);
269+
hw_min = HWP_LOWEST_PERF(cap);
270+
hw_max = HWP_HIGHEST_PERF(cap);
271+
range = hw_max - hw_min;
268272

269273
get_online_cpus();
270274

271275
for_each_online_cpu(cpu) {
272276
rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
273-
min = PCT_TO_HWP(limits.min_perf_pct);
277+
adj_range = limits.min_perf_pct * range / 100;
278+
min = hw_min + adj_range;
274279
value &= ~HWP_MIN_PERF(~0L);
275280
value |= HWP_MIN_PERF(min);
276281

277-
max = PCT_TO_HWP(limits.max_perf_pct);
282+
adj_range = limits.max_perf_pct * range / 100;
283+
max = hw_min + adj_range;
278284
if (limits.no_turbo) {
279-
rdmsrl( MSR_HWP_CAPABILITIES, freq);
280-
max = HWP_GUARANTEED_PERF(freq);
285+
hw_max = HWP_GUARANTEED_PERF(cap);
286+
if (hw_max < max)
287+
max = hw_max;
281288
}
282289

283290
value &= ~HWP_MAX_PERF(~0L);
@@ -423,6 +430,8 @@ static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
423430

424431
limits.max_sysfs_pct = clamp_t(int, input, 0 , 100);
425432
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
433+
limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
434+
limits.max_perf_pct = max(limits.min_perf_pct, limits.max_perf_pct);
426435
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
427436

428437
if (hwp_active)
@@ -442,6 +451,8 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
442451

443452
limits.min_sysfs_pct = clamp_t(int, input, 0 , 100);
444453
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
454+
limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
455+
limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
445456
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
446457

447458
if (hwp_active)
@@ -989,12 +1000,19 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
9891000

9901001
limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
9911002
limits.min_policy_pct = clamp_t(int, limits.min_policy_pct, 0 , 100);
992-
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
993-
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
994-
9951003
limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
9961004
limits.max_policy_pct = clamp_t(int, limits.max_policy_pct, 0 , 100);
1005+
1006+
/* Normalize user input to [min_policy_pct, max_policy_pct] */
1007+
limits.min_perf_pct = max(limits.min_policy_pct, limits.min_sysfs_pct);
1008+
limits.min_perf_pct = min(limits.max_policy_pct, limits.min_perf_pct);
9971009
limits.max_perf_pct = min(limits.max_policy_pct, limits.max_sysfs_pct);
1010+
limits.max_perf_pct = max(limits.min_policy_pct, limits.max_perf_pct);
1011+
1012+
/* Make sure min_perf_pct <= max_perf_pct */
1013+
limits.min_perf_pct = min(limits.max_perf_pct, limits.min_perf_pct);
1014+
1015+
limits.min_perf = div_fp(int_tofp(limits.min_perf_pct), int_tofp(100));
9981016
limits.max_perf = div_fp(int_tofp(limits.max_perf_pct), int_tofp(100));
9991017

10001018
if (hwp_active)

0 commit comments

Comments (0)