Skip to content
This repository has been archived by the owner on Sep 9, 2019. It is now read-only.

Commit

Permalink
Browse files Browse the repository at this point in the history
Merge branch 'sched-updates'
EAS scheduler updates from CAF, as well as backports by Quentin Perret.
These should make the scheduler more energy efficient at the cost of
slightly higher jitter.

Signed-off-by: Danny Lin <danny@kdrag0n.dev>
  • Loading branch information
kdrag0n committed Mar 29, 2019
2 parents 43a5dfa + c2c8278 commit 67dc2e6
Show file tree
Hide file tree
Showing 15 changed files with 1,111 additions and 29 deletions.
3 changes: 3 additions & 0 deletions arch/arm64/configs/b1c1_defconfig
Expand Up @@ -614,6 +614,7 @@ CONFIG_PM_GENERIC_DOMAINS_SLEEP=y
CONFIG_PM_GENERIC_DOMAINS_OF=y
CONFIG_CPU_PM=y
# CONFIG_DEDUCE_WAKEUP_REASONS is not set
CONFIG_ENERGY_MODEL=y
CONFIG_ARCH_HIBERNATION_POSSIBLE=y
CONFIG_ARCH_SUSPEND_POSSIBLE=y

Expand Down Expand Up @@ -4714,6 +4715,8 @@ CONFIG_ANDROID_BINDER_DEVICES="binder,hwbinder,vndbinder"
# CONFIG_FPGA is not set
CONFIG_SENSORS_SSC=y
# CONFIG_TEE is not set
CONFIG_LEGACY_ENERGY_MODEL_DT=y
CONFIG_LEGACY_EM_FREQ_TUPLE=y

#
# Firmware Drivers
Expand Down
1 change: 1 addition & 0 deletions drivers/Kconfig
Expand Up @@ -210,4 +210,5 @@ source "drivers/sensors/Kconfig"

source "drivers/tee/Kconfig"

source "drivers/energy_model/Kconfig"
endmenu
2 changes: 2 additions & 0 deletions drivers/Makefile
Expand Up @@ -154,6 +154,8 @@ obj-$(CONFIG_HWSPINLOCK) += hwspinlock/
obj-$(CONFIG_REMOTEPROC) += remoteproc/
obj-$(CONFIG_RPMSG) += rpmsg/

obj-$(CONFIG_ENERGY_MODEL) += energy_model/

# Virtualization drivers
obj-$(CONFIG_VIRT_DRIVERS) += virt/
obj-$(CONFIG_HYPERV) += hv/
Expand Down
21 changes: 21 additions & 0 deletions drivers/energy_model/Kconfig
@@ -0,0 +1,21 @@
config LEGACY_ENERGY_MODEL_DT
	bool "Legacy DT-based Energy Model of CPUs"
	default n
	help
	  The Energy Aware Scheduler (EAS) used to rely on Energy Models
	  (EMs) statically defined in the Device Tree. More recent
	  versions of EAS now rely on the EM framework to get the power
	  costs of CPUs.

	  This driver reads old-style static EMs in DT and feeds them in
	  the EM framework, hence enabling to use EAS on platforms with
	  old DT files. Since EAS now uses only the active costs of CPUs,
	  the cluster-related costs and idle-costs of the old EM are
	  ignored.

	  If in doubt, say N.

config LEGACY_EM_FREQ_TUPLE
	bool "Frequency-power tuples format for legacy DT EMs"
	depends on LEGACY_ENERGY_MODEL_DT
	default n
	help
	  Interpret the busy-cost-data entries of the legacy DT Energy
	  Model as (frequency, power) tuples instead of the default
	  (capacity, power) format, and drop operating points above the
	  cpufreq policy's maximum frequency.

	  If in doubt, say N.
3 changes: 3 additions & 0 deletions drivers/energy_model/Makefile
@@ -0,0 +1,3 @@
# SPDX-License-Identifier: GPL-2.0
# Loader feeding legacy DT-defined Energy Models into the EM framework.

obj-$(CONFIG_LEGACY_ENERGY_MODEL_DT) += legacy_em_dt.o
210 changes: 210 additions & 0 deletions drivers/energy_model/legacy_em_dt.c
@@ -0,0 +1,210 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Legacy Energy Model loading driver
*
* Copyright (C) 2018, ARM Ltd.
* Written by: Quentin Perret, ARM Ltd.
*/

#define pr_fmt(fmt) "legacy-dt-em: " fmt

#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/cpuset.h>
#include <linux/energy_model.h>
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* CPUs whose EM has not been registered yet; shrinks as policies appear. */
static cpumask_var_t cpus_to_visit;

/* Number of valid entries in this CPU's cpu_em table (0 = not loaded). */
static DEFINE_PER_CPU(unsigned long, nr_states) = 0;

/* One operating point of the legacy DT Energy Model. */
struct em_state {
	unsigned long frequency;
	unsigned long power;
	unsigned long capacity;
};

/* Per-CPU EM table; all CPUs of one cpufreq policy share a single table. */
static DEFINE_PER_CPU(struct em_state*, cpu_em) = NULL;

static void finish_em_loading_workfn(struct work_struct *work);
static DECLARE_WORK(finish_em_loading_work, finish_em_loading_workfn);

/* Serializes EM registration across cpufreq policy notifications. */
static DEFINE_MUTEX(em_loading_mutex);

/*
 * Callback given to the EM framework. All this does is browse the table
 * created by legacy_em_dt().
 *
 * Selects the lowest state whose frequency satisfies the request in
 * *KHz (or the highest state if none does), then writes the chosen
 * state's frequency and power back through *KHz and *mW.
 */
static int get_power(unsigned long *mW, unsigned long *KHz, int cpu)
{
	unsigned long nstates = per_cpu(nr_states, cpu);
	struct em_state *em = per_cpu(cpu_em, cpu);
	int i;

	/* The EM of this CPU may not have been loaded (yet). */
	if (!nstates || !em)
		return -ENODEV;

	/*
	 * Ceil lookup: use >= so that an exact frequency match selects
	 * that state instead of skipping to the next, more expensive one.
	 */
	for (i = 0; i < nstates - 1; i++) {
		if (em[i].frequency >= *KHz)
			break;
	}

	*KHz = em[i].frequency;
	*mW = em[i].power;

	return 0;
}

static int init_em_dt_callback(struct notifier_block *nb, unsigned long val,
void *data)
{
struct em_data_callback em_cb = EM_DATA_CB(get_power);
unsigned long nstates, max_freq, nr_opp;
unsigned long __maybe_unused scale_cpu;
struct cpufreq_policy *policy = data;
const struct property *prop;
struct device_node *cn, *cp;
struct em_state *em;
int cpu, i, ret = 0;
const __be32 *tmp;

if (val != CPUFREQ_NOTIFY)
return 0;

mutex_lock(&em_loading_mutex);

/* Do not register twice an energy model */
for_each_cpu(cpu, policy->cpus) {
if (per_cpu(nr_states, cpu) || per_cpu(cpu_em, cpu)) {
pr_err("EM of CPU%d already loaded\n", cpu);
ret = -EEXIST;
goto unlock;
}
}

max_freq = policy->cpuinfo.max_freq;
if (!max_freq) {
pr_err("No policy->max for CPU%d\n", cpu);
ret = -EINVAL;
goto unlock;
}

cpu = cpumask_first(policy->cpus);
cn = of_get_cpu_node(cpu, NULL);
if (!cn) {
pr_err("No device_node for CPU%d\n", cpu);
ret = -ENODEV;
goto unlock;
}

cp = of_parse_phandle(cn, "sched-energy-costs", 0);
if (!cp) {
pr_err("CPU%d node has no sched-energy-costs\n", cpu);
ret = -ENODEV;
goto unlock;
}

prop = of_find_property(cp, "busy-cost-data", NULL);
if (!prop || !prop->value) {
pr_err("No busy-cost-data for CPU%d\n", cpu);
ret = -ENODEV;
goto unlock;
}

nstates = nr_opp = (prop->length / sizeof(u32)) / 2;
em = kcalloc(nstates, sizeof(struct em_cap_state), GFP_KERNEL);
if (!em) {
ret = -ENOMEM;
goto unlock;
}
#ifdef CONFIG_LEGACY_EM_FREQ_TUPLE
/* Copy the frequency and power cost to the table. */
for (i = 0, tmp = prop->value; i < nstates; i++) {
unsigned long freq = be32_to_cpup(tmp++);
unsigned long power = be32_to_cpup(tmp++);

/* Avoid OPPs out of the current speedbin */
if (freq > max_freq) {
nr_opp--;
continue;
}

em[i].frequency = freq;
em[i].power = power;
}
#else
/* Copy the capacity and power cost to the table. */
for (i = 0, tmp = prop->value; i < nstates; i++) {
em[i].capacity = be32_to_cpup(tmp++);
em[i].power = be32_to_cpup(tmp++);
}

/* Get the CPU capacity (according to the EM) */
scale_cpu = em[nstates - 1].capacity;
if (!scale_cpu) {
pr_err("CPU%d: capacity cannot be 0\n", cpu);
kfree(em);
ret = -EINVAL;
goto unlock;
}

/* Re-compute the intermediate frequencies based on the EM. */
for (i = 0; i < nstates; i++)
em[i].frequency = em[i].capacity * max_freq / scale_cpu;
#endif

/* Assign the table to all CPUs of this policy. */
for_each_cpu(i, policy->cpus) {
per_cpu(nr_states, i) = nr_opp;
per_cpu(cpu_em, i) = em;
}

pr_info("Registering EM of %*pbl\n", cpumask_pr_args(policy->cpus));
em_register_perf_domain(policy->cpus, nr_opp, &em_cb);

/* Finish the work when all possible CPUs have been registered. */
cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->cpus);
if (cpumask_empty(cpus_to_visit))
schedule_work(&finish_em_loading_work);

unlock:
mutex_unlock(&em_loading_mutex);

return ret;
}

/* Registered on the cpufreq policy chain by register_cpufreq_notifier(). */
static struct notifier_block init_em_dt_notifier = {
	.notifier_call = init_em_dt_callback,
};

/*
 * Deferred completion, scheduled once every possible CPU has had its EM
 * registered: stop listening to cpufreq, drop the tracking mask and
 * rebuild the sched domains so the scheduler picks up the Energy Model.
 */
static void finish_em_loading_workfn(struct work_struct *work)
{
	cpufreq_unregister_notifier(&init_em_dt_notifier,
				    CPUFREQ_POLICY_NOTIFIER);
	free_cpumask_var(cpus_to_visit);

	/* Let the scheduler know the Energy Model is ready. */
	rebuild_sched_domains();
}

/*
 * Driver entry point: mark all possible CPUs as pending and register a
 * cpufreq policy notifier; the EM of each policy is then loaded from DT
 * as the policies come up (see init_em_dt_callback()).
 *
 * Returns 0 on success or a negative errno on allocation/registration
 * failure.
 */
static int __init register_cpufreq_notifier(void)
{
	int ret;

	if (!alloc_cpumask_var(&cpus_to_visit, GFP_KERNEL))
		return -ENOMEM;

	cpumask_copy(cpus_to_visit, cpu_possible_mask);

	ret = cpufreq_register_notifier(&init_em_dt_notifier,
					CPUFREQ_POLICY_NOTIFIER);

	/* Do not leak the mask if we will never be notified. */
	if (ret)
		free_cpumask_var(cpus_to_visit);

	return ret;
}
core_initcall(register_cpufreq_notifier);

0 comments on commit 67dc2e6

Please sign in to comment.