Skip to content

Commit

Permalink
auto_hotplug: enhancements
Browse files Browse the repository at this point in the history
1) added module params so tuning can be adjusted in userspace and
debugging can easily be enabled/disabled

/sys/module/auto_hotplug/parameters/debug
(N/Y - turn off/on debugging)

/sys/module/auto_hotplug/parameters/disable_load_threshold
(integer - load at which a CPU is disabled)

/sys/module/auto_hotplug/parameters/enable_load_threshold (integer -
load at which an extra CPU is enabled)

/sys/module/auto_hotplug/parameters/enable_all_load_threshold (integer -
load at which all CPUs are enabled)

/sys/module/auto_hotplug/parameters/min_sampling_rate
(milliseconds, converted to jiffies at runtime)

2) lowered enable_load_threshold from 225 to 200 for a little more pep.

To-do: add get/set routines to keep values safe and in acceptable ranges
  • Loading branch information
motley-git authored and hellsgod committed Aug 22, 2013
1 parent 7ec08e7 commit dabb384
Showing 1 changed file with 99 additions and 94 deletions.
193 changes: 99 additions & 94 deletions arch/arm/kernel/auto_hotplug.c
@@ -1,4 +1,6 @@
/* Copyright (c) 2012, Will Tisdale <willtisdale@gmail.com>. All rights reserved.
*
* 2012 Enhanced by motley <motley.slate@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
Expand Down Expand Up @@ -38,39 +40,31 @@
#include <linux/earlysuspend.h>
#endif

/*
* Enable debug output to dump the average
* calculations and ring buffer array values
* WARNING: Enabling this causes a ton of overhead
*
* FIXME: Turn it into debugfs stats (somehow)
* because currently it is a sack of shit.
*/
#define DEBUG 0

#define CPUS_AVAILABLE num_possible_cpus()

/*
* SAMPLING_PERIODS * MIN_SAMPLING_RATE is the minimum
* load history which will be averaged
*/
#define SAMPLING_PERIODS 10
#define INDEX_MAX_VALUE (SAMPLING_PERIODS - 1)
/*
* MIN_SAMPLING_RATE is scaled based on num_online_cpus()
* DEFAULT_MIN_SAMPLING_RATE is the base minimum sampling rate
* that is based on num_online_cpus()
*/
#define MIN_SAMPLING_RATE msecs_to_jiffies(20)
#define DEFAULT_MIN_SAMPLING_RATE 20

/*
* Load defines:
* ENABLE_ALL is a high watermark to rapidly online all CPUs
* DEFAULT_ENABLE_ALL is a default high watermark to rapidly online all CPUs
*
* ENABLE is the load which is required to enable 1 extra CPU
* DISABLE is the load at which a CPU is disabled
* DEFAULT_ENABLE_LOAD_THRESHOLD is the default load which is required to enable 1 extra CPU
* DEFAULT_DISABLE_LOAD_THRESHOLD is the default load at which a CPU is disabled
* These two are scaled based on num_online_cpus()
*/
#define ENABLE_ALL_LOAD_THRESHOLD (125 * CPUS_AVAILABLE)
#define ENABLE_LOAD_THRESHOLD 225
#define DISABLE_LOAD_THRESHOLD 60
#define DEFAULT_ENABLE_ALL_LOAD_THRESHOLD 500 //(125 * CPUS_AVAILABLE)
#define DEFAULT_ENABLE_LOAD_THRESHOLD 200
#define DEFAULT_DISABLE_LOAD_THRESHOLD 60

/* Control flags */
unsigned char flags;
Expand All @@ -79,6 +73,20 @@ unsigned char flags;
#define BOOSTPULSE_ACTIVE (1 << 2)
#define EARLYSUSPEND_ACTIVE (1 << 3)

/*
* Enable debug output to dump the average
* calculations and ring buffer array values
* WARNING: Enabling this causes a ton of overhead
* FIXME: Turn it into debugfs stats (somehow)
* because currently it is a sack of shit.
*/
static unsigned int debug = 0;

static unsigned int enable_all_load_threshold = DEFAULT_ENABLE_ALL_LOAD_THRESHOLD;
static unsigned int enable_load_threshold = DEFAULT_ENABLE_LOAD_THRESHOLD;
static unsigned int disable_load_threshold = DEFAULT_DISABLE_LOAD_THRESHOLD;
static unsigned int min_sampling_rate = DEFAULT_MIN_SAMPLING_RATE;

struct delayed_work hotplug_decision_work;
struct delayed_work hotplug_unpause_work;
struct work_struct hotplug_online_all_work;
Expand All @@ -90,18 +98,25 @@ struct work_struct hotplug_boost_online_work;
static unsigned int history[SAMPLING_PERIODS];
static unsigned int index;

module_param(enable_all_load_threshold, int, 0775);
module_param(enable_load_threshold, int, 0775);
module_param(disable_load_threshold, int, 0775);
module_param(min_sampling_rate, int, 0775);
module_param(debug, int, 0775);

static void hotplug_decision_work_fn(struct work_struct *work)
{
unsigned int running, disable_load, sampling_rate, enable_load, avg_running = 0;
unsigned int running, disable_load, enable_load, avg_running = 0;
unsigned int online_cpus, available_cpus, i, j;
#if DEBUG
unsigned int k;
#endif
unsigned long sampling_rate = 0;
unsigned long min_sampling_rate_in_jiffies = 0;

min_sampling_rate_in_jiffies = msecs_to_jiffies(min_sampling_rate);
online_cpus = num_online_cpus();
available_cpus = CPUS_AVAILABLE;
disable_load = DISABLE_LOAD_THRESHOLD * online_cpus;
enable_load = ENABLE_LOAD_THRESHOLD * online_cpus;
disable_load = disable_load_threshold * online_cpus;
enable_load = enable_load_threshold * online_cpus;
/*
* Multiply nr_running() by 100 so we don't have to
* use fp division to get the average.
Expand All @@ -110,13 +125,13 @@ static void hotplug_decision_work_fn(struct work_struct *work)

history[index] = running;

#if DEBUG
pr_info("online_cpus is: %d\n", online_cpus);
pr_info("enable_load is: %d\n", enable_load);
pr_info("disable_load is: %d\n", disable_load);
pr_info("index is: %d\n", index);
pr_info("running is: %d\n", running);
#endif
if (debug) {
pr_info("online_cpus is: %d\n", online_cpus);
pr_info("enable_load is: %d\n", enable_load);
pr_info("disable_load is: %d\n", disable_load);
pr_info("index is: %d\n", index);
pr_info("running is: %d\n", running);
}

/*
* Use a circular buffer to calculate the average load
Expand All @@ -137,26 +152,25 @@ static void hotplug_decision_work_fn(struct work_struct *work)
if (unlikely(index++ == INDEX_MAX_VALUE))
index = 0;

#if DEBUG
pr_info("array contents: ");
for (k = 0; k < SAMPLING_PERIODS; k++) {
pr_info("%d: %d\t",k, history[k]);
if (debug) {
pr_info("array contents: ");
for (k = 0; k < SAMPLING_PERIODS; k++) {
pr_info("%d: %d\t",k, history[k]);
}
pr_info("\n");
pr_info("avg_running before division: %d\n", avg_running);
}
pr_info("\n");
pr_info("avg_running before division: %d\n", avg_running);
#endif

avg_running = avg_running / SAMPLING_PERIODS;

#if DEBUG
pr_info("average_running is: %d\n", avg_running);
#endif
if (debug)
pr_info("average_running is: %d\n", avg_running);

if (likely(!(flags & HOTPLUG_DISABLED))) {
if (unlikely((avg_running >= ENABLE_ALL_LOAD_THRESHOLD) && (online_cpus < available_cpus))) {
#if DEBUG
pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);
#endif
if (unlikely((avg_running >= enable_all_load_threshold) && (online_cpus < available_cpus))) {
if (debug)
pr_info("auto_hotplug: Onlining all CPUs, avg running: %d\n", avg_running);

/*
* Flush any delayed offlining work from the workqueue.
* No point in having expensive unnecessary hotplug transitions.
Expand All @@ -171,41 +185,38 @@ static void hotplug_decision_work_fn(struct work_struct *work)
schedule_work(&hotplug_online_all_work);
return;
} else if (flags & HOTPLUG_PAUSED) {
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate_in_jiffies);
return;
} else if ((avg_running >= enable_load) && (online_cpus < available_cpus)) {
#if DEBUG
pr_info("auto_hotplug: Onlining single CPU, avg running: %d\n", avg_running);
#endif
if (debug)
pr_info("auto_hotplug: Onlining single CPU, avg running: %d\n", avg_running);
if (delayed_work_pending(&hotplug_offline_work))
cancel_delayed_work(&hotplug_offline_work);
schedule_work(&hotplug_online_single_work);
return;
} else if (avg_running <= disable_load) {
/* Only queue a cpu_down() if there isn't one already pending */
if (!(delayed_work_pending(&hotplug_offline_work))) {
#if DEBUG
pr_info("auto_hotplug: Offlining CPU, avg running: %d\n", avg_running);
#endif
if (debug)
pr_info("auto_hotplug: Offlining CPU, avg running: %d\n", avg_running);
schedule_delayed_work_on(0, &hotplug_offline_work, HZ);
}
/* If boostpulse is active, clear the flags */
if (flags & BOOSTPULSE_ACTIVE) {
flags &= ~BOOSTPULSE_ACTIVE;
#if DEBUG
pr_info("auto_hotplug: Clearing boostpulse flags\n");
#endif
if (debug)
pr_info("auto_hotplug: Clearing boostpulse flags\n");
}
}
}

/*
* Reduce the sampling rate dynamically based on online cpus.
*/
sampling_rate = MIN_SAMPLING_RATE * (online_cpus * online_cpus);
#if DEBUG
pr_info("sampling_rate is: %d\n", jiffies_to_msecs(sampling_rate));
#endif
sampling_rate = min_sampling_rate_in_jiffies * (online_cpus * online_cpus);
if (debug)
pr_info("sampling_rate is: %d\n", jiffies_to_msecs(sampling_rate));

schedule_delayed_work_on(0, &hotplug_decision_work, sampling_rate);

}
Expand All @@ -216,16 +227,15 @@ static void __cpuinit hotplug_online_all_work_fn(struct work_struct *work)
for_each_possible_cpu(cpu) {
if (likely(!cpu_online(cpu))) {
cpu_up(cpu);
#if DEBUG
pr_info("auto_hotplug: CPU%d up.\n", cpu);
#endif
if (debug)
pr_info("auto_hotplug: CPU%d up.\n", cpu);
}
}
/*
* Pause for 2 seconds before even considering offlining a CPU
*/
schedule_delayed_work(&hotplug_unpause_work, HZ * 2);
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate);
}

static void hotplug_offline_all_work_fn(struct work_struct *work)
Expand All @@ -234,9 +244,8 @@ static void hotplug_offline_all_work_fn(struct work_struct *work)
for_each_possible_cpu(cpu) {
if (likely(cpu_online(cpu) && (cpu))) {
cpu_down(cpu);
#if DEBUG
pr_info("auto_hotplug: CPU%d down.\n", cpu);
#endif
if (debug)
pr_info("auto_hotplug: CPU%d down.\n", cpu);
}
}
}
Expand All @@ -249,14 +258,13 @@ static void __cpuinit hotplug_online_single_work_fn(struct work_struct *work)
if (cpu) {
if (!cpu_online(cpu)) {
cpu_up(cpu);
#if DEBUG
pr_info("auto_hotplug: CPU%d up.\n", cpu);
#endif
if (debug)
pr_info("auto_hotplug: CPU%d up.\n", cpu);
break;
}
}
}
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate);
}

static void hotplug_offline_work_fn(struct work_struct *work)
Expand All @@ -265,35 +273,33 @@ static void hotplug_offline_work_fn(struct work_struct *work)
for_each_online_cpu(cpu) {
if (cpu) {
cpu_down(cpu);
#if DEBUG
pr_info("auto_hotplug: CPU%d down.\n", cpu);
#endif
if (debug)
pr_info("auto_hotplug: CPU%d down.\n", cpu);
break;
}
}
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate);
}

/*
 * hotplug_unpause_work_fn - delayed-work callback that re-enables hotplug
 * decisions by clearing HOTPLUG_PAUSED from the global flags word.
 *
 * Scheduled (typically HZ*2 after an online-all or boostpulse event) so the
 * decision worker cannot immediately offline CPUs that were just brought up.
 *
 * Fix: the scrape of this hunk carried both the pre-change unconditional
 * pr_info() and the new debug-gated one; only the debug-gated message is
 * kept so the log line honors the module's "debug" parameter like every
 * other message in this driver.
 */
static void hotplug_unpause_work_fn(struct work_struct *work)
{
	if (debug)
		pr_info("auto_hotplug: Clearing pause flag\n");
	flags &= ~HOTPLUG_PAUSED;
}

void hotplug_disable(bool flag)
{
if (flags & HOTPLUG_DISABLED && !flag) {
if ((flags & HOTPLUG_DISABLED) && !flag) {
flags &= ~HOTPLUG_DISABLED;
flags &= ~HOTPLUG_PAUSED;
#if DEBUG
pr_info("auto_hotplug: Clearing disable flag\n");
#endif
if (debug)
pr_info("auto_hotplug: Clearing disable flag\n");
schedule_delayed_work_on(0, &hotplug_decision_work, 0);
} else if (flag && (!(flags & HOTPLUG_DISABLED))) {
flags |= HOTPLUG_DISABLED;
#if DEBUG
pr_info("auto_hotplug: Setting disable flag\n");
#endif
if (debug)
pr_info("auto_hotplug: Setting disable flag\n");
cancel_delayed_work_sync(&hotplug_offline_work);
cancel_delayed_work_sync(&hotplug_decision_work);
cancel_delayed_work_sync(&hotplug_unpause_work);
Expand Down Expand Up @@ -321,17 +327,15 @@ inline void hotplug_boostpulse(void)
schedule_work(&hotplug_online_single_work);
schedule_delayed_work(&hotplug_unpause_work, HZ * 2);
} else {
#if DEBUG
pr_info("auto_hotplug: %s: %d CPUs online\n", __func__, num_online_cpus());
#endif
if (debug)
pr_info("auto_hotplug: %s: %d CPUs online\n", __func__, num_online_cpus());
if (delayed_work_pending(&hotplug_offline_work)) {
#if DEBUG
pr_info("auto_hotplug: %s: Canceling hotplug_offline_work\n", __func__);
#endif
if (debug)
pr_info("auto_hotplug: %s: Canceling hotplug_offline_work\n", __func__);
cancel_delayed_work(&hotplug_offline_work);
flags |= HOTPLUG_PAUSED;
schedule_delayed_work(&hotplug_unpause_work, HZ * 2);
schedule_delayed_work_on(0, &hotplug_decision_work, MIN_SAMPLING_RATE);
schedule_delayed_work_on(0, &hotplug_decision_work, min_sampling_rate);
}
}
}
Expand All @@ -340,9 +344,9 @@ inline void hotplug_boostpulse(void)
#ifdef CONFIG_HAS_EARLYSUSPEND
static void auto_hotplug_early_suspend(struct early_suspend *handler)
{
#if DEBUG
pr_info("auto_hotplug: early suspend handler\n");
#endif
if (debug)
pr_info("auto_hotplug: early suspend handler\n");

flags |= EARLYSUSPEND_ACTIVE;

/* Cancel all scheduled delayed work to avoid races */
Expand All @@ -356,9 +360,9 @@ static void auto_hotplug_early_suspend(struct early_suspend *handler)

static void auto_hotplug_late_resume(struct early_suspend *handler)
{
#if DEBUG
pr_info("auto_hotplug: late resume handler\n");
#endif
if (debug)
pr_info("auto_hotplug: late resume handler\n");

flags &= ~EARLYSUSPEND_ACTIVE;

schedule_delayed_work_on(0, &hotplug_decision_work, HZ);
Expand All @@ -373,6 +377,7 @@ static struct early_suspend auto_hotplug_suspend = {
int __init auto_hotplug_init(void)
{
pr_info("auto_hotplug: v0.220 by _thalamus\n");
pr_info("auto_hotplug: rev 1 enhanced by motley\n");
pr_info("auto_hotplug: %d CPUs detected\n", CPUS_AVAILABLE);

INIT_DELAYED_WORK(&hotplug_decision_work, hotplug_decision_work_fn);
Expand Down

0 comments on commit dabb384

Please sign in to comment.