Merge branch 'eas/neutrino-msm-hotdogb-4.14' of https://github.com/0ctobot/neutrino-staging into neutrino-msm-hotdogb-4.14

* eas/neutrino-msm-hotdogb-4.14: (12 commits)
  Revert "sched: fix issue of cpu freq running at max always"
  cpufreq: schedutil: Queue sugov irq work on policy online cpu
  sched/walt: Avoid walt irq work in offlined cpu
  sched: Improve the scheduler
  sched: Improve the scheduler
  sched: use rq_clock if WALT is not enabled
  sched/fair: Optimize the tick path active migration
  sched: fair: Stop running idle_balance on active migration kick
  sched: Spare resched IPI when prio changes on a single fair task
  BACKPORT: sched/core: Distribute tasks within affinity masks
  ANDROID: increase limit on sched-tune boost groups
  sched/tune: Streamline SchedTune Assist logging

Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
0ctobot committed May 22, 2020
2 parents 2fd536a + 5f00bb7 commit f364884
Showing 9 changed files with 103 additions and 27 deletions.
7 changes: 7 additions & 0 deletions include/linux/cpumask.h
@@ -184,6 +184,11 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
return 0;
}

static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p) {
return cpumask_next_and(-1, src1p, src2p);
}

#define for_each_cpu(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
@@ -224,6 +229,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p);

/**
* for_each_cpu - iterate over every cpu in a mask
23 changes: 17 additions & 6 deletions kernel/sched/core.c
@@ -1162,9 +1162,14 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
goto out;

cpumask_andnot(&allowed_mask, new_mask, cpu_isolated_mask);
cpumask_and(&allowed_mask, &allowed_mask, cpu_valid_mask);

if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
/*
* Picking a ~random cpu helps in cases where we are changing affinity
* for groups of tasks (ie. cpuset), so that load balancing is not
* immediately required to distribute the tasks within their new mask.
*/
dest_cpu = cpumask_any_and_distribute(cpu_valid_mask, &allowed_mask);
if (dest_cpu >= nr_cpu_ids) {
cpumask_and(&allowed_mask, cpu_valid_mask, new_mask);
dest_cpu = cpumask_any(&allowed_mask);
if (!cpumask_intersects(new_mask, cpu_valid_mask)) {
@@ -1189,7 +1194,6 @@ static int __set_cpus_allowed_ptr(struct task_struct *p,
if (cpumask_test_cpu(task_cpu(p), &allowed_mask))
goto out;

dest_cpu = cpumask_any_and(cpu_valid_mask, new_mask);
if (task_running(rq, p) || p->state == TASK_WAKING) {
struct migration_arg arg = { p, dest_cpu };
/* Need help from migration thread: drop lock and wait. */
@@ -4939,8 +4943,10 @@ unsigned int sched_lib_mask_force;
bool is_sched_lib_based_app(pid_t pid)
{
const char *name = NULL;
char *libname, *lib_list;
struct vm_area_struct *vma;
char path_buf[LIB_PATH_LENGTH];
char tmp_lib_name[LIB_PATH_LENGTH];
bool found = false;
struct task_struct *p;
struct mm_struct *mm;
@@ -4972,10 +4978,15 @@ bool is_sched_lib_based_app(pid_t pid)
if (IS_ERR(name))
goto release_sem;

if (strnstr(name, sched_lib_name,
strlcpy(tmp_lib_name, sched_lib_name, LIB_PATH_LENGTH);
lib_list = tmp_lib_name;
while ((libname = strsep(&lib_list, ","))) {
libname = skip_spaces(libname);
if (strnstr(name, libname,
strnlen(name, LIB_PATH_LENGTH))) {
found = true;
break;
found = true;
goto release_sem;
}
}
}
}
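
The is_sched_lib_based_app() hunk above replaces the single sched_lib_name match with a comma-separated list: the configured string is copied, split on ',', trimmed of leading spaces, and each entry is matched as a substring of the mapped file name. A rough userspace sketch of that matching logic, with glibc strsep() standing in for the kernel helpers and made-up helper/library names:

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <string.h>

/* Illustrative stand-in for the kernel-side matching, not the actual function. */
static int matches_sched_lib(const char *vma_name, const char *sched_lib_name)
{
	char tmp[256];
	char *lib_list = tmp, *libname;

	/* Work on a copy, since strsep() modifies the string it walks. */
	strncpy(tmp, sched_lib_name, sizeof(tmp) - 1);
	tmp[sizeof(tmp) - 1] = '\0';

	while ((libname = strsep(&lib_list, ","))) {
		while (*libname == ' ')		/* stand-in for skip_spaces() */
			libname++;
		if (*libname && strstr(vma_name, libname))
			return 1;
	}
	return 0;
}

int main(void)
{
	const char *cfg = "libunity.so, libil2cpp.so";	/* example value only */

	printf("%d\n", matches_sched_lib("/data/app/game/lib/arm64/libil2cpp.so", cfg));	/* 1 */
	printf("%d\n", matches_sched_lib("/system/lib64/libc.so", cfg));			/* 0 */
	return 0;
}
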
2 changes: 1 addition & 1 deletion kernel/sched/cpufreq_schedutil.c
@@ -185,7 +185,7 @@ static void sugov_update_commit(struct sugov_policy *sg_policy, u64 time,
} else {
if (use_pelt())
sg_policy->work_in_progress = true;
irq_work_queue(&sg_policy->irq_work);
sched_irq_work_queue(&sg_policy->irq_work);
}
}

19 changes: 14 additions & 5 deletions kernel/sched/fair.c
@@ -11584,10 +11584,11 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
update_next_balance(sd, &next_balance);

/*
* Stop searching for tasks to pull if there are
* now runnable tasks on this rq.
* Stop searching for tasks to pull if there are now runnable
* tasks on this rq or if active migration kicked in.
*/
if (pulled_task || this_rq->nr_running > 0)
if (pulled_task || this_rq->nr_running > 0 ||
!continue_balancing)
break;
}
rcu_read_unlock();
@@ -12401,6 +12402,9 @@ prio_changed_fair(struct rq *rq, struct task_struct *p, int oldprio)
if (!task_on_rq_queued(p))
return;

if (rq->cfs.nr_running == 1)
return;

/*
* Reschedule if we are currently running on this runqueue and
* our priority decreased, or if we are not currently running on
@@ -13230,11 +13234,12 @@ static inline void walt_check_for_rotation(struct rq *rq)
static DEFINE_RAW_SPINLOCK(migration_lock);
void check_for_migration(struct rq *rq, struct task_struct *p)
{
int active_balance;
int active_balance, ret;
int new_cpu = -1;
int cpu = smp_processor_id();
int prev_cpu = task_cpu(p);
struct sched_domain *sd = NULL;
int ret;

if (rq->misfit_task_load) {
if (rq->curr->state != TASK_RUNNING ||
@@ -13254,9 +13259,13 @@ void check_for_migration(struct rq *rq, struct task_struct *p)
if (active_balance) {
mark_reserved(new_cpu);
raw_spin_unlock(&migration_lock);
stop_one_cpu_nowait(prev_cpu,
ret = stop_one_cpu_nowait(prev_cpu,
active_load_balance_cpu_stop, rq,
&rq->active_balance_work);
if (!ret)
clear_reserved(new_cpu);
else
wake_up_if_idle(new_cpu);
return;
}
} else {
6 changes: 4 additions & 2 deletions kernel/sched/idle.c
@@ -66,7 +66,8 @@ static noinline int __cpuidle cpu_idle_poll(void)
local_irq_enable();
stop_critical_timings();
while (!tif_need_resched() &&
(cpu_idle_force_poll || tick_check_broadcast_expired()))
(cpu_idle_force_poll || tick_check_broadcast_expired() ||
is_reserved(smp_processor_id())))
cpu_relax();
start_critical_timings();
trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
@@ -261,7 +262,8 @@ static void do_idle(void)
* broadcast device expired for us, we don't want to go deep
* idle as we know that the IPI is going to arrive right away.
*/
if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
if (cpu_idle_force_poll || tick_check_broadcast_expired() ||
is_reserved(smp_processor_id())) {
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
21 changes: 19 additions & 2 deletions kernel/sched/sched.h
@@ -1967,7 +1967,10 @@ static inline int hrtick_enabled(struct rq *rq)
#ifdef CONFIG_SCHED_WALT
u64 sched_ktime_clock(void);
#else
#define sched_ktime_clock ktime_get_ns
static inline u64 sched_ktime_clock(void)
{
return sched_clock();
}
#endif

#ifdef CONFIG_SMP
@@ -2526,16 +2529,20 @@ DECLARE_PER_CPU(struct update_util_data *, cpufreq_update_util_data);
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags)
{
struct update_util_data *data;
u64 clock;

#ifdef CONFIG_SCHED_WALT
if (!(flags & SCHED_CPUFREQ_WALT))
return;
clock = sched_ktime_clock();
#else
clock = rq_clock(rq);
#endif

data = rcu_dereference_sched(*per_cpu_ptr(&cpufreq_update_util_data,
cpu_of(rq)));
if (data)
data->func(data, sched_ktime_clock(), flags);
data->func(data, clock, flags);
}
#else
static inline void cpufreq_update_util(struct rq *rq, unsigned int flags) {}
@@ -3160,3 +3167,13 @@ struct sched_avg_stats {
int nr_max;
};
extern void sched_get_nr_running_avg(struct sched_avg_stats *stats);

#ifdef CONFIG_SMP
static inline void sched_irq_work_queue(struct irq_work *work)
{
if (likely(cpu_online(raw_smp_processor_id())))
irq_work_queue(work);
else
irq_work_queue_on(work, cpumask_any(cpu_online_mask));
}
#endif
19 changes: 10 additions & 9 deletions kernel/sched/tune.c
@@ -103,7 +103,7 @@ root_schedtune = {
* implementation especially for the computation of the per-CPU boost
* value
*/
#define BOOSTGROUPS_COUNT 6
#define BOOSTGROUPS_COUNT 7

/* Array of configured boostgroups */
static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
@@ -691,26 +691,27 @@ static void write_default_values(struct cgroup_subsys_state *css)
static struct st_data st_targets[] = {
{ "audio-app", 0, 0, 0, 0 },
{ "background", 0, 0, 0, 0 },
{ "foreground", 0, 1, 0, 1 },
{ "foreground", 0, 1, 0, 0 },
{ "rt", 0, 0, 0, 0 },
{ "top-app", 1, 1, 0, 1 },
{ "top-app", 1, 1, 0, 0 },
};
int i;

for (i = 0; i < ARRAY_SIZE(st_targets); i++) {
struct st_data tgt = st_targets[i];

if (!strcmp(css->cgroup->kn->name, tgt.name)) {
pr_info("stune_assist: setting values for %s: boost=%d prefer_idle=%d colocate=%d no_override=%d\n",
tgt.name, tgt.boost, tgt.prefer_idle,
tgt.colocate, tgt.no_override);

boost_write(css, NULL, tgt.boost);
prefer_idle_write(css, NULL, tgt.prefer_idle);
#ifdef CONFIG_SCHED_WALT
sched_boost_override_write(css, NULL, tgt.no_override);
#ifndef CONFIG_SCHED_WALT
pr_info("stune_assist: setting values for %s: boost=%d prefer_idle=%d no_override=%d\n",
tgt.name, tgt.boost, tgt.prefer_idle, tgt.no_override);
#else
sched_colocate_write(css, NULL, tgt.colocate);
pr_info("stune_assist: setting values for %s: boost=%d prefer_idle=%d colocate=%d no_override=%d\n",
tgt.name, tgt.boost, tgt.prefer_idle, tgt.colocate, tgt.no_override);
#endif
sched_boost_override_write(css, NULL, tgt.no_override);
}
}
}
4 changes: 2 additions & 2 deletions kernel/sched/walt.c
@@ -880,7 +880,7 @@ void fixup_busy_time(struct task_struct *p, int new_cpu)
if (!same_freq_domain(new_cpu, task_cpu(p))) {
src_rq->notif_pending = true;
dest_rq->notif_pending = true;
irq_work_queue(&walt_migration_irq_work);
sched_irq_work_queue(&walt_migration_irq_work);
}

if (is_ed_enabled()) {
@@ -1961,7 +1961,7 @@ static inline void run_walt_irq_work(u64 old_window_start, struct rq *rq)
result = atomic64_cmpxchg(&walt_irq_work_lastq_ws, old_window_start,
rq->window_start);
if (result == old_window_start)
irq_work_queue(&walt_cpufreq_irq_work);
sched_irq_work_queue(&walt_cpufreq_irq_work);
}

/* Reflect task activity on its demand and cpu's busy time statistics */
29 changes: 29 additions & 0 deletions lib/cpumask.c
@@ -228,3 +228,32 @@ unsigned int cpumask_local_spread(unsigned int i, int node)
BUG();
}
EXPORT_SYMBOL(cpumask_local_spread);

static DEFINE_PER_CPU(int, distribute_cpu_mask_prev);

/**
* Returns an arbitrary cpu within srcp1 & srcp2.
*
* Iterated calls using the same srcp1 and srcp2 will be distributed within
* their intersection.
*
* Returns >= nr_cpu_ids if the intersection is empty.
*/
int cpumask_any_and_distribute(const struct cpumask *src1p,
const struct cpumask *src2p)
{
int next, prev;

/* NOTE: our first selection will skip 0. */
prev = __this_cpu_read(distribute_cpu_mask_prev);

next = cpumask_next_and(prev, src1p, src2p);
if (next >= nr_cpu_ids)
next = cpumask_first_and(src1p, src2p);

if (next < nr_cpu_ids)
__this_cpu_write(distribute_cpu_mask_prev, next);

return next;
}
EXPORT_SYMBOL(cpumask_any_and_distribute);
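
A minimal userspace sketch of the same round-robin selection, assuming plain unsigned bitmasks in place of struct cpumask and a single static variable in place of the per-CPU distribute_cpu_mask_prev: repeated calls walk the set bits of the intersection and wrap around at the end, which is what spreads tasks across their new affinity mask in the __set_cpus_allowed_ptr() hunk above.

#include <stdio.h>

static int prev;	/* stand-in for the per-CPU distribute_cpu_mask_prev */

/* Next set bit of mask strictly after position n; returns nbits if none. */
static int next_set_bit_after(unsigned int mask, int n, int nbits)
{
	for (int i = n + 1; i < nbits; i++)
		if (mask & (1u << i))
			return i;
	return nbits;
}

/* Illustrative analogue of cpumask_any_and_distribute(), not the kernel code. */
static int any_and_distribute(unsigned int m1, unsigned int m2, int nbits)
{
	unsigned int both = m1 & m2;
	int next = next_set_bit_after(both, prev, nbits);

	if (next >= nbits)			/* wrap around to the first set bit */
		next = next_set_bit_after(both, -1, nbits);
	if (next < nbits)
		prev = next;
	return next;
}

int main(void)
{
	/* 0x2a & 0x3f has bits 1, 3 and 5 set: prints 1 3 5 1 3 5 */
	for (int i = 0; i < 6; i++)
		printf("%d ", any_and_distribute(0x2a, 0x3f, 8));
	printf("\n");
	return 0;
}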
