Commit 498e8c0

…56-r2-release.html

We already had two of the three changes vs r1 since 1e7f731, so this only adds https://gitlab.com/alfredchen/linux-bmq/-/commit/0761e7e82af6b9e8c43435a69f8b73e85a684471

Fixes #2
Tk-Glitch committed Apr 13, 2020
1 parent 9b1bb7e commit 498e8c0
Showing 2 changed files with 36 additions and 126 deletions.
linux56-tkg/PKGBUILD (6 changes: 3 additions, 3 deletions)
@@ -118,7 +118,7 @@ source=("https://www.kernel.org/pub/linux/kernel/v5.x/linux-${_basekernel}.tar.x
 #0008-5.6-bcachefs.patch
 0009-glitched-ondemand-bmq.patch
 0009-glitched-bmq.patch
-0009-bmq_v5.6-r1.patch
+0009-bmq_v5.6-r2.patch
 0011-ZFS-fix.patch
 #0012-linux-hardened.patch
 0013-tp_smapi_ec.patch
@@ -140,7 +140,7 @@ sha256sums=('e342b04a2aa63808ea0ef1baab28fc520bd031ef8cf93d9ee4a31d4058fcb622'
 '2d9260b80b43bbd605cf420d6bd53aa7262103dfd77196ba590ece5600b6dc0d'
 '9fad4a40449e09522899955762c8928ae17f4cdaa16e01239fd12592e9d58177'
 '965a517a283f265a012545fbb5cc9e516efc9f6166d2aa1baf7293a32a1086b7'
-'fee8594610e5535296bd57dfa21af281cf161264f0f466f204a1dc1a2aa8e0dc'
+'0e1b569bf16a6c514710715eff06df04447378d1c067b853db0c454ac3c5e463'
 '49262ce4a8089fa70275aad742fc914baa28d9c384f710c9a62f64796d13e104'
 '4a83c17a33779df304ee44ad8e736069b25d917baec429ecdd193fe1a9a63576')

@@ -243,7 +243,7 @@ prepare() {
 		patch -Np1 -i ../0005-glitched-pds.patch
 	elif [ "${_cpusched}" == "bmq" ]; then
 		# BMQ
-		patch -Np1 -i ../0009-bmq_v5.6-r1.patch
+		patch -Np1 -i ../0009-bmq_v5.6-r2.patch
 		if [ "${_aggressive_ondemand}" == "true" ]; then
 			patch -Np1 -i ../0009-glitched-ondemand-bmq.patch
 		fi
0009-bmq_v5.6-r1.patch → 0009-bmq_v5.6-r2.patch (33 additions, 123 deletions)
@@ -571,10 +571,10 @@ index 21fb5a5662b5..ac31239aa51a 100644
  obj-$(CONFIG_CPU_FREQ_GOV_SCHEDUTIL) += cpufreq_schedutil.o
 diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
 new file mode 100644
-index 000000000000..e6d6fc98bead
+index 000000000000..b37608bbc23a
 --- /dev/null
 +++ b/kernel/sched/bmq.c
-@@ -0,0 +1,5982 @@
+@@ -0,0 +1,5977 @@
 +/*
 + * kernel/sched/bmq.c
 + *
@@ -647,7 +647,7 @@ index 000000000000..e6d6fc98bead
 +
 +static inline void print_scheduler_version(void)
 +{
-+	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r1 by Alfred Chen.\n");
++	printk(KERN_INFO "bmq: BMQ CPU Scheduler 5.6-r2 by Alfred Chen.\n");
 +}
 +
 +/**
@@ -3808,6 +3808,9 @@ index 000000000000..e6d6fc98bead
 +{
 +	struct cpumask *affinity_mask, *end_mask;
 +
++	if (unlikely(!rq->online))
++		return 0;
++
 +	if (cpumask_empty(&sched_rq_pending_mask))
 +		return 0;
 +
@@ -3870,19 +3873,17 @@ index 000000000000..e6d6fc98bead
 +	if (unlikely(rq->skip)) {
 +		next = rq_runnable_task(rq);
 +#ifdef CONFIG_SMP
-+		if (likely(rq->online))
-+			if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+				next = rq_runnable_task(rq);
++		if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++			next = rq_runnable_task(rq);
 +#endif
 +		rq->skip = NULL;
 +		return next;
 +	}
 +
 +	next = rq_first_bmq_task(rq);
 +#ifdef CONFIG_SMP
-+	if (likely(rq->online))
-+		if (next == rq->idle && take_other_rq_tasks(rq, cpu))
-+			return rq_first_bmq_task(rq);
++	if (next == rq->idle && take_other_rq_tasks(rq, cpu))
++		return rq_first_bmq_task(rq);
 +#endif
 +	return next;
 +}
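
The remaining upstream change (gitlab commit 0761e7e) is visible in the two hunks above: the rq->online test moves out of the pick_next_task() call sites and into take_other_rq_tasks() itself. Condensed for comparison (excerpts of the BMQ code above, not standalone):

	/* r1: each SMP pull site guarded the pull separately */
	if (likely(rq->online))
		if (next == rq->idle && take_other_rq_tasks(rq, cpu))
			next = rq_runnable_task(rq);

	/* r2: the callee bails out once for an offline runqueue,
	   so both call sites shrink to the idle check alone */
	if (unlikely(!rq->online))
		return 0;
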
@@ -3893,8 +3894,11 @@ index 000000000000..e6d6fc98bead
 +
 +	if (unlikely(sched_timeslice_ns == p->time_slice))
 +		rq->last_ts_switch = rq->clock;
++
++	if (p == rq->idle)
++		schedstat_inc(rq->sched_goidle);
 +#ifdef CONFIG_HIGH_RES_TIMERS
-+	if (p != rq->idle)
++	else
 +		hrtick_start(rq, p->time_slice);
 +#endif
 +}
@@ -3997,9 +4001,7 @@ index 000000000000..e6d6fc98bead
 +	set_rq_task(rq, next);
 +
 +	if (prev != next) {
-+		if (MAX_PRIO == next->prio)
-+			schedstat_inc(rq->sched_goidle);
-+
++		rq->nr_switches++;
 +		/*
 +		 * RCU users of rcu_dereference(rq->curr) may not see
 +		 * changes to task_struct made by pick_next_task().
@@ -4020,7 +4022,6 @@ index 000000000000..e6d6fc98bead
 +	 * is a RELEASE barrier),
 +	 */
 +	++*switch_count;
-+	rq->nr_switches++;
 +	rq->last_ts_switch = rq->clock;
 +
 +	trace_sched_switch(preempt, prev, next);
@@ -5733,8 +5734,6 @@ index 000000000000..e6d6fc98bead
 +	idle->last_ran = rq->clock_task;
 +	idle->state = TASK_RUNNING;
 +	idle->flags |= PF_IDLE;
-+	/* Setting prio to illegal value shouldn't matter as it will never be de/enqueued */
-+	idle->prio = MAX_PRIO;
 +	idle->bmq_idx = IDLE_TASK_SCHED_PRIO;
 +	bmq_init_idle(&rq->queue, idle);
 +
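
Connecting the three hunks above: r1 marked the idle task with a sentinel priority and detected idle picks in __schedule(), while r2 compares task identity in set_rq_task() instead, so the sentinel assignment in init_idle() can be dropped and rq->nr_switches++ folds into the prev != next branch. Side by side (excerpts, not standalone):

	/* r1: sentinel priority, checked at schedule time */
	idle->prio = MAX_PRIO;
	if (MAX_PRIO == next->prio)
		schedstat_inc(rq->sched_goidle);

	/* r2: identity check where the next task is set */
	if (p == rq->idle)
		schedstat_inc(rq->sched_goidle);
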
@@ -6076,14 +6075,12 @@ index 000000000000..e6d6fc98bead
 +	}
 +}
 +
-+#define TOPOLOGY_CPUMASK(name, func) \
-+	if (cpumask_and(chk, chk, func(cpu))) {	\
-+		per_cpu(sched_cpu_llc_mask, cpu) = chk;	\
-+		per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu));	\
-+		printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
++#define TOPOLOGY_CPUMASK(name, mask, last) \
++	if (cpumask_and(chk, chk, mask))	\
++		printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
 +			cpu, (chk++)->bits[0]);	\
-+	}	\
-+	cpumask_complement(chk, func(cpu))
++	if (!last) \
++		cpumask_complement(chk, mask)
 +
 +static void sched_init_topology_cpumask(void)
 +{
@@ -6095,20 +6092,18 @@ index 000000000000..e6d6fc98bead
 +
 +	cpumask_complement(chk, cpumask_of(cpu));
 +#ifdef CONFIG_SCHED_SMT
-+	TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
-+#endif
-+#ifdef CONFIG_SCHED_MC
-+	TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
++	TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
 +#endif
++	per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
++	per_cpu(sched_cpu_llc_mask, cpu) = chk;
++	TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
 +
-+	TOPOLOGY_CPUMASK(core, topology_core_cpumask);
++	TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
 +
-+	if (cpumask_and(chk, chk, cpu_online_mask))
-+		printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
-+			cpu, (chk++)->bits[0]);
++	TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
 +
 +	per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
-+	printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
++	printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
 +		cpu, per_cpu(sd_llc_id, cpu),
 +		(int) (per_cpu(sched_cpu_llc_mask, cpu) -
 +			&(per_cpu(sched_cpu_affinity_masks, cpu)[0])));
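
The reworked macro also stops doing the LLC bookkeeping itself; sd_llc_id and sched_cpu_llc_mask are now set once, outside, from cpu_coregroup_mask(). A runnable userspace sketch of the cascade, with plain unsigned longs standing in for struct cpumask and invented masks for an 8-CPU machine (illustrative only, not kernel code):

	#include <stdio.h>

	/* Mirror of the r2 TOPOLOGY_CPUMASK(): intersect the current slot with
	 * this level's mask, print and advance on a non-empty result, and seed
	 * the next slot with the complement unless this is the last level. */
	#define TOPOLOGY_CPUMASK(name, mask, last)			\
		if ((*chk &= (mask)))					\
			printf("bmq: cpu#%02d affinity mask: 0x%08lx - " #name "\n", \
			       cpu, *chk++);				\
		if (!(last))						\
			*chk = ~(mask)

	int main(void)
	{
		int cpu = 0;
		unsigned long masks[4], *chk = masks;
		unsigned long smt = 0x03UL;	/* cpu0 plus its SMT sibling */
		unsigned long llc = 0x0fUL;	/* CPUs sharing the LLC      */
		unsigned long online = 0xffUL;	/* all eight CPUs online     */

		*chk = ~(1UL << cpu);		/* everyone but ourselves    */
		TOPOLOGY_CPUMASK(smt, smt, 0);
		TOPOLOGY_CPUMASK(coregroup, llc, 0);
		TOPOLOGY_CPUMASK(core, llc, 0);	/* empty: already claimed    */
		TOPOLOGY_CPUMASK(others, online, 1);
		return 0;
	}

With these toy masks the core level claims nothing (the coregroup already took it), which is exactly why the macro only advances chk on a non-empty intersection.
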
@@ -7306,7 +7301,7 @@ index afff644da065..4da52afaeff8 100644
  static inline int
  update_irq_load_avg(struct rq *rq, u64 running)
 diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 9ea647835fd6..911b30506c83 100644
+index 9ea647835fd6..f38d1343e2bf 100644
 --- a/kernel/sched/sched.h
 +++ b/kernel/sched/sched.h
 @@ -2,6 +2,10 @@
@@ -7320,24 +7315,15 @@ index 9ea647835fd6..911b30506c83 100644
  #include <linux/sched.h>
 
  #include <linux/sched/autogroup.h>
-@@ -2480,15 +2484,8 @@ static inline void membarrier_switch_mm(struct rq *rq,
+@@ -2492,3 +2496,9 @@ static inline bool is_per_cpu_kthread(struct task_struct *p)
+ 	return true;
  }
  #endif
- 
--#ifdef CONFIG_SMP
--static inline bool is_per_cpu_kthread(struct task_struct *p)
++
 +static inline int task_running_nice(struct task_struct *p)
- {
--	if (!(p->flags & PF_KTHREAD))
--		return false;
--
--	if (p->nr_cpus_allowed != 1)
--		return false;
--
--	return true;
++{
 +	return (task_nice(p) > 0);
- }
--#endif
++}
++#endif /* !CONFIG_SCHED_BMQ */
 diff --git a/kernel/sched/stats.c b/kernel/sched/stats.c
 index 750fb3c67eed..0cc040a28d3f
@@ -7565,79 +7551,3 @@ index b5e3496cf803..545be2c4f07c 100644
  };
  struct wakeup_test_data *x = data;
 
-diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
-index 911b30506c83336a94a2748a321060ab11e8f9a7..f38d1343e2bfeb728637be4959120a7b37351b1a 100644
---- a/kernel/sched/sched.h
-+++ b/kernel/sched/sched.h
-@@ -2484,6 +2484,19 @@ static inline void membarrier_switch_mm(struct rq *rq,
- }
- #endif
- 
-+#ifdef CONFIG_SMP
-+static inline bool is_per_cpu_kthread(struct task_struct *p)
-+{
-+	if (!(p->flags & PF_KTHREAD))
-+		return false;
-+
-+	if (p->nr_cpus_allowed != 1)
-+		return false;
-+
-+	return true;
-+}
-+#endif
-+
- static inline int task_running_nice(struct task_struct *p)
- {
- 	return (task_nice(p) > 0);
-diff --git a/kernel/sched/bmq.c b/kernel/sched/bmq.c
-index e6d6fc98bead621f0ea7bfaf885b66af523413c1..fed5e132f2af67007f70851527a401b8f0f79cd1 100644
---- a/kernel/sched/bmq.c
-+++ b/kernel/sched/bmq.c
-@@ -5499,14 +5499,12 @@ static void sched_init_topology_cpumask_early(void)
- 	}
- }
- 
--#define TOPOLOGY_CPUMASK(name, func) \
--	if (cpumask_and(chk, chk, func(cpu))) {	\
--		per_cpu(sched_cpu_llc_mask, cpu) = chk;	\
--		per_cpu(sd_llc_id, cpu) = cpumask_first(func(cpu));	\
--		printk(KERN_INFO "bmq: cpu#%d affinity mask - "#name" 0x%08lx", \
-+#define TOPOLOGY_CPUMASK(name, mask, last) \
-+	if (cpumask_and(chk, chk, mask))	\
-+		printk(KERN_INFO "bmq: cpu#%02d affinity mask: 0x%08lx - "#name,\
- 			cpu, (chk++)->bits[0]);	\
--	}	\
--	cpumask_complement(chk, func(cpu))
-+	if (!last) \
-+		cpumask_complement(chk, mask)
- 
- static void sched_init_topology_cpumask(void)
- {
-@@ -5518,20 +5516,18 @@ static void sched_init_topology_cpumask(void)
- 
- 	cpumask_complement(chk, cpumask_of(cpu));
- #ifdef CONFIG_SCHED_SMT
--	TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask);
--#endif
--#ifdef CONFIG_SCHED_MC
--	TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask);
-+	TOPOLOGY_CPUMASK(smt, topology_sibling_cpumask(cpu), false);
- #endif
-+	per_cpu(sd_llc_id, cpu) = cpumask_first(cpu_coregroup_mask(cpu));
-+	per_cpu(sched_cpu_llc_mask, cpu) = chk;
-+	TOPOLOGY_CPUMASK(coregroup, cpu_coregroup_mask(cpu), false);
- 
--	TOPOLOGY_CPUMASK(core, topology_core_cpumask);
-+	TOPOLOGY_CPUMASK(core, topology_core_cpumask(cpu), false);
- 
--	if (cpumask_and(chk, chk, cpu_online_mask))
--		printk(KERN_INFO "bmq: cpu#%d affinity mask - others 0x%08lx",
--			cpu, (chk++)->bits[0]);
-+	TOPOLOGY_CPUMASK(others, cpu_online_mask, true);
- 
- 	per_cpu(sched_cpu_affinity_end_mask, cpu) = chk;
--	printk(KERN_INFO "bmq: cpu#%d llc_id = %d, llc_mask idx = %d\n",
-+	printk(KERN_INFO "bmq: cpu#%02d llc_id = %d, llc_mask idx = %d\n",
- 		cpu, per_cpu(sd_llc_id, cpu),
- 		(int) (per_cpu(sched_cpu_llc_mask, cpu) -
- 			&(per_cpu(sched_cpu_affinity_masks, cpu)[0])));

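The deleted block above (everything from the second diff --git a/kernel/sched/sched.h onward) is the fixup content that had been appended to the repo's r1 patch, dropped here because the official r2 release integrates it into the main hunks. For reference, the semantics of the task_running_nice() helper it carries, as a runnable userspace sketch (the struct, the task_nice() stand-in, and the sample values are invented; only the predicate mirrors the patch):

	#include <stdio.h>

	struct task_struct { const char *comm; int nice; };

	/* stand-in: the kernel derives nice from p->static_prio */
	static int task_nice(const struct task_struct *p)
	{
		return p->nice;
	}

	static inline int task_running_nice(struct task_struct *p)
	{
		return (task_nice(p) > 0);
	}

	int main(void)
	{
		struct task_struct tasks[] = {
			{ "kworker", 0 }, { "niced-build", 19 }, { "audio", -11 },
		};

		for (unsigned int i = 0; i < sizeof(tasks) / sizeof(tasks[0]); i++)
			printf("%-12s nice=%3d running_nice=%d\n", tasks[i].comm,
			       tasks[i].nice, task_running_nice(&tasks[i]));
		return 0;
	}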