Update bore/tt scheduler names
Signed-off-by: Peter Jung <[email protected]>
ptr1337 committed Oct 10, 2022
1 parent: 060f317 · commit: e65beab
Showing 5 changed files with 7,373 additions and 0 deletions.
5.15/sched/0001-bore.patch: 229 changes (229 additions, 0 deletions)
@@ -0,0 +1,229 @@
From c44b1b065d57060c79d198618d9114226879d14e Mon Sep 17 00:00:00 2001
From: Masahito S <[email protected]>
Date: Thu, 9 Jun 2022 18:30:37 +0900
Subject: [PATCH] bore1.4.31.0

---
include/linux/sched.h | 3 +++
init/Kconfig | 20 ++++++++++++++++++++
kernel/sched/core.c | 4 ++++
kernel/sched/debug.c | 6 ++++++
kernel/sched/fair.c | 38 ++++++++++++++++++++++++++++++++++++++
kernel/sched/features.h | 11 +++++++++++
kernel/sched/sched.h | 6 ++++++
7 files changed, 88 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4b4cc633b..7cc87ce6a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -548,6 +548,9 @@ struct sched_entity {
u64 prev_sum_exec_runtime;

u64 nr_migrations;
+#ifdef CONFIG_SCHED_BORE
+ u64 burst_time;
+#endif // CONFIG_SCHED_BORE

#ifdef CONFIG_FAIR_GROUP_SCHED
int depth;
diff --git a/init/Kconfig b/init/Kconfig
index e9119bf54..3e6d85355 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1262,6 +1262,26 @@ config CHECKPOINT_RESTORE

If unsure, say N here.

+config SCHED_BORE
+ bool "Burst-Oriented Response Enhancer"
+ default y
+ help
+ In Desktop and Mobile computing, one might prefer interactive
+ tasks to keep responsive no matter what they run in the background.
+
+ Enabling this kernel feature modifies the scheduler to discriminate
+ tasks by their burst time (runtime since it last went sleeping or
+ yielding state) and prioritize those that run less bursty.
+ Such tasks usually include window compositor, widgets backend,
+ terminal emulator, video playback, games and so on.
+ With a little impact to scheduling fairness, it may improve
+ responsiveness especially under heavy background workload.
+
+ You can turn it off by writing NO_BURST_PENALTY to sched/features.
+ Enabling this feature implies NO_GENTLE_FAIR_SLEEPERS by default.
+
+ If unsure say Y here.
+
config SCHED_AUTOGROUP
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1eec4925b..325fad52b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -9316,6 +9316,10 @@ void __init sched_init(void)
BUG_ON(&dl_sched_class + 1 != &stop_sched_class);
#endif

+#ifdef CONFIG_SCHED_BORE
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 1.4.31.0 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();

#ifdef CONFIG_FAIR_GROUP_SCHED
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 102d6f70e..78a6d28e6 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -316,6 +316,12 @@ static __init int sched_init_debug(void)

debugfs_create_u32("latency_warn_ms", 0644, debugfs_sched, &sysctl_resched_latency_warn_ms);
debugfs_create_u32("latency_warn_once", 0644, debugfs_sched, &sysctl_resched_latency_warn_once);
+#ifdef CONFIG_SCHED_BORE
+ debugfs_create_u16("burst_penalty_scale", 0644, debugfs_sched, &sched_burst_penalty_scale);
+ debugfs_create_u8("burst_granularity", 0644, debugfs_sched, &sched_burst_granularity);
+ debugfs_create_u8("burst_reduction", 0644, debugfs_sched, &sched_burst_reduction);
+ debugfs_create_bool("burst_preempt", 0644, debugfs_sched, &sched_burst_preempt);
+#endif // CONFIG_SCHED_BORE

#ifdef CONFIG_SMP
debugfs_create_file("tunable_scaling", 0644, debugfs_sched, NULL, &sched_scaling_fops);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 2f461f059..42a5a110c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
*
* Adaptive scheduling granularity, math enhancements by Peter Zijlstra
* Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra
+ *
+ * Burst-Oriented Response Enhancer (BORE) CPU Scheduler
+ * Copyright (C) 2021 Masahito Suzuki <[email protected]>
*/
#include "sched.h"

@@ -92,6 +95,13 @@ static unsigned int normalized_sysctl_sched_wakeup_granularity = 1000000UL;

const_debug unsigned int sysctl_sched_migration_cost = 500000UL;

+#ifdef CONFIG_SCHED_BORE
+unsigned short __read_mostly sched_burst_penalty_scale = 1256;
+unsigned char __read_mostly sched_burst_granularity = 5;
+unsigned char __read_mostly sched_burst_reduction = 2;
+bool __read_mostly sched_burst_preempt = 1;
+#endif // CONFIG_SCHED_BORE
+
int sched_thermal_decay_shift;
static int __init setup_sched_thermal_decay_shift(char *str)
{
@@ -846,6 +856,10 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
+#ifdef CONFIG_SCHED_BORE
+ u64 burst_count;
+ u32 msb, bcnt_prec10, burst_score;
+#endif // CONFIG_SCHED_BORE

if (unlikely(!curr))
return;
@@ -867,6 +881,19 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);

+#ifdef CONFIG_SCHED_BORE
+ curr->burst_time += delta_exec;
+ if(sched_feat(BURST_PENALTY)) {
+ burst_count = curr->burst_time >> sched_burst_granularity;
+ msb = fls64(burst_count);
+ bcnt_prec10 = (msb << 10) | (burst_count << ((65 - msb) & 0x3F) >> 54);
+ burst_score = min(bcnt_prec10 * sched_burst_penalty_scale >> 20, (u32)39);
+ curr->vruntime += mul_u64_u32_shr(
+ calc_delta_fair(delta_exec, curr),
+ sched_prio_to_wmult[burst_score], 22);
+ }
+ else
+#endif // CONFIG_SCHED_BORE
curr->vruntime += calc_delta_fair(delta_exec, curr);
update_min_vruntime(cfs_rq);

@@ -5718,6 +5745,9 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
dequeue_entity(cfs_rq, se, flags);
+#ifdef CONFIG_SCHED_BORE
+ se->burst_time >>= sched_burst_reduction;
+#endif // CONFIG_SCHED_BORE

cfs_rq->h_nr_running--;
cfs_rq->idle_h_nr_running -= idle_h_nr_running;
@@ -7146,6 +7176,11 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
return;

update_curr(cfs_rq_of(se));
+#ifdef CONFIG_SCHED_BORE
+ /* More bursty tasks are preempted by those less bursty anyway */
+ if(sched_feat(BURST_PENALTY) && sched_burst_preempt && (se->burst_time > pse->burst_time))
+ goto preempt;
+#endif // CONFIG_SCHED_BORE
if (wakeup_preempt_entity(se, pse) == 1) {
/*
* Bias pick_next to pick the sched entity that is
@@ -7382,6 +7417,9 @@ static void yield_task_fair(struct rq *rq)
struct task_struct *curr = rq->curr;
struct cfs_rq *cfs_rq = task_cfs_rq(curr);
struct sched_entity *se = &curr->se;
+#ifdef CONFIG_SCHED_BORE
+ se->burst_time >>= sched_burst_reduction;
+#endif // CONFIG_SCHED_BORE

/*
* Are we the only task in the tree?
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 1cf435bbc..f4ffb0dc8 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -1,10 +1,21 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifdef CONFIG_SCHED_BORE
+/*
+ * Discriminate tasks by their burst time and prioritize those
+ * that run less bursty.
+ */
+SCHED_FEAT(BURST_PENALTY, true)
+#endif // CONFIG_SCHED_BORE
/*
* Only give sleepers 50% of their service deficit. This allows
* them to run sooner, but does not allow tons of sleepers to
* rip the spread apart.
*/
+#ifdef CONFIG_SCHED_BORE
+SCHED_FEAT(GENTLE_FAIR_SLEEPERS, false)
+#else // CONFIG_SCHED_BORE
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, true)
+#endif // CONFIG_SCHED_BORE

/*
* Place new tasks ahead so that they do not starve already running
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e8a554948..c82dceebe 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2411,6 +2411,12 @@ extern unsigned int sysctl_sched_idle_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern int sysctl_resched_latency_warn_ms;
extern int sysctl_resched_latency_warn_once;
+#ifdef CONFIG_SCHED_BORE
+extern unsigned short sched_burst_penalty_scale;
+extern unsigned char sched_burst_granularity;
+extern unsigned char sched_burst_reduction;
+extern bool sched_burst_preempt;
+#endif // CONFIG_SCHED_BORE

extern unsigned int sysctl_sched_tunable_scaling;

--
2.30.2
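
The substance of the patch is the burst-score arithmetic added to update_curr() in kernel/sched/fair.c. As a rough standalone illustration (user-space C, not kernel code), the sketch below reproduces that arithmetic with the patch's default tunables and emulates fls64() with a GCC builtin. bcnt_prec10 is a fixed-point log2 of the shifted burst time: the MSB index in the upper bits, with the 10 bits just below the MSB as the fraction. The resulting burst_score, clamped to 0..39, indexes sched_prio_to_wmult, so vruntime advances by a factor of sched_prio_to_wmult[burst_score] / 2^22, which is exactly 1 at score 20 and grows by roughly 25% per step above it.

/*
 * Standalone illustration of the burst_score math from the update_curr()
 * hunk above, using the patch's default tunables.
 * Build with: gcc -O2 -o burst_score_demo burst_score_demo.c
 */
#include <stdio.h>
#include <stdint.h>

#define SCHED_BURST_PENALTY_SCALE 1256  /* default sched_burst_penalty_scale */
#define SCHED_BURST_GRANULARITY   5     /* default sched_burst_granularity   */

/* Kernel fls64(): 1-based index of the most significant set bit, 0 if x == 0. */
static uint32_t fls64_emul(uint64_t x)
{
    return x ? 64 - (uint32_t)__builtin_clzll(x) : 0;
}

/* Mirrors the CONFIG_SCHED_BORE branch added to update_curr(). */
static uint32_t burst_score(uint64_t burst_time_ns)
{
    uint64_t burst_count = burst_time_ns >> SCHED_BURST_GRANULARITY;
    uint32_t msb = fls64_emul(burst_count);
    /* Fixed-point log2: MSB index in the high bits, the 10 bits just
     * below the MSB as the fractional part. */
    uint32_t bcnt_prec10 = (msb << 10) |
        (uint32_t)(burst_count << ((65 - msb) & 0x3F) >> 54);
    uint32_t score = (uint32_t)((uint64_t)bcnt_prec10 *
                                SCHED_BURST_PENALTY_SCALE >> 20);
    return score < 39 ? score : 39;  /* clamp to the 40-entry wmult table */
}

int main(void)
{
    /* burst_time accumulates nanoseconds of CPU time since the task
     * last slept or yielded (and is shifted down on dequeue/yield). */
    const uint64_t samples_ns[] = {
        1000ULL, 100 * 1000ULL, 10 * 1000 * 1000ULL,
        1000ULL * 1000 * 1000, 60ULL * 1000 * 1000 * 1000
    };
    for (size_t i = 0; i < sizeof(samples_ns) / sizeof(samples_ns[0]); i++)
        printf("burst_time %14llu ns -> burst_score %2u\n",
               (unsigned long long)samples_ns[i],
               (unsigned)burst_score(samples_ns[i]));
    return 0;
}

With these defaults, a task that has been running for about one second without sleeping lands around score 31, so its vruntime climbs several times faster than the fair rate; the right shift by sched_burst_reduction (default 2, i.e. divide by 4) in dequeue_task_fair() and yield_task_fair() lets a task shed that penalty as soon as it starts sleeping or yielding again.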

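Beyond the compile-time option, the Kconfig help text and the kernel/sched/debug.c hunk describe runtime control: the penalty can be toggled through the sched features file, and the four burst_* knobs are ordinary debugfs attributes. The following minimal sketch reads those knobs and disables the penalty; it assumes debugfs is mounted at /sys/kernel/debug, a kernel carrying this patch, and root privileges. The file names simply mirror the debugfs_create_* calls above.

/*
 * Minimal sketch: read the BORE tunables exposed in the debug.c hunk and
 * disable the penalty at runtime, as the Kconfig help text describes.
 * Assumes debugfs is mounted at /sys/kernel/debug; run as root.
 */
#include <stdio.h>

static void show(const char *path)
{
    char buf[64];
    FILE *f = fopen(path, "r");

    if (!f) {
        perror(path);
        return;
    }
    if (fgets(buf, sizeof(buf), f))
        printf("%-48s %s", path, buf);  /* value already ends in '\n' */
    fclose(f);
}

int main(void)
{
    /* The four knobs created with debugfs_create_u16/u8/bool() above. */
    show("/sys/kernel/debug/sched/burst_penalty_scale");
    show("/sys/kernel/debug/sched/burst_granularity");
    show("/sys/kernel/debug/sched/burst_reduction");
    show("/sys/kernel/debug/sched/burst_preempt");

    /* Turn the penalty off; write BURST_PENALTY to turn it back on. */
    FILE *feat = fopen("/sys/kernel/debug/sched/features", "w");

    if (!feat) {
        perror("/sys/kernel/debug/sched/features");
        return 1;
    }
    fputs("NO_BURST_PENALTY", feat);
    fclose(feat);
    return 0;
}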