Commit

6.6: Add bore 4.5.2
Signed-off-by: Piotr Gorski <[email protected]>
sirlucjan committed Mar 7, 2024
1 parent 4588682 commit 0f5d2ca
Showing 3 changed files with 48 additions and 36 deletions.
28 changes: 16 additions & 12 deletions 6.6/sched-dev/0001-bore-cachy-ext.patch
@@ -1,18 +1,18 @@
From e2023d9aeaa1a09d0e5ceeb3d030f506d4677271 Mon Sep 17 00:00:00 2001
From 02c43513148fa3908b97efc21771beb8914f30fd Mon Sep 17 00:00:00 2001
From: Piotr Gorski <[email protected]>
Date: Mon, 4 Mar 2024 12:47:24 +0100
Date: Thu, 7 Mar 2024 22:20:58 +0100
Subject: [PATCH] bore-cachy-ext

Signed-off-by: Piotr Gorski <[email protected]>
---
include/linux/sched.h | 12 ++
init/Kconfig | 19 +++
kernel/sched/core.c | 148 ++++++++++++++++++++
kernel/sched/core.c | 148 +++++++++++++++++++
kernel/sched/debug.c | 61 +++++++-
kernel/sched/fair.c | 302 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 306 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/features.h | 4 +
kernel/sched/sched.h | 7 +
7 files changed, 538 insertions(+), 15 deletions(-)
7 files changed, 542 insertions(+), 15 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index f81ff964c..0310f2791 100644
@@ -68,7 +68,7 @@ index ed7ef50e1..94d997f4e 100644
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 5f2f52fc7..6f3b08d78 100644
index 5f2f52fc7..a9e30f2ff 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4519,6 +4519,143 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -241,7 +241,7 @@ index 5f2f52fc7..6f3b08d78 100644

+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.1 by Masahito Suzuki");
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.2 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
@@ -356,7 +356,7 @@ index bbc6b8e37..fb22479f2 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 45dee822b..0e090d965 100644
index 45dee822b..491306b65 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -789,7 +789,7 @@ index 45dee822b..0e090d965 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5136,12 +5384,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5136,12 +5384,22 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
@@ -798,7 +798,11 @@ index 45dee822b..0e090d965 100644

- lag *= load + scale_load_down(se->load.weight);
+ lag *= load + entity_weight(se);
+#if !defined(CONFIG_SCHED_BORE)
if (WARN_ON_ONCE(!load))
+#else // CONFIG_SCHED_BORE
+ if (unlikely(!load))
+#endif // CONFIG_SCHED_BORE
load = 1;
- lag = div_s64(lag, load);
+ lag = div64_s64(lag, load);
@@ -811,7 +815,7 @@ index 45dee822b..0e090d965 100644
}

se->vruntime = vruntime - lag;
@@ -6698,6 +6952,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -6698,6 +6956,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);

util_est_dequeue(&rq->cfs, p);
@@ -826,7 +830,7 @@ index 45dee822b..0e090d965 100644

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8429,16 +8691,25 @@ static void yield_task_fair(struct rq *rq)
@@ -8429,16 +8695,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -852,7 +856,7 @@ index 45dee822b..0e090d965 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12515,6 +12786,9 @@ static void task_fork_fair(struct task_struct *p)
@@ -12515,6 +12790,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
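For readers who do not want to resolve the nested diff by eye, the place_entity() hunk above ends up as roughly the following once the updated patch is applied to kernel/sched/fair.c. This is a sketch assembled only from the context visible in the hunk; the surrounding function body is omitted.

	/* place_entity(): lag scaling as changed by the BORE hunk above.
	 * entity_weight() replaces scale_load_down(se->load.weight), and
	 * div64_s64() replaces div_s64(); with CONFIG_SCHED_BORE=y the
	 * WARN_ON_ONCE() is swapped for a plain unlikely() check.
	 */
	lag *= load + entity_weight(se);
#if !defined(CONFIG_SCHED_BORE)
	if (WARN_ON_ONCE(!load))
#else // CONFIG_SCHED_BORE
	if (unlikely(!load))
#endif // CONFIG_SCHED_BORE
		load = 1;
	lag = div64_s64(lag, load);

The same hunk appears in the two patch variants listed below.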
28 changes: 16 additions & 12 deletions 6.6/sched-dev/0001-bore-cachy.patch
@@ -1,18 +1,18 @@
From 7d437013248adcaa0664baa2b1bc8fa4c83c86d2 Mon Sep 17 00:00:00 2001
From 8593b85bea83b6ef1f28fa3295813ef2ef7c5025 Mon Sep 17 00:00:00 2001
From: Piotr Gorski <[email protected]>
Date: Mon, 4 Mar 2024 12:46:48 +0100
Date: Thu, 7 Mar 2024 22:20:26 +0100
Subject: [PATCH] bore-cachy

Signed-off-by: Piotr Gorski <[email protected]>
---
include/linux/sched.h | 12 ++
init/Kconfig | 19 +++
kernel/sched/core.c | 148 ++++++++++++++++++++
kernel/sched/core.c | 148 +++++++++++++++++++
kernel/sched/debug.c | 61 +++++++-
kernel/sched/fair.c | 302 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 306 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/features.h | 4 +
kernel/sched/sched.h | 7 +
7 files changed, 538 insertions(+), 15 deletions(-)
7 files changed, 542 insertions(+), 15 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac38..1f53f6ed9 100644
@@ -68,7 +68,7 @@ index d11e7d652..5b4a799b1 100644
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a854b7183..cb38bf501 100644
index a854b7183..32d065725 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4488,6 +4488,143 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -241,7 +241,7 @@ index a854b7183..cb38bf501 100644

+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.1 by Masahito Suzuki");
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.2 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
@@ -356,7 +356,7 @@ index 4c3d0d9f3..1586636fa 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d336af9cb..ddb33d8ed 100644
index d336af9cb..108ba9152 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -789,7 +789,7 @@ index d336af9cb..ddb33d8ed 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5136,12 +5384,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5136,12 +5384,22 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
@@ -798,7 +798,11 @@ index d336af9cb..ddb33d8ed 100644

- lag *= load + scale_load_down(se->load.weight);
+ lag *= load + entity_weight(se);
+#if !defined(CONFIG_SCHED_BORE)
if (WARN_ON_ONCE(!load))
+#else // CONFIG_SCHED_BORE
+ if (unlikely(!load))
+#endif // CONFIG_SCHED_BORE
load = 1;
- lag = div_s64(lag, load);
+ lag = div64_s64(lag, load);
@@ -811,7 +815,7 @@ index d336af9cb..ddb33d8ed 100644
}

se->vruntime = vruntime - lag;
@@ -6698,6 +6952,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -6698,6 +6956,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);

util_est_dequeue(&rq->cfs, p);
@@ -826,7 +830,7 @@ index d336af9cb..ddb33d8ed 100644

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8429,16 +8691,25 @@ static void yield_task_fair(struct rq *rq)
@@ -8429,16 +8695,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -852,7 +856,7 @@ index d336af9cb..ddb33d8ed 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12515,6 +12786,9 @@ static void task_fork_fair(struct task_struct *p)
@@ -12515,6 +12790,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
28 changes: 16 additions & 12 deletions 6.6/sched-dev/0001-bore.patch
@@ -1,18 +1,18 @@
From 6a671af1c3857f5c6b46c593b14f060a9cffd67f Mon Sep 17 00:00:00 2001
From b44d5346a9ed1705f3d55e2970191de51d8d9f32 Mon Sep 17 00:00:00 2001
From: Piotr Gorski <[email protected]>
Date: Mon, 4 Mar 2024 12:46:17 +0100
Date: Thu, 7 Mar 2024 22:18:00 +0100
Subject: [PATCH] bore

Signed-off-by: Piotr Gorski <[email protected]>
---
include/linux/sched.h | 12 ++
init/Kconfig | 19 +++
kernel/sched/core.c | 148 ++++++++++++++++++++
kernel/sched/core.c | 148 +++++++++++++++++++
kernel/sched/debug.c | 61 +++++++-
kernel/sched/fair.c | 302 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/fair.c | 306 ++++++++++++++++++++++++++++++++++++++--
kernel/sched/features.h | 4 +
kernel/sched/sched.h | 7 +
7 files changed, 538 insertions(+), 15 deletions(-)
7 files changed, 542 insertions(+), 15 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac38..1f53f6ed9 100644
@@ -68,7 +68,7 @@ index 18fece8fe..598329512 100644
bool "Automatic process group scheduling"
select CGROUPS
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a854b7183..cb38bf501 100644
index a854b7183..32d065725 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4488,6 +4488,143 @@ int wake_up_state(struct task_struct *p, unsigned int state)
@@ -241,7 +241,7 @@ index a854b7183..cb38bf501 100644

+#ifdef CONFIG_SCHED_BORE
+ sched_init_bore();
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.1 by Masahito Suzuki");
+ printk(KERN_INFO "BORE (Burst-Oriented Response Enhancer) CPU Scheduler modification 4.5.2 by Masahito Suzuki");
+#endif // CONFIG_SCHED_BORE
+
wait_bit_init();
@@ -356,7 +356,7 @@ index 4c3d0d9f3..1586636fa 100644
P(se.avg.runnable_sum);
P(se.avg.util_sum);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d336af9cb..ddb33d8ed 100644
index d336af9cb..108ba9152 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -19,6 +19,9 @@
@@ -789,7 +789,7 @@ index d336af9cb..ddb33d8ed 100644
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
struct sched_entity *curr = cfs_rq->curr;
unsigned long load;
@@ -5136,12 +5384,18 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
@@ -5136,12 +5384,22 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
*/
load = cfs_rq->avg_load;
if (curr && curr->on_rq)
@@ -798,7 +798,11 @@ index d336af9cb..ddb33d8ed 100644

- lag *= load + scale_load_down(se->load.weight);
+ lag *= load + entity_weight(se);
+#if !defined(CONFIG_SCHED_BORE)
if (WARN_ON_ONCE(!load))
+#else // CONFIG_SCHED_BORE
+ if (unlikely(!load))
+#endif // CONFIG_SCHED_BORE
load = 1;
- lag = div_s64(lag, load);
+ lag = div64_s64(lag, load);
@@ -811,7 +815,7 @@ index d336af9cb..ddb33d8ed 100644
}

se->vruntime = vruntime - lag;
@@ -6698,6 +6952,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
@@ -6698,6 +6956,14 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
bool was_sched_idle = sched_idle_rq(rq);

util_est_dequeue(&rq->cfs, p);
@@ -826,7 +830,7 @@ index d336af9cb..ddb33d8ed 100644

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -8429,16 +8691,25 @@ static void yield_task_fair(struct rq *rq)
@@ -8429,16 +8695,25 @@ static void yield_task_fair(struct rq *rq)
/*
* Are we the only task in the tree?
*/
@@ -852,7 +856,7 @@ index d336af9cb..ddb33d8ed 100644
/*
* Tell update_rq_clock() that we've just updated,
* so we don't do microscopic update in schedule()
@@ -12515,6 +12786,9 @@ static void task_fork_fair(struct task_struct *p)
@@ -12515,6 +12790,9 @@ static void task_fork_fair(struct task_struct *p)
curr = cfs_rq->curr;
if (curr)
update_curr(cfs_rq);
