From 71d0bff06887caa7474556f9d2ca72b5ef9decde Mon Sep 17 00:00:00 2001
From: Hamad Al Marri
Date: Mon, 13 Dec 2021 16:05:09 +0300
Subject: [PATCH] 0.3.5

---
 patches/5.15/tt-5.15.patch | 54 ++++++++++++++++++++++----------------
 1 file changed, 32 insertions(+), 22 deletions(-)

diff --git a/patches/5.15/tt-5.15.patch b/patches/5.15/tt-5.15.patch
index d2af020..8d183a2 100644
--- a/patches/5.15/tt-5.15.patch
+++ b/patches/5.15/tt-5.15.patch
@@ -140,10 +140,10 @@ index 978fcfca5871..bfde8e0d851b 100644
  obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o pelt.o
 diff --git a/kernel/sched/bs.c b/kernel/sched/bs.c
 new file mode 100644
-index 000000000000..e72b0357a83c
+index 000000000000..ad79be3d4c45
 --- /dev/null
 +++ b/kernel/sched/bs.c
-@@ -0,0 +1,1875 @@
+@@ -0,0 +1,1876 @@
 +// SPDX-License-Identifier: GPL-2.0
 +/*
 + * TT Scheduler Class (SCHED_NORMAL/SCHED_BATCH)
@@ -1576,6 +1576,9 @@ index 000000000000..e72b0357a83c
 +	if (task_running(grq, p))
 +		return 0;
 +
++	if (task_hot(p, dst_rq, grq))
++		return 0;
++
 +	return 1;
 +}
 +
@@ -1843,8 +1846,6 @@ index 000000000000..e72b0357a83c
 +			idle_pull_global_candidate(this_rq);
 +		else
 +			active_pull_global_candidate(this_rq, 1);
-+	} else if (IS_GRQ_BL_ENABLED) {
-+		push_to_grq(this_rq);
 +	}
 +
 +	if (this_cpu != 0)
@@ -2336,10 +2337,10 @@ index 000000000000..b3d99cf13576
 +#endif
 diff --git a/kernel/sched/bs_nohz.h b/kernel/sched/bs_nohz.h
 new file mode 100644
-index 000000000000..78b1062b121c
+index 000000000000..114285b63e9d
 --- /dev/null
 +++ b/kernel/sched/bs_nohz.h
-@@ -0,0 +1,882 @@
+@@ -0,0 +1,891 @@
 +
 +#ifdef CONFIG_NO_HZ_COMMON
 +
@@ -3017,7 +3018,7 @@ index 000000000000..78b1062b121c
 +static inline void nohz_newidle_balance(struct rq *this_rq) { }
 +#endif /* CONFIG_NO_HZ_COMMON */
 +
-+static int task_can_move_to_grq(struct task_struct *p)
++static int task_can_move_to_grq(struct task_struct *p, struct rq *src_rq)
 +{
 +	if (task_running(task_rq(p), p))
 +		return 0;
@@ -3031,6 +3032,9 @@ index 000000000000..78b1062b121c
 +	if (p->nr_cpus_allowed <= 1)
 +		return 0;
 +
++	if (task_hot(p, grq, src_rq))
++		return 0;
++
 +	return 1;
 +}
 +
@@ -3048,10 +3052,6 @@ index 000000000000..78b1062b121c
 +	if (!cfs_rq->head)
 +		return;
 +
-+	/* no need to push a single task and take it again */
-+	if (cfs_rq->h_nr_running == 1 && !grq->cfs.head)
-+		return;
-+
 +	rq_lock_irqsave(rq, &rf);
 +	update_rq_clock(rq);
 +
@@ -3063,7 +3063,7 @@ index 000000000000..78b1062b121c
 +		se = se_of(ttn);
 +		p = task_of(se);
 +
-+		if (!task_can_move_to_grq(p))
++		if (!task_can_move_to_grq(p, rq))
 +			goto next;
 +
 +		// deactivate
@@ -3167,34 +3167,44 @@ index 000000000000..78b1062b121c
 +{
 +	int cpu;
 +	struct rq *rq;
++	struct cpumask idle_mask;
 +	struct cpumask non_idle_mask;
-+	bool balance;
++	bool balance_time;
 +	int pulled = 0;
 +
 +	cpumask_clear(&non_idle_mask);
 +
++	/* first, push to grq*/
 +	for_each_online_cpu(cpu) {
 +		if (cpu == 0) continue;
-+		rq = cpu_rq(cpu);
-+		pulled = 0;
-+
-+		if (idle_cpu(cpu))
-+			pulled = pull_from_grq(rq);
-+		else
++		if (!idle_cpu(cpu)) {
++			push_to_grq(cpu_rq(cpu));
 +			cpumask_set_cpu(cpu, &non_idle_mask);
++		} else {
++			cpumask_set_cpu(cpu, &idle_mask);
++		}
++	}
++
++	/* second, idle cpus pull first */
++	for_each_cpu(cpu, &idle_mask) {
++		if (cpu == 0 || !idle_cpu(cpu))
++			continue;
 +
++		rq = cpu_rq(cpu);
++		pulled = pull_from_grq(rq);
 +		update_grq_next_balance(rq, pulled);
 +	}
 +
++	/* last, non idle pull */
 +	for_each_cpu(cpu, &non_idle_mask) {
 +		rq = cpu_rq(cpu);
-+		balance = time_after_eq(jiffies, rq->grq_next_balance);
++		balance_time = time_after_eq(jiffies, rq->grq_next_balance);
 +		pulled = 0;
 +
 +		/* mybe it is idle now */
 +		if (idle_cpu(cpu))
-+			pulled = pull_from_grq(rq);
-+		else if (tt_grq_balance_ms == 0 || balance)
++			pulled = pull_from_grq(cpu_rq(cpu));
++		else if (tt_grq_balance_ms == 0 || balance_time)
 +			/* if not idle, try pull every grq_next_balance */
 +			pulled = try_pull_from_grq(rq);
 +
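
For reference, the balancing order this revision introduces in the GRQ path (busy CPUs push to the global runqueue first, idle CPUs pull next, still-busy CPUs pull last on their grq_next_balance interval) can be illustrated with the plain userspace C sketch below. This is only a minimal illustration under assumed semantics, not the TT scheduler code: the rq struct, idle_cpu(), push_to_grq(), pull_from_grq(), update_grq_next_balance(), and the function name nohz_balance_with_grq() are simplified stand-ins for the kernel helpers the patch touches.

/*
 * Minimal userspace sketch (NOT kernel code) of the balancing order above:
 * busy CPUs push to the global runqueue (grq) first, idle CPUs pull next,
 * and still-busy CPUs pull last, rate limited by a per-rq deadline.
 * Every type and helper here is a simplified stand-in, not the TT original.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct rq {
	int nr_running;
	unsigned long grq_next_balance;	/* jiffies-style deadline */
};

static struct rq runqueues[NR_CPUS];
static int grq_len;			/* tasks parked on the global runqueue */
static unsigned long jiffies;		/* fake clock */
static unsigned long tt_grq_balance_ms = 4;

static bool idle_cpu(int cpu) { return runqueues[cpu].nr_running == 0; }

static void push_to_grq(struct rq *rq)
{
	/* a busy CPU hands its surplus tasks to the global runqueue */
	while (rq->nr_running > 1) {
		rq->nr_running--;
		grq_len++;
	}
}

static int pull_from_grq(struct rq *rq)
{
	if (!grq_len)
		return 0;
	grq_len--;
	rq->nr_running++;
	return 1;
}

static void update_grq_next_balance(struct rq *rq, int pulled)
{
	/* back off when nothing was pulled, mimicking the rate limiting */
	if (!pulled)
		rq->grq_next_balance = jiffies + tt_grq_balance_ms;
}

static void nohz_balance_with_grq(void)
{
	bool idle[NR_CPUS] = { false }, non_idle[NR_CPUS] = { false };
	int cpu, pulled;

	/* first, busy CPUs push to the grq (CPU 0 owns the grq and is skipped) */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		if (!idle_cpu(cpu)) {
			push_to_grq(&runqueues[cpu]);
			non_idle[cpu] = true;
		} else {
			idle[cpu] = true;
		}
	}

	/* second, idle CPUs pull first */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		if (!idle[cpu] || !idle_cpu(cpu))
			continue;
		pulled = pull_from_grq(&runqueues[cpu]);
		update_grq_next_balance(&runqueues[cpu], pulled);
	}

	/* last, still-busy CPUs pull, rate limited by grq_next_balance */
	for (cpu = 1; cpu < NR_CPUS; cpu++) {
		if (!non_idle[cpu])
			continue;
		pulled = 0;
		if (idle_cpu(cpu))	/* it may have gone idle meanwhile */
			pulled = pull_from_grq(&runqueues[cpu]);
		else if (tt_grq_balance_ms == 0 ||
			 jiffies >= runqueues[cpu].grq_next_balance)
			pulled = pull_from_grq(&runqueues[cpu]);
		update_grq_next_balance(&runqueues[cpu], pulled);
	}
}

int main(void)
{
	runqueues[1].nr_running = 3;	/* one overloaded CPU, the rest idle */
	nohz_balance_with_grq();
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d nr_running=%d\n", cpu, runqueues[cpu].nr_running);
	printf("grq_len=%d\n", grq_len);
	return 0;
}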