As a hedge against unexpected user issues, commit 88c56cfeaec4
("sched/fair: Block nohz tick_stop when cfs bandwidth in use")
included a scheduler feature to disable the new functionality.
It has been a few releases since this was merged (v6.6) with no
screams, so remove the feature.
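For reference, on kernels that still carry the feature, the guard can
be disabled at runtime through the scheduler features debugfs
interface; the path below assumes debugfs is mounted at the
conventional /sys/kernel/debug:

  # disable the HZ_BW guard (pre-removal kernels only)
  echo NO_HZ_BW > /sys/kernel/debug/sched/features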
Signed-off-by: Phil Auld <[email protected]>
Cc: Peter Zijlstra (Intel) <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Vincent Guittot <[email protected]>
Cc: Juri Lelli <[email protected]>
Cc: Ben Segall <[email protected]>
Cc: Valentin Schneider <[email protected]>
---
kernel/sched/core.c | 2 +-
kernel/sched/fair.c | 2 +-
kernel/sched/features.h | 2 --
3 files changed, 2 insertions(+), 4 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 7019a40457a6..d7afcee8e2ae 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1269,7 +1269,7 @@ bool sched_can_stop_tick(struct rq *rq)
* dequeued by migrating while the constrained task continues to run.
* E.g. going from 2->1 without going through pick_next_task().
*/
- if (sched_feat(HZ_BW) && __need_bw_check(rq, rq->curr)) {
+ if (__need_bw_check(rq, rq->curr)) {
if (cfs_task_bw_constrained(rq->curr))
return false;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index c62805dbd608..06e026f10cb4 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6549,7 +6549,7 @@ static void sched_fair_update_stop_tick(struct rq *rq, struct task_struct *p)
{
	int cpu = cpu_of(rq);

-	if (!sched_feat(HZ_BW) || !cfs_bandwidth_used())
+	if (!cfs_bandwidth_used())
		return;

	if (!tick_nohz_full_cpu(cpu))
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 143f55df890b..929021fd6bc4 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -85,5 +85,3 @@ SCHED_FEAT(WA_BIAS, true)
SCHED_FEAT(UTIL_EST, true)

SCHED_FEAT(LATENCY_WARN, false)
-
-SCHED_FEAT(HZ_BW, true)
--
2.39.3