From: Vineeth Pillai <[email protected]>

If there is only one long-running local task and the sibling is
forced idle, the sibling might not get a chance to run until a
schedule event happens on one of the cpus in the core.

So we check for this condition during the tick: if a sibling is
starved, we give it a chance to schedule.

Tested-by: Julien Desfossez <[email protected]>
Reviewed-by: Joel Fernandes (Google) <[email protected]>
Signed-off-by: Vineeth Pillai <[email protected]>
Signed-off-by: Julien Desfossez <[email protected]>
Signed-off-by: Joel Fernandes (Google) <[email protected]>
---
kernel/sched/core.c | 15 ++++++++-------
kernel/sched/fair.c | 40 ++++++++++++++++++++++++++++++++++++++++
kernel/sched/sched.h | 2 +-
 3 files changed, 49 insertions(+), 8 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 1bd0b0bbb040..52d0e83072a4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5206,16 +5206,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)

/* reset state */
rq->core->core_cookie = 0UL;
+ if (rq->core->core_forceidle) {
+ need_sync = true;
+ rq->core->core_forceidle = false;
+ }
for_each_cpu(i, smt_mask) {
struct rq *rq_i = cpu_rq(i);

rq_i->core_pick = NULL;

- if (rq_i->core_forceidle) {
- need_sync = true;
- rq_i->core_forceidle = false;
- }
-
if (i != cpu)
update_rq_clock(rq_i);
}
@@ -5335,8 +5334,10 @@ next_class:;
if (!rq_i->core_pick)
continue;

- if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
- rq_i->core_forceidle = true;
+ if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
+ !rq_i->core->core_forceidle) {
+ rq_i->core->core_forceidle = true;
+ }

if (i == cpu) {
rq_i->core_pick = NULL;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f53681cd263e..42965c4fd71f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10692,6 +10692,44 @@ static void rq_offline_fair(struct rq *rq)

#endif /* CONFIG_SMP */

+#ifdef CONFIG_SCHED_CORE
+static inline bool
+__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
+{
+ u64 slice = sched_slice(cfs_rq_of(se), se);
+ u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;
+
+ return (rtime * min_nr_tasks > slice);
+}
+
+#define MIN_NR_TASKS_DURING_FORCEIDLE 2
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr)
+{
+ if (!sched_core_enabled(rq))
+ return;
+
+ /*
+ * If the runqueue has only one task, that task has used up its
+ * slice, and the sibling is forced idle, then trigger a schedule
+ * to give the forced-idle task a chance to run.
+ *
+ * sched_slice() considers only this active rq, so a lone task gets
+ * the whole slice. But during forced idle, the siblings act like a
+ * single runqueue, so we need to consider the runnable tasks on both
+ * this cpu and the forced-idle cpu. Ideally we would walk the forced
+ * idle rq, but that would be a perf hit. Instead, we can assume the
+ * forced-idle cpu has at least MIN_NR_TASKS_DURING_FORCEIDLE - 1
+ * tasks and use that to check whether we need to give up the cpu.
+ */
+ if (rq->core->core_forceidle && rq->cfs.nr_running == 1 &&
+ __entity_slice_used(&curr->se, MIN_NR_TASKS_DURING_FORCEIDLE))
+ resched_curr(rq);
+}
+#else
+static inline void task_tick_core(struct rq *rq, struct task_struct *curr) {}
+#endif
+
/*
* scheduler tick hitting a task of our scheduling class.
*
@@ -10715,6 +10753,8 @@ static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued)
update_misfit_status(curr, rq);
update_overutilized_status(task_rq(curr));
+
+ task_tick_core(rq, curr);
}

/*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 63b28e1843ee..be656ca8693d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1069,12 +1069,12 @@ struct rq {
unsigned int core_enabled;
unsigned int core_sched_seq;
struct rb_root core_tree;
- unsigned char core_forceidle;

/* shared state */
unsigned int core_task_seq;
unsigned int core_pick_seq;
unsigned long core_cookie;
+ unsigned char core_forceidle;
#endif
};
--
2.29.2.299.gdc1121823c-goog
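
A standalone illustration of the slice check added above (plain
userspace C with invented numbers, not kernel code): with
min_nr_tasks == 2, the resched fires once the current task has run
for more than half of its nominal slice.

#include <stdio.h>
#include <stdint.h>

/* Same comparison as __entity_slice_used() in the patch. */
static int entity_slice_used(uint64_t slice_ns, uint64_t rtime_ns,
			     int min_nr_tasks)
{
	return rtime_ns * min_nr_tasks > slice_ns;
}

int main(void)
{
	uint64_t slice = 4000000ULL;	/* 4ms nominal slice (example value) */

	/* 1.9ms of runtime: under half the slice -> 0 (no resched yet) */
	printf("%d\n", entity_slice_used(slice, 1900000ULL, 2));
	/* 2.1ms of runtime: over half the slice -> 1 (resched the cpu) */
	printf("%d\n", entity_slice_used(slice, 2100000ULL, 2));
	return 0;
}
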
On Tue, Nov 17, 2020 at 06:19:38PM -0500, Joel Fernandes (Google) wrote:
> From: Vineeth Pillai <[email protected]>
>
> If there is only one long running local task and the sibling is
> forced idle, it might not get a chance to run until a schedule
> event happens on any cpu in the core.
>
> So we check for this condition during a tick to see if a sibling
> is starved and then give it a chance to schedule.
>
> Tested-by: Julien Desfossez <[email protected]>
> Reviewed-by: Joel Fernandes (Google) <[email protected]>
> Signed-off-by: Vineeth Pillai <[email protected]>
> Signed-off-by: Julien Desfossez <[email protected]>
> Signed-off-by: Joel Fernandes (Google) <[email protected]>
> ---
> kernel/sched/core.c | 15 ++++++++-------
> kernel/sched/fair.c | 40 ++++++++++++++++++++++++++++++++++++++++
> kernel/sched/sched.h | 2 +-
> 3 files changed, 49 insertions(+), 8 deletions(-)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 1bd0b0bbb040..52d0e83072a4 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -5206,16 +5206,15 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
>
> /* reset state */
> rq->core->core_cookie = 0UL;
> + if (rq->core->core_forceidle) {
> + need_sync = true;
> + rq->core->core_forceidle = false;
> + }
> for_each_cpu(i, smt_mask) {
> struct rq *rq_i = cpu_rq(i);
>
> rq_i->core_pick = NULL;
>
> - if (rq_i->core_forceidle) {
> - need_sync = true;
> - rq_i->core_forceidle = false;
> - }
> -
> if (i != cpu)
> update_rq_clock(rq_i);
> }
> @@ -5335,8 +5334,10 @@ next_class:;
> if (!rq_i->core_pick)
> continue;
>
> - if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running)
> - rq_i->core_forceidle = true;
> + if (is_task_rq_idle(rq_i->core_pick) && rq_i->nr_running &&
> + !rq_i->core->core_forceidle) {
> + rq_i->core->core_forceidle = true;
> + }
>
> if (i == cpu) {
> rq_i->core_pick = NULL;
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index f53681cd263e..42965c4fd71f 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -10692,6 +10692,44 @@ static void rq_offline_fair(struct rq *rq)
>
> #endif /* CONFIG_SMP */
>
> +#ifdef CONFIG_SCHED_CORE
> +static inline bool
> +__entity_slice_used(struct sched_entity *se, int min_nr_tasks)
> +{
> + u64 slice = sched_slice(cfs_rq_of(se), se);

I wonder if the definition of sched_slice() should be revisited for
core scheduling? Should we use
sched_slice = sched_slice / cpumask_weight(smt_mask)?
Would that resolve the issue you're seeing? Effectively, we need to
answer whether two sched core siblings should be treated as executing
one large slice.

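As a rough sketch of that idea (hypothetical and untested, not part
of the patch), the check could become something like:

/*
 * Scale the slice by the number of siblings in the core, so the
 * core is treated as executing one large slice shared by them.
 */
static inline bool
__entity_slice_used(struct sched_entity *se, const struct cpumask *smt_mask)
{
	u64 slice = sched_slice(cfs_rq_of(se), se) / cpumask_weight(smt_mask);
	u64 rtime = se->sum_exec_runtime - se->prev_sum_exec_runtime;

	return rtime > slice;
}
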
Balbir Singh.