At wakeup, the vruntime of a task is updated so that it is not older than
a sched_latency period behind min_vruntime. This prevents a long sleeping
task from getting unlimited credit at wakeup.
Such a waking task should preempt the current one to use its share of
CPU bandwidth, but wakeup_gran() can be larger than sched_latency,
filtering out the wakeup preemption and, as a result, stealing some CPU
bandwidth from the waking task.
Make sure that a task whose vruntime has been capped will preempt the
current task and use its CPU bandwidth even if wakeup_gran() is in the
same range as sched_latency.
If the waking task failed to preempt current, it could have to wait up to
sysctl_sched_min_granularity before preempting it during the next tick.
Strictly speaking, we should use cfs->min_vruntime instead of
curr->vruntime, but it is not worth the additional overhead and
complexity, as the vruntime of current should be close to min_vruntime,
if not equal.
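
For illustration, here is a minimal userspace sketch (not the kernel
code) of the check this patch changes. The constants are the stock
unscaled tunable defaults, GENTLE_FAIR_SLEEPERS halving is left out, and
the weight handling of calc_delta_fair() is reduced to a single division,
so treat it as an assumption-laden model rather than the real
implementation:

#include <stdio.h>

#define NSEC_PER_MSEC	1000000LL
#define NICE_0_LOAD	1024LL

static const long long sched_latency     = 6 * NSEC_PER_MSEC;     /* 6ms */
static const long long sched_min_gran    = 3 * NSEC_PER_MSEC / 4; /* 0.75ms */
static const long long sched_wakeup_gran = 1 * NSEC_PER_MSEC;     /* 1ms */

/* wakeup_gran() grows as the waking task's weight shrinks */
static long long wakeup_gran(long long weight)
{
	return sched_wakeup_gran * NICE_0_LOAD / weight;
}

int main(void)
{
	/*
	 * A long sleeping task is placed sched_latency behind min_vruntime,
	 * so with curr->vruntime == min_vruntime its vdiff is sched_latency.
	 */
	long long vdiff = sched_latency;
	long long gran = wakeup_gran(15);	/* nice +19 -> weight 15 */

	/* before: wakeup_gran() of a low weight task exceeds vdiff */
	printf("gran=%lldns -> preempt=%d\n", gran, vdiff > gran);

	/* after: gran is clamped below the credit a capped sleeper gets */
	if (gran > sched_latency - sched_min_gran)
		gran = sched_latency - sched_min_gran;
	printf("gran=%lldns -> preempt=%d\n", gran, vdiff > gran);

	return 0;
}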
Signed-off-by: Vincent Guittot <[email protected]>
---
kernel/sched/fair.c | 46 ++++++++++++++++++++------------------------
kernel/sched/sched.h | 30 ++++++++++++++++++++++++++++-
2 files changed, 50 insertions(+), 26 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 5ffec4370602..eb04c83112a0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4345,33 +4345,17 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
{
u64 vruntime = cfs_rq->min_vruntime;
- /*
- * The 'current' period is already promised to the current tasks,
- * however the extra weight of the new task will slow them down a
- * little, place the new task so that it fits in the slot that
- * stays open at the end.
- */
- if (initial && sched_feat(START_DEBIT))
- vruntime += sched_vslice(cfs_rq, se);
-
- /* sleeps up to a single latency don't count. */
- if (!initial) {
- unsigned long thresh;
-
- if (se_is_idle(se))
- thresh = sysctl_sched_min_granularity;
- else
- thresh = sysctl_sched_latency;
-
+ if (!initial)
+ /* sleeps up to a single latency don't count. */
+ vruntime -= get_sched_latency(se_is_idle(se));
+ else if (sched_feat(START_DEBIT))
/*
- * Halve their sleep time's effect, to allow
- * for a gentler effect of sleepers:
+ * The 'current' period is already promised to the current tasks,
+ * however the extra weight of the new task will slow them down a
+ * little, place the new task so that it fits in the slot that
+ * stays open at the end.
*/
- if (sched_feat(GENTLE_FAIR_SLEEPERS))
- thresh >>= 1;
-
- vruntime -= thresh;
- }
+ vruntime += sched_vslice(cfs_rq, se);
/* ensure we never gain time by being placed backwards. */
se->vruntime = max_vruntime(se->vruntime, vruntime);
@@ -7187,6 +7171,18 @@ wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
return -1;
gran = wakeup_gran(se);
+
+ /*
+  * At wakeup, the vruntime of a task is capped so that it is not older
+  * than a sched_latency period behind min_vruntime. This prevents a long
+  * sleeping task from getting unlimited credit at wakeup. Such a waking
+  * task has to preempt current in order not to lose its share of CPU
+  * bandwidth, but wakeup_gran() can become higher than the scheduling
+  * period for a low priority task. Make sure that a long sleeping task
+  * will get a chance to preempt current.
+  */
+ gran = min_t(s64, gran, get_latency_max());
+
if (vdiff > gran)
return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1fc198be1ffd..14879d429919 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2432,9 +2432,9 @@ extern void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags);
extern const_debug unsigned int sysctl_sched_nr_migrate;
extern const_debug unsigned int sysctl_sched_migration_cost;
-#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_latency;
extern unsigned int sysctl_sched_min_granularity;
+#ifdef CONFIG_SCHED_DEBUG
extern unsigned int sysctl_sched_idle_min_granularity;
extern unsigned int sysctl_sched_wakeup_granularity;
extern int sysctl_resched_latency_warn_ms;
@@ -2448,6 +2448,34 @@ extern unsigned int sysctl_numa_balancing_scan_period_max;
extern unsigned int sysctl_numa_balancing_scan_size;
#endif
+static inline unsigned long get_sched_latency(bool idle)
+{
+ unsigned long thresh;
+
+ if (idle)
+ thresh = sysctl_sched_min_granularity;
+ else
+ thresh = sysctl_sched_latency;
+
+ /*
+ * Halve their sleep time's effect, to allow
+ * for a gentler effect of sleepers:
+ */
+ if (sched_feat(GENTLE_FAIR_SLEEPERS))
+ thresh >>= 1;
+
+ return thresh;
+}
+
+static inline unsigned long get_latency_max(void)
+{
+ unsigned long thresh = get_sched_latency(false);
+
+ thresh -= sysctl_sched_min_granularity;
+
+ return thresh;
+}
+
#ifdef CONFIG_SCHED_HRTICK
/*
--
2.17.1
On 2022-10-28 at 11:33:55 +0200, Vincent Guittot wrote:
[snip]
> +static inline unsigned long get_latency_max(void)
> +{
> + unsigned long thresh = get_sched_latency(false);
> +
> + thresh -= sysctl_sched_min_granularity;
> +
May I know why we subtract sysctl_sched_min_granularity above?
I thought thresh -= 1 would be enough to let the waking task preempt
the current one, because:
When a long sleeping task is enqueued on this rq, its vruntime is set
to cfs_rq->min_vruntime - latency, so
diff = curr->vruntime - cfs_rq->min_vruntime + latency,
and since (curr->vruntime - cfs_rq->min_vruntime) >= 0, if we set thresh
to (latency - 1), the diff is guaranteed to be bigger than thresh and
the waking task can preempt the current task.
thanks,
Chenyu
> + return thresh;
> +}
On Sat, 5 Nov 2022 at 15:33, Chen Yu <[email protected]> wrote:
>
> On 2022-10-28 at 11:33:55 +0200, Vincent Guittot wrote:
> [snip]
> > +static inline unsigned long get_latency_max(void)
> > +{
> > + unsigned long thresh = get_sched_latency(false);
> > +
> > + thresh -= sysctl_sched_min_granularity;
> > +
> May I know why we subtract sysctl_sched_min_granularity above?
> I thought thresh -= 1 would be enough to let the waking task preempt
> the current one, because:
> When a long sleeping task is enqueued on this rq, its vruntime is set
> to cfs_rq->min_vruntime - latency, so
> diff = curr->vruntime - cfs_rq->min_vruntime + latency,
> and since (curr->vruntime - cfs_rq->min_vruntime) >= 0, if we set thresh
> to (latency - 1), the diff is guaranteed to be bigger than thresh and
> the waking task can preempt the current task.
If the waking task failed to preempt current, it could have to wait up to
sysctl_sched_min_granularity before preempting it during the next tick.
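
To make that concrete, a rough worked example, assuming the stock
unscaled defaults (sysctl_sched_latency = 6ms,
sysctl_sched_min_granularity = 0.75ms; the actual values are scaled with
the number of CPUs) and GENTLE_FAIR_SLEEPERS enabled:

  get_sched_latency(false) = 6ms / 2      = 3ms
  get_latency_max()        = 3ms - 0.75ms = 2.25ms

A task that slept a bit less than a full period can wake with, say,
vdiff = 2.5ms. With thresh = 3ms - 1 it would fail the vdiff > gran test
and could then sit behind current for up to the 0.75ms tick granularity,
whereas with thresh = 2.25ms it preempts immediately.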
Vincent
>
> thanks,
> Chenyu
> > + return thresh;
> > +}