From: Luca Abeni
To: linux-kernel@vger.kernel.org
Cc: Peter Zijlstra, Ingo Molnar, Juri Lelli, Claudio Scordino,
	Steven Rostedt, Tommaso Cucinotta, Daniel Bristot de Oliveira,
	Luca Abeni
Subject: [RFC v4 1/6] sched/deadline: track the active utilization
Date: Fri, 30 Dec 2016 12:33:06 +0100
Message-Id: <1483097591-3871-2-git-send-email-lucabe72@gmail.com>
X-Mailer: git-send-email 2.7.4
In-Reply-To: <1483097591-3871-1-git-send-email-lucabe72@gmail.com>
References: <1483097591-3871-1-git-send-email-lucabe72@gmail.com>

From: Luca Abeni

The active utilization is defined as the total utilization of the
active (TASK_RUNNING) tasks queued on a runqueue. Hence, it is
increased when a task wakes up and decreased when a task blocks.

When a task is migrated from CPUi to CPUj, its utilization is
immediately subtracted from CPUi's active utilization and added to
CPUj's. This mechanism is implemented by modifying the pull and push
functions.
Note: this is not fully correct from the theoretical point of view
(the utilization should be removed from CPUi only at the 0-lag time);
a more theoretically sound solution will follow.

Signed-off-by: Juri Lelli
Signed-off-by: Luca Abeni
---
 kernel/sched/deadline.c | 66 ++++++++++++++++++++++++++++++++++++++++++++++---
 kernel/sched/sched.h    |  6 +++++
 2 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 70ef2b1..23c840e 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -43,6 +43,28 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se)
 	return !RB_EMPTY_NODE(&dl_se->rb_node);
 }
 
+static inline
+void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->running_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->running_bw += dl_se->dl_bw;
+	SCHED_WARN_ON(dl_rq->running_bw < old); /* overflow */
+}
+
+static inline
+void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq)
+{
+	u64 old = dl_rq->running_bw;
+
+	lockdep_assert_held(&(rq_of_dl_rq(dl_rq))->lock);
+	dl_rq->running_bw -= dl_se->dl_bw;
+	SCHED_WARN_ON(dl_rq->running_bw > old); /* underflow */
+	if (dl_rq->running_bw > old)
+		dl_rq->running_bw = 0;
+}
+
 static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq)
 {
 	struct sched_dl_entity *dl_se = &p->dl;
@@ -909,8 +931,12 @@ enqueue_dl_entity(struct sched_dl_entity *dl_se,
 	 * parameters of the task might need updating. Otherwise,
 	 * we want a replenishment of its runtime.
 	 */
-	if (flags & ENQUEUE_WAKEUP)
+	if (flags & ENQUEUE_WAKEUP) {
+		struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
+
+		add_running_bw(dl_se, dl_rq);
 		update_dl_entity(dl_se, pi_se);
+	}
 	else if (flags & ENQUEUE_REPLENISH)
 		replenish_dl_entity(dl_se, pi_se);
 
@@ -947,14 +973,25 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 		return;
 	}
 
+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		add_running_bw(&p->dl, &rq->dl);
+
 	/*
-	 * If p is throttled, we do nothing. In fact, if it exhausted
+	 * If p is throttled, we do not enqueue it. In fact, if it exhausted
 	 * its budget it needs a replenishment and, since it now is on
 	 * its rq, the bandwidth timer callback (which clearly has not
 	 * run yet) will take care of this.
+	 * However, the active utilization does not depend on the fact
+	 * that the task is on the runqueue or not (but depends on the
+	 * task's state - in GRUB parlance, "inactive" vs "active contending").
+	 * In other words, even if a task is throttled its utilization must
+	 * be counted in the active utilization; hence, we need to call
+	 * add_running_bw().
 	 */
-	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH))
+	if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) {
+		add_running_bw(&p->dl, &rq->dl);
 		return;
+	}
 
 	enqueue_dl_entity(&p->dl, pi_se, flags);
 
@@ -972,6 +1009,21 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags)
 {
 	update_curr_dl(rq);
 	__dequeue_task_dl(rq, p, flags);
+
+	if (p->on_rq == TASK_ON_RQ_MIGRATING)
+		sub_running_bw(&p->dl, &rq->dl);
+
+	/*
+	 * This check allows to start the inactive timer (or to immediately
+	 * decrease the active utilization, if needed) in two cases:
+	 * when the task blocks and when it is terminating
+	 * (p->state == TASK_DEAD). We can handle the two cases in the same
+	 * way, because from GRUB's point of view the same thing is happening
+	 * (the task moves from "active contending" to "active non contending"
+	 * or "inactive")
+	 */
+	if (flags & DEQUEUE_SLEEP)
+		sub_running_bw(&p->dl, &rq->dl);
 }
 
 /*
@@ -1501,7 +1553,9 @@ static int push_dl_task(struct rq *rq)
 	}
 
 	deactivate_task(rq, next_task, 0);
+	sub_running_bw(&next_task->dl, &rq->dl);
 	set_task_cpu(next_task, later_rq->cpu);
+	add_running_bw(&next_task->dl, &later_rq->dl);
 	activate_task(later_rq, next_task, 0);
 	ret = 1;
 
@@ -1589,7 +1643,9 @@ static void pull_dl_task(struct rq *this_rq)
 			resched = true;
 
 			deactivate_task(src_rq, p, 0);
+			sub_running_bw(&p->dl, &src_rq->dl);
 			set_task_cpu(p, this_cpu);
+			add_running_bw(&p->dl, &this_rq->dl);
 			activate_task(this_rq, p, 0);
 			dmin = p->dl.deadline;
 
@@ -1695,6 +1751,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p)
 	if (!start_dl_timer(p))
 		__dl_clear_params(p);
 
+	if (task_on_rq_queued(p))
+		sub_running_bw(&p->dl, &rq->dl);
+
 	/*
 	 * Since this might be the only -deadline task on the rq,
 	 * this is the right place to try to pull some other one
@@ -1712,6 +1771,7 @@
  */
 static void switched_to_dl(struct rq *rq, struct task_struct *p)
 {
+	add_running_bw(&p->dl, &rq->dl);
 
 	/* If p is not queued we will update its parameters at next wakeup. */
 	if (!task_on_rq_queued(p))
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 7b34c78..0659772 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -536,6 +536,12 @@ struct dl_rq {
 #else
 	struct dl_bw dl_bw;
 #endif
+	/*
+	 * "Active utilization" for this runqueue: increased when a
+	 * task wakes up (becomes TASK_RUNNING) and decreased when a
+	 * task blocks
+	 */
+	u64 running_bw;
 };
 
 #ifdef CONFIG_SMP
-- 
2.7.4
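
As a reading aid, here is a small stand-alone userspace sketch (not part
of the patch, and not kernel code) of the bookkeeping the patch
introduces: a per-runqueue running_bw sum that grows when a task becomes
active, shrinks when it blocks, and is moved between runqueues on
push/pull migration. The toy_dl_task/toy_dl_rq types, the migrate()
helper and the fixed-point BW_SHIFT scale below are assumptions made for
this sketch only; the real code operates on dl_se->dl_bw and
dl_rq->running_bw under the runqueue lock.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define BW_SHIFT	20			/* fractional bits (illustrative choice) */
#define BW_UNIT		(1ULL << BW_SHIFT)	/* utilization 1.0 */

struct toy_dl_task {
	uint64_t dl_bw;		/* runtime/period in fixed point */
};

struct toy_dl_rq {
	uint64_t running_bw;	/* "active utilization" of this runqueue */
};

static void add_running_bw(struct toy_dl_task *t, struct toy_dl_rq *rq)
{
	uint64_t old = rq->running_bw;

	rq->running_bw += t->dl_bw;
	assert(rq->running_bw >= old);	/* overflow check, in the spirit of SCHED_WARN_ON() */
}

static void sub_running_bw(struct toy_dl_task *t, struct toy_dl_rq *rq)
{
	uint64_t old = rq->running_bw;

	rq->running_bw -= t->dl_bw;
	if (rq->running_bw > old)	/* underflow: clamp to 0, as the patch does */
		rq->running_bw = 0;
}

/* Models what push_dl_task()/pull_dl_task() do around set_task_cpu(). */
static void migrate(struct toy_dl_task *t, struct toy_dl_rq *src,
		    struct toy_dl_rq *dst)
{
	sub_running_bw(t, src);
	add_running_bw(t, dst);
}

int main(void)
{
	struct toy_dl_rq cpu0 = { 0 }, cpu1 = { 0 };
	/* a task with runtime/period = 1/4, i.e. 25% utilization */
	struct toy_dl_task t = { .dl_bw = BW_UNIT / 4 };

	add_running_bw(&t, &cpu0);	/* task wakes up on CPU0 */
	migrate(&t, &cpu0, &cpu1);	/* pushed/pulled to CPU1 */
	sub_running_bw(&t, &cpu1);	/* task blocks on CPU1 */

	printf("cpu0: %llu/%llu  cpu1: %llu/%llu\n",
	       (unsigned long long)cpu0.running_bw, (unsigned long long)BW_UNIT,
	       (unsigned long long)cpu1.running_bw, (unsigned long long)BW_UNIT);
	return 0;
}

Built with any C compiler, the example ends with both running_bw
counters back at zero, which is the invariant the SCHED_WARN_ON()
checks in the patch are guarding.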