Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S1759310AbcDAPV2 (ORCPT ); Fri, 1 Apr 2016 11:21:28 -0400 Received: from mail-wm0-f48.google.com ([74.125.82.48]:33235 "EHLO mail-wm0-f48.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S1752517AbcDAPSX (ORCPT ); Fri, 1 Apr 2016 11:18:23 -0400 From: Luca Abeni To: linux-kernel@vger.kernel.org Cc: Peter Zijlstra , Ingo Molnar , Juri Lelli , Luca Abeni Subject: [RFC v2 1/7] Track the active utilisation Date: Fri, 1 Apr 2016 17:12:27 +0200 Message-Id: <1459523553-29089-2-git-send-email-luca.abeni@unitn.it> X-Mailer: git-send-email 2.5.0 In-Reply-To: <1459523553-29089-1-git-send-email-luca.abeni@unitn.it> References: <1459523553-29089-1-git-send-email-luca.abeni@unitn.it> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 3694 Lines: 117 The active utilisation here is defined as the total utilisation of the active (TASK_RUNNING) tasks queued on a runqueue. Hence, it is increased when a task wakes up and is decreased when a task blocks. This approach might later be improved by decreasing the active utilisation at the so-called "0-lag time" instead of at the moment the task blocks. 
Signed-off-by: Juri Lelli --- kernel/sched/deadline.c | 32 +++++++++++++++++++++++++++++++- kernel/sched/sched.h | 6 ++++++ 2 files changed, 37 insertions(+), 1 deletion(-) diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index c7a036f..3c64ebf 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -43,6 +43,22 @@ static inline int on_dl_rq(struct sched_dl_entity *dl_se) return !RB_EMPTY_NODE(&dl_se->rb_node); } +static void add_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) +{ + u64 se_bw = dl_se->dl_bw; + + dl_rq->running_bw += se_bw; +} + +static void sub_running_bw(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq) +{ + u64 se_bw = dl_se->dl_bw; + + dl_rq->running_bw -= se_bw; + if (WARN_ON(dl_rq->running_bw < 0)) + dl_rq->running_bw = 0; +} + static inline int is_leftmost(struct task_struct *p, struct dl_rq *dl_rq) { struct sched_dl_entity *dl_se = &p->dl; @@ -510,6 +526,8 @@ static void update_dl_entity(struct sched_dl_entity *dl_se, struct dl_rq *dl_rq = dl_rq_of_se(dl_se); struct rq *rq = rq_of_dl_rq(dl_rq); + add_running_bw(dl_se, dl_rq); + if (dl_time_before(dl_se->deadline, rq_clock(rq)) || dl_entity_overflow(dl_se, pi_se, rq_clock(rq))) { dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline; @@ -957,8 +975,10 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags) * its rq, the bandwidth timer callback (which clearly has not * run yet) will take care of this. 
*/ - if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) + if (p->dl.dl_throttled && !(flags & ENQUEUE_REPLENISH)) { + add_running_bw(&p->dl, &rq->dl); return; + } enqueue_dl_entity(&p->dl, pi_se, flags); @@ -976,6 +996,8 @@ static void dequeue_task_dl(struct rq *rq, struct task_struct *p, int flags) { update_curr_dl(rq); __dequeue_task_dl(rq, p, flags); + if (flags & DEQUEUE_SLEEP) + sub_running_bw(&p->dl, &rq->dl); } /* @@ -1210,6 +1232,8 @@ static void task_fork_dl(struct task_struct *p) static void task_dead_dl(struct task_struct *p) { struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); + struct dl_rq *dl_rq = dl_rq_of_se(&p->dl); + struct rq *rq = rq_of_dl_rq(dl_rq); /* * Since we are TASK_DEAD we won't slip out of the domain! @@ -1218,6 +1242,9 @@ static void task_dead_dl(struct task_struct *p) /* XXX we should retain the bw until 0-lag */ dl_b->total_bw -= p->dl.dl_bw; raw_spin_unlock_irq(&dl_b->lock); + + if (task_on_rq_queued(p)) + sub_running_bw(&p->dl, &rq->dl); } static void set_curr_task_dl(struct rq *rq) @@ -1697,6 +1724,9 @@ static void switched_from_dl(struct rq *rq, struct task_struct *p) if (!start_dl_timer(p)) __dl_clear_params(p); + if (task_on_rq_queued(p)) + sub_running_bw(&p->dl, &rq->dl); + /* * Since this might be the only -deadline task on the rq, * this is the right place to try to pull some other one diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h index e6d4a3f..bc05c29 100644 --- a/kernel/sched/sched.h +++ b/kernel/sched/sched.h @@ -520,6 +520,12 @@ struct dl_rq { #else struct dl_bw dl_bw; #endif + /* + * "Active utilization" for this runqueue: increased when a + * task wakes up (becomes TASK_RUNNING) and decreased when a + * task blocks + */ + s64 running_bw; }; #ifdef CONFIG_SMP -- 2.5.0