Return-Path: Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S941104AbcJXOHu (ORCPT ); Mon, 24 Oct 2016 10:07:50 -0400 Received: from mail-wm0-f66.google.com ([74.125.82.66]:35768 "EHLO mail-wm0-f66.google.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S941049AbcJXOH0 (ORCPT ); Mon, 24 Oct 2016 10:07:26 -0400 From: Luca Abeni To: linux-kernel@vger.kernel.org Cc: Peter Zijlstra , Ingo Molnar , Juri Lelli , Claudio Scordino , Steven Rostedt , Luca Abeni Subject: [RFC v3 3/6] Fix the update of the total -deadline utilization Date: Mon, 24 Oct 2016 16:06:35 +0200 Message-Id: <1477317998-7487-4-git-send-email-luca.abeni@unitn.it> X-Mailer: git-send-email 2.7.4 In-Reply-To: <1477317998-7487-1-git-send-email-luca.abeni@unitn.it> References: <1477317998-7487-1-git-send-email-luca.abeni@unitn.it> Sender: linux-kernel-owner@vger.kernel.org List-ID: X-Mailing-List: linux-kernel@vger.kernel.org Content-Length: 4875 Lines: 142 Now that the inactive timer can be armed to fire at the 0-lag time, it is possible to use inactive_task_timer() to update the total -deadline utilization (dl_b->total_bw) at the correct time, fixing dl_overflow() and __setparam_dl(). Signed-off-by: Luca Abeni --- kernel/sched/core.c | 36 ++++++++++++------------------------ kernel/sched/deadline.c | 34 +++++++++++++++++++++++++--------- 2 files changed, 37 insertions(+), 33 deletions(-) diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 664c618..337a5f0 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c @@ -2507,9 +2507,6 @@ static inline int dl_bw_cpus(int i) * allocated bandwidth to reflect the new situation. * * This function is called while holding p's rq->lock. - * - * XXX we should delay bw change until the task's 0-lag point, see - * __setparam_dl(). 
*/ static int dl_overflow(struct task_struct *p, int policy, const struct sched_attr *attr) @@ -2538,11 +2535,22 @@ static int dl_overflow(struct task_struct *p, int policy, err = 0; } else if (dl_policy(policy) && task_has_dl_policy(p) && !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) { + /* + * XXX this is slightly incorrect: when the task + * utilization decreases, we should delay the total + * utilization change until the task's 0-lag point. + * But this would require to set the task's "inactive + * timer" when the task is not inactive. + */ __dl_clear(dl_b, p->dl.dl_bw); __dl_add(dl_b, new_bw); err = 0; } else if (!dl_policy(policy) && task_has_dl_policy(p)) { - __dl_clear(dl_b, p->dl.dl_bw); + /* + * Do not decrease the total deadline utilization here, + * switched_from_dl() will take care to do it at the correct + * (0-lag) time. + */ err = 0; } raw_spin_unlock(&dl_b->lock); @@ -3912,26 +3920,6 @@ __setparam_dl(struct task_struct *p, const struct sched_attr *attr) dl_se->dl_period = attr->sched_period ?: dl_se->dl_deadline; dl_se->flags = attr->sched_flags; dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime); - - /* - * Changing the parameters of a task is 'tricky' and we're not doing - * the correct thing -- also see task_dead_dl() and switched_from_dl(). - * - * What we SHOULD do is delay the bandwidth release until the 0-lag - * point. This would include retaining the task_struct until that time - * and change dl_overflow() to not immediately decrement the current - * amount. - * - * Instead we retain the current runtime/deadline and let the new - * parameters take effect after the current reservation period lapses. - * This is safe (albeit pessimistic) because the 0-lag point is always - * before the current scheduling deadline. - * - * We can still have temporary overloads because we do not delay the - * change in bandwidth until that time; so admission control is - * not on the safe side. 
It does however guarantee tasks will never - * consume more than promised. - */ } /* diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c index 80d1541..4d3545b 100644 --- a/kernel/sched/deadline.c +++ b/kernel/sched/deadline.c @@ -91,8 +91,14 @@ static void task_go_inactive(struct task_struct *p) */ if (zerolag_time < 0) { sub_running_bw(dl_se, dl_rq); - if (!dl_task(p)) + if (!dl_task(p)) { + struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); + + raw_spin_lock(&dl_b->lock); + __dl_clear(dl_b, p->dl.dl_bw); __dl_clear_params(p); + raw_spin_unlock(&dl_b->lock); + } return; } @@ -856,8 +862,13 @@ static enum hrtimer_restart inactive_task_timer(struct hrtimer *timer) rq = task_rq_lock(p, &rf); - if (!dl_task(p)) { + if (!dl_task(p) || p->state == TASK_DEAD) { + struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); + + raw_spin_lock(&dl_b->lock); + __dl_clear(dl_b, p->dl.dl_bw); __dl_clear_params(p); + raw_spin_unlock(&dl_b->lock); goto unlock; } @@ -1330,16 +1341,21 @@ static void task_fork_dl(struct task_struct *p) static void task_dead_dl(struct task_struct *p) { - struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); - /* * Since we are TASK_DEAD we won't slip out of the domain! */ - raw_spin_lock_irq(&dl_b->lock); - /* XXX we should retain the bw until 0-lag */ - dl_b->total_bw -= p->dl.dl_bw; - raw_spin_unlock_irq(&dl_b->lock); - if (hrtimer_active(&p->dl.inactive_timer)) { + if (!hrtimer_active(&p->dl.inactive_timer)) { + struct dl_bw *dl_b = dl_bw_of(task_cpu(p)); + + /* + * If the "inactive timer" is not active, the 0-lag time + * has already passed, so we immediately decrease the + * total deadline utilization. + */ + raw_spin_lock_irq(&dl_b->lock); + __dl_clear(dl_b, p->dl.dl_bw); + raw_spin_unlock_irq(&dl_b->lock); + } else { raw_spin_lock_irq(&task_rq(p)->lock); sub_running_bw(&p->dl, dl_rq_of_se(&p->dl)); raw_spin_unlock_irq(&task_rq(p)->lock); -- 2.7.4