From: Frederic Weisbecker
To: Peter Zijlstra
Cc: LKML, Frederic Weisbecker, Byungchul Park, Chris Metcalf,
	Thomas Gleixner, Luiz Capitulino, Christoph Lameter,
	Paul E. McKenney, Mike Galbraith, Rik van Riel, Ingo Molnar
Subject: [PATCH 3/4] sched: Optimize tick periodic cpu load updates
Date: Fri, 1 Apr 2016 15:23:06 +0200
Message-Id: <1459516987-15745-4-git-send-email-fweisbec@gmail.com>
In-Reply-To: <1459516987-15745-1-git-send-email-fweisbec@gmail.com>
References: <1459516987-15745-1-git-send-email-fweisbec@gmail.com>
X-Mailer: git-send-email 2.7.0

Don't bother with the whole pending tickless cpu load machinery if we
run a tick periodic kernel. That's less work for the CPU on every tick.

Cc: Byungchul Park
Cc: Chris Metcalf
Cc: Christoph Lameter
Cc: Ingo Molnar
Cc: Luiz Capitulino
Cc: Mike Galbraith
Cc: Paul E. McKenney
Cc: Peter Zijlstra
Cc: Rik van Riel
Cc: Thomas Gleixner
Signed-off-by: Frederic Weisbecker
---
 kernel/sched/fair.c  | 47 +++++++++++++++++++++++------------------------
 kernel/sched/sched.h |  4 +++-
 2 files changed, 26 insertions(+), 25 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 394f008..1bb053e 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4526,7 +4526,7 @@ decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
  * see decay_load_misses(). For NOHZ_FULL we get to subtract and add the extra
  * term. See the @active paramter.
  */
-static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
+static void cpu_load_update(struct rq *this_rq, unsigned long this_load,
 			      unsigned long pending_updates)
 {
 	unsigned long tickless_load = this_rq->cpu_load[0];
@@ -4567,24 +4567,6 @@ static void __cpu_load_update(struct rq *this_rq, unsigned long this_load,
 	sched_avg_update(this_rq);
 }
 
-static void cpu_load_update(struct rq *this_rq,
-			    unsigned long curr_jiffies,
-			    unsigned long load)
-{
-	unsigned long pending_updates;
-
-	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-	if (pending_updates) {
-		this_rq->last_load_update_tick = curr_jiffies;
-		/*
-		 * In the regular NOHZ case, we were idle, this means load 0.
-		 * In the NOHZ_FULL case, we were non-idle, we should consider
-		 * its weighted load.
-		 */
-		__cpu_load_update(this_rq, load, pending_updates);
-	}
-}
-
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
@@ -4592,6 +4574,18 @@ static unsigned long weighted_cpuload(const int cpu)
 }
 
 #ifdef CONFIG_NO_HZ_COMMON
+static unsigned long cpu_load_pending(struct rq *this_rq)
+{
+	unsigned long curr_jiffies = READ_ONCE(jiffies);
+	unsigned long pending_updates;
+
+	pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+	if (pending_updates)
+		this_rq->last_load_update_tick = curr_jiffies;
+
+	return pending_updates;
+}
+
 /*
  * There is no sane way to deal with nohz on smp when using jiffies because the
  * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
@@ -4617,7 +4611,7 @@ static void cpu_load_update_idle(struct rq *this_rq)
 	if (weighted_cpuload(cpu_of(this_rq)))
 		return;
 
-	cpu_load_update(this_rq, READ_ONCE(jiffies), 0);
+	cpu_load_update(this_rq, 0, cpu_load_pending(this_rq));
 }
 
 /*
@@ -4641,18 +4635,23 @@ void cpu_load_update_nohz_start(void)
  */
 void cpu_load_update_nohz_stop(void)
 {
-	unsigned long curr_jiffies = READ_ONCE(jiffies);
 	struct rq *this_rq = this_rq();
 	unsigned long load;
 
-	if (curr_jiffies == this_rq->last_load_update_tick)
+	if (jiffies == this_rq->last_load_update_tick)
 		return;
 
 	load = weighted_cpuload(cpu_of(this_rq));
+
 	raw_spin_lock(&this_rq->lock);
-	cpu_load_update(this_rq, curr_jiffies, load);
+	cpu_load_update(this_rq, load, cpu_load_pending(this_rq));
 	raw_spin_unlock(&this_rq->lock);
 }
+#else /* !CONFIG_NO_HZ_COMMON */
+static inline unsigned long cpu_load_pending(struct rq *this_rq)
+{
+	return 1;
+}
 #endif /* CONFIG_NO_HZ_COMMON */
 
 /*
@@ -4662,7 +4661,7 @@ void cpu_load_update_active(struct rq *this_rq)
 {
 	unsigned long load = weighted_cpuload(cpu_of(this_rq));
 
-	cpu_load_update(this_rq, READ_ONCE(jiffies), load);
+	cpu_load_update(this_rq, load, cpu_load_pending(this_rq));
 }
 
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1802013..d951701 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -585,8 +585,10 @@ struct rq {
 #endif
 #define CPU_LOAD_IDX_MAX 5
 	unsigned long cpu_load[CPU_LOAD_IDX_MAX];
+#ifdef CONFIG_NO_HZ_COMMON
+# ifdef CONFIG_SMP
 	unsigned long last_load_update_tick;
-#ifdef CONFIG_NO_HZ_COMMON
+# endif
 	u64 nohz_stamp;
 	unsigned long nohz_flags;
 #endif
-- 
2.7.0
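
For reference, a minimal userspace model of why the periodic path gets
cheap. This is only a sketch, not kernel code: the names below mirror the
patch for illustration, and the decay stand-in is a placeholder for the
kernel's table-driven decay_load_missed(). The property the patch relies
on is that with pending_updates pinned to the constant 1, the
missed-updates correction is the identity and each tick reduces to a
plain exponential average.

/* periodic_load.c: build with  gcc -O2 -o periodic_load periodic_load.c */
#include <stdio.h>

#define CPU_LOAD_IDX_MAX 5

static unsigned long cpu_load[CPU_LOAD_IDX_MAX];

/*
 * Stand-in for the kernel's decay_load_missed(). The only property the
 * periodic path relies on is that zero missed updates is the identity;
 * the real function decays "load" through per-index lookup tables.
 */
static unsigned long decay_load_missed(unsigned long load,
				       unsigned long missed_updates, int idx)
{
	(void)idx;
	if (!missed_updates)
		return load;	/* periodic case: nothing was missed */
	return 0;		/* tickless decay elided in this model */
}

/* Periodic build: exactly one tick has elapsed since the last update. */
static unsigned long cpu_load_pending(void)
{
	return 1;
}

static void cpu_load_update(unsigned long this_load,
			    unsigned long pending_updates)
{
	int i, scale;

	cpu_load[0] = this_load;
	for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
		unsigned long old_load, new_load;

		/* pending_updates - 1 == 0 here, so this is the identity */
		old_load = decay_load_missed(cpu_load[i],
					     pending_updates - 1, i);
		new_load = this_load;
		if (new_load > old_load)	/* round up on the way up */
			new_load += scale - 1;
		/* exponential average: higher indexes react more slowly */
		cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
	}
}

int main(void)
{
	int t;

	/* Feed a few "ticks" of load 1024; the indexes converge toward it. */
	for (t = 0; t < 4; t++) {
		cpu_load_update(1024, cpu_load_pending());
		printf("tick %d: %lu %lu %lu %lu %lu\n", t, cpu_load[0],
		       cpu_load[1], cpu_load[2], cpu_load[3], cpu_load[4]);
	}
	return 0;
}

Since cpu_load_pending() is a compile-time constant 1 here, an optimizing
compiler can fold the !missed_updates test away entirely, which is the
"less work for the CPU on every tick" the changelog refers to.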