From: Joel Fernandes
To: linux-kernel@vger.kernel.org
Cc: Joel Fernandes, Srinivas Pandruvada, Len Brown, "Rafael J. Wysocki",
    Viresh Kumar, Ingo Molnar, Peter Zijlstra, Juri Lelli, Patrick Bellasi,
    Steve Muckle, kernel-team@android.com
Subject: [PATCH RFC v2 1/2] Revert "sched/fair: Drop always true parameter of update_cfs_rq_load_avg()"
Date: Sun, 3 Sep 2017 13:15:41 -0700
Message-Id: <20170903201542.2929-2-joelaf@google.com>
X-Mailer: git-send-email 2.14.1.581.gf28d330327-goog
In-Reply-To: <20170903201542.2929-1-joelaf@google.com>
References: <20170903201542.2929-1-joelaf@google.com>

This reverts commit 3a123bbbb10d54dbdde6ccbbd519c74c91ba2f52.

It's needed by the series to control whether cpufreq is notified about a
frequency update when the utilization is updated.

Cc: Srinivas Pandruvada
Cc: Len Brown
Cc: Rafael J. Wysocki
Cc: Viresh Kumar
Cc: Ingo Molnar
Cc: Peter Zijlstra
Cc: Juri Lelli
Cc: Patrick Bellasi
Cc: Steve Muckle
Cc: kernel-team@android.com
Signed-off-by: Joel Fernandes
---
 kernel/sched/fair.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index eca6a57527f9..bf3595c0badf 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -797,7 +797,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 			/*
 			 * For !fair tasks do:
 			 *
-			update_cfs_rq_load_avg(now, cfs_rq);
+			update_cfs_rq_load_avg(now, cfs_rq, false);
 			attach_entity_load_avg(cfs_rq, se);
 			switched_from_fair(rq, p);
 			 *
@@ -3607,6 +3607,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * update_cfs_rq_load_avg - update the cfs_rq's load/util averages
  * @now: current time, as per cfs_rq_clock_task()
  * @cfs_rq: cfs_rq to update
+ * @update_freq: should we call cfs_rq_util_change() or will the call do so
  *
  * The cfs_rq avg is the direct sum of all its entities (blocked and runnable)
  * avg. The immediate corollary is that all (fair) tasks must be attached, see
@@ -3620,7 +3621,7 @@ static inline void add_tg_cfs_propagate(struct cfs_rq *cfs_rq, long runnable_sum
  * call update_tg_load_avg() when this function returns true.
  */
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	unsigned long removed_load = 0, removed_util = 0, removed_runnable_sum = 0;
 	struct sched_avg *sa = &cfs_rq->avg;
@@ -3657,7 +3658,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
 	cfs_rq->load_last_update_time_copy = sa->last_update_time;
 #endif
 
-	if (decayed)
+	if (update_freq && decayed)
 		cfs_rq_util_change(cfs_rq);
 
 	return decayed;
@@ -3751,7 +3752,7 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
 	if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
 		__update_load_avg_se(now, cpu, cfs_rq, se);
 
-	decayed  = update_cfs_rq_load_avg(now, cfs_rq);
+	decayed  = update_cfs_rq_load_avg(now, cfs_rq, true);
 	decayed |= propagate_entity_load_avg(se);
 
 	if (!se->avg.last_update_time && (flags & DO_ATTACH)) {
@@ -3841,7 +3842,7 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf);
 #else /* CONFIG_SMP */
 
 static inline int
-update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
+update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq, bool update_freq)
 {
 	return 0;
 }
@@ -7331,7 +7332,7 @@ static void update_blocked_averages(int cpu)
 		if (throttled_hierarchy(cfs_rq))
 			continue;
 
-		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq))
+		if (update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true))
 			update_tg_load_avg(cfs_rq, 0);
 
 		/* Propagate pending load changes to the parent, if any: */
@@ -7404,7 +7405,7 @@ static inline void update_blocked_averages(int cpu)
 
 	rq_lock_irqsave(rq, &rf);
 	update_rq_clock(rq);
-	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq);
+	update_cfs_rq_load_avg(cfs_rq_clock_task(cfs_rq), cfs_rq, true);
 	rq_unlock_irqrestore(rq, &rf);
 }
-- 
2.14.1.581.gf28d330327-goog
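
P.S. For readers following along, below is a minimal, standalone C sketch of
the pattern this revert restores: the update function grows a bool update_freq
so that a caller can suppress the cpufreq notification and issue it itself
later, once, with fuller context. This is illustrative only, not kernel code;
every name in it (avg_state, util_change_hook, update_load_avg_sketch) is a
hypothetical stand-in for the kernel symbols in the patch above.

/*
 * Illustrative sketch only -- not kernel code. All names here are
 * hypothetical stand-ins for the symbols in the patch above.
 */
#include <stdbool.h>
#include <stdio.h>

struct avg_state {
	unsigned long util;	/* stands in for the tracked utilization */
};

/* Stand-in for cfs_rq_util_change(): the cpufreq notification hook. */
static void util_change_hook(struct avg_state *st)
{
	printf("notify cpufreq: util=%lu\n", st->util);
}

/*
 * Stand-in for update_cfs_rq_load_avg(): returns true when the average
 * actually changed ("decayed"), and notifies cpufreq from here only
 * when the caller passed update_freq == true.
 */
static bool update_load_avg_sketch(struct avg_state *st,
				   unsigned long new_util, bool update_freq)
{
	bool decayed = (new_util != st->util);

	st->util = new_util;

	if (update_freq && decayed)
		util_change_hook(st);

	return decayed;
}

int main(void)
{
	struct avg_state st = { .util = 100 };

	/* Common path: update and notify immediately (update_freq=true). */
	update_load_avg_sketch(&st, 120, true);

	/*
	 * Deferred path (update_freq=false): update silently, then let
	 * the caller issue a single notification afterwards.
	 */
	if (update_load_avg_sketch(&st, 150, false))
		util_change_hook(&st);

	return 0;
}

The second call site in main() mirrors what a caller passing update_freq=false
would do: update without side effects, then decide about the notification once
it has finished its other work.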