From: Dietmar Eggemann <dietmar.eggemann@arm.com>
To: Peter Zijlstra, Ingo Molnar
Cc: Thomas Gleixner, Frederic Weisbecker, Rik van Riel,
 Vincent Guittot, Morten Rasmussen, Quentin Perret,
 Valentin Schneider, Patrick Bellasi, linux-kernel@vger.kernel.org
Subject: [PATCH 2/7] sched/fair: Replace source_load() & target_load() w/ weighted_cpuload()
Date: Mon, 27 May 2019 07:21:11 +0100
Message-Id: <20190527062116.11512-3-dietmar.eggemann@arm.com>
X-Mailer: git-send-email 2.17.1
In-Reply-To: <20190527062116.11512-1-dietmar.eggemann@arm.com>
References: <20190527062116.11512-1-dietmar.eggemann@arm.com>
With LB_BIAS disabled, source_load() & target_load() return
weighted_cpuload(). Replace both with calls to weighted_cpuload().

The function to obtain the load index (sd->*_idx) for an sd,
get_sd_load_idx(), can be removed as well.

Finally, get rid of the sched feature LB_BIAS.

Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
---
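Note (below the '---', so not part of the commit message): the following
stand-alone sketch is only illustrative. It models the pre-patch helpers
with toy stand-ins (this struct rq, lb_bias and cpu_load[] are simplified
approximations, not the kernel's types) to show why source_load() and
target_load() collapse to weighted_cpuload() once sched_feat(LB_BIAS) is
false (the default, per kernel/sched/features.h): the min()/max() biasing
against rq->cpu_load[] is then never reached.

#include <stdbool.h>
#include <stdio.h>

/* Toy stand-ins for the kernel types and feature bit; illustration only. */
struct rq {
        unsigned long cpu_load[5];      /* decayed load history, indexed by load_idx - 1 */
        unsigned long runnable_load;    /* stands in for cfs_rq_runnable_load_avg() */
};

static bool lb_bias = false;            /* models sched_feat(LB_BIAS), default false */

static unsigned long weighted_cpuload(struct rq *rq)
{
        return rq->runnable_load;
}

/* Pre-patch source_load(): low guess at the source CPU, biased down via min(). */
static unsigned long source_load(struct rq *rq, int type)
{
        unsigned long total = weighted_cpuload(rq);

        if (type == 0 || !lb_bias)
                return total;

        return rq->cpu_load[type - 1] < total ? rq->cpu_load[type - 1] : total;
}

/* Pre-patch target_load(): high guess at the target CPU, biased up via max(). */
static unsigned long target_load(struct rq *rq, int type)
{
        unsigned long total = weighted_cpuload(rq);

        if (type == 0 || !lb_bias)
                return total;

        return rq->cpu_load[type - 1] > total ? rq->cpu_load[type - 1] : total;
}

int main(void)
{
        struct rq rq = {
                .cpu_load = { 300, 250, 200, 150, 100 },
                .runnable_load = 170,
        };

        /* With LB_BIAS off, all three agree for any load index. */
        printf("source_load:      %lu\n", source_load(&rq, 3));
        printf("target_load:      %lu\n", target_load(&rq, 3));
        printf("weighted_cpuload: %lu\n", weighted_cpuload(&rq));

        return 0;
}

Built with e.g. gcc -Wall, all three lines print 170, which is exactly the
equivalence this patch relies on.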
 kernel/sched/fair.c     | 90 ++---------------------------------------
 kernel/sched/features.h |  1 -
 2 files changed, 4 insertions(+), 87 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index f619b93ca331..88779c45e8e6 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1467,8 +1467,6 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
 }
 
 static unsigned long weighted_cpuload(struct rq *rq);
-static unsigned long source_load(int cpu, int type);
-static unsigned long target_load(int cpu, int type);
 
 /* Cached statistics for all CPUs within a node */
 struct numa_stats {
@@ -5336,45 +5334,11 @@ static struct {
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
-/* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(struct rq *rq)
 {
 	return cfs_rq_runnable_load_avg(&rq->cfs);
 }
 
-/*
- * Return a low guess at the load of a migration-source CPU weighted
- * according to the scheduling class and "nice" value.
- *
- * We want to under-estimate the load of migration sources, to
- * balance conservatively.
- */
-static unsigned long source_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return min(rq->cpu_load[type-1], total);
-}
-
-/*
- * Return a high guess at the load of a migration-target CPU weighted
- * according to the scheduling class and "nice" value.
- */
-static unsigned long target_load(int cpu, int type)
-{
-	struct rq *rq = cpu_rq(cpu);
-	unsigned long total = weighted_cpuload(rq);
-
-	if (type == 0 || !sched_feat(LB_BIAS))
-		return total;
-
-	return max(rq->cpu_load[type-1], total);
-}
-
 static unsigned long capacity_of(int cpu)
 {
 	return cpu_rq(cpu)->cpu_capacity;
@@ -5482,7 +5446,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	s64 this_eff_load, prev_eff_load;
 	unsigned long task_load;
 
-	this_eff_load = target_load(this_cpu, sd->wake_idx);
+	this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
 
 	if (sync) {
 		unsigned long current_load = task_h_load(current);
@@ -5500,7 +5464,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
 	this_eff_load *= 100;
 	this_eff_load *= capacity_of(prev_cpu);
 
-	prev_eff_load = source_load(prev_cpu, sd->wake_idx);
+	prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
 	prev_eff_load -= task_load;
 	if (sched_feat(WA_BIAS))
 		prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5561,14 +5525,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 	unsigned long this_runnable_load = ULONG_MAX;
 	unsigned long min_avg_load = ULONG_MAX, this_avg_load = ULONG_MAX;
 	unsigned long most_spare = 0, this_spare = 0;
-	int load_idx = sd->forkexec_idx;
 	int imbalance_scale = 100 + (sd->imbalance_pct-100)/2;
 	unsigned long imbalance = scale_load_down(NICE_0_LOAD) *
 				(sd->imbalance_pct-100) / 100;
 
-	if (sd_flag & SD_BALANCE_WAKE)
-		load_idx = sd->wake_idx;
-
 	do {
 		unsigned long load, avg_load, runnable_load;
 		unsigned long spare_cap, max_spare_cap;
@@ -5592,12 +5552,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 		max_spare_cap = 0;
 
 		for_each_cpu(i, sched_group_span(group)) {
-			/* Bias balancing toward CPUs of our domain */
-			if (local_group)
-				load = source_load(i, load_idx);
-			else
-				load = target_load(i, load_idx);
-
+			load = weighted_cpuload(cpu_rq(i));
 			runnable_load += load;
 			avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
 
@@ -7679,34 +7634,6 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
 	};
 }
 
-/**
- * get_sd_load_idx - Obtain the load index for a given sched domain.
- * @sd: The sched_domain whose load_idx is to be obtained.
- * @idle: The idle status of the CPU for whose sd load_idx is obtained.
- *
- * Return: The load index.
- */
-static inline int get_sd_load_idx(struct sched_domain *sd,
-					enum cpu_idle_type idle)
-{
-	int load_idx;
-
-	switch (idle) {
-	case CPU_NOT_IDLE:
-		load_idx = sd->busy_idx;
-		break;
-
-	case CPU_NEWLY_IDLE:
-		load_idx = sd->newidle_idx;
-		break;
-	default:
-		load_idx = sd->idle_idx;
-		break;
-	}
-
-	return load_idx;
-}
-
 static unsigned long scale_rt_capacity(struct sched_domain *sd, int cpu)
 {
 	struct rq *rq = cpu_rq(cpu);
@@ -7995,9 +7922,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sg_lb_stats *sgs,
 			int *sg_status)
 {
-	int local_group = cpumask_test_cpu(env->dst_cpu, sched_group_span(group));
-	int load_idx = get_sd_load_idx(env->sd, env->idle);
-	unsigned long load;
 	int i, nr_running;
 
 	memset(sgs, 0, sizeof(*sgs));
@@ -8008,13 +7932,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
 			env->flags |= LBF_NOHZ_AGAIN;
 
-		/* Bias balancing toward CPUs of our domain: */
-		if (local_group)
-			load = target_load(i, load_idx);
-		else
-			load = source_load(i, load_idx);
-
-		sgs->group_load += load;
+		sgs->group_load += weighted_cpuload(rq);
 		sgs->group_util += cpu_util(i);
 		sgs->sum_nr_running += rq->cfs.h_nr_running;
 
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index 858589b83377..2410db5e9a35 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -39,7 +39,6 @@ SCHED_FEAT(WAKEUP_PREEMPTION, true)
 
 SCHED_FEAT(HRTICK, false)
 SCHED_FEAT(DOUBLE_TICK, false)
-SCHED_FEAT(LB_BIAS, false)
 
 /*
  * Decrement CPU capacity based on time not spent running tasks
-- 
2.17.1