2019-05-27 06:23:18

by Dietmar Eggemann

Subject: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()

This is done to align the per cpu (i.e. per rq) load with the util
counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
since there is no 'unweighted' load to distinguish it from.

Signed-off-by: Dietmar Eggemann <[email protected]>
---
kernel/sched/fair.c | 44 ++++++++++++++++++++------------------------
1 file changed, 20 insertions(+), 24 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a33f196703a7..f6d0aad13090 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_load(int cpu);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1485,9 +1485,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)

memset(ns, 0, sizeof(*ns));
for_each_cpu(cpu, cpumask_of_node(nid)) {
- struct rq *rq = cpu_rq(cpu);
-
- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_load(cpu);
ns->compute_capacity += capacity_of(cpu);
}

@@ -5334,9 +5332,9 @@ static struct {

#endif /* CONFIG_NO_HZ_COMMON */

-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_load(int cpu)
{
- return cfs_rq_runnable_load_avg(&rq->cfs);
+ return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
}

static unsigned long capacity_of(int cpu)
@@ -5348,7 +5346,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_load(cpu);

if (nr_running)
return load_avg / nr_running;
@@ -5446,7 +5444,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;

- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_load(this_cpu);

if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5464,7 +5462,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);

- prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_load(prev_cpu);
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5552,7 +5550,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;

for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_load(i);
runnable_load += load;

avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5688,7 +5686,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_load(i);
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7259,8 +7257,8 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;

/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
- * busiest_rq, as part of a balancing operation within domain "sd".
+ * detach_tasks() -- tries to detach up to imbalance load from busiest_rq,
+ * as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
*/
@@ -7326,8 +7324,7 @@ static int detach_tasks(struct lb_env *env)
#endif

/*
- * We only want to steal up to the prescribed amount of
- * weighted load.
+ * We only want to steal up to the prescribed amount of load.
*/
if (env->imbalance <= 0)
break;
@@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;

- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_load(i);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;

@@ -8385,8 +8382,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
- * to restore balance.
+ * Also calculates the amount of load which should be moved to restore balance.
*
* @env: The load balancing environment.
*
@@ -8558,11 +8554,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;

- wl = weighted_cpuload(rq);
+ wl = cpu_load(i);

/*
- * When comparing with imbalance, use weighted_cpuload()
- * which is not scaled with the CPU capacity.
+ * When comparing with imbalance, use cpu_load() which is not
+ * scaled with the CPU capacity.
*/

if (rq->nr_running == 1 && wl > env->imbalance &&
@@ -8571,9 +8567,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,

/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
- * that the load can be moved away from the CPU that is
- * potentially running at a lower capacity.
+ * the cpu_load() scaled with the CPU capacity, so that the
+ * load can be moved away from the CPU that is potentially
+ * running at a lower capacity.
*
* Thus we're looking for max(wl_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
--
2.17.1


2019-05-27 13:34:33

by Vincent Guittot

Subject: Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()

On Mon, 27 May 2019 at 08:21, Dietmar Eggemann <[email protected]> wrote:
>
> This is done to align the per cpu (i.e. per rq) load with the util
> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> since there is no 'unweighted' load to distinguish it from.
>
> Signed-off-by: Dietmar Eggemann <[email protected]>
> ---
> kernel/sched/fair.c | 44 ++++++++++++++++++++------------------------
> 1 file changed, 20 insertions(+), 24 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index a33f196703a7..f6d0aad13090 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
> group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
> }
>
> -static unsigned long weighted_cpuload(struct rq *rq);
> +static unsigned long cpu_load(int cpu);
>
> /* Cached statistics for all CPUs within a node */
> struct numa_stats {
> @@ -1485,9 +1485,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
>
> memset(ns, 0, sizeof(*ns));
> for_each_cpu(cpu, cpumask_of_node(nid)) {
> - struct rq *rq = cpu_rq(cpu);
> -
> - ns->load += weighted_cpuload(rq);
> + ns->load += cpu_load(cpu);
> ns->compute_capacity += capacity_of(cpu);
> }
>
> @@ -5334,9 +5332,9 @@ static struct {
>
> #endif /* CONFIG_NO_HZ_COMMON */
>
> -static unsigned long weighted_cpuload(struct rq *rq)
> +static unsigned long cpu_load(int cpu)

It would be better to use cpu_runnable_load() instead of cpu_load(),
because it returns runnable_load_avg and not load_avg.
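
For context, a rough sketch of the two accessors being distinguished,
going by the avg.runnable_load_avg / avg.load_avg fields named in the
changelog (paraphrased, not the literal fair.c definitions):

static inline unsigned long cfs_rq_runnable_load_avg(struct cfs_rq *cfs_rq)
{
	/* load of the tasks currently runnable on this cfs_rq */
	return cfs_rq->avg.runnable_load_avg;
}

static inline unsigned long cfs_rq_load_avg(struct cfs_rq *cfs_rq)
{
	/*
	 * runnable + blocked load; this is what e.g. find_idlest_group()
	 * sums up via cfs_rq_load_avg(&cpu_rq(i)->cfs) for avg_load.
	 */
	return cfs_rq->avg.load_avg;
}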

> {
> - return cfs_rq_runnable_load_avg(&rq->cfs);
> + return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
> }
>
> static unsigned long capacity_of(int cpu)
> @@ -5348,7 +5346,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
> {
> struct rq *rq = cpu_rq(cpu);
> unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
> - unsigned long load_avg = weighted_cpuload(rq);
> + unsigned long load_avg = cpu_load(cpu);
>
> if (nr_running)
> return load_avg / nr_running;
> @@ -5446,7 +5444,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
> s64 this_eff_load, prev_eff_load;
> unsigned long task_load;
>
> - this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
> + this_eff_load = cpu_load(this_cpu);
>
> if (sync) {
> unsigned long current_load = task_h_load(current);
> @@ -5464,7 +5462,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
> this_eff_load *= 100;
> this_eff_load *= capacity_of(prev_cpu);
>
> - prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
> + prev_eff_load = cpu_load(prev_cpu);
> prev_eff_load -= task_load;
> if (sched_feat(WA_BIAS))
> prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
> @@ -5552,7 +5550,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
> max_spare_cap = 0;
>
> for_each_cpu(i, sched_group_span(group)) {
> - load = weighted_cpuload(cpu_rq(i));
> + load = cpu_load(i);
> runnable_load += load;
>
> avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
> @@ -5688,7 +5686,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
> shallowest_idle_cpu = i;
> }
> } else if (shallowest_idle_cpu == -1) {
> - load = weighted_cpuload(cpu_rq(i));
> + load = cpu_load(i);
> if (load < min_load) {
> min_load = load;
> least_loaded_cpu = i;
> @@ -7259,8 +7257,8 @@ static struct task_struct *detach_one_task(struct lb_env *env)
> static const unsigned int sched_nr_migrate_break = 32;
>
> /*
> - * detach_tasks() -- tries to detach up to imbalance weighted load from
> - * busiest_rq, as part of a balancing operation within domain "sd".
> + * detach_tasks() -- tries to detach up to imbalance load from busiest_rq,
> + * as part of a balancing operation within domain "sd".
> *
> * Returns number of detached tasks if successful and 0 otherwise.
> */
> @@ -7326,8 +7324,7 @@ static int detach_tasks(struct lb_env *env)
> #endif
>
> /*
> - * We only want to steal up to the prescribed amount of
> - * weighted load.
> + * We only want to steal up to the prescribed amount of load.
> */
> if (env->imbalance <= 0)
> break;
> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
> if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
> env->flags |= LBF_NOHZ_AGAIN;
>
> - sgs->group_load += weighted_cpuload(rq);
> + sgs->group_load += cpu_load(i);
> sgs->group_util += cpu_util(i);
> sgs->sum_nr_running += rq->cfs.h_nr_running;
>
> @@ -8385,8 +8382,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
> * find_busiest_group - Returns the busiest group within the sched_domain
> * if there is an imbalance.
> *
> - * Also calculates the amount of weighted load which should be moved
> - * to restore balance.
> + * Also calculates the amount of load which should be moved to restore balance.
> *
> * @env: The load balancing environment.
> *
> @@ -8558,11 +8554,11 @@ static struct rq *find_busiest_queue(struct lb_env *env,
> rq->nr_running == 1)
> continue;
>
> - wl = weighted_cpuload(rq);
> + wl = cpu_load(i);
>
> /*
> - * When comparing with imbalance, use weighted_cpuload()
> - * which is not scaled with the CPU capacity.
> + * When comparing with imbalance, use cpu_load() which is not
> + * scaled with the CPU capacity.
> */
>
> if (rq->nr_running == 1 && wl > env->imbalance &&
> @@ -8571,9 +8567,9 @@ static struct rq *find_busiest_queue(struct lb_env *env,
>
> /*
> * For the load comparisons with the other CPU's, consider
> - * the weighted_cpuload() scaled with the CPU capacity, so
> - * that the load can be moved away from the CPU that is
> - * potentially running at a lower capacity.
> + * the cpu_load() scaled with the CPU capacity, so that the
> + * load can be moved away from the CPU that is potentially
> + * running at a lower capacity.
> *
> * Thus we're looking for max(wl_i / capacity_i), crosswise
> * multiplication to rid ourselves of the division works out
> --
> 2.17.1
>

2019-05-27 16:26:25

by Rik van Riel

Subject: Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()

On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> This is done to align the per cpu (i.e. per rq) load with the util
> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> since there is no 'unweighted' load to distinguish it from.

I can see why you want to make cpu_util() and cpu_load()
have the same parameter, but ...

> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
> lb_env *env,
> if ((env->flags & LBF_NOHZ_STATS) &&
> update_nohz_stats(rq, false))
> env->flags |= LBF_NOHZ_AGAIN;
>
> - sgs->group_load += weighted_cpuload(rq);
> + sgs->group_load += cpu_load(i);
> sgs->group_util += cpu_util(i);
> sgs->sum_nr_running += rq->cfs.h_nr_running;

... now we end up dereferencing cpu_rq(cpu) 3 times.

I guess per-cpu variables are so cheap that we should
never notice, but I thought I'd ask anyway while looking
over these patches :)
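
Roughly what the loop body in update_sg_lb_stats() looks like with the
int-cpu helpers (a sketch, trimmed to the relevant lines):

for_each_cpu_and(i, sched_group_span(group), env->cpus) {
	struct rq *rq = cpu_rq(i);		/* per-cpu lookup #1 */

	...

	sgs->group_load += cpu_load(i);		/* lookup #2, inside cpu_load() */
	sgs->group_util += cpu_util(i);		/* lookup #3, inside cpu_util() */
	sgs->sum_nr_running += rq->cfs.h_nr_running;
}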

Thank you for removing a bunch of code that slowed down
my understanding of fair.c

--
All Rights Reversed.



2019-05-27 19:16:13

by Peter Zijlstra

Subject: Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()

On Mon, May 27, 2019 at 12:24:07PM -0400, Rik van Riel wrote:
> On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
> > This is done to align the per cpu (i.e. per rq) load with the util
> > counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
> > since there is no 'unweighted' load to distinguish it from.
>
> I can see why you want to make cpu_util() and cpu_load()
> have the same parameter, but ...
>
> > @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
> > lb_env *env,
> > if ((env->flags & LBF_NOHZ_STATS) &&
> > update_nohz_stats(rq, false))
> > env->flags |= LBF_NOHZ_AGAIN;
> >
> > - sgs->group_load += weighted_cpuload(rq);
> > + sgs->group_load += cpu_load(i);
> > sgs->group_util += cpu_util(i);
> > sgs->sum_nr_running += rq->cfs.h_nr_running;
>
> ... now we end up dereferencing cpu_rq(cpu) 3 times.
>
> I guess per-cpu variables are so cheap that we should
> never notice, but I thought I'd ask anyway while looking
> over these patches :)

I was going to say CSE should fix that, but then I noticed per_cpu
contains that hideous RELOC_HIDE() thing and I figure that might
confuse GCC enough to break that :/
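
For reference, the chain behind cpu_rq() that makes CSE hard here,
paraphrased and simplified from the sched/percpu headers of that period
(not the literal macro text):

DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);

#define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))

/* On SMP, per_cpu() boils down to roughly: */
#define per_cpu(var, cpu)	(*SHIFT_PERCPU_PTR(&(var), __per_cpu_offset[cpu]))

/*
 * ... and SHIFT_PERCPU_PTR() wraps the pointer arithmetic in RELOC_HIDE(),
 * an asm-based construct the optimizer cannot look through.  Two cpu_rq(i)
 * evaluations in the same function are therefore not provably the same
 * pointer, so common subexpression elimination may not merge them.
 */
#define SHIFT_PERCPU_PTR(ptr, off)	RELOC_HIDE((ptr), (off))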

2019-06-18 12:23:45

by Dietmar Eggemann

Subject: Re: [PATCH 7/7] sched/fair: Rename weighted_cpuload() to cpu_load()

On 5/27/19 9:13 PM, Peter Zijlstra wrote:
> On Mon, May 27, 2019 at 12:24:07PM -0400, Rik van Riel wrote:
>> On Mon, 2019-05-27 at 07:21 +0100, Dietmar Eggemann wrote:
>>> This is done to align the per cpu (i.e. per rq) load with the util
>>> counterpart (cpu_util(int cpu)). The term 'weighted' is not needed
>>> since there is no 'unweighted' load to distinguish it from.
>>
>> I can see why you want to make cpu_util() and cpu_load()
>> have the same parameter, but ...
>>
>>> @@ -7931,7 +7928,7 @@ static inline void update_sg_lb_stats(struct
>>> lb_env *env,
>>> if ((env->flags & LBF_NOHZ_STATS) &&
>>> update_nohz_stats(rq, false))
>>> env->flags |= LBF_NOHZ_AGAIN;
>>>
>>> - sgs->group_load += weighted_cpuload(rq);
>>> + sgs->group_load += cpu_load(i);
>>> sgs->group_util += cpu_util(i);
>>> sgs->sum_nr_running += rq->cfs.h_nr_running;
>>
>> ... now we end up dereferencing cpu_rq(cpu) 3 times.
>>
>> I guess per-cpu variables are so cheap that we should
>> never notice, but I thought I'd ask anyway while looking
>> over these patches :)
>
> I was going to say CSE should fix that, but then I noticed per_cpu
> contains that hideous RELOC_HIDE() thing and I figure that might
> confuse GCC enough to break that :/

--->8---

From 25fcbbd9f654f243a70e38b0d59d38eb3c3f9313 Mon Sep 17 00:00:00 2001
From: Dietmar Eggemann <[email protected]>
Date: Mon, 13 May 2019 11:50:32 +0100
Subject: [PATCH] sched/fair: Rename weighted_cpuload() to cpu_runnable_load()

The term 'weighted' is not needed since there is no 'unweighted' load.
Instead use the term 'runnable' to distinguish 'runnable' load
(avg.runnable_load_avg) used in load balance from load (avg.load_avg)
which is the sum of 'runnable' and 'blocked' load.

Signed-off-by: Dietmar Eggemann <[email protected]>
---

Related to the question whether replacing the 'struct rq *rq' parameter
with 'int cpu' (cpu_rq(cpu)) for cpu_runnable_load() has an influence
on the generated code:

RELOC_HIDE() (in per_cpu_ptr() -> SHIFT_PERCPU_PTR()) prevents the
compiler from generating the same code for both variants (e.g. in
update_sg_lb_stats()). When using 'int cpu', the addressing mode changes
from based addressing to based-indexed-scaled addressing, and the text
size of fair.o grows by 32 bytes.
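
A sketch of what each parameter choice implies at the access level; the
addressing-mode comments are illustrative, not taken from the actual
disassembly:

/*
 * 'struct rq *rq': the caller has already done the per-cpu lookup once;
 * the helper is a plain field access off the passed pointer
 * (base register + displacement).
 */
static unsigned long cpu_runnable_load(struct rq *rq)
{
	return cfs_rq_runnable_load_avg(&rq->cfs);
}

/*
 * 'int cpu' (the earlier cpu_load(int cpu) shape): the helper re-derives
 * the pointer itself via cpu_rq(cpu), i.e. effectively
 * &runqueues + __per_cpu_offset[cpu] on SMP.  Fetching
 * __per_cpu_offset[cpu] is the based-indexed-scaled access mentioned
 * above, repeated wherever only 'cpu' is at hand.
 */
static unsigned long cpu_load(int cpu)
{
	return cfs_rq_runnable_load_avg(&cpu_rq(cpu)->cfs);
}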

kernel/sched/fair.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3c11dcdedcbc..0436f8eba556 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1466,7 +1466,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_runnable_load(struct rq *rq);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1487,7 +1487,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);

- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_runnable_load(rq);
ns->compute_capacity += capacity_of(cpu);
}

@@ -5338,7 +5338,7 @@ static struct {

#endif /* CONFIG_NO_HZ_COMMON */

-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_runnable_load(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
@@ -5352,7 +5352,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_runnable_load(rq);

if (nr_running)
return load_avg / nr_running;
@@ -5450,7 +5450,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;

- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));

if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5468,7 +5468,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);

- prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5556,7 +5556,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;

for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
runnable_load += load;

avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5692,7 +5692,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7263,7 +7263,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;

/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * detach_tasks() -- tries to detach up to imbalance runnable load from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
@@ -7331,7 +7331,7 @@ static int detach_tasks(struct lb_env *env)

/*
* We only want to steal up to the prescribed amount of
- * weighted load.
+ * runnable load.
*/
if (env->imbalance <= 0)
break;
@@ -7941,7 +7941,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;

- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_runnable_load(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;

@@ -8395,7 +8395,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
+ * Also calculates the amount of runnable load which should be moved
* to restore balance.
*
* @env: The load balancing environment.
@@ -8514,7 +8514,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;

for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, wl;
+ unsigned long capacity, load;
enum fbq_type rt;

rq = cpu_rq(i);
@@ -8568,30 +8568,30 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;

- wl = weighted_cpuload(rq);
+ load = cpu_runnable_load(rq);

/*
- * When comparing with imbalance, use weighted_cpuload()
+ * When comparing with imbalance, use cpu_runnable_load()
* which is not scaled with the CPU capacity.
*/

- if (rq->nr_running == 1 && wl > env->imbalance &&
+ if (rq->nr_running == 1 && load > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
continue;

/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
+ * the cpu_runnable_load() scaled with the CPU capacity, so
* that the load can be moved away from the CPU that is
* potentially running at a lower capacity.
*
- * Thus we're looking for max(wl_i / capacity_i), crosswise
+ * Thus we're looking for max(load_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
- * to: wl_i * capacity_j > wl_j * capacity_i; where j is
+ * to: load_i * capacity_j > load_j * capacity_i; where j is
* our previous maximum.
*/
- if (wl * busiest_capacity > busiest_load * capacity) {
- busiest_load = wl;
+ if (load * busiest_capacity > busiest_load * capacity) {
+ busiest_load = load;
busiest_capacity = capacity;
busiest = rq;
}
--
2.17.1

Subject: [tip:sched/core] sched/fair: Rename weighted_cpuload() to cpu_runnable_load()

Commit-ID: a3df067974c52df936f548ed218120f623c4c560
Gitweb: https://git.kernel.org/tip/a3df067974c52df936f548ed218120f623c4c560
Author: Dietmar Eggemann <[email protected]>
AuthorDate: Tue, 18 Jun 2019 14:23:10 +0200
Committer: Ingo Molnar <[email protected]>
CommitDate: Mon, 24 Jun 2019 19:23:43 +0200

sched/fair: Rename weighted_cpuload() to cpu_runnable_load()

The term 'weighted' is not needed since there is no 'unweighted' load.
Instead use the term 'runnable' to distinguish 'runnable' load
(avg.runnable_load_avg) used in load balance from load (avg.load_avg)
which is the sum of 'runnable' and 'blocked' load.

Signed-off-by: Dietmar Eggemann <[email protected]>
Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Cc: Frederic Weisbecker <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Morten Rasmussen <[email protected]>
Cc: Patrick Bellasi <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Quentin Perret <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Valentin Schneider <[email protected]>
Cc: Vincent Guittot <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
kernel/sched/fair.c | 42 +++++++++++++++++++++---------------------
1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 11ec52709323..3bdcd3c718bc 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -1485,7 +1485,7 @@ bool should_numa_migrate_memory(struct task_struct *p, struct page * page,
group_faults_cpu(ng, src_nid) * group_faults(p, dst_nid) * 4;
}

-static unsigned long weighted_cpuload(struct rq *rq);
+static unsigned long cpu_runnable_load(struct rq *rq);

/* Cached statistics for all CPUs within a node */
struct numa_stats {
@@ -1506,7 +1506,7 @@ static void update_numa_stats(struct numa_stats *ns, int nid)
for_each_cpu(cpu, cpumask_of_node(nid)) {
struct rq *rq = cpu_rq(cpu);

- ns->load += weighted_cpuload(rq);
+ ns->load += cpu_runnable_load(rq);
ns->compute_capacity += capacity_of(cpu);
}

@@ -5366,7 +5366,7 @@ static struct {

#endif /* CONFIG_NO_HZ_COMMON */

-static unsigned long weighted_cpuload(struct rq *rq)
+static unsigned long cpu_runnable_load(struct rq *rq)
{
return cfs_rq_runnable_load_avg(&rq->cfs);
}
@@ -5380,7 +5380,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
{
struct rq *rq = cpu_rq(cpu);
unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
- unsigned long load_avg = weighted_cpuload(rq);
+ unsigned long load_avg = cpu_runnable_load(rq);

if (nr_running)
return load_avg / nr_running;
@@ -5478,7 +5478,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
s64 this_eff_load, prev_eff_load;
unsigned long task_load;

- this_eff_load = weighted_cpuload(cpu_rq(this_cpu));
+ this_eff_load = cpu_runnable_load(cpu_rq(this_cpu));

if (sync) {
unsigned long current_load = task_h_load(current);
@@ -5496,7 +5496,7 @@ wake_affine_weight(struct sched_domain *sd, struct task_struct *p,
this_eff_load *= 100;
this_eff_load *= capacity_of(prev_cpu);

- prev_eff_load = weighted_cpuload(cpu_rq(prev_cpu));
+ prev_eff_load = cpu_runnable_load(cpu_rq(prev_cpu));
prev_eff_load -= task_load;
if (sched_feat(WA_BIAS))
prev_eff_load *= 100 + (sd->imbalance_pct - 100) / 2;
@@ -5584,7 +5584,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
max_spare_cap = 0;

for_each_cpu(i, sched_group_span(group)) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
runnable_load += load;

avg_load += cfs_rq_load_avg(&cpu_rq(i)->cfs);
@@ -5720,7 +5720,7 @@ find_idlest_group_cpu(struct sched_group *group, struct task_struct *p, int this
shallowest_idle_cpu = i;
}
} else if (shallowest_idle_cpu == -1) {
- load = weighted_cpuload(cpu_rq(i));
+ load = cpu_runnable_load(cpu_rq(i));
if (load < min_load) {
min_load = load;
least_loaded_cpu = i;
@@ -7291,7 +7291,7 @@ static struct task_struct *detach_one_task(struct lb_env *env)
static const unsigned int sched_nr_migrate_break = 32;

/*
- * detach_tasks() -- tries to detach up to imbalance weighted load from
+ * detach_tasks() -- tries to detach up to imbalance runnable load from
* busiest_rq, as part of a balancing operation within domain "sd".
*
* Returns number of detached tasks if successful and 0 otherwise.
@@ -7359,7 +7359,7 @@ static int detach_tasks(struct lb_env *env)

/*
* We only want to steal up to the prescribed amount of
- * weighted load.
+ * runnable load.
*/
if (env->imbalance <= 0)
break;
@@ -7969,7 +7969,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
if ((env->flags & LBF_NOHZ_STATS) && update_nohz_stats(rq, false))
env->flags |= LBF_NOHZ_AGAIN;

- sgs->group_load += weighted_cpuload(rq);
+ sgs->group_load += cpu_runnable_load(rq);
sgs->group_util += cpu_util(i);
sgs->sum_nr_running += rq->cfs.h_nr_running;

@@ -8427,7 +8427,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
* find_busiest_group - Returns the busiest group within the sched_domain
* if there is an imbalance.
*
- * Also calculates the amount of weighted load which should be moved
+ * Also calculates the amount of runnable load which should be moved
* to restore balance.
*
* @env: The load balancing environment.
@@ -8546,7 +8546,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
int i;

for_each_cpu_and(i, sched_group_span(group), env->cpus) {
- unsigned long capacity, wl;
+ unsigned long capacity, load;
enum fbq_type rt;

rq = cpu_rq(i);
@@ -8600,30 +8600,30 @@ static struct rq *find_busiest_queue(struct lb_env *env,
rq->nr_running == 1)
continue;

- wl = weighted_cpuload(rq);
+ load = cpu_runnable_load(rq);

/*
- * When comparing with imbalance, use weighted_cpuload()
+ * When comparing with imbalance, use cpu_runnable_load()
* which is not scaled with the CPU capacity.
*/

- if (rq->nr_running == 1 && wl > env->imbalance &&
+ if (rq->nr_running == 1 && load > env->imbalance &&
!check_cpu_capacity(rq, env->sd))
continue;

/*
* For the load comparisons with the other CPU's, consider
- * the weighted_cpuload() scaled with the CPU capacity, so
+ * the cpu_runnable_load() scaled with the CPU capacity, so
* that the load can be moved away from the CPU that is
* potentially running at a lower capacity.
*
- * Thus we're looking for max(wl_i / capacity_i), crosswise
+ * Thus we're looking for max(load_i / capacity_i), crosswise
* multiplication to rid ourselves of the division works out
- * to: wl_i * capacity_j > wl_j * capacity_i; where j is
+ * to: load_i * capacity_j > load_j * capacity_i; where j is
* our previous maximum.
*/
- if (wl * busiest_capacity > busiest_load * capacity) {
- busiest_load = wl;
+ if (load * busiest_capacity > busiest_load * capacity) {
+ busiest_load = load;
busiest_capacity = capacity;
busiest = rq;
}