2023-11-02 14:21:28

by Yiwei Lin

Subject: [PATCH v2 0/1] sched/fair: Track current se's EEVDF parameters

Changelog:
v1 -> v2:
Consider the contribution to avg_vruntime for 'curr'

kernel/sched/fair.c | 39 +++++++++++----------------------------
1 file changed, 11 insertions(+), 28 deletions(-)

--
2.34.1


2023-11-02 14:21:50

by Yiwei Lin

Subject: [PATCH v2 1/1] sched/fair: Track current se's EEVDF parameters

After dequeuing the currently-picked scheduling entity with
`__dequeue_entity`, its contribution to the EEVDF parameters
cfs_rq->avg_vruntime and cfs_rq->avg_load is also removed. Because
these contributions should in fact still be considered by the EEVDF
algorithm, curr was treated as a special case and its contributions
were inserted back on every request for cfs_rq->avg_vruntime and
cfs_rq->avg_load.

Functions like `entity_eligible`, which is called inside a loop, may
therefore recalculate these statistics repeatedly, requiring more
effort. Instead, we can simply avoid removing these statistics from
cfs_rq->avg_vruntime and cfs_rq->avg_load in the first place.
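
One such loop is the rbtree walk in pick_eevdf(), which calls
entity_eligible() once per visited node; an abbreviated sketch for
illustration only, not part of this patch:

	while (node) {
		struct sched_entity *se = __node_2_se(node);

		/*
		 * Before this patch, each of these calls re-added
		 * curr's contribution to avg_vruntime/avg_load; now
		 * they read cfs_rq->avg_vruntime and cfs_rq->avg_load
		 * as-is.
		 */
		if (!entity_eligible(cfs_rq, se)) {
			node = node->rb_left;
			continue;
		}
		...
	}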

Signed-off-by: Yiwei Lin <[email protected]>
---
kernel/sched/fair.c | 39 +++++++++++----------------------------
1 file changed, 11 insertions(+), 28 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 876798824..a10a73603 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -655,17 +655,9 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
*/
u64 avg_vruntime(struct cfs_rq *cfs_rq)
{
- struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;

- if (curr && curr->on_rq) {
- unsigned long weight = scale_load_down(curr->load.weight);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
- }
-
if (load) {
/* sign flips effective floor / ceil */
if (avg < 0)
@@ -722,17 +714,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
*/
int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
- struct sched_entity *curr = cfs_rq->curr;
s64 avg = cfs_rq->avg_vruntime;
long load = cfs_rq->avg_load;

- if (curr && curr->on_rq) {
- unsigned long weight = scale_load_down(curr->load.weight);
-
- avg += entity_key(cfs_rq, curr) * weight;
- load += weight;
- }
-
return avg >= entity_key(cfs_rq, se) * load;
}

@@ -821,11 +805,12 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
__entity_less, &min_deadline_cb);
}

-static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool on_rq)
{
rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
&min_deadline_cb);
- avg_vruntime_sub(cfs_rq, se);
+ if (!on_rq)
+ avg_vruntime_sub(cfs_rq, se);
}

struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
@@ -1137,6 +1122,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
struct sched_entity *curr = cfs_rq->curr;
u64 now = rq_clock_task(rq_of(cfs_rq));
u64 delta_exec;
+ u64 delta_fair;

if (unlikely(!curr))
return;
@@ -1158,7 +1144,9 @@ static void update_curr(struct cfs_rq *cfs_rq)
curr->sum_exec_runtime += delta_exec;
schedstat_add(cfs_rq->exec_clock, delta_exec);

- curr->vruntime += calc_delta_fair(delta_exec, curr);
+ delta_fair = calc_delta_fair(delta_exec, curr);
+ curr->vruntime += delta_fair;
+ cfs_rq->avg_vruntime += delta_fair * scale_load_down(curr->load.weight);
update_deadline(cfs_rq, curr);
update_min_vruntime(cfs_rq);

@@ -3675,8 +3663,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
/* commit outstanding execution time */
if (cfs_rq->curr == se)
update_curr(cfs_rq);
- else
- avg_vruntime_sub(cfs_rq, se);
+ avg_vruntime_sub(cfs_rq, se);
update_load_sub(&cfs_rq->load, se->load.weight);
}
dequeue_load_avg(cfs_rq, se);
@@ -3712,8 +3699,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
enqueue_load_avg(cfs_rq, se);
if (se->on_rq) {
update_load_add(&cfs_rq->load, se->load.weight);
- if (cfs_rq->curr != se)
- avg_vruntime_add(cfs_rq, se);
+ avg_vruntime_add(cfs_rq, se);
}
}

@@ -5023,7 +5009,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* EEVDF: placement strategy #1 / #2
*/
if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
- struct sched_entity *curr = cfs_rq->curr;
unsigned long load;

lag = se->vlag;
@@ -5081,8 +5066,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* vl_i = (W + w_i)*vl'_i / W
*/
load = cfs_rq->avg_load;
- if (curr && curr->on_rq)
- load += scale_load_down(curr->load.weight);

lag *= load + scale_load_down(se->load.weight);
if (WARN_ON_ONCE(!load))
@@ -5229,7 +5212,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)

update_entity_lag(cfs_rq, se);
if (se != cfs_rq->curr)
- __dequeue_entity(cfs_rq, se);
+ __dequeue_entity(cfs_rq, se, 0);
se->on_rq = 0;
account_entity_dequeue(cfs_rq, se);

@@ -5264,7 +5247,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
* runqueue.
*/
update_stats_wait_end_fair(cfs_rq, se);
- __dequeue_entity(cfs_rq, se);
+ __dequeue_entity(cfs_rq, se, 1);
update_load_avg(cfs_rq, se, UPDATE_TG);
/*
* HACK, stash a copy of deadline at the point of pick in vlag,
--
2.34.1

2023-11-03 06:46:52

by Abel Wu

Subject: Re: [PATCH v2 1/1] sched/fair: Track current se's EEVDF parameters

On 11/2/23 10:20 PM, Yiwei Lin Wrote:
> After dequeuing the currently-picked scheduling entity with
> `__dequeue_entity`, its contribution to the EEVDF parameters
> cfs_rq->avg_vruntime and cfs_rq->avg_load is also removed. Because
> these contributions should in fact still be considered by the EEVDF
> algorithm, curr was treated as a special case and its contributions
> were inserted back on every request for cfs_rq->avg_vruntime and
> cfs_rq->avg_load.
>
> Functions like `entity_eligible`, which is called inside a loop, may
> therefore recalculate these statistics repeatedly, requiring more
> effort. Instead, we can simply avoid removing these statistics from
> cfs_rq->avg_vruntime and cfs_rq->avg_load in the first place.
>
> Signed-off-by: Yiwei Lin <[email protected]>
> ---
> kernel/sched/fair.c | 39 +++++++++++----------------------------
> 1 file changed, 11 insertions(+), 28 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 876798824..a10a73603 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -655,17 +655,9 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
> */
> u64 avg_vruntime(struct cfs_rq *cfs_rq)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> if (load) {
> /* sign flips effective floor / ceil */
> if (avg < 0)
> @@ -722,17 +714,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
> */
> int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
> {
> - struct sched_entity *curr = cfs_rq->curr;
> s64 avg = cfs_rq->avg_vruntime;
> long load = cfs_rq->avg_load;
>
> - if (curr && curr->on_rq) {
> - unsigned long weight = scale_load_down(curr->load.weight);
> -
> - avg += entity_key(cfs_rq, curr) * weight;
> - load += weight;
> - }
> -
> return avg >= entity_key(cfs_rq, se) * load;
> }
>
> @@ -821,11 +805,12 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> __entity_less, &min_deadline_cb);
> }
>
> -static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool on_rq)

Please don't add such complexity. Since avg_vruntime includes the whole
cfs_rq after this change, you can simply avg_vruntime_add() when doing
enqueue_entity() and avg_vruntime_sub() in dequeue_entity().
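
Roughly, an untested sketch of what I mean (the elisions stand for
the unchanged parts of each function; avg_vruntime_add() would
likewise move out of __enqueue_entity()):

static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* rbtree bookkeeping only, no avg_vruntime accounting */
	rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
				  &min_deadline_cb);
}

static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	...
	avg_vruntime_add(cfs_rq, se);		/* everything, curr included */
	if (se != cfs_rq->curr)
		__enqueue_entity(cfs_rq, se);
	se->on_rq = 1;
	...
}

static void
dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
	...
	if (se != cfs_rq->curr)
		__dequeue_entity(cfs_rq, se);
	avg_vruntime_sub(cfs_rq, se);		/* everything, curr included */
	se->on_rq = 0;
	...
}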

> {
> rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
> &min_deadline_cb);
> - avg_vruntime_sub(cfs_rq, se);
> + if (!on_rq)
> + avg_vruntime_sub(cfs_rq, se);
> }
>
> struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
> @@ -1137,6 +1122,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
> struct sched_entity *curr = cfs_rq->curr;
> u64 now = rq_clock_task(rq_of(cfs_rq));
> u64 delta_exec;
> + u64 delta_fair;
>
> if (unlikely(!curr))
> return;
> @@ -1158,7 +1144,9 @@ static void update_curr(struct cfs_rq *cfs_rq)
> curr->sum_exec_runtime += delta_exec;
> schedstat_add(cfs_rq->exec_clock, delta_exec);
>
> - curr->vruntime += calc_delta_fair(delta_exec, curr);
> + delta_fair = calc_delta_fair(delta_exec, curr);
> + curr->vruntime += delta_fair;
> + cfs_rq->avg_vruntime += delta_fair * scale_load_down(curr->load.weight);

What if curr->load.weight changes in this time slice?
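
(IIUC, the invariant being maintained is, in the notation of the
comment above avg_vruntime():

	cfs_rq->avg_vruntime = \Sum (v_i - v0) * w_i
	cfs_rq->avg_load     = \Sum w_i

so the increment above is only consistent as long as curr's key was
accounted with the same weight that is used here.)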

> update_deadline(cfs_rq, curr);
> update_min_vruntime(cfs_rq);
>
> @@ -3675,8 +3663,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> /* commit outstanding execution time */
> if (cfs_rq->curr == se)
> update_curr(cfs_rq);
> - else
> - avg_vruntime_sub(cfs_rq, se);
> + avg_vruntime_sub(cfs_rq, se);
> update_load_sub(&cfs_rq->load, se->load.weight);
> }
> dequeue_load_avg(cfs_rq, se);
> @@ -3712,8 +3699,7 @@ static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
> enqueue_load_avg(cfs_rq, se);
> if (se->on_rq) {
> update_load_add(&cfs_rq->load, se->load.weight);
> - if (cfs_rq->curr != se)
> - avg_vruntime_add(cfs_rq, se);
> + avg_vruntime_add(cfs_rq, se);
> }
> }
>
> @@ -5023,7 +5009,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * EEVDF: placement strategy #1 / #2
> */
> if (sched_feat(PLACE_LAG) && cfs_rq->nr_running) {
> - struct sched_entity *curr = cfs_rq->curr;
> unsigned long load;
>
> lag = se->vlag;
> @@ -5081,8 +5066,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> * vl_i = (W + w_i)*vl'_i / W
> */
> load = cfs_rq->avg_load;
> - if (curr && curr->on_rq)
> - load += scale_load_down(curr->load.weight);
>
> lag *= load + scale_load_down(se->load.weight);
> if (WARN_ON_ONCE(!load))
> @@ -5229,7 +5212,7 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
>
> update_entity_lag(cfs_rq, se);
> if (se != cfs_rq->curr)
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 0);
> se->on_rq = 0;
> account_entity_dequeue(cfs_rq, se);
>
> @@ -5264,7 +5247,7 @@ set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
> * runqueue.
> */
> update_stats_wait_end_fair(cfs_rq, se);
> - __dequeue_entity(cfs_rq, se);
> + __dequeue_entity(cfs_rq, se, 1);
> update_load_avg(cfs_rq, se, UPDATE_TG);
> /*
> * HACK, stash a copy of deadline at the point of pick in vlag,

2023-11-03 14:02:30

by Yiwei Lin

Subject: Re: [PATCH v2 1/1] sched/fair: Track current se's EEVDF parameters


On 11/3/23 14:45, Abel Wu wrote:
> On 11/2/23 10:20 PM, Yiwei Lin Wrote:
>> After dequeuing the currently-picked scheduling entity with
>> `__dequeue_entity`, its contribution to the EEVDF parameters
>> cfs_rq->avg_vruntime and cfs_rq->avg_load is also removed. Because
>> these contributions should in fact still be considered by the EEVDF
>> algorithm, curr was treated as a special case and its contributions
>> were inserted back on every request for cfs_rq->avg_vruntime and
>> cfs_rq->avg_load.
>>
>> Functions like `entity_eligible`, which is called inside a loop, may
>> therefore recalculate these statistics repeatedly, requiring more
>> effort. Instead, we can simply avoid removing these statistics from
>> cfs_rq->avg_vruntime and cfs_rq->avg_load in the first place.
>>
>> Signed-off-by: Yiwei Lin <[email protected]>
>> ---
>> kernel/sched/fair.c | 39 +++++++++++----------------------------
>> 1 file changed, 11 insertions(+), 28 deletions(-)
>>
>> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
>> index 876798824..a10a73603 100644
>> --- a/kernel/sched/fair.c
>> +++ b/kernel/sched/fair.c
>> @@ -655,17 +655,9 @@ void avg_vruntime_update(struct cfs_rq *cfs_rq, s64 delta)
>> */
>> u64 avg_vruntime(struct cfs_rq *cfs_rq)
>> {
>> - struct sched_entity *curr = cfs_rq->curr;
>> s64 avg = cfs_rq->avg_vruntime;
>> long load = cfs_rq->avg_load;
>>
>> - if (curr && curr->on_rq) {
>> - unsigned long weight = scale_load_down(curr->load.weight);
>> -
>> - avg += entity_key(cfs_rq, curr) * weight;
>> - load += weight;
>> - }
>> -
>> if (load) {
>> /* sign flips effective floor / ceil */
>> if (avg < 0)
>> @@ -722,17 +714,9 @@ static void update_entity_lag(struct cfs_rq *cfs_rq, struct sched_entity *se)
>> */
>> int entity_eligible(struct cfs_rq *cfs_rq, struct sched_entity *se)
>> {
>> - struct sched_entity *curr = cfs_rq->curr;
>> s64 avg = cfs_rq->avg_vruntime;
>> long load = cfs_rq->avg_load;
>>
>> - if (curr && curr->on_rq) {
>> - unsigned long weight = scale_load_down(curr->load.weight);
>> -
>> - avg += entity_key(cfs_rq, curr) * weight;
>> - load += weight;
>> - }
>> -
>> return avg >= entity_key(cfs_rq, se) * load;
>> }
>>
>> @@ -821,11 +805,12 @@ static void __enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
>> __entity_less, &min_deadline_cb);
>> }
>>
>> -static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
>> +static void __dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, bool on_rq)
>
> Please don't add such complexity. Since avg_vruntime includes the whole
> cfs_rq after this change, you can simply avg_vruntime_add() when doing
> enqueue_entity() and avg_vruntime_sub() in dequeue_entity().
>
Right. I should have considered that we also use __enqueue_entity() to
put back the 'curr'. Doing avg_vruntime_add() in enqueue_entity() and
avg_vruntime_sub() in dequeue_entity(), as you mentioned, would not
only simplify the implementation but also address this issue.
I'll fix it.

>> {
>> rb_erase_augmented_cached(&se->run_node, &cfs_rq->tasks_timeline,
>> &min_deadline_cb);
>> - avg_vruntime_sub(cfs_rq, se);
>> + if (!on_rq)
>> + avg_vruntime_sub(cfs_rq, se);
>> }
>>
>> struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq)
>> @@ -1137,6 +1122,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
>> struct sched_entity *curr = cfs_rq->curr;
>> u64 now = rq_clock_task(rq_of(cfs_rq));
>> u64 delta_exec;
>> + u64 delta_fair;
>>
>> if (unlikely(!curr))
>> return;
>> @@ -1158,7 +1144,9 @@ static void update_curr(struct cfs_rq *cfs_rq)
>> curr->sum_exec_runtime += delta_exec;
>> schedstat_add(cfs_rq->exec_clock, delta_exec);
>>
>> - curr->vruntime += calc_delta_fair(delta_exec, curr);
>> + delta_fair = calc_delta_fair(delta_exec, curr);
>> + curr->vruntime += delta_fair;
>> + cfs_rq->avg_vruntime += delta_fair * scale_load_down(curr->load.weight);
>
> What if curr->load.weight changes in this time slice?

The change to reweight_entity() no longer separates curr from the
other entities. If curr->load.weight is changed, reweight_entity()
will:
1. update_curr() to account the latest vruntime contribution with the
old weight.
2. avg_vruntime_sub() to remove curr's contribution with the old
weight.
3. avg_vruntime_add() to insert curr's contribution back with the new
weight.
Although steps 1 and 2 could be merged when reweighting the curr
entity, it seems fine to keep reweight_entity() general; see the
sketch below.
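
Roughly, the patched reweight_entity() then reads (abbreviated
sketch; the elisions stand for the unchanged parts):

static void reweight_entity(struct cfs_rq *cfs_rq, struct sched_entity *se,
			    unsigned long weight)
{
	if (se->on_rq) {
		/* 1. commit outstanding execution time at the old weight */
		if (cfs_rq->curr == se)
			update_curr(cfs_rq);
		/* 2. avg_vruntime -= entity_key(cfs_rq, se) * old weight */
		avg_vruntime_sub(cfs_rq, se);
		update_load_sub(&cfs_rq->load, se->load.weight);
	}
	dequeue_load_avg(cfs_rq, se);

	update_load_set(&se->load, weight);

	...

	enqueue_load_avg(cfs_rq, se);
	if (se->on_rq) {
		update_load_add(&cfs_rq->load, se->load.weight);
		/* 3. avg_vruntime += entity_key(cfs_rq, se) * new weight */
		avg_vruntime_add(cfs_rq, se);
	}
}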