Since sched_slice() is called at high frequency, even a small
optimization should be worthwhile.
Signed-off-by: Peng Liu <[email protected]>
---
kernel/sched/fair.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1054d2cf6aaa..6ae2a507aac0 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -694,19 +694,16 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load;
 		struct load_weight lw;
 
 		cfs_rq = cfs_rq_of(se);
-		load = &cfs_rq->load;
+		lw = cfs_rq->load;
 
-		if (unlikely(!se->on_rq)) {
+		if (unlikely(!se->on_rq))
 			lw = cfs_rq->load;
 
-			update_load_add(&lw, se->load.weight);
-			load = &lw;
-		}
-		slice = __calc_delta(slice, se->load.weight, load);
+		update_load_add(&lw, se->load.weight);
+		slice = __calc_delta(slice, se->load.weight, &lw);
 	}
 	return slice;
 }
--
2.17.1
On Fri, Aug 16, 2019 at 10:12:02PM +0800, Peng Liu wrote:
> Since sched_slice() is called at high frequency, even a small
> optimization should be worthwhile.
An actual Changelog would also make sense; but alas.
On Tue, Aug 20, 2019 at 03:50:55PM +0200, Peter Zijlstra wrote:
> On Fri, Aug 16, 2019 at 10:12:02PM +0800, Peng Liu wrote:
> > Since sched_slice() is called at high frequency, even a small
> > optimization should be worthwhile.
>
> An actual Changelog would also make sense; but alas.
Thanks for your time!
About the changelog: I admit the change makes little difference overall.
The function is so *short* that I don't expect any visible improvement
from the kernel test tools either. But as you can see, the redundant
intermediate operation really is there, and there is no reason to keep
it - the point is to make the code clearer and easier to understand.
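For reference, this is the loop body in question, reconstructed from
the diff context above with comments added; __calc_delta(slice, w, lw)
effectively computes slice * w / lw->weight:

	for_each_sched_entity(se) {
		struct load_weight *load;
		struct load_weight lw;

		cfs_rq = cfs_rq_of(se);
		load = &cfs_rq->load;	/* normally use the queue's weight */

		if (unlikely(!se->on_rq)) {
			/*
			 * se is not enqueued: take a local copy of the
			 * queue's weight, add se's own weight to it, and
			 * redirect the pointer to that copy.
			 */
			lw = cfs_rq->load;
			update_load_add(&lw, se->load.weight);
			load = &lw;
		}
		/* slice = slice * se->load.weight / load->weight */
		slice = __calc_delta(slice, se->load.weight, load);
	}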
Best Regards,
Liu
On 08/16/19 22:12, Peng Liu wrote:
> Since sched_slice() is called at high frequency, even a small
> optimization should be worthwhile.
>
> Signed-off-by: Peng Liu <[email protected]>
> ---
> kernel/sched/fair.c | 11 ++++-------
> 1 file changed, 4 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 1054d2cf6aaa..6ae2a507aac0 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -694,19 +694,16 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
>  	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
>  
>  	for_each_sched_entity(se) {
> -		struct load_weight *load;
>  		struct load_weight lw;
>  
>  		cfs_rq = cfs_rq_of(se);
> -		load = &cfs_rq->load;
> +		lw = cfs_rq->load;
>  
> -		if (unlikely(!se->on_rq)) {
> +		if (unlikely(!se->on_rq))
>  			lw = cfs_rq->load;
>  
> -			update_load_add(&lw, se->load.weight);
> -			load = &lw;
> -		}
> -		slice = __calc_delta(slice, se->load.weight, load);
> +		update_load_add(&lw, se->load.weight);
> +		slice = __calc_delta(slice, se->load.weight, &lw);
Unless I misread the diff, you changed the behavior.
update_load_add() is only called if (unlikely(!se->on_rq)), but with your
change it is called unconditionally. And lw is set twice.
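Spelled out, the loop body after your hunk would read (reconstructed
from the diff):

	cfs_rq = cfs_rq_of(se);
	lw = cfs_rq->load;			/* first assignment */

	if (unlikely(!se->on_rq))
		lw = cfs_rq->load;		/* second, redundant assignment */

	update_load_add(&lw, se->load.weight);	/* now runs unconditionally */
	slice = __calc_delta(slice, se->load.weight, &lw);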
I think what you intended is this?
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index bc9cfeaac8bd..6bbe17d5943f 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -694,19 +694,15 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
 	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
 
 	for_each_sched_entity(se) {
-		struct load_weight *load;
 		struct load_weight lw;
 
 		cfs_rq = cfs_rq_of(se);
-		load = &cfs_rq->load;
-
-		if (unlikely(!se->on_rq)) {
-			lw = cfs_rq->load;
+		lw = cfs_rq->load;
 
+		if (unlikely(!se->on_rq))
 			update_load_add(&lw, se->load.weight);
-			load = &lw;
-		}
-		slice = __calc_delta(slice, se->load.weight, load);
+
+		slice = __calc_delta(slice, se->load.weight, &lw);
 	}
 	return slice;
 }
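With that, lw is copied once, update_load_add() stays under the
unlikely branch, and the resulting loop body reads:

	cfs_rq = cfs_rq_of(se);
	lw = cfs_rq->load;

	if (unlikely(!se->on_rq))
		update_load_add(&lw, se->load.weight);

	slice = __calc_delta(slice, se->load.weight, &lw);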
Cheers
--
Qais Yousef
On Wed, Aug 21, 2019 at 04:15:24PM +0100, Qais Yousef wrote:
> On 08/16/19 22:12, Peng Liu wrote:
> > Since sched_slice() is called at high frequency, even a small
> > optimization should be worthwhile.
> >
> > Signed-off-by: Peng Liu <[email protected]>
> > ---
> > kernel/sched/fair.c | 11 ++++-------
> > 1 file changed, 4 insertions(+), 7 deletions(-)
> >
> > diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> > index 1054d2cf6aaa..6ae2a507aac0 100644
> > --- a/kernel/sched/fair.c
> > +++ b/kernel/sched/fair.c
> > @@ -694,19 +694,16 @@ static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
> >  	u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
> >  
> >  	for_each_sched_entity(se) {
> > -		struct load_weight *load;
> >  		struct load_weight lw;
> >  
> >  		cfs_rq = cfs_rq_of(se);
> > -		load = &cfs_rq->load;
> > +		lw = cfs_rq->load;
> >  
> > -		if (unlikely(!se->on_rq)) {
> > +		if (unlikely(!se->on_rq))
> >  			lw = cfs_rq->load;
> >  
> > -			update_load_add(&lw, se->load.weight);
> > -			load = &lw;
> > -		}
> > -		slice = __calc_delta(slice, se->load.weight, load);
> > +		update_load_add(&lw, se->load.weight);
> > +		slice = __calc_delta(slice, se->load.weight, &lw);
>
> Unless I misread the diff, you changed the behavior.
>
> update_load_add() is only called if (unlikely(!se->on_rq)), but with your
> change it is called unconditionally. And lw is set twice.
>
> I think what you intended is this?
So I'd really rather hold off on this; Rik is poking at getting rid of
all of this hierarchical crud in one go.