After reading the implementation of PELT, I have refactored it as
below.
Tang Yizhou (2):
sched/pelt: Remove redundant variable in __accumulate_pelt_segments
sched/pelt: Change the type of parameter running to bool
kernel/sched/pelt.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
--
2.17.1
Parameter 'running' in function ___update_load_sum() and
accumulate_sum() describes whether an se is running or not, so change
its type to bool to make the code more readable.
Signed-off-by: Tang Yizhou <[email protected]>
---
kernel/sched/pelt.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index 3584df2a0b8e..2010b3bd6e49 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -104,7 +104,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
*/
static __always_inline u32
accumulate_sum(u64 delta, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
+ unsigned long load, unsigned long runnable, bool running)
{
u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
u64 periods;
@@ -182,7 +182,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
*/
static __always_inline int
___update_load_sum(u64 now, struct sched_avg *sa,
- unsigned long load, unsigned long runnable, int running)
+ unsigned long load, unsigned long runnable, bool running)
{
u64 delta;
--
2.17.1
As the comment of function accumulate_sum() describes the equation clearly,
there is no need to use a redundant variable c3. Let's add a comment for
d3 directly instead.
Signed-off-by: Tang Yizhou <[email protected]>
---
kernel/sched/pelt.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
index a554e3bbab2b..3584df2a0b8e 100644
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -60,7 +60,7 @@ static u64 decay_load(u64 val, u64 n)
static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
{
- u32 c1, c2, c3 = d3; /* y^0 == 1 */
+ u32 c1, c2;
/*
* c1 = d1 y^p
@@ -78,7 +78,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
*/
c2 = LOAD_AVG_MAX - decay_load(LOAD_AVG_MAX, periods) - 1024;
- return c1 + c2 + c3;
+ return c1 + c2 + d3; /* d3: y^0 == 1 */
}
/*
--
2.17.1
On Thu, Nov 25, 2021 at 11:00:18AM +0800, Tang Yizhou wrote:
> As the comment of function accumulate_sum() describes the equation clearly,
> There is no need to use a redundant variable c3. Let's make a comment for
> d3 directly.
Why bother? Surely the compiler is clever enough to figure out the same
and avoid instantiating the variable. All you've done is made the code
less obvious.
On Thu, Nov 25, 2021 at 11:00:19AM +0800, Tang Yizhou wrote:
> Parameter 'running' in function ___update_load_sum() and
> accumulate_sum() describes whether an se is running or not, so change
> the type of it to bool to make the code more readable.
>
> Signed-off-by: Tang Yizhou <[email protected]>
> ---
> kernel/sched/pelt.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
> diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
> index 3584df2a0b8e..2010b3bd6e49 100644
> --- a/kernel/sched/pelt.c
> +++ b/kernel/sched/pelt.c
> @@ -104,7 +104,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
> */
> static __always_inline u32
> accumulate_sum(u64 delta, struct sched_avg *sa,
> - unsigned long load, unsigned long runnable, int running)
> + unsigned long load, unsigned long runnable, bool running)
> {
> u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
> u64 periods;
> @@ -182,7 +182,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
> */
> static __always_inline int
> ___update_load_sum(u64 now, struct sched_avg *sa,
> - unsigned long load, unsigned long runnable, int running)
> + unsigned long load, unsigned long runnable, bool running)
> {
> u64 delta;
And this function has:
runnable = running = 0;
and then people complain about assigning 0 to _Bool, and then we get
idiocy like:
runnable = running = false;
Please...
On 2021/11/25 17:55, Peter Zijlstra wrote:
> On Thu, Nov 25, 2021 at 11:00:19AM +0800, Tang Yizhou wrote:
>> Parameter 'running' in function ___update_load_sum() and
>> accumulate_sum() describes whether an se is running or not, so change
>> the type of it to bool to make the code more readable.
>>
>> Signed-off-by: Tang Yizhou <[email protected]>
>> ---
>> kernel/sched/pelt.c | 4 ++--
>> 1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
>> index 3584df2a0b8e..2010b3bd6e49 100644
>> --- a/kernel/sched/pelt.c
>> +++ b/kernel/sched/pelt.c
>> @@ -104,7 +104,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
>> */
>> static __always_inline u32
>> accumulate_sum(u64 delta, struct sched_avg *sa,
>> - unsigned long load, unsigned long runnable, int running)
>> + unsigned long load, unsigned long runnable, bool running)
>> {
>> u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
>> u64 periods;
>> @@ -182,7 +182,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
>> */
>> static __always_inline int
>> ___update_load_sum(u64 now, struct sched_avg *sa,
>> - unsigned long load, unsigned long runnable, int running)
>> + unsigned long load, unsigned long runnable, bool running)
>> {
>> u64 delta;
>
> And this function has:
>
> runnable = running = 0;
>
> and then people complain about assigning 0 to _Bool, and then we get
> idiocy like:
>
> runnable = running = false;
How about:
running = runnable = 0;
>
> Please...
> .
>
Thanks,
Tang
On 2021/11/25 17:55, Peter Zijlstra wrote:
> On Thu, Nov 25, 2021 at 11:00:19AM +0800, Tang Yizhou wrote:
>> Parameter 'running' in function ___update_load_sum() and
>> accumulate_sum() describes whether an se is running or not, so change
>> the type of it to bool to make the code more readable.
>>
>> Signed-off-by: Tang Yizhou <[email protected]>
>> ---
>> kernel/sched/pelt.c | 4 ++--
>> 1 file changed, 2 insertions(+), 2 deletions(-)
>>
>> diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
>> index 3584df2a0b8e..2010b3bd6e49 100644
>> --- a/kernel/sched/pelt.c
>> +++ b/kernel/sched/pelt.c
>> @@ -104,7 +104,7 @@ static u32 __accumulate_pelt_segments(u64 periods, u32 d1, u32 d3)
>> */
>> static __always_inline u32
>> accumulate_sum(u64 delta, struct sched_avg *sa,
>> - unsigned long load, unsigned long runnable, int running)
>> + unsigned long load, unsigned long runnable, bool running)
>> {
>> u32 contrib = (u32)delta; /* p == 0 -> delta < 1024 */
>> u64 periods;
>> @@ -182,7 +182,7 @@ accumulate_sum(u64 delta, struct sched_avg *sa,
>> */
>> static __always_inline int
>> ___update_load_sum(u64 now, struct sched_avg *sa,
>> - unsigned long load, unsigned long runnable, int running)
>> + unsigned long load, unsigned long runnable, bool running)
>> {
>> u64 delta;
>
> And this function has:
>
> runnable = running = 0;
>
> and then people complain about assigning 0 to _Bool, and then we get
> idiocy like:
>
> runnable = running = false;
>
> Please...
> .
>
I see, assigning 0 to _Bool is inappropriate.
Thanks for your review.