2022-05-23 07:06:33

by Josh Don

Subject: [PATCH v2] sched/core: add forced idle accounting for cgroups

Commit 4feee7d1260 previously added per-task forced idle accounting. This
patch extends that accounting to cgroups as well.

rstat is used for cgroup accounting, except for the root cgroup, which
uses kcpustat in order to avoid an rstat flush when reading root stats.

Only cgroup v2 is supported. Similar to the task accounting, the cgroup
accounting requires that schedstats is enabled.

Signed-off-by: Josh Don <[email protected]>
---
v2: Per Tejun's suggestion, move the forceidle stat to cgroup_base_stat
directly.
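
For illustration only (not part of the patch): with CONFIG_SCHED_CORE=y
and schedstats enabled, a cgroup v2 group's cpu.stat gains a
forceidle_usec field next to the existing usage/user/system fields.
A minimal userspace sketch for reading it; the mount point and group
name below are made up:

/*
 * Illustrative only: dump cpu.stat for a (hypothetical) cgroup v2
 * group.  Assumes the v2 hierarchy is mounted at /sys/fs/cgroup and
 * kernel.sched_schedstats=1 so that forceidle time accumulates.
 */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/fs/cgroup/mygroup/cpu.stat", "r");
	char line[256];

	if (!f) {
		perror("fopen");
		return 1;
	}
	/* Prints usage_usec, user_usec, system_usec and, with this
	 * patch, forceidle_usec. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}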

include/linux/cgroup-defs.h | 4 ++++
include/linux/kernel_stat.h | 7 +++++++
kernel/cgroup/rstat.c | 40 +++++++++++++++++++++++++++++++------
kernel/sched/core_sched.c | 6 +++++-
kernel/sched/cputime.c | 11 ++++++++++
5 files changed, 61 insertions(+), 7 deletions(-)

diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 1bfcfb1af352..025fd0e84a31 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -287,6 +287,10 @@ struct css_set {

struct cgroup_base_stat {
struct task_cputime cputime;
+
+#ifdef CONFIG_SCHED_CORE
+ u64 forceidle_sum;
+#endif
};

/*
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 69ae6b278464..94f435ce1df0 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -28,6 +28,9 @@ enum cpu_usage_stat {
CPUTIME_STEAL,
CPUTIME_GUEST,
CPUTIME_GUEST_NICE,
+#ifdef CONFIG_SCHED_CORE
+ CPUTIME_FORCEIDLE,
+#endif
NR_STATS,
};

@@ -115,4 +118,8 @@ extern void account_process_tick(struct task_struct *, int user);

extern void account_idle_ticks(unsigned long ticks);

+#ifdef CONFIG_SCHED_CORE
+extern void account_forceidle_time(struct task_struct *tsk, u64 delta);
+#endif
+
#endif /* _LINUX_KERNEL_STAT_H */
diff --git a/kernel/cgroup/rstat.c b/kernel/cgroup/rstat.c
index 24b5c2ab5598..d873de6f8716 100644
--- a/kernel/cgroup/rstat.c
+++ b/kernel/cgroup/rstat.c
@@ -310,6 +310,9 @@ static void cgroup_base_stat_add(struct cgroup_base_stat *dst_bstat,
dst_bstat->cputime.utime += src_bstat->cputime.utime;
dst_bstat->cputime.stime += src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime += src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+ dst_bstat->forceidle_sum += src_bstat->forceidle_sum;
+#endif
}

static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
@@ -318,6 +321,9 @@ static void cgroup_base_stat_sub(struct cgroup_base_stat *dst_bstat,
dst_bstat->cputime.utime -= src_bstat->cputime.utime;
dst_bstat->cputime.stime -= src_bstat->cputime.stime;
dst_bstat->cputime.sum_exec_runtime -= src_bstat->cputime.sum_exec_runtime;
+#ifdef CONFIG_SCHED_CORE
+ dst_bstat->forceidle_sum -= src_bstat->forceidle_sum;
+#endif
}

static void cgroup_base_stat_flush(struct cgroup *cgrp, int cpu)
@@ -398,6 +404,11 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
case CPUTIME_SOFTIRQ:
rstatc->bstat.cputime.stime += delta_exec;
break;
+#ifdef CONFIG_SCHED_CORE
+ case CPUTIME_FORCEIDLE:
+ rstatc->bstat.forceidle_sum += delta_exec;
+ break;
+#endif
default:
break;
}
@@ -411,8 +422,9 @@ void __cgroup_account_cputime_field(struct cgroup *cgrp,
* with how it is done by __cgroup_account_cputime_field for each bit of
* cpu time attributed to a cgroup.
*/
-static void root_cgroup_cputime(struct task_cputime *cputime)
+static void root_cgroup_cputime(struct cgroup_base_stat *bstat)
{
+ struct task_cputime *cputime = &bstat->cputime;
int i;

cputime->stime = 0;
@@ -438,6 +450,10 @@ static void root_cgroup_cputime(struct task_cputime *cputime)
cputime->sum_exec_runtime += user;
cputime->sum_exec_runtime += sys;
cputime->sum_exec_runtime += cpustat[CPUTIME_STEAL];
+
+#ifdef CONFIG_SCHED_CORE
+ bstat->forceidle_sum += cpustat[CPUTIME_FORCEIDLE];
+#endif
}
}

@@ -445,27 +461,39 @@ void cgroup_base_stat_cputime_show(struct seq_file *seq)
{
struct cgroup *cgrp = seq_css(seq)->cgroup;
u64 usage, utime, stime;
- struct task_cputime cputime;
+ struct cgroup_base_stat bstat;
+ u64 __maybe_unused forceidle_time;

if (cgroup_parent(cgrp)) {
cgroup_rstat_flush_hold(cgrp);
usage = cgrp->bstat.cputime.sum_exec_runtime;
cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
&utime, &stime);
+#ifdef CONFIG_SCHED_CORE
+ forceidle_time = cgrp->bstat.forceidle_sum;
+#endif
cgroup_rstat_flush_release();
} else {
- root_cgroup_cputime(&cputime);
- usage = cputime.sum_exec_runtime;
- utime = cputime.utime;
- stime = cputime.stime;
+ root_cgroup_cputime(&bstat);
+ usage = bstat.cputime.sum_exec_runtime;
+ utime = bstat.cputime.utime;
+ stime = bstat.cputime.stime;
+#ifdef CONFIG_SCHED_CORE
+ forceidle_time = bstat.forceidle_sum;
+#endif
}

do_div(usage, NSEC_PER_USEC);
do_div(utime, NSEC_PER_USEC);
do_div(stime, NSEC_PER_USEC);
+ do_div(forceidle_time, NSEC_PER_USEC);

seq_printf(seq, "usage_usec %llu\n"
"user_usec %llu\n"
"system_usec %llu\n",
usage, utime, stime);
+
+#ifdef CONFIG_SCHED_CORE
+ seq_printf(seq, "forceidle_usec %llu\n", forceidle_time);
+#endif
}
diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
index 38a2cec21014..ddef2b8ddf68 100644
--- a/kernel/sched/core_sched.c
+++ b/kernel/sched/core_sched.c
@@ -277,7 +277,11 @@ void __sched_core_account_forceidle(struct rq *rq)
if (p == rq_i->idle)
continue;

- __schedstat_add(p->stats.core_forceidle_sum, delta);
+ /*
+ * Note: this will account forceidle to the current cpu, even
+ * if it comes from our SMT sibling.
+ */
+ account_forceidle_time(p, delta);
}
}

diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 78a233d43757..598d1026d629 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -226,6 +226,17 @@ void account_idle_time(u64 cputime)
cpustat[CPUTIME_IDLE] += cputime;
}

+
+#ifdef CONFIG_SCHED_CORE
+/* Account for forceidle time due to core scheduling. */
+void account_forceidle_time(struct task_struct *p, u64 delta)
+{
+ schedstat_add(p->stats.core_forceidle_sum, delta);
+
+ task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
+}
+#endif
+
/*
* When a guest is interrupted for a longer amount of time, missed clock
* ticks are not redelivered later. Due to that, this function may on
--
2.36.1.124.g0e6072fb45-goog



2022-05-23 07:49:45

by Peter Zijlstra

Subject: Re: [PATCH v2] sched/core: add forced idle accounting for cgroups

On Fri, May 20, 2022 at 04:51:38PM -0700, Josh Don wrote:

> diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
> index 38a2cec21014..ddef2b8ddf68 100644
> --- a/kernel/sched/core_sched.c
> +++ b/kernel/sched/core_sched.c
> @@ -277,7 +277,11 @@ void __sched_core_account_forceidle(struct rq *rq)
> if (p == rq_i->idle)
> continue;
>
> - __schedstat_add(p->stats.core_forceidle_sum, delta);
> + /*
> + * Note: this will account forceidle to the current cpu, even
> + * if it comes from our SMT sibling.
> + */
> + account_forceidle_time(p, delta);
> }

AFAICT this is the only caller of that function.

> }
>
> diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> index 78a233d43757..598d1026d629 100644
> --- a/kernel/sched/cputime.c
> +++ b/kernel/sched/cputime.c
> @@ -226,6 +226,17 @@ void account_idle_time(u64 cputime)
> cpustat[CPUTIME_IDLE] += cputime;
> }
>
> +
> +#ifdef CONFIG_SCHED_CORE
> +/* Account for forceidle time due to core scheduling. */
> +void account_forceidle_time(struct task_struct *p, u64 delta)
> +{
> + schedstat_add(p->stats.core_forceidle_sum, delta);

But then you lose the __, why?

> +
> + task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
> +}
> +#endif

2022-05-23 08:14:13

by kernel test robot

Subject: Re: [PATCH v2] sched/core: add forced idle accounting for cgroups

Hi Josh,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on tj-cgroup/for-next]
[also build test WARNING on tip/sched/core tip/master v5.18-rc7 next-20220520]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting the patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/intel-lab-lkp/linux/commits/Josh-Don/sched-core-add-forced-idle-accounting-for-cgroups/20220521-075311
base: https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git for-next
config: m68k-allyesconfig (https://download.01.org/0day-ci/archive/20220521/[email protected]/config)
compiler: m68k-linux-gcc (GCC) 11.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/intel-lab-lkp/linux/commit/0575a42c9f10cda618b09b949cc42fe97abea479
        git remote add linux-review https://github.com/intel-lab-lkp/linux
        git fetch --no-tags linux-review Josh-Don/sched-core-add-forced-idle-accounting-for-cgroups/20220521-075311
        git checkout 0575a42c9f10cda618b09b949cc42fe97abea479
        # save the config file
        mkdir build_dir && cp config build_dir/.config
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-11.3.0 make.cross W=1 O=build_dir ARCH=m68k SHELL=/bin/bash kernel/

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

In file included from include/linux/math.h:6,
from include/linux/math64.h:6,
from include/linux/time64.h:5,
from include/linux/restart_block.h:10,
from include/linux/thread_info.h:14,
from include/asm-generic/preempt.h:5,
from ./arch/m68k/include/generated/asm/preempt.h:1,
from include/linux/preempt.h:78,
from arch/m68k/include/asm/irqflags.h:6,
from include/linux/irqflags.h:16,
from arch/m68k/include/asm/atomic.h:6,
from include/linux/atomic.h:7,
from include/linux/rcupdate.h:25,
from include/linux/rculist.h:11,
from include/linux/pid.h:5,
from include/linux/sched.h:14,
from include/linux/cgroup.h:12,
from kernel/cgroup/cgroup-internal.h:5,
from kernel/cgroup/rstat.c:2:
kernel/cgroup/rstat.c: In function 'cgroup_base_stat_cputime_show':
>> arch/m68k/include/asm/div64.h:21:17: warning: 'forceidle_time' is used uninitialized [-Wuninitialized]
21 | __n.n64 = (n); \
| ^
kernel/cgroup/rstat.c:465:28: note: 'forceidle_time' was declared here
465 | u64 __maybe_unused forceidle_time;
| ^~~~~~~~~~~~~~


vim +/forceidle_time +21 arch/m68k/include/asm/div64.h

d20f5aa338dc75 Greg Ungerer 2009-02-06 12
d20f5aa338dc75 Greg Ungerer 2009-02-06 13 #define do_div(n, base) ({ \
d20f5aa338dc75 Greg Ungerer 2009-02-06 14 union { \
d20f5aa338dc75 Greg Ungerer 2009-02-06 15 unsigned long n32[2]; \
d20f5aa338dc75 Greg Ungerer 2009-02-06 16 unsigned long long n64; \
d20f5aa338dc75 Greg Ungerer 2009-02-06 17 } __n; \
d20f5aa338dc75 Greg Ungerer 2009-02-06 18 unsigned long __rem, __upper; \
ea077b1b96e073 Andreas Schwab 2013-08-09 19 unsigned long __base = (base); \
d20f5aa338dc75 Greg Ungerer 2009-02-06 20 \
d20f5aa338dc75 Greg Ungerer 2009-02-06 @21 __n.n64 = (n); \
d20f5aa338dc75 Greg Ungerer 2009-02-06 22 if ((__upper = __n.n32[0])) { \
d20f5aa338dc75 Greg Ungerer 2009-02-06 23 asm ("divul.l %2,%1:%0" \
d20f5aa338dc75 Greg Ungerer 2009-02-06 24 : "=d" (__n.n32[0]), "=d" (__upper) \
ea077b1b96e073 Andreas Schwab 2013-08-09 25 : "d" (__base), "0" (__n.n32[0])); \
d20f5aa338dc75 Greg Ungerer 2009-02-06 26 } \
d20f5aa338dc75 Greg Ungerer 2009-02-06 27 asm ("divu.l %2,%1:%0" \
d20f5aa338dc75 Greg Ungerer 2009-02-06 28 : "=d" (__n.n32[1]), "=d" (__rem) \
ea077b1b96e073 Andreas Schwab 2013-08-09 29 : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \
d20f5aa338dc75 Greg Ungerer 2009-02-06 30 (n) = __n.n64; \
d20f5aa338dc75 Greg Ungerer 2009-02-06 31 __rem; \
d20f5aa338dc75 Greg Ungerer 2009-02-06 32 })
d20f5aa338dc75 Greg Ungerer 2009-02-06 33

--
0-DAY CI Kernel Test Service
https://01.org/lkp

2022-05-23 11:30:13

by Dan Carpenter

Subject: Re: [PATCH v2] sched/core: add forced idle accounting for cgroups

Hi Josh,

url: https://github.com/intel-lab-lkp/linux/commits/Josh-Don/sched-core-add-forced-idle-accounting-for-cgroups/20220521-075311
base: https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git for-next
config: x86_64-randconfig-m001 (https://download.01.org/0day-ci/archive/20220521/[email protected]/config)
compiler: gcc-11 (Debian 11.3.0-1) 11.3.0

If you fix the issue, kindly add the following tag where applicable
Reported-by: kernel test robot <[email protected]>
Reported-by: Dan Carpenter <[email protected]>

smatch warnings:
kernel/cgroup/rstat.c:489 cgroup_base_stat_cputime_show() error: uninitialized symbol 'forceidle_time'.

vim +/forceidle_time +489 kernel/cgroup/rstat.c

d4ff749b5e0f1e kernel/cgroup/rstat.c Tejun Heo 2018-04-26 460 void cgroup_base_stat_cputime_show(struct seq_file *seq)
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 461 {
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 462 struct cgroup *cgrp = seq_css(seq)->cgroup;
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 463 u64 usage, utime, stime;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 464 struct cgroup_base_stat bstat;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 465 u64 __maybe_unused forceidle_time;
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 466
936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 467 if (cgroup_parent(cgrp)) {
6162cef0f741c7 kernel/cgroup/rstat.c Tejun Heo 2018-04-26 468 cgroup_rstat_flush_hold(cgrp);
d4ff749b5e0f1e kernel/cgroup/rstat.c Tejun Heo 2018-04-26 469 usage = cgrp->bstat.cputime.sum_exec_runtime;
936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 470 cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 471 &utime, &stime);
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 472 #ifdef CONFIG_SCHED_CORE
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 473 forceidle_time = cgrp->bstat.forceidle_sum;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 474 #endif
6162cef0f741c7 kernel/cgroup/rstat.c Tejun Heo 2018-04-26 475 cgroup_rstat_flush_release();
936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 476 } else {
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 477 root_cgroup_cputime(&bstat);
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 478 usage = bstat.cputime.sum_exec_runtime;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 479 utime = bstat.cputime.utime;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 480 stime = bstat.cputime.stime;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 481 #ifdef CONFIG_SCHED_CORE
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 482 forceidle_time = bstat.forceidle_sum;
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 483 #endif
936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 484 }
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 485
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 486 do_div(usage, NSEC_PER_USEC);
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 487 do_div(utime, NSEC_PER_USEC);
041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 488 do_div(stime, NSEC_PER_USEC);
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 @489 do_div(forceidle_time, NSEC_PER_USEC);

I think this will cause KMSAN warnings at run time.

041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 490
d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 491 seq_printf(seq, "usage_usec %llu\n"
d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 492 "user_usec %llu\n"
d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 493 "system_usec %llu\n",
d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 494 usage, utime, stime);
0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 495

--
0-DAY CI Kernel Test Service
https://01.org/lkp


2022-05-23 21:31:00

by Josh Don

Subject: Re: [PATCH v2] sched/core: add forced idle accounting for cgroups

On Sat, May 21, 2022 at 3:00 AM Peter Zijlstra <[email protected]> wrote:
>
> On Fri, May 20, 2022 at 04:51:38PM -0700, Josh Don wrote:
>
> > diff --git a/kernel/sched/core_sched.c b/kernel/sched/core_sched.c
> > index 38a2cec21014..ddef2b8ddf68 100644
> > --- a/kernel/sched/core_sched.c
> > +++ b/kernel/sched/core_sched.c
> > @@ -277,7 +277,11 @@ void __sched_core_account_forceidle(struct rq *rq)
> > if (p == rq_i->idle)
> > continue;
> >
> > - __schedstat_add(p->stats.core_forceidle_sum, delta);
> > + /*
> > + * Note: this will account forceidle to the current cpu, even
> > + * if it comes from our SMT sibling.
> > + */
> > + account_forceidle_time(p, delta);
> > }
>
> AFAICT this is the only caller of that function.
>
> > }
> >
> > diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
> > index 78a233d43757..598d1026d629 100644
> > --- a/kernel/sched/cputime.c
> > +++ b/kernel/sched/cputime.c
> > @@ -226,6 +226,17 @@ void account_idle_time(u64 cputime)
> > cpustat[CPUTIME_IDLE] += cputime;
> > }
> >
> > +
> > +#ifdef CONFIG_SCHED_CORE
> > +/* Account for forceidle time due to core scheduling. */
> > +void account_forceidle_time(struct task_struct *p, u64 delta)
> > +{
> > + schedstat_add(p->stats.core_forceidle_sum, delta);
>
> But then you loose the __, why?

Yeah, this is probably better off as __ with a comment on the function
stating that it requires schedstats to be enabled. My original thinking
was that it would be nice to avoid baking that requirement into this
helper, for potential future use cases, especially given that the
schedstat_enabled() check is basically free anyway.
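
Roughly what I have in mind for the next version (sketch only,
untested; the caller in core_sched.c stays the same):

#ifdef CONFIG_SCHED_CORE
/*
 * Account for forceidle time due to core scheduling.
 *
 * REQUIRES: schedstat is enabled.
 */
void account_forceidle_time(struct task_struct *p, u64 delta)
{
	__schedstat_add(p->stats.core_forceidle_sum, delta);

	task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
}
#endif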

> > +
> > + task_group_account_field(p, CPUTIME_FORCEIDLE, delta);
> > +}
> > +#endif

2022-05-23 22:45:33

by Josh Don

Subject: Re: [PATCH v2] sched/core: add forced idle accounting for cgroups

On Mon, May 23, 2022 at 4:28 AM Dan Carpenter <[email protected]> wrote:
>
> Hi Josh,
>
> url: https://github.com/intel-lab-lkp/linux/commits/Josh-Don/sched-core-add-forced-idle-accounting-for-cgroups/20220521-075311
> base: https://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup.git for-next
> config: x86_64-randconfig-m001 (https://download.01.org/0day-ci/archive/20220521/[email protected]/config)
> compiler: gcc-11 (Debian 11.3.0-1) 11.3.0
>
> If you fix the issue, kindly add the following tag where applicable
> Reported-by: kernel test robot <[email protected]>
> Reported-by: Dan Carpenter <[email protected]>
>
> smatch warnings:
> kernel/cgroup/rstat.c:489 cgroup_base_stat_cputime_show() error: uninitialized symbol 'forceidle_time'.
>
> vim +/forceidle_time +489 kernel/cgroup/rstat.c
>
> d4ff749b5e0f1e kernel/cgroup/rstat.c Tejun Heo 2018-04-26 460 void cgroup_base_stat_cputime_show(struct seq_file *seq)
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 461 {
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 462 struct cgroup *cgrp = seq_css(seq)->cgroup;
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 463 u64 usage, utime, stime;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 464 struct cgroup_base_stat bstat;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 465 u64 __maybe_unused forceidle_time;
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 466
> 936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 467 if (cgroup_parent(cgrp)) {
> 6162cef0f741c7 kernel/cgroup/rstat.c Tejun Heo 2018-04-26 468 cgroup_rstat_flush_hold(cgrp);
> d4ff749b5e0f1e kernel/cgroup/rstat.c Tejun Heo 2018-04-26 469 usage = cgrp->bstat.cputime.sum_exec_runtime;
> 936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 470 cputime_adjust(&cgrp->bstat.cputime, &cgrp->prev_cputime,
> 936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 471 &utime, &stime);
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 472 #ifdef CONFIG_SCHED_CORE
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 473 forceidle_time = cgrp->bstat.forceidle_sum;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 474 #endif
> 6162cef0f741c7 kernel/cgroup/rstat.c Tejun Heo 2018-04-26 475 cgroup_rstat_flush_release();
> 936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 476 } else {
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 477 root_cgroup_cputime(&bstat);
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 478 usage = bstat.cputime.sum_exec_runtime;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 479 utime = bstat.cputime.utime;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 480 stime = bstat.cputime.stime;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 481 #ifdef CONFIG_SCHED_CORE
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 482 forceidle_time = bstat.forceidle_sum;
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 483 #endif
> 936f2a70f2077f kernel/cgroup/rstat.c Boris Burkov 2020-05-27 484 }
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 485
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 486 do_div(usage, NSEC_PER_USEC);
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 487 do_div(utime, NSEC_PER_USEC);
> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 488 do_div(stime, NSEC_PER_USEC);
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 @489 do_div(forceidle_time, NSEC_PER_USEC);
>
> I think this will cause KMSAN warnings at run time.

Whoops, I missed wrapping this usage with an #ifdef CONFIG_SCHED_CORE.
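
Will fix in the next version by moving the division under the existing
CONFIG_SCHED_CORE guard, roughly (sketch, untested; tail of
cgroup_base_stat_cputime_show()):

	do_div(usage, NSEC_PER_USEC);
	do_div(utime, NSEC_PER_USEC);
	do_div(stime, NSEC_PER_USEC);

	seq_printf(seq, "usage_usec %llu\n"
		   "user_usec %llu\n"
		   "system_usec %llu\n",
		   usage, utime, stime);

#ifdef CONFIG_SCHED_CORE
	do_div(forceidle_time, NSEC_PER_USEC);
	seq_printf(seq, "forceidle_usec %llu\n", forceidle_time);
#endif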

> 041cd640b2f3c5 kernel/cgroup/stat.c Tejun Heo 2017-09-25 490
> d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 491 seq_printf(seq, "usage_usec %llu\n"
> d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 492 "user_usec %llu\n"
> d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 493 "system_usec %llu\n",
> d41bf8c9deaed1 kernel/cgroup/stat.c Tejun Heo 2017-10-23 494 usage, utime, stime);
> 0575a42c9f10cd kernel/cgroup/rstat.c Josh Don 2022-05-20 495
>
> --
> 0-DAY CI Kernel Test Service
> https://01.org/lkp
>