2022-04-23 13:30:24

by David Vernet

Subject: [PATCH 1/5] cgroup: Add test_cpucg_nested_weight_overprovisioned() testcase

The cgroup cpu controller tests in
tools/testing/selftests/cgroup/test_cpu.c have some testcases that validate
the expected behavior of setting cpu.weight on cgroups, and then hogging
CPUs. What is still missing from the suite is a testcase that validates
nested cgroups. This patch adds test_cpucg_nested_weight_overprovisioned(),
which validates that a parent's cpu.weight takes precedence over its
children's weights when they overcommit the host, and that any sibling
cgroups of that parent are properly protected.
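
To illustrate the expected split (an illustrative sketch, not part of
the patch): cpu.weight divides bandwidth proportionally at each level of
the tree, so with the hierarchy this testcase builds, A/B should get
about half of the host while A/C/D and A/C/E each get about a quarter:

    #include <stdio.h>

    int main(void)
    {
        /* Weights from the testcase's hierarchy. */
        double b = 1000.0, c = 1000.0;  /* siblings under A */
        double d = 5000.0, e = 5000.0;  /* siblings under A/C */

        double b_share = b / (b + c);                   /* ~0.50 */
        double d_share = (c / (b + c)) * (d / (d + e)); /* ~0.25 */
        double e_share = (c / (b + c)) * (e / (d + e)); /* ~0.25 */

        printf("B=%.2f D=%.2f E=%.2f\n", b_share, d_share, e_share);
        return 0;
    }

That is why the testcase expects A/B's usage to roughly match the
combined usage of the two nested leaves.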

Signed-off-by: David Vernet <[email protected]>
---
tools/testing/selftests/cgroup/test_cpu.c | 122 ++++++++++++++++++++++
1 file changed, 122 insertions(+)

diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index 64f9ce91c992..fc90b4d0feb9 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -403,6 +403,127 @@ static int test_cpucg_weight_underprovisioned(const char *root)
underprovision_validate);
}

+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 1000
+ * A/C cpu.weight = 1000
+ * A/C/D cpu.weight = 5000
+ * A/C/E cpu.weight = 5000
+ *
+ * A separate process is then created for each leaf, each of which spawns
+ * nproc threads that burn a CPU for a few seconds.
+ *
+ * Once all of those processes have exited, we verify from cpu.stat that
+ * A/B's usage roughly matches the combined usage of A/C/D and A/C/E.
+ */
+static int
+test_cpucg_nested_weight_overprovisioned(const char *root)
+{
+ int ret = KSFT_FAIL, i;
+ char *parent = NULL, *child = NULL;
+ struct cpu_hogger leaf[3] = {NULL};
+ long nested_leaf_usage, child_usage;
+ int nprocs = get_nprocs();
+
+ parent = cg_name(root, "cpucg_test");
+ child = cg_name(parent, "cpucg_child");
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+ if (cg_write(child, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+ if (cg_write(child, "cpu.weight", "1000"))
+ goto cleanup;
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ const char *ancestor;
+ long weight;
+
+ if (i == 0) {
+ ancestor = parent;
+ weight = 1000;
+ } else {
+ ancestor = child;
+ weight = 5000;
+ }
+ leaf[i].cgroup = cg_name_indexed(ancestor, "cpucg_leaf", i);
+ if (!leaf[i].cgroup)
+ goto cleanup;
+
+ if (cg_create(leaf[i].cgroup))
+ goto cleanup;
+
+ if (cg_write_numeric(leaf[i].cgroup, "cpu.weight", weight))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ pid_t pid;
+ struct cpu_hog_func_param param = {
+ .nprocs = nprocs,
+ .ts = {
+ .tv_sec = 10,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+
+ pid = cg_run_nowait(leaf[i].cgroup, hog_cpus_timed,
+ (void *)&param);
+ if (pid <= 0)
+ goto cleanup;
+ leaf[i].pid = pid;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ int retcode;
+
+ waitpid(leaf[i].pid, &retcode, 0);
+ if (!WIFEXITED(retcode))
+ goto cleanup;
+ if (WEXITSTATUS(retcode))
+ goto cleanup;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ leaf[i].usage = cg_read_key_long(leaf[i].cgroup,
+ "cpu.stat", "usage_usec");
+ if (leaf[i].usage <= 0)
+ goto cleanup;
+ }
+
+ nested_leaf_usage = leaf[1].usage + leaf[2].usage;
+ if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ goto cleanup;
+
+ child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
+ if (child_usage <= 0)
+ goto cleanup;
+ if (!values_close(child_usage, nested_leaf_usage, 1))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+cleanup:
+ for (i = 0; i < ARRAY_SIZE(leaf); i++) {
+ cg_destroy(leaf[i].cgroup);
+ free(leaf[i].cgroup);
+ }
+ cg_destroy(child);
+ free(child);
+ cg_destroy(parent);
+ free(parent);
+
+ return ret;
+}
+
#define T(x) { x, #x }
struct cpucg_test {
int (*fn)(const char *root);
@@ -412,6 +533,7 @@ struct cpucg_test {
T(test_cpucg_stats),
T(test_cpucg_weight_overprovisioned),
T(test_cpucg_weight_underprovisioned),
+ T(test_cpucg_nested_weight_overprovisioned),
};
#undef T

--
2.30.2


2022-04-23 13:37:38

by David Vernet

Subject: [PATCH 3/5] cgroup: Add test_cpucg_max() testcase

The cgroup cpu controller test suite has a number of testcases that
validate the expected behavior of the cpu.weight knob, but none for
cpu.max. This patch fixes that by adding a testcase for cpu.max as well.
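
For context (an illustrative sketch, not part of the patch): writing
"1000" to cpu.max sets a quota of 1000 usec against the default period
of 100000 usec, so a hog running for one wall-clock second should accrue
only about 1% of a CPU:

    #include <stdio.h>

    int main(void)
    {
        long quota_usec = 1000;     /* value written to cpu.max */
        long period_usec = 100000;  /* default cpu.max period */
        long wall_usec = 1000000;   /* the hog runs for one second */

        /* ~10000 usec: two orders of magnitude below unthrottled. */
        printf("expected usage: ~%ld usec\n",
               wall_usec * quota_usec / period_usec);
        return 0;
    }

This gap is what the testcase checks for: usage_usec must land far below
the full second a completely unthrottled hog would burn.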

Signed-off-by: David Vernet <[email protected]>
---
tools/testing/selftests/cgroup/test_cpu.c | 54 +++++++++++++++++++++++
1 file changed, 54 insertions(+)

diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index de6289814c23..715922c15c78 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -564,6 +564,59 @@ test_cpucg_nested_weight_underprovisioned(const char *root)
return run_cpucg_nested_weight_test(root, false);
}

+/*
+ * This test creates a cgroup with a cpu.max quota that permits only a small
+ * fraction of each period, and verifies that a process in the cgroup is
+ * properly throttled.
+ */
+static int test_cpucg_max(const char *root)
+{
+ int ret = KSFT_FAIL;
+ long usage_usec, user_usec;
+ long usage_seconds = 1;
+ long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ char *cpucg;
+
+ cpucg = cg_name(root, "cpucg_test");
+ if (!cpucg)
+ goto cleanup;
+
+ if (cg_create(cpucg))
+ goto cleanup;
+
+ if (cg_write(cpucg, "cpu.max", "1000"))
+ goto cleanup;
+
+ struct cpu_hog_func_param param = {
+ .nprocs = 1,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+ if (cg_run(cpucg, hog_cpus_timed, (void *)&param))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(cpucg, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(cpucg, "cpu.stat", "user_usec");
+ if (user_usec <= 0)
+ goto cleanup;
+
+ if (user_usec >= expected_usage_usec)
+ goto cleanup;
+
+ if (values_close(usage_usec, expected_usage_usec, 95))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(cpucg);
+ free(cpucg);
+
+ return ret;
+}
+
#define T(x) { x, #x }
struct cpucg_test {
int (*fn)(const char *root);
@@ -575,6 +628,7 @@ struct cpucg_test {
T(test_cpucg_weight_underprovisioned),
T(test_cpucg_nested_weight_overprovisioned),
T(test_cpucg_nested_weight_underprovisioned),
+ T(test_cpucg_max),
};
#undef T

--
2.30.2

2022-04-23 14:03:41

by David Vernet

Subject: [PATCH 2/5] cgroup: Add test_cpucg_nested_weight_underprovisioned() testcase

The cgroup cpu controller test suite currently contains a testcase called
test_cpucg_nested_weight_overprovisioned(), which verifies the expected
behavior of cpu.weight when applied to nested cgroups. That first testcase
validated the expected behavior when the processes in the leaf cgroups
overcommitted the system. This patch adds a complementary
test_cpucg_nested_weight_underprovisioned() testcase which validates
behavior when those leaf cgroups undercommit the system.
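
To illustrate the sizing (a hypothetical sketch, not part of the patch):
with nprocs / 4 hog threads in each of the three leaves, only three
quarters of the CPUs are ever busy, so the weights never come into play
and every leaf should accrue roughly equal usage:

    #include <stdio.h>
    #include <sys/sysinfo.h>

    int main(void)
    {
        int nprocs = get_nprocs();

        if (nprocs < 4) {
            /* Mirrors the testcase: skip rather than overprovision. */
            puts("skip: too few CPUs to underprovision");
            return 0;
        }
        /* 3 * (nprocs / 4) hogs on nprocs CPUs: no contention. */
        printf("%d hog threads on %d CPUs\n", 3 * (nprocs / 4), nprocs);
        return 0;
    }

Equal per-leaf usage is also why the validation below compares
leaf[0].usage * 2 against the sum of the two nested leaves.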

Signed-off-by: David Vernet <[email protected]>
---
tools/testing/selftests/cgroup/test_cpu.c | 73 ++++++++++++++++++-----
1 file changed, 57 insertions(+), 16 deletions(-)

diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index fc90b4d0feb9..de6289814c23 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -403,22 +403,8 @@ static int test_cpucg_weight_underprovisioned(const char *root)
underprovision_validate);
}

-/*
- * First, this test creates the following hierarchy:
- * A
- * A/B cpu.weight = 1000
- * A/C cpu.weight = 1000
- * A/C/D cpu.weight = 5000
- * A/C/E cpu.weight = 5000
- *
- * A separate process is then created for each leaf, each of which spawns
- * nproc threads that burn a CPU for a few seconds.
- *
- * Once all of those processes have exited, we verify from cpu.stat that
- * A/B's usage roughly matches the combined usage of A/C/D and A/C/E.
- */
static int
-test_cpucg_nested_weight_overprovisioned(const char *root)
+run_cpucg_nested_weight_test(const char *root, bool overprovisioned)
{
int ret = KSFT_FAIL, i;
char *parent = NULL, *child = NULL;
@@ -426,6 +412,16 @@ test_cpucg_nested_weight_overprovisioned(const char *root)
long nested_leaf_usage, child_usage;
int nprocs = get_nprocs();

+ if (!overprovisioned) {
+ /*
+ * Only run the test if there are enough cores to avoid
+ * overprovisioning the system.
+ */
+ if (nprocs < 4)
+ return KSFT_SKIP;
+ nprocs /= 4;
+ }
+
parent = cg_name(root, "cpucg_test");
child = cg_name(parent, "cpucg_child");
if (!parent || !child)
@@ -501,9 +497,12 @@ test_cpucg_nested_weight_overprovisioned(const char *root)
}

nested_leaf_usage = leaf[1].usage + leaf[2].usage;
- if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ if (overprovisioned) {
+ if (!values_close(leaf[0].usage, nested_leaf_usage, 15))
+ goto cleanup;
+ } else if (!values_close(leaf[0].usage * 2, nested_leaf_usage, 15))
goto cleanup;

child_usage = cg_read_key_long(child, "cpu.stat", "usage_usec");
if (child_usage <= 0)
goto cleanup;
@@ -524,6 +524,46 @@ test_cpucg_nested_weight_overprovisioned(const char *root)
return ret;
}

+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 1000
+ * A/C cpu.weight = 1000
+ * A/C/D cpu.weight = 5000
+ * A/C/E cpu.weight = 5000
+ *
+ * A separate process is then created for each leaf, each of which spawns
+ * nproc threads that burn a CPU for a few seconds.
+ *
+ * Once all of those processes have exited, we verify from cpu.stat that
+ * A/B's usage roughly matches the combined usage of A/C/D and A/C/E.
+ */
+static int
+test_cpucg_nested_weight_overprovisioned(const char *root)
+{
+ return run_cpucg_nested_weight_test(root, true);
+}
+
+/*
+ * First, this test creates the following hierarchy:
+ * A
+ * A/B cpu.weight = 1000
+ * A/C cpu.weight = 1000
+ * A/C/D cpu.weight = 5000
+ * A/C/E cpu.weight = 5000
+ *
+ * A separate process is then created for each leaf, each of which spawns
+ * nproc / 4 threads that burn a CPU for a few seconds.
+ *
+ * Once all of those processes have exited, we verify that each of the leaf
+ * cgroups has roughly the same usage from cpu.stat.
+ */
+static int
+test_cpucg_nested_weight_underprovisioned(const char *root)
+{
+ return run_cpucg_nested_weight_test(root, false);
+}
+
#define T(x) { x, #x }
struct cpucg_test {
int (*fn)(const char *root);
@@ -534,6 +574,7 @@ struct cpucg_test {
T(test_cpucg_weight_overprovisioned),
T(test_cpucg_weight_underprovisioned),
T(test_cpucg_nested_weight_overprovisioned),
+ T(test_cpucg_nested_weight_underprovisioned),
};
#undef T

--
2.30.2

2022-04-23 15:25:47

by David Vernet

Subject: [PATCH 4/5] cgroup: Add test_cpucg_max_nested() testcase

The cgroup cpu controller selftests have a test_cpucg_max() testcase
that validates the behavior of the cpu.max knob. Let's also add a
testcase that verifies that a child cgroup is properly throttled when
cpu.max is set on its parent.
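
As background (a hypothetical sketch, not part of the patch; the path
and helper below are illustrative, the selftest itself uses
cg_read_key_long()): cpu.stat is a flat keyed file, so checking the
child's throttled usage comes down to parsing out the usage_usec key:

    #include <stdio.h>
    #include <string.h>

    static long read_usage_usec(const char *stat_path)
    {
        char key[64];
        long val;
        FILE *f = fopen(stat_path, "r");

        if (!f)
            return -1;
        while (fscanf(f, "%63s %ld", key, &val) == 2) {
            if (!strcmp(key, "usage_usec")) {
                fclose(f);
                return val;
            }
        }
        fclose(f);
        return -1;
    }

    int main(void)
    {
        /* Illustrative path; depends on where the hierarchy lives. */
        long usage = read_usage_usec(
            "/sys/fs/cgroup/cpucg_parent/cpucg_child/cpu.stat");

        printf("child usage_usec: %ld\n", usage);
        return 0;
    }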

Signed-off-by: David Vernet <[email protected]>
---
tools/testing/selftests/cgroup/test_cpu.c | 63 +++++++++++++++++++++++
1 file changed, 63 insertions(+)

diff --git a/tools/testing/selftests/cgroup/test_cpu.c b/tools/testing/selftests/cgroup/test_cpu.c
index 715922c15c78..24020a2c68dc 100644
--- a/tools/testing/selftests/cgroup/test_cpu.c
+++ b/tools/testing/selftests/cgroup/test_cpu.c
@@ -617,6 +617,68 @@ static int test_cpucg_max(const char *root)
return ret;
}

+/*
+ * This test verifies that a process in a nested cgroup whose parent
+ * cgroup has a cpu.max value set is properly throttled.
+ */
+static int test_cpucg_max_nested(const char *root)
+{
+ int ret = KSFT_FAIL;
+ long usage_usec, user_usec;
+ long usage_seconds = 1;
+ long expected_usage_usec = usage_seconds * USEC_PER_SEC;
+ char *parent, *child;
+
+ parent = cg_name(root, "cpucg_parent");
+ child = cg_name(parent, "cpucg_child");
+ if (!parent || !child)
+ goto cleanup;
+
+ if (cg_create(parent))
+ goto cleanup;
+
+ if (cg_write(parent, "cgroup.subtree_control", "+cpu"))
+ goto cleanup;
+
+ if (cg_create(child))
+ goto cleanup;
+
+ if (cg_write(parent, "cpu.max", "1000"))
+ goto cleanup;
+
+ struct cpu_hog_func_param param = {
+ .nprocs = 1,
+ .ts = {
+ .tv_sec = usage_seconds,
+ .tv_nsec = 0,
+ },
+ .clock_type = CPU_HOG_CLOCK_WALL,
+ };
+ if (cg_run(child, hog_cpus_timed, (void *)&param))
+ goto cleanup;
+
+ usage_usec = cg_read_key_long(child, "cpu.stat", "usage_usec");
+ user_usec = cg_read_key_long(child, "cpu.stat", "user_usec");
+ if (user_usec <= 0)
+ goto cleanup;
+
+ if (user_usec >= expected_usage_usec)
+ goto cleanup;
+
+ if (values_close(usage_usec, expected_usage_usec, 95))
+ goto cleanup;
+
+ ret = KSFT_PASS;
+
+cleanup:
+ cg_destroy(child);
+ free(child);
+ cg_destroy(parent);
+ free(parent);
+
+ return ret;
+}
+
#define T(x) { x, #x }
struct cpucg_test {
int (*fn)(const char *root);
@@ -629,6 +691,7 @@ struct cpucg_test {
T(test_cpucg_nested_weight_overprovisioned),
T(test_cpucg_nested_weight_underprovisioned),
T(test_cpucg_max),
+ T(test_cpucg_max_nested),
};
#undef T

--
2.30.2