2023-12-01 23:51:54

by Ian Rogers

Subject: [PATCH v1 1/9] perf thread_map: Skip exited threads when scanning /proc

Scanning /proc is inherently racy, and scanning /proc/pid/task within
that is racy too, as the pid can terminate at any time. Rather than
failing in __thread_map__new_all_cpus, skip pids whose task directory
can no longer be scanned.
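
To illustrate the race, a minimal standalone sketch (not perf code; the
function name is made up): a pid listed by the outer scan can exit
before its task directory is read, so a failing scandir() there is
expected and is simply skipped:

  #include <dirent.h>
  #include <stdio.h>
  #include <stdlib.h>

  static void scan_all_tasks(void)
  {
      struct dirent **pids;
      int n = scandir("/proc", &pids, NULL, alphasort);

      if (n < 0)
          return;
      for (int i = 0; i < n; i++) {
          struct dirent **tasks;
          char path[280];
          int m;

          snprintf(path, sizeof(path), "/proc/%s/task", pids[i]->d_name);
          m = scandir(path, &tasks, NULL, alphasort);
          if (m <= 0)
              continue; /* pid exited (or not a pid dir): skip, don't fail */
          /* ... record each tid from tasks[0..m-1] ... */
          while (m--)
              free(tasks[m]);
          free(tasks);
      }
      while (n--)
          free(pids[n]);
      free(pids);
  }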

Signed-off-by: Ian Rogers <[email protected]>
---
tools/perf/util/thread_map.c | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/tools/perf/util/thread_map.c b/tools/perf/util/thread_map.c
index e848579e61a8..18fbc41d09f3 100644
--- a/tools/perf/util/thread_map.c
+++ b/tools/perf/util/thread_map.c
@@ -109,9 +109,10 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)

snprintf(path, sizeof(path), "/proc/%d/task", pid);
items = scandir(path, &namelist, filter, NULL);
- if (items <= 0)
- goto out_free_closedir;
-
+ if (items <= 0) {
+ pr_debug("scandir for %d returned empty, skipping\n", pid);
+ continue;
+ }
while (threads->nr + items >= max_threads) {
max_threads *= 2;
grow = true;
@@ -152,8 +153,6 @@ static struct perf_thread_map *__thread_map__new_all_cpus(uid_t uid)
for (i = 0; i < items; i++)
zfree(&namelist[i]);
free(namelist);
-
-out_free_closedir:
zfree(&threads);
goto out_closedir;
}
--
2.43.0.rc2.451.g8631bc7472-goog


2023-12-01 23:52:01

by Ian Rogers

Subject: [PATCH v1 7/9] perf tests: Run time generate shell test suites

Rather than using special shell test logic, do a single pass to create
an array of test suites, holding each shell test's file name in the
test suite's priv field. This makes the shell-test-specific logic in
builtin-test.c redundant, so remove it.
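
Roughly, each discovered script becomes a heap-allocated suite appended
to a NULL-terminated array. A simplified sketch of the append step, with
a stand-in type rather than perf's real struct test_suite:

  #include <stdlib.h>

  struct test_suite {           /* stand-in for the perf type */
      const char *desc;
      void *priv;               /* holds the shell script's file name */
  };

  /*
   * Append one suite, keeping the array NULL terminated so callers can
   * iterate without a separate count.
   */
  static int append_suite(struct test_suite ***arr, size_t *nr,
                          struct test_suite *suite)
  {
      struct test_suite **tmp = realloc(*arr, (*nr + 2) * sizeof(*tmp));

      if (!tmp)
          return -1;            /* old array stays intact on failure */
      tmp[*nr] = suite;
      tmp[*nr + 1] = NULL;
      *arr = tmp;
      (*nr)++;
      return 0;
  }

Because the result is NULL terminated, builtin-test.c can walk it with
the same for_each_test() iteration used for the static generic_tests
and arch_tests arrays.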

Signed-off-by: Ian Rogers <[email protected]>
---
tools/perf/tests/builtin-test.c | 91 +----------------------
tools/perf/tests/tests-scripts.c | 120 ++++++++++++++++++-------------
tools/perf/tests/tests-scripts.h | 10 +--
3 files changed, 74 insertions(+), 147 deletions(-)

diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 2669d1d66ed8..54b11c23e863 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -132,6 +132,7 @@ static struct test_suite *generic_tests[] = {
static struct test_suite **tests[] = {
generic_tests,
arch_tests,
+ NULL, /* shell tests created at runtime. */
};

static struct test_workload *workloads[] = {
@@ -301,74 +302,12 @@ static int test_and_print(struct test_suite *t, int subtest)
return err;
}

-struct shell_test {
- const char *file;
-};
-
-static int shell_test__run(struct test_suite *test, int subdir __maybe_unused)
-{
- int err;
- struct shell_test *st = test->priv;
- char *cmd;
-
- asprintf(&cmd, "%s%s", st->file, verbose ? " -v" : "");
- if (!cmd)
- return TEST_FAIL;
- err = system(cmd);
- free(cmd);
- if (!err)
- return TEST_OK;
-
- return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
-}
-
-static int run_shell_tests(int argc, const char *argv[], int i, int width,
- struct intlist *skiplist)
-{
- struct shell_test st;
- const struct script_file *files, *file;
-
- files = list_script_files();
- if (!files)
- return 0;
- for (file = files; file->file; file++) {
- int curr = i++;
- struct test_case test_cases[] = {
- {
- .desc = file->desc,
- .run_case = shell_test__run,
- },
- { .name = NULL, }
- };
- struct test_suite test_suite = {
- .desc = test_cases[0].desc,
- .test_cases = test_cases,
- .priv = &st,
- };
- st.file = file->file;
-
- if (test_suite.desc == NULL ||
- !perf_test__matches(test_suite.desc, curr, argc, argv))
- continue;
-
- pr_info("%3d: %-*s:", i, width, test_suite.desc);
-
- if (intlist__find(skiplist, i)) {
- color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
- continue;
- }
-
- test_and_print(&test_suite, 0);
- }
- return 0;
-}
-
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test_suite *t;
unsigned int j, k;
int i = 0;
- int width = list_script_max_width();
+ int width = 0;

for_each_test(j, k, t) {
int len = strlen(test_description(t, -1));
@@ -443,28 +382,6 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
}
}
}
-
- return run_shell_tests(argc, argv, i, width, skiplist);
-}
-
-static int perf_test__list_shell(int argc, const char **argv, int i)
-{
- const struct script_file *files, *file;
-
- files = list_script_files();
- if (!files)
- return 0;
- for (file = files; file->file; file++) {
- int curr = i++;
- struct test_suite t = {
- .desc = file->desc
- };
-
- if (!perf_test__matches(t.desc, curr, argc, argv))
- continue;
-
- pr_info("%3d: %s\n", i, t.desc);
- }
return 0;
}

@@ -491,9 +408,6 @@ static int perf_test__list(int argc, const char **argv)
test_description(t, subi));
}
}
-
- perf_test__list_shell(argc, argv, i);
-
return 0;
}

@@ -553,6 +467,7 @@ int cmd_test(int argc, const char **argv)
/* Unbuffered output */
setvbuf(stdout, NULL, _IONBF, 0);

+ tests[2] = create_script_test_suites();
argc = parse_options_subcommand(argc, argv, test_options, test_subcommands, test_usage, 0);
if (argc >= 1 && !strcmp(argv[0], "list"))
return perf_test__list(argc - 1, argv + 1);
diff --git a/tools/perf/tests/tests-scripts.c b/tools/perf/tests/tests-scripts.c
index 9b3b66dd5508..b92a93c251c6 100644
--- a/tools/perf/tests/tests-scripts.c
+++ b/tools/perf/tests/tests-scripts.c
@@ -26,16 +26,6 @@
#include "tests.h"
#include "util/rlimit.h"

-
-/*
- * As this is a singleton built once for the run of the process, there is
- * no value in trying to free it and just let it stay around until process
- * exits when it's cleaned up.
- */
-static size_t files_num = 0;
-static struct script_file *files = NULL;
-static int files_max_width = 0;
-
static int shell_tests__dir_fd(void)
{
char path[PATH_MAX], *exec_path;
@@ -131,12 +121,31 @@ static char *strdup_check(const char *str)
return newstr;
}

-static void append_script(int dir_fd, const char *name, char *desc)
+static int shell_test__run(struct test_suite *test, int subtest __maybe_unused)
+{
+ const char *file = test->priv;
+ int err;
+ char *cmd;
+
+ asprintf(&cmd, "%s%s", file, verbose ? " -v" : "");
+ if (!cmd)
+ return TEST_FAIL;
+ err = system(cmd);
+ free(cmd);
+ if (!err)
+ return TEST_OK;
+
+ return WEXITSTATUS(err) == 2 ? TEST_SKIP : TEST_FAIL;
+}
+
+static void append_script(int dir_fd, const char *name, char *desc,
+ struct test_suite ***result,
+ size_t *result_sz)
{
char filename[PATH_MAX], link[128];
- struct script_file *files_tmp;
- size_t files_num_tmp, len;
- int width;
+ struct test_suite *test_suite, **result_tmp;
+ struct test_case *tests;
+ size_t len;

snprintf(link, sizeof(link), "/proc/%d/fd/%d", getpid(), dir_fd);
len = readlink(link, filename, sizeof(filename));
@@ -146,33 +155,43 @@ static void append_script(int dir_fd, const char *name, char *desc)
}
filename[len++] = '/';
strcpy(&filename[len], name);
- files_num_tmp = files_num + 1;
- if (files_num_tmp >= SIZE_MAX) {
- pr_err("Too many script files\n");
- abort();
+
+ tests = calloc(2, sizeof(*tests));
+ if (!tests) {
+ pr_err("Out of memory while building script test suite list\n");
+ return;
+ }
+ tests[0].name = strdup_check(name);
+ tests[0].desc = strdup_check(desc);
+ tests[0].run_case = shell_test__run;
+
+ test_suite = zalloc(sizeof(*test_suite));
+ if (!test_suite) {
+ pr_err("Out of memory while building script test suite list\n");
+ free(tests);
+ return;
}
+ test_suite->desc = desc;
+ test_suite->test_cases = tests;
+ test_suite->priv = strdup_check(filename);
/* Realloc is good enough, though we could realloc by chunks, not that
* anyone will ever measure performance here */
- files_tmp = realloc(files,
- (files_num_tmp + 1) * sizeof(struct script_file));
- if (files_tmp == NULL) {
- pr_err("Out of memory while building test list\n");
- abort();
+ result_tmp = realloc(*result, (*result_sz + 1) * sizeof(*result_tmp));
+ if (result_tmp == NULL) {
+ pr_err("Out of memory while building script test suite list\n");
+ free(tests);
+ free(test_suite);
+ return;
}
/* Add file to end and NULL terminate the struct array */
- files = files_tmp;
- files_num = files_num_tmp;
- files[files_num - 1].file = strdup_check(filename);
- files[files_num - 1].desc = desc;
- files[files_num].file = NULL;
- files[files_num].desc = NULL;
-
- width = strlen(desc); /* Track max width of desc */
- if (width > files_max_width)
- files_max_width = width;
+ *result = result_tmp;
+ (*result)[*result_sz] = test_suite;
+ (*result_sz)++;
}

-static void append_scripts_in_dir(int dir_fd)
+static void append_scripts_in_dir(int dir_fd,
+ struct test_suite ***result,
+ size_t *result_sz)
{
struct dirent **entlist;
struct dirent *ent;
@@ -191,7 +210,7 @@ static void append_scripts_in_dir(int dir_fd)
char *desc = shell_test__description(dir_fd, ent->d_name);

if (desc) /* It has a desc line - valid script */
- append_script(dir_fd, ent->d_name, desc);
+ append_script(dir_fd, ent->d_name, desc, result, result_sz);
continue;
}
if (ent->d_type != DT_DIR) {
@@ -204,32 +223,31 @@ static void append_scripts_in_dir(int dir_fd)
continue;
}
fd = openat(dir_fd, ent->d_name, O_PATH);
- append_scripts_in_dir(fd);
+ append_scripts_in_dir(fd, result, result_sz);
}
for (i = 0; i < n_dirs; i++) /* Clean up */
zfree(&entlist[i]);
free(entlist);
}

-const struct script_file *list_script_files(void)
+struct test_suite **create_script_test_suites(void)
{
- int dir_fd;
-
- if (files)
- return files; /* Singleton - we already know our list */
+ struct test_suite **result = NULL, **result_tmp;
+ size_t result_sz = 0;
+ int dir_fd = shell_tests__dir_fd(); /* Walk dir */

- dir_fd = shell_tests__dir_fd(); /* Walk dir */
if (dir_fd < 0)
return NULL;

- append_scripts_in_dir(dir_fd);
+ append_scripts_in_dir(dir_fd, &result, &result_sz);
+ result_tmp = realloc(result, (result_sz + 1) * sizeof(*result_tmp));
+ if (result_tmp == NULL) {
+ pr_err("Out of memory while building script test suite list\n");
+ abort();
+ }
+ /* NULL terminate the test suite array. */
+ result = result_tmp;
+ result[result_sz] = NULL;
close(dir_fd);
-
- return files;
-}
-
-int list_script_max_width(void)
-{
- list_script_files(); /* Ensure we have scanned all scripts */
- return files_max_width;
+ return result;
}
diff --git a/tools/perf/tests/tests-scripts.h b/tools/perf/tests/tests-scripts.h
index 3508a293aaf9..b553ad26ea17 100644
--- a/tools/perf/tests/tests-scripts.h
+++ b/tools/perf/tests/tests-scripts.h
@@ -2,14 +2,8 @@
#ifndef TESTS_SCRIPTS_H
#define TESTS_SCRIPTS_H

-struct script_file {
- char *file;
- char *desc;
-};
+#include "tests.h"

-/* List available script tests to run - singleton - never freed */
-const struct script_file *list_script_files(void);
-/* Get maximum width of description string */
-int list_script_max_width(void);
+struct test_suite **create_script_test_suites(void);

#endif /* TESTS_SCRIPTS_H */
--
2.43.0.rc2.451.g8631bc7472-goog

2023-12-01 23:52:01

by Ian Rogers

Subject: [PATCH v1 9/9] perf tests: Add option to run tests in parallel

By default tests are forked; add an option (-p or --parallel) so that
the forked tests are all started in parallel and their output is then
gathered serially. This is opt-in, as running in parallel can cause
test flakes.

Rather than forking within the code, the start_command/finish_command
helpers from libsubcmd are used. This changes how stderr and stdout
are handled.
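
The resulting shape is: start every child first, then reap and report
in order. A minimal sketch of that pattern using plain fork()/waitpid()
rather than libsubcmd's struct child_process, which the patch actually
uses:

  #include <stdlib.h>
  #include <sys/types.h>
  #include <sys/wait.h>
  #include <unistd.h>

  static int run_all_parallel(int (**tests)(void), int n)
  {
      pid_t *pids = calloc(n, sizeof(*pids));

      if (!pids)
          return -1;
      for (int i = 0; i < n; i++) {    /* start everything up front */
          pids[i] = fork();
          if (pids[i] == 0)
              _exit(tests[i]());
      }
      for (int i = 0; i < n; i++) {    /* then gather results serially */
          int status = 0;

          if (pids[i] > 0)
              waitpid(pids[i], &status, 0);
          /* ... print the i-th result from WEXITSTATUS(status) ... */
      }
      free(pids);
      return 0;
  }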

Signed-off-by: Ian Rogers <[email protected]>
---
tools/perf/tests/builtin-test.c | 261 +++++++++++++++++++++-----------
1 file changed, 169 insertions(+), 92 deletions(-)

diff --git a/tools/perf/tests/builtin-test.c b/tools/perf/tests/builtin-test.c
index 54b11c23e863..91c32b477cbb 100644
--- a/tools/perf/tests/builtin-test.c
+++ b/tools/perf/tests/builtin-test.c
@@ -21,6 +21,7 @@
#include "debug.h"
#include "color.h"
#include <subcmd/parse-options.h>
+#include <subcmd/run-command.h>
#include "string2.h"
#include "symbol.h"
#include "util/rlimit.h"
@@ -31,7 +32,13 @@

#include "tests-scripts.h"

+/*
+ * Command line option to run tests without forking, in the same process,
+ * making them easier to debug.
+ */
static bool dont_fork;
+/* Fork the tests in parallel and then wait for their completion. */
+static bool parallel;
const char *dso_to_test;
const char *test_objdump_path = "objdump";

@@ -211,76 +218,36 @@ static bool perf_test__matches(const char *desc, int curr, int argc, const char
return false;
}

-static int run_test(struct test_suite *test, int subtest)
-{
- int status, err = -1, child = dont_fork ? 0 : fork();
- char sbuf[STRERR_BUFSIZE];
-
- if (child < 0) {
- pr_err("failed to fork test: %s\n",
- str_error_r(errno, sbuf, sizeof(sbuf)));
- return -1;
- }
-
- if (!child) {
- if (!dont_fork) {
- pr_debug("test child forked, pid %d\n", getpid());
-
- if (verbose <= 0) {
- int nullfd = open("/dev/null", O_WRONLY);
-
- if (nullfd >= 0) {
- close(STDERR_FILENO);
- close(STDOUT_FILENO);
-
- dup2(nullfd, STDOUT_FILENO);
- dup2(STDOUT_FILENO, STDERR_FILENO);
- close(nullfd);
- }
- } else {
- signal(SIGSEGV, sighandler_dump_stack);
- signal(SIGFPE, sighandler_dump_stack);
- }
- }
-
- err = test_function(test, subtest)(test, subtest);
- if (!dont_fork)
- exit(err);
- }
-
- if (!dont_fork) {
- wait(&status);
+struct child_test {
+ struct child_process process;
+ struct test_suite *test;
+ int test_num;
+ int subtest;
+};

- if (WIFEXITED(status)) {
- err = (signed char)WEXITSTATUS(status);
- pr_debug("test child finished with %d\n", err);
- } else if (WIFSIGNALED(status)) {
- err = -1;
- pr_debug("test child interrupted\n");
- }
- }
+static int run_test_child(struct child_process *process)
+{
+ struct child_test *child = container_of(process, struct child_test, process);
+ int err;

- return err;
+ pr_debug("--- start ---\n");
+ pr_debug("test child forked, pid %d\n", getpid());
+ err = test_function(child->test, child->subtest)(child->test, child->subtest);
+ pr_debug("---- end(%d) ----\n", err);
+ fflush(NULL);
+ return -err;
}

-#define for_each_test(j, k, t) \
- for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
- while ((t = tests[j][k++]) != NULL)
-
-static int test_and_print(struct test_suite *t, int subtest)
+static int print_test_result(struct test_suite *t, int i, int subtest, int result, int width)
{
- int err;
-
- pr_debug("\n--- start ---\n");
- err = run_test(t, subtest);
- pr_debug("---- end ----\n");
+ if (has_subtests(t)) {
+ int subw = width > 2 ? width - 2 : width;

- if (!has_subtests(t))
- pr_debug("%s:", t->desc);
- else
- pr_debug("%s subtest %d:", t->desc, subtest + 1);
+ pr_info("%3d.%1d: %-*s:", i + 1, subtest + 1, subw, test_description(t, subtest));
+ } else
+ pr_info("%3d: %-*s:", i + 1, width, test_description(t, subtest));

- switch (err) {
+ switch (result) {
case TEST_OK:
pr_info(" Ok\n");
break;
@@ -299,22 +266,135 @@ static int test_and_print(struct test_suite *t, int subtest)
break;
}

- return err;
+ return 0;
+}
+
+static int finish_test(struct child_test *child_test, int width)
+{
+ struct test_suite *t = child_test->test;
+ int i = child_test->test_num;
+ int subi = child_test->subtest;
+ int out = child_test->process.out;
+ int err = child_test->process.err;
+ int ret;
+
+ if (verbose) {
+ bool out_done = false, err_done = false;
+
+ fcntl(out, F_SETFL, O_NONBLOCK);
+ fcntl(err, F_SETFL, O_NONBLOCK);
+ if (has_subtests(t))
+ pr_info("%3d.%1d: %s:\n", i + 1, subi + 1, test_description(t, subi));
+ else
+ pr_info("%3d: %s:\n", i + 1, test_description(t, -1));
+
+ while (!out_done || !err_done) {
+ char buf[512];
+ ssize_t len;
+
+ if (!out_done) {
+ errno = 0;
+ len = read(out, buf, sizeof(buf) - 1);
+
+ if (len <= 0)
+ out_done = errno != EAGAIN;
+ else {
+ buf[len] = '\0';
+ fprintf(stdout, "%s", buf);
+ }
+ }
+ if (!err_done) {
+ errno = 0;
+ len = read(err, buf, sizeof(buf) - 1);
+
+ if (len <= 0)
+ err_done = errno != EAGAIN;
+ else {
+ buf[len] = '\0';
+ fprintf(stderr, "%s", buf);
+ }
+ }
+ }
+ }
+ ret = finish_command(&child_test->process);
+ print_test_result(t, i, subi, ret, width);
+ if (out >= 0)
+ close(out);
+ if (err >= 0)
+ close(err);
+ return 0;
+}
+
+static int start_test(struct test_suite *test, int i, int subi, struct child_test **child,
+ int width)
+{
+ int err;
+
+ *child = NULL;
+ if (dont_fork) {
+ pr_debug("--- start ---\n");
+ err = test_function(test, subi)(test, subi);
+ pr_debug("---- end ----\n");
+ print_test_result(test, i, subi, err, width);
+ return 0;
+ }
+
+ *child = zalloc(sizeof(**child));
+ if (!*child)
+ return -ENOMEM;
+
+ (*child)->test = test;
+ (*child)->test_num = i;
+ (*child)->subtest = subi;
+ (*child)->process.pid = -1;
+ (*child)->process.no_stdin = 1;
+ if (verbose <= 0) {
+ (*child)->process.no_stdout = 1;
+ (*child)->process.no_stderr = 1;
+ } else {
+ (*child)->process.out = -1;
+ (*child)->process.err = -1;
+ }
+ (*child)->process.no_exec_cmd = run_test_child;
+ err = start_command(&(*child)->process);
+ if (err || parallel)
+ return err;
+ return finish_test(*child, width);
}

+#define for_each_test(j, k, t) \
+ for (j = 0, k = 0; j < ARRAY_SIZE(tests); j++, k = 0) \
+ while ((t = tests[j][k++]) != NULL)
+
static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
{
struct test_suite *t;
unsigned int j, k;
int i = 0;
int width = 0;
+ size_t num_tests = 0;
+ struct child_test **child_tests;
+ int child_test_num = 0;

for_each_test(j, k, t) {
int len = strlen(test_description(t, -1));

if (width < len)
width = len;
+
+ if (has_subtests(t)) {
+ for (int l = 0, subn = num_subtests(t); l < subn; l++) {
+ len = strlen(test_description(t, l));
+ if (width < len)
+ width = len;
+ num_tests++;
+ }
+ } else
+ num_tests++;
}
+ child_tests = calloc(num_tests, sizeof(*child_tests));
+ if (!child_tests)
+ return -ENOMEM;

for_each_test(j, k, t) {
int curr = i++;
@@ -336,52 +416,47 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
continue;
}

- pr_info("%3d: %-*s:", i, width, test_description(t, -1));
-
if (intlist__find(skiplist, i)) {
+ pr_info("%3d: %-*s:", curr + 1, width, test_description(t, -1));
color_fprintf(stderr, PERF_COLOR_YELLOW, " Skip (user override)\n");
continue;
}

if (!has_subtests(t)) {
- test_and_print(t, -1);
+ int err = start_test(t, curr, -1, &child_tests[child_test_num++], width);
+
+ if (err) {
+ /* TODO: if parallel waitpid the already forked children. */
+ free(child_tests);
+ return err;
+ }
} else {
int subn = num_subtests(t);
- /*
- * minus 2 to align with normal testcases.
- * For subtest we print additional '.x' in number.
- * for example:
- *
- * 35: Test LLVM searching and compiling :
- * 35.1: Basic BPF llvm compiling test : Ok
- */
- int subw = width > 2 ? width - 2 : width;
-
- if (subn <= 0) {
- color_fprintf(stderr, PERF_COLOR_YELLOW,
- " Skip (not compiled in)\n");
- continue;
- }
- pr_info("\n");

for (subi = 0; subi < subn; subi++) {
- int len = strlen(test_description(t, subi));
-
- if (subw < len)
- subw = len;
- }
+ int err;

- for (subi = 0; subi < subn; subi++) {
if (!perf_test__matches(test_description(t, subi),
curr, argc, argv))
continue;

- pr_info("%3d.%1d: %-*s:", i, subi + 1, subw,
- test_description(t, subi));
- test_and_print(t, subi);
+ err = start_test(t, curr, subi, &child_tests[child_test_num++],
+ width);
+ if (err)
+ return err;
}
}
}
+ for (i = 0; i < child_test_num; i++) {
+ if (parallel) {
+ int ret = finish_test(child_tests[i], width);
+
+ if (ret)
+ return ret;
+ }
+ free(child_tests[i]);
+ }
+ free(child_tests);
return 0;
}

@@ -449,6 +524,8 @@ int cmd_test(int argc, const char **argv)
"be more verbose (show symbol address, etc)"),
OPT_BOOLEAN('F', "dont-fork", &dont_fork,
"Do not fork for testcase"),
+ OPT_BOOLEAN('p', "parallel", &parallel,
+ "Run the tests altogether in parallel"),
OPT_STRING('w', "workload", &workload, "work", "workload to run for testing"),
OPT_STRING(0, "dso", &dso_to_test, "dso", "dso to test"),
OPT_STRING(0, "objdump", &test_objdump_path, "path",
--
2.43.0.rc2.451.g8631bc7472-goog

2023-12-01 23:52:08

by Ian Rogers

Subject: [PATCH v1 3/9] perf tests: Avoid fork in perf_has_symbol test

perf test -vv Symbols is used to identify symbols within the perf
binary. Add the -F flag so that the test command doesn't fork the test
before running. This removes a little overhead.

Signed-off-by: Ian Rogers <[email protected]>
---
tools/perf/tests/shell/lib/perf_has_symbol.sh | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
index 5d59c32ae3e7..561c93b75d77 100644
--- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
+++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
@@ -3,7 +3,7 @@

perf_has_symbol()
{
- if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
+ if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
echo "perf does have symbol '$1'"
return 0
fi
--
2.43.0.rc2.451.g8631bc7472-goog

2023-12-02 02:08:42

by Ian Rogers

Subject: Re: [PATCH v1 9/9] perf tests: Add option to run tests in parallel

On Fri, Dec 1, 2023 at 3:50 PM Ian Rogers <[email protected]> wrote:
>
> By default tests are forked; add an option (-p or --parallel) so that
> the forked tests are all started in parallel and their output is then
> gathered serially. This is opt-in, as running in parallel can cause
> test flakes.
>
> Rather than forking within the code, the start_command/finish_command
> helpers from libsubcmd are used. This changes how stderr and stdout
> are handled.
>
> Signed-off-by: Ian Rogers <[email protected]>

Actually, I think this patch needs more work. The verbose output is
degraded and missing in some cases. Suggestions on how to fix this are
welcome. It'd be nice to fix up the tests and allow parallel to be the
default. The first patch in this series is one such fix. Another is
needed to make "Couldn't resolve comm name for pid" silent in the cases
where it is racy. I was also noticing hangs on things like the lock
contention test. The good news is that the tests are doing their job of
finding bugs.

Thanks,
Ian


> [ patch body snipped ]

2023-12-04 06:55:12

by Adrian Hunter

Subject: Re: [PATCH v1 3/9] perf tests: Avoid fork in perf_has_symbol test

On 2/12/23 01:50, Ian Rogers wrote:
> perf test -vv Symbols is used to identify symbols within the perf
> binary. Add the -F flag so that the test command doesn't fork the test
> before running. This removes a little overhead.
>
> Signed-off-by: Ian Rogers <[email protected]>

Acked-by: Adrian Hunter <[email protected]>

> ---
> tools/perf/tests/shell/lib/perf_has_symbol.sh | 2 +-
> 1 file changed, 1 insertion(+), 1 deletion(-)
>
> diff --git a/tools/perf/tests/shell/lib/perf_has_symbol.sh b/tools/perf/tests/shell/lib/perf_has_symbol.sh
> index 5d59c32ae3e7..561c93b75d77 100644
> --- a/tools/perf/tests/shell/lib/perf_has_symbol.sh
> +++ b/tools/perf/tests/shell/lib/perf_has_symbol.sh
> @@ -3,7 +3,7 @@
>
> perf_has_symbol()
> {
> - if perf test -vv "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
> + if perf test -vv -F "Symbols" 2>&1 | grep "[[:space:]]$1$"; then
> echo "perf does have symbol '$1'"
> return 0
> fi

2023-12-04 20:30:59

by Arnaldo Carvalho de Melo

Subject: Re: [PATCH v1 9/9] perf tests: Add option to run tests in parallel

Em Fri, Dec 01, 2023 at 06:06:12PM -0800, Ian Rogers escreveu:
> On Fri, Dec 1, 2023 at 3:50 PM Ian Rogers <[email protected]> wrote:
> >
> > By default tests are forked; add an option (-p or --parallel) so that
> > the forked tests are all started in parallel and their output is then
> > gathered serially. This is opt-in, as running in parallel can cause
> > test flakes.
> >
> > Rather than forking within the code, the start_command/finish_command
> > helpers from libsubcmd are used. This changes how stderr and stdout
> > are handled.
> >
> > Signed-off-by: Ian Rogers <[email protected]>
>
> Actually, I think this patch needs more work. The verbose output is
> degraded and missing in some cases. Suggestions on how to fix this are welcome.

I'll think about it, but to make progress I think the first 8 patches in
this series can be considered now?

- Arnaldo

> It'd be nice to fix up the tests and allow parallel to be the default.
> The first patch in this series is one such fix. Another is needed to
> make "Couldn't resolve comm name for pid" silent in the cases where it
> is racy. I was also noticing hangs on things like the lock contention
> test. The good news is the tests are doing their job of finding bugs.
>
> Thanks,
> Ian
>
>
> > [ patch body snipped ]

--

- Arnaldo

2023-12-04 21:14:51

by Ian Rogers

Subject: Re: [PATCH v1 9/9] perf tests: Add option to run tests in parallel

On Mon, Dec 4, 2023 at 12:30 PM Arnaldo Carvalho de Melo
<[email protected]> wrote:
>
> Em Fri, Dec 01, 2023 at 06:06:12PM -0800, Ian Rogers escreveu:
> > On Fri, Dec 1, 2023 at 3:50 PM Ian Rogers <[email protected]> wrote:
> > >
> > > By default tests are forked; add an option (-p or --parallel) so that
> > > the forked tests are all started in parallel and their output is then
> > > gathered serially. This is opt-in, as running in parallel can cause
> > > test flakes.
> > >
> > > Rather than forking within the code, the start_command/finish_command
> > > helpers from libsubcmd are used. This changes how stderr and stdout
> > > are handled.
> > >
> > > Signed-off-by: Ian Rogers <[email protected]>
> >
> > Actually, I think this patch needs more work. The verbose output is
> > degraded and missing in some cases. Suggestions on how to fix this are welcome.
>
> I'll think about it, but to make progress I think the first 8 patches in
> this series can be considered now?

That would be great. Thanks!

Ian