2021-05-26 08:14:45

by David Gow

[permalink] [raw]
Subject: [PATCH 1/3] kunit: Support skipped tests

The kunit_mark_skipped() macro marks the current test as "skipped", with
the provided reason. The kunit_skip() macro will mark the test as
skipped, and abort the test.

The TAP specification supports this "SKIP directive" as a comment after
the "ok" / "not ok" for a test. See the "Directives" section of the TAP
spec for details:
https://testanything.org/tap-specification.html#directives

The 'success' field for KUnit tests is replaced with a kunit_status
enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a
'status_comment' containing information on why a test was skipped.

A new 'kunit_status' test suite is added to test this.

Signed-off-by: David Gow <[email protected]>
---
This change depends on the assertion typechecking fix here:
https://lore.kernel.org/linux-kselftest/[email protected]/
Only the first two patches in the series are required.

This is the long-awaited follow-up to the skip tests RFC:
https://lore.kernel.org/linux-kselftest/[email protected]/

There are quite a few changes since that version, principally:
- A kunit_status enum is now used, with SKIPPED a distinct state
- The kunit_mark_skipped() and kunit_skip() macros now take printf-style
format strings.
- There is now a kunit_status test suite providing basic tests of this
functionality.
- The kunit_tool changes have been split into a separate commit.
- The example skipped tests have been expanded and moved to their own
suite, which is not enabled by KUNIT_ALL_TESTS.
- A number of other fixes and changes here and there.

Cheers,
-- David

include/kunit/test.h | 68 ++++++++++++++++++++++++++++++++++++++----
lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
lib/kunit/test.c | 51 ++++++++++++++++++-------------
3 files changed, 134 insertions(+), 27 deletions(-)

diff --git a/include/kunit/test.h b/include/kunit/test.h
index b68c61348121..40b536da027e 100644
--- a/include/kunit/test.h
+++ b/include/kunit/test.h
@@ -105,6 +105,18 @@ struct kunit;
#define KUNIT_SUBTEST_INDENT " "
#define KUNIT_SUBSUBTEST_INDENT " "

+/**
+ * enum kunit_status - Type of result for a test or test suite
+ * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
+ * @KUNIT_FAILURE: Denotes the test has failed.
+ * @KUNIT_SKIPPED: Denotes the test has been skipped.
+ */
+enum kunit_status {
+ KUNIT_SUCCESS,
+ KUNIT_FAILURE,
+ KUNIT_SKIPPED,
+};
+
/**
* struct kunit_case - represents an individual test case.
*
@@ -148,13 +160,20 @@ struct kunit_case {
const void* (*generate_params)(const void *prev, char *desc);

/* private: internal use only. */
- bool success;
+ enum kunit_status status;
char *log;
};

-static inline char *kunit_status_to_string(bool status)
+static inline char *kunit_status_to_string(enum kunit_status status)
{
- return status ? "ok" : "not ok";
+ switch (status) {
+ case KUNIT_SKIPPED:
+ case KUNIT_SUCCESS:
+ return "ok";
+ case KUNIT_FAILURE:
+ return "not ok";
+ }
+ return "invalid";
}

/**
@@ -212,6 +231,7 @@ struct kunit_suite {
struct kunit_case *test_cases;

/* private: internal use only */
+ char status_comment[256];
struct dentry *debugfs;
char *log;
};
@@ -245,19 +265,21 @@ struct kunit {
* be read after the test case finishes once all threads associated
* with the test case have terminated.
*/
- bool success; /* Read only after test_case finishes! */
spinlock_t lock; /* Guards all mutable test state. */
+ enum kunit_status status; /* Read only after test_case finishes! */
/*
* Because resources is a list that may be updated multiple times (with
* new resources) from any thread associated with a test case, we must
* protect it with some type of lock.
*/
struct list_head resources; /* Protected by lock. */
+
+ char status_comment[256];
};

static inline void kunit_set_failure(struct kunit *test)
{
- WRITE_ONCE(test->success, false);
+ WRITE_ONCE(test->status, KUNIT_FAILURE);
}

void kunit_init_test(struct kunit *test, const char *name, char *log);
@@ -348,7 +370,7 @@ static inline int kunit_run_all_tests(void)
#define kunit_suite_for_each_test_case(suite, test_case) \
for (test_case = suite->test_cases; test_case->run_case; test_case++)

-bool kunit_suite_has_succeeded(struct kunit_suite *suite);
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);

/*
* Like kunit_alloc_resource() below, but returns the struct kunit_resource
@@ -612,6 +634,40 @@ void kunit_cleanup(struct kunit *test);

void kunit_log_append(char *log, const char *fmt, ...);

+/**
+ * kunit_mark_skipped() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Marks the test as skipped. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution continues after kunit_mark_skipped() is called.
+ */
+#define kunit_mark_skipped(test_or_suite, fmt, ...) \
+ do { \
+ WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
+ scnprintf((test_or_suite)->status_comment, 256, fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * kunit_skip() - Marks @test_or_suite as skipped
+ *
+ * @test_or_suite: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Skips the test. @fmt is given output as the test status
+ * comment, typically the reason the test was skipped.
+ *
+ * Test execution is halted after kunit_skip() is called.
+ */
+#define kunit_skip(test_or_suite, fmt, ...) \
+ do { \
+ kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
+ kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
+ } while (0)
+
/*
* printk and log to per-test or per-suite log buffer. Logging only done
* if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
index 69f902440a0e..d69efcbed624 100644
--- a/lib/kunit/kunit-test.c
+++ b/lib/kunit/kunit-test.c
@@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
#endif
}

+static void kunit_status_set_failure_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
+ kunit_set_failure(&fake);
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
+}
+
+static void kunit_status_mark_skipped_test(struct kunit *test)
+{
+ struct kunit fake;
+
+ kunit_init_test(&fake, "fake test", NULL);
+
+ /* Before: Should be SUCCESS with no comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
+
+ /* Mark the test as skipped. */
+ kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
+
+ /* After: Should be SKIPPED with our comment. */
+ KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
+ KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
+}
+
+static struct kunit_case kunit_status_test_cases[] = {
+ KUNIT_CASE(kunit_status_set_failure_test),
+ KUNIT_CASE(kunit_status_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite kunit_status_test_suite = {
+ .name = "kunit_status",
+ .test_cases = kunit_status_test_cases,
+};
+
kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
- &kunit_log_test_suite);
+ &kunit_log_test_suite, &kunit_status_test_suite);

MODULE_LICENSE("GPL v2");
diff --git a/lib/kunit/test.c b/lib/kunit/test.c
index 2f6cc0123232..0ee07705d2b0 100644
--- a/lib/kunit/test.c
+++ b/lib/kunit/test.c
@@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)

static void kunit_print_ok_not_ok(void *test_or_suite,
bool is_test,
- bool is_ok,
+ enum kunit_status status,
size_t test_number,
- const char *description)
+ const char *description,
+ const char *directive)
{
struct kunit_suite *suite = is_test ? NULL : test_or_suite;
struct kunit *test = is_test ? test_or_suite : NULL;
+ const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";

/*
* We do not log the test suite results as doing so would
@@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
* representation.
*/
if (suite)
- pr_info("%s %zd - %s\n",
- kunit_status_to_string(is_ok),
- test_number, description);
+ pr_info("%s %zd - %s%s%s\n",
+ kunit_status_to_string(status),
+ test_number, description,
+ directive_header, directive ? directive : "");
else
- kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
- kunit_status_to_string(is_ok),
- test_number, description);
+ kunit_log(KERN_INFO, test,
+ KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
+ kunit_status_to_string(status),
+ test_number, description,
+ directive_header, directive ? directive : "");
}

-bool kunit_suite_has_succeeded(struct kunit_suite *suite)
+enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
{
const struct kunit_case *test_case;
+ enum kunit_status status = KUNIT_SKIPPED;

kunit_suite_for_each_test_case(suite, test_case) {
- if (!test_case->success)
- return false;
+ if (test_case->status == KUNIT_FAILURE)
+ return KUNIT_FAILURE;
+ else if (test_case->status == KUNIT_SUCCESS)
+ status = KUNIT_SUCCESS;
}

- return true;
+ return status;
}
EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);

@@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
kunit_print_ok_not_ok((void *)suite, false,
kunit_suite_has_succeeded(suite),
kunit_suite_counter++,
- suite->name);
+ suite->name,
+ suite->status_comment);
}

unsigned int kunit_test_case_num(struct kunit_suite *suite,
@@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
test->log = log;
if (test->log)
test->log[0] = '\0';
- test->success = true;
+ test->status = KUNIT_SUCCESS;
+ test->status_comment[0] = '\0';
}
EXPORT_SYMBOL_GPL(kunit_init_test);

@@ -376,7 +386,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
context.test_case = test_case;
kunit_try_catch_run(try_catch, &context);

- test_case->success = test->success;
+ test_case->status = test->status;
+
}

int kunit_run_tests(struct kunit_suite *suite)
@@ -388,7 +399,6 @@ int kunit_run_tests(struct kunit_suite *suite)

kunit_suite_for_each_test_case(suite, test_case) {
struct kunit test = { .param_value = NULL, .param_index = 0 };
- bool test_success = true;

if (test_case->generate_params) {
/* Get initial param. */
@@ -398,7 +408,6 @@ int kunit_run_tests(struct kunit_suite *suite)

do {
kunit_run_case_catch_errors(suite, test_case, &test);
- test_success &= test_case->success;

if (test_case->generate_params) {
if (param_desc[0] == '\0') {
@@ -410,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
KUNIT_SUBTEST_INDENT
"# %s: %s %d - %s",
test_case->name,
- kunit_status_to_string(test.success),
+ kunit_status_to_string(test.status),
test.param_index + 1, param_desc);

/* Get next param. */
@@ -420,9 +429,10 @@ int kunit_run_tests(struct kunit_suite *suite)
}
} while (test.param_value);

- kunit_print_ok_not_ok(&test, true, test_success,
+ kunit_print_ok_not_ok(&test, true, test_case->status,
kunit_test_case_num(suite, test_case),
- test_case->name);
+ test_case->name,
+ test.status_comment);
}

kunit_print_subtest_end(suite);
@@ -434,6 +444,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
static void kunit_init_suite(struct kunit_suite *suite)
{
kunit_debugfs_create_suite(suite);
+ suite->status_comment[0] = '\0';
}

int __kunit_test_suites_init(struct kunit_suite * const * const suites)
--
2.31.1.818.g46aad6cb9e-goog


2021-05-26 08:15:20

by David Gow

[permalink] [raw]
Subject: [PATCH 2/3] kunit: tool: Support skipped tests in kunit_tool

Add support for the SKIP directive to kunit_tool's TAP parser.

Skipped tests now show up as such in the printed summary. The number of
skipped tests is counted, and if all tests in a suite are skipped, the
suite is also marked as skipped. Otherwise, skipped tests do not affect
the suite result.

Example output:
[00:22:34] ======== [SKIPPED] example_skip ========
[00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
[00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
[00:22:34] ============================================================
[00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.

Signed-off-by: David Gow <[email protected]>
---
tools/testing/kunit/kunit_parser.py | 47 +++++++++++++++++++-------
tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++
2 files changed, 57 insertions(+), 12 deletions(-)

diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
index e8bcc139702e..6b5dd26b479d 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -43,6 +43,7 @@ class TestCase(object):
class TestStatus(Enum):
SUCCESS = auto()
FAILURE = auto()
+ SKIPPED = auto()
TEST_CRASHED = auto()
NO_TESTS = auto()
FAILURE_TO_PARSE_TESTS = auto()
@@ -108,6 +109,8 @@ def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:

OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])

+OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
+
OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')

OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
@@ -125,6 +128,10 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
if match:
test_case.log.append(lines.pop(0))
test_case.name = match.group(2)
+ skip_match = OK_NOT_OK_SKIP.match(line)
+ if skip_match:
+ test_case.status = TestStatus.SKIPPED
+ return True
if test_case.status == TestStatus.TEST_CRASHED:
return True
if match.group(1) == 'ok':
@@ -188,16 +195,16 @@ def parse_subtest_plan(lines: List[str]) -> Optional[int]:
return None

def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
- if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
+ if left == right:
+ return left
+ elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
return TestStatus.TEST_CRASHED
elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
return TestStatus.FAILURE
- elif left != TestStatus.SUCCESS:
- return left
- elif right != TestStatus.SUCCESS:
+ elif left == TestStatus.SKIPPED:
return right
else:
- return TestStatus.SUCCESS
+ return left

def parse_ok_not_ok_test_suite(lines: List[str],
test_suite: TestSuite,
@@ -214,6 +221,9 @@ def parse_ok_not_ok_test_suite(lines: List[str],
test_suite.status = TestStatus.SUCCESS
else:
test_suite.status = TestStatus.FAILURE
+ skip_match = OK_NOT_OK_SKIP.match(line)
+ if skip_match:
+ test_suite.status = TestStatus.SKIPPED
suite_index = int(match.group(2))
if suite_index != expected_suite_index:
print_with_timestamp(
@@ -224,8 +234,8 @@ def parse_ok_not_ok_test_suite(lines: List[str],
else:
return False

-def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
- return reduce(max_status, statuses, TestStatus.SUCCESS)
+def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
+ return reduce(max_status, status_list, TestStatus.SKIPPED)

def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
@@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
total_tests = 0
failed_tests = 0
crashed_tests = 0
+ skipped_tests = 0
for test_suite in test_result.suites:
if test_suite.status == TestStatus.SUCCESS:
print_suite_divider(green('[PASSED] ') + test_suite.name)
+ elif test_suite.status == TestStatus.SKIPPED:
+ print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
elif test_suite.status == TestStatus.TEST_CRASHED:
print_suite_divider(red('[CRASHED] ' + test_suite.name))
else:
@@ -326,6 +339,9 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
total_tests += 1
if test_case.status == TestStatus.SUCCESS:
print_with_timestamp(green('[PASSED] ') + test_case.name)
+ elif test_case.status == TestStatus.SKIPPED:
+ skipped_tests += 1
+ print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
elif test_case.status == TestStatus.TEST_CRASHED:
crashed_tests += 1
print_with_timestamp(red('[CRASHED] ' + test_case.name))
@@ -336,12 +352,13 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
print_with_timestamp(red('[FAILED] ') + test_case.name)
print_log(map(yellow, test_case.log))
print_with_timestamp('')
- return total_tests, failed_tests, crashed_tests
+ return total_tests, failed_tests, crashed_tests, skipped_tests

def parse_run_tests(kernel_output) -> TestResult:
total_tests = 0
failed_tests = 0
crashed_tests = 0
+ skipped_tests = 0
test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
if test_result.status == TestStatus.NO_TESTS:
print(red('[ERROR] ') + yellow('no tests run!'))
@@ -350,10 +367,16 @@ def parse_run_tests(kernel_output) -> TestResult:
else:
(total_tests,
failed_tests,
- crashed_tests) = print_and_count_results(test_result)
+ crashed_tests,
+ skipped_tests) = print_and_count_results(test_result)
print_with_timestamp(DIVIDER)
- fmt = green if test_result.status == TestStatus.SUCCESS else red
+ if test_result.status == TestStatus.SUCCESS:
+ fmt = green
+ elif test_result.status == TestStatus.SKIPPED:
+ fmt = yellow
+ else:
+ fmt =red
print_with_timestamp(
- fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
- (total_tests, failed_tests, crashed_tests)))
+ fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
+ (total_tests, failed_tests, crashed_tests, skipped_tests)))
return test_result
diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
index 2e809dd956a7..a51e70cafcc1 100755
--- a/tools/testing/kunit/kunit_tool_test.py
+++ b/tools/testing/kunit/kunit_tool_test.py
@@ -183,6 +183,28 @@ class KUnitParserTest(unittest.TestCase):
kunit_parser.TestStatus.TEST_CRASHED,
result.status)

+ def test_skipped_test(self):
+ skipped_log = test_data_path('test_skip_tests.log')
+ file = open(skipped_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ # A skipped test does not fail the whole suite.
+ self.assertEqual(
+ kunit_parser.TestStatus.SUCCESS,
+ result.status)
+ file.close()
+
+ def test_skipped_all_tests(self):
+ skipped_log = test_data_path('test_skip_all_tests.log')
+ file = open(skipped_log)
+ result = kunit_parser.parse_run_tests(file.readlines())
+
+ self.assertEqual(
+ kunit_parser.TestStatus.SKIPPED,
+ result.status)
+ file.close()
+
+
def test_ignores_prefix_printk_time(self):
prefix_log = test_data_path('test_config_printk_time.log')
with open(prefix_log) as file:
--
2.31.1.818.g46aad6cb9e-goog

2021-05-26 08:17:31

by David Gow

[permalink] [raw]
Subject: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

Add a new KUnit test suite which contains tests which are always
skipped. This is used as an example for how to write tests which are
skipped, and to demonstrate the difference between kunit_skip() and
kunit_mark_skipped().

Because these tests do not pass (they're skipped), they are not enabled
by default, or by the KUNIT_ALL_TESTS config option: they must be
enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
a .config or .kunitconfig file.

Signed-off-by: David Gow <[email protected]>
---
lib/kunit/Kconfig | 15 +++++++++
lib/kunit/Makefile | 2 ++
lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
3 files changed, 69 insertions(+)
create mode 100644 lib/kunit/kunit-example-skip-test.c

diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
index 0b5dfb001bac..399fe5f789f7 100644
--- a/lib/kunit/Kconfig
+++ b/lib/kunit/Kconfig
@@ -45,6 +45,21 @@ config KUNIT_EXAMPLE_TEST
is intended for curious hackers who would like to understand how to
use KUnit for kernel development.

+config KUNIT_EXAMPLE_SKIP_TEST
+ tristate "Skipped test example for KUnit"
+ default n
+ help
+ Enables an example unit test that is always skipped.
+
+ This test only exists to help new users understand what KUnit is and
+ how it is used. Please refer to the example test itself,
+ lib/kunit/example-test.c, for more information. This option is
+ intended for curious hackers who would like to understand how to use
+ KUnit for kernel development.
+
+ Because this test does not pass, it is not enabled by
+ CONFIG_KUNIT_ALL_TESTS
+
config KUNIT_ALL_TESTS
tristate "All KUnit tests with satisfied dependencies"
help
diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
index c49f4ffb6273..8a99ff2f83bd 100644
--- a/lib/kunit/Makefile
+++ b/lib/kunit/Makefile
@@ -18,3 +18,5 @@ obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
endif

obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
+
+obj-$(CONFIG_KUNIT_EXAMPLE_SKIP_TEST) += kunit-example-skip-test.o
diff --git a/lib/kunit/kunit-example-skip-test.c b/lib/kunit/kunit-example-skip-test.c
new file mode 100644
index 000000000000..5395ee0be485
--- /dev/null
+++ b/lib/kunit/kunit-example-skip-test.c
@@ -0,0 +1,52 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Example KUnit test which is always skipped.
+ *
+ * Copyright (C) 2021, Google LLC.
+ * Author: David Gow <[email protected]>
+ */
+
+#include <kunit/test.h>
+
+/*
+ * This test should always be skipped.
+ */
+
+static void example_skip_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_log(KERN_INFO, test, "You should not see a line below.");
+
+ /* Skip (and abort) the test */
+ kunit_skip(test, "this test should be skipped");
+
+ /* This line should not execute */
+ kunit_log(KERN_INFO, test, "You should not see this line.");
+}
+
+static void example_mark_skipped_test(struct kunit *test)
+{
+ /* This line should run */
+ kunit_log(KERN_INFO, test, "You should see a line below.");
+
+ /* Skip (but do not abort) the test */
+ kunit_mark_skipped(test, "this test should be skipped");
+
+ /* This line should run */
+ kunit_log(KERN_INFO, test, "You should see this line.");
+}
+
+static struct kunit_case example_skip_test_cases[] = {
+ KUNIT_CASE(example_skip_test),
+ KUNIT_CASE(example_mark_skipped_test),
+ {}
+};
+
+static struct kunit_suite example_skip_test_suite = {
+ .name = "example_skip",
+ .test_cases = example_skip_test_cases,
+};
+
+kunit_test_suites(&example_skip_test_suite);
+
+MODULE_LICENSE("GPL v2");
--
2.31.1.818.g46aad6cb9e-goog

2021-05-26 08:58:17

by Marco Elver

[permalink] [raw]
Subject: Re: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> Add a new KUnit test suite which contains tests which are always
> skipped. This is used as an example for how to write tests which are
> skipped, and to demonstrate the difference between kunit_skip() and
> kunit_mark_skipped().
>
> Because these tests do not pass (they're skipped), they are not enabled
> by default, or by the KUNIT_ALL_TESTS config option: they must be
> enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
> a .config or .kunitconfig file.
>
> Signed-off-by: David Gow <[email protected]>
> ---
> lib/kunit/Kconfig | 15 +++++++++
> lib/kunit/Makefile | 2 ++
> lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
> 3 files changed, 69 insertions(+)
> create mode 100644 lib/kunit/kunit-example-skip-test.c

I don't know if this test is useful for a user of KUnit. Given it's not
testing KUnit functionality (I see you added tests that the feature
works in patch 1/3), but rather a demonstration and therefore dead code.
I don't think the feature is difficult to understand from the API doc
text.

Instead, would it be more helpful to add something to
Documentation/dev-tools/kunit? Or perhaps just add something to
lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
options at least.

Thanks,
-- Marco

2021-05-26 09:06:21

by Marco Elver

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> The kunit_mark_skipped() macro marks the current test as "skipped", with
> the provided reason. The kunit_skip() macro will mark the test as
> skipped, and abort the test.
>
> The TAP specification supports this "SKIP directive" as a comment after
> the "ok" / "not ok" for a test. See the "Directives" section of the TAP
> spec for details:
> https://testanything.org/tap-specification.html#directives
>
> The 'success' field for KUnit tests is replaced with a kunit_status
> enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a
> 'status_comment' containing information on why a test was skipped.
>
> A new 'kunit_status' test suite is added to test this.
>
> Signed-off-by: David Gow <[email protected]>
[...]
> include/kunit/test.h | 68 ++++++++++++++++++++++++++++++++++++++----
> lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
> lib/kunit/test.c | 51 ++++++++++++++++++-------------
> 3 files changed, 134 insertions(+), 27 deletions(-)

Very nice, thank you.

Tested-by: Marco Elver <[email protected]>

, with the below changes to test_kasan.c. If you would like an immediate
user of kunit_skip(), please feel free to add the below patch to your
series.

Thanks,
-- Marco

------ >8 ------

From: Marco Elver <[email protected]>
Date: Wed, 26 May 2021 10:43:12 +0200
Subject: [PATCH] kasan: test: make use of kunit_skip()

Make use of the recently added kunit_skip() to skip tests, as it permits
TAP parsers to recognize if a test was deliberately skipped.

Signed-off-by: Marco Elver <[email protected]>
---
lib/test_kasan.c | 12 ++++--------
1 file changed, 4 insertions(+), 8 deletions(-)

diff --git a/lib/test_kasan.c b/lib/test_kasan.c
index cacbbbdef768..0a2029d14c91 100644
--- a/lib/test_kasan.c
+++ b/lib/test_kasan.c
@@ -111,17 +111,13 @@ static void kasan_test_exit(struct kunit *test)
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
- if (!IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " required"); \
- return; \
- } \
+ if (!IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=y"); \
} while (0)

#define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
- if (IS_ENABLED(config)) { \
- kunit_info((test), "skipping, " #config " enabled"); \
- return; \
- } \
+ if (IS_ENABLED(config)) \
+ kunit_skip((test), "Test requires " #config "=n"); \
} while (0)

static void kmalloc_oob_right(struct kunit *test)
--
2.31.1.818.g46aad6cb9e-goog

2021-05-26 10:56:48

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

Hi David,

I love your patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.13-rc3 next-20210526]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/David-Gow/kunit-Support-skipped-tests/20210526-161324
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git ad9f25d338605d26acedcaf3ba5fab5ca26f1c10
config: x86_64-randconfig-r025-20210526 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 99155e913e9bad5f7f8a247f8bb3a3ff3da74af1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
# https://github.com/0day-ci/linux/commit/83c919857a4ca319ed69d6feaf3d5b5325dbdc29
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review David-Gow/kunit-Support-skipped-tests/20210526-161324
git checkout 83c919857a4ca319ed69d6feaf3d5b5325dbdc29
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>

All warnings (new ones prefixed by >>):

>> lib/kunit/kunit-test.c:458:2: warning: comparison of distinct pointer types ('typeof (__left) *' (aka 'enum kunit_status *') and 'typeof (__right) *' (aka 'int *')) [-Wcompare-distinct-pointer-types]
KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1320:2: note: expanded from macro 'KUNIT_EXPECT_EQ'
KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:957:2: note: expanded from macro 'KUNIT_BINARY_EQ_ASSERTION'
KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:947:2: note: expanded from macro 'KUNIT_BINARY_EQ_MSG_ASSERTION'
KUNIT_BASE_EQ_MSG_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:858:2: note: expanded from macro 'KUNIT_BASE_EQ_MSG_ASSERTION'
KUNIT_BASE_BINARY_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:834:9: note: expanded from macro 'KUNIT_BASE_BINARY_ASSERTION'
((void)__typecheck(__left, __right)); \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/minmax.h:20:28: note: expanded from macro '__typecheck'
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~
lib/kunit/kunit-test.c:459:2: error: array initializer must be an initializer list or string literal
KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
^
include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^
include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
^
include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
KUNIT_BINARY_STR_ASSERTION(test, \
^
include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
typeof(left) __left = (left); \
^
lib/kunit/kunit-test.c:466:2: error: array initializer must be an initializer list or string literal
KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
^
include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^
include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
^
include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
KUNIT_BINARY_STR_ASSERTION(test, \
^
include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
typeof(left) __left = (left); \
^
1 warning and 2 errors generated.


vim +458 lib/kunit/kunit-test.c

450
451 static void kunit_status_mark_skipped_test(struct kunit *test)
452 {
453 struct kunit fake;
454
455 kunit_init_test(&fake, "fake test", NULL);
456
457 /* Before: Should be SUCCESS with no comment. */
> 458 KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
459 KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
460
461 /* Mark the test as skipped. */
462 kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
463
464 /* After: Should be SKIPPED with our comment. */
465 KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
466 KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
467 }
468

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]


Attachments:
(No filename) (6.10 kB)
.config.gz (37.39 kB)
Download all attachments

2021-05-26 10:57:18

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

Hi David,

I love your patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v5.13-rc3 next-20210526]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patch, we suggest to use '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/David-Gow/kunit-Support-skipped-tests/20210526-161324
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git ad9f25d338605d26acedcaf3ba5fab5ca26f1c10
config: arc-allyesconfig (attached as .config)
compiler: arceb-elf-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# https://github.com/0day-ci/linux/commit/83c919857a4ca319ed69d6feaf3d5b5325dbdc29
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review David-Gow/kunit-Support-skipped-tests/20210526-161324
git checkout 83c919857a4ca319ed69d6feaf3d5b5325dbdc29
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=arc

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>

All errors (new ones prefixed by >>):

In file included from include/linux/kernel.h:15,
from include/kunit/assert.h:13,
from include/kunit/test.h:12,
from lib/kunit/kunit-test.c:8:
lib/kunit/kunit-test.c: In function 'kunit_status_mark_skipped_test':
include/linux/minmax.h:20:28: warning: comparison of distinct pointer types lacks a cast
20 | (!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
| ^~
include/kunit/test.h:834:9: note: in expansion of macro '__typecheck'
834 | ((void)__typecheck(__left, __right)); \
| ^~~~~~~~~~~
include/kunit/test.h:858:2: note: in expansion of macro 'KUNIT_BASE_BINARY_ASSERTION'
858 | KUNIT_BASE_BINARY_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:947:2: note: in expansion of macro 'KUNIT_BASE_EQ_MSG_ASSERTION'
947 | KUNIT_BASE_EQ_MSG_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:957:2: note: in expansion of macro 'KUNIT_BINARY_EQ_MSG_ASSERTION'
957 | KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1320:2: note: in expansion of macro 'KUNIT_BINARY_EQ_ASSERTION'
1320 | KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
| ^~~~~~~~~~~~~~~~~~~~~~~~~
lib/kunit/kunit-test.c:458:2: note: in expansion of macro 'KUNIT_EXPECT_EQ'
458 | KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
| ^~~~~~~~~~~~~~~
In file included from lib/kunit/kunit-test.c:8:
>> include/kunit/test.h:1188:24: error: invalid initializer
1188 | typeof(left) __left = (left); \
| ^
include/kunit/test.h:1211:2: note: in expansion of macro 'KUNIT_BINARY_STR_ASSERTION'
1211 | KUNIT_BINARY_STR_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1218:2: note: in expansion of macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
1218 | KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1502:2: note: in expansion of macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
1502 | KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
lib/kunit/kunit-test.c:459:2: note: in expansion of macro 'KUNIT_EXPECT_STREQ'
459 | KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
| ^~~~~~~~~~~~~~~~~~
>> include/kunit/test.h:1188:24: error: invalid initializer
1188 | typeof(left) __left = (left); \
| ^
include/kunit/test.h:1211:2: note: in expansion of macro 'KUNIT_BINARY_STR_ASSERTION'
1211 | KUNIT_BINARY_STR_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1218:2: note: in expansion of macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
1218 | KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1502:2: note: in expansion of macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
1502 | KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
| ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~
lib/kunit/kunit-test.c:466:2: note: in expansion of macro 'KUNIT_EXPECT_STREQ'
466 | KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
| ^~~~~~~~~~~~~~~~~~


vim +1188 include/kunit/test.h

73cda7bb8bfb1d Brendan Higgins 2019-09-23 849
73cda7bb8bfb1d Brendan Higgins 2019-09-23 850 #define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 851 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 852 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 853 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 854 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 855 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 856 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 857 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 858 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 859 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 860 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 861 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 862 left, ==, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 863 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 864 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 865
73cda7bb8bfb1d Brendan Higgins 2019-09-23 866 #define KUNIT_BASE_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 867 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 868 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 869 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 870 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 871 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 872 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 873 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 874 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 875 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 876 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 877 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 878 left, !=, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 879 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 880 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 881
73cda7bb8bfb1d Brendan Higgins 2019-09-23 882 #define KUNIT_BASE_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 883 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 884 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 885 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 886 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 887 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 888 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 889 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 890 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 891 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 892 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 893 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 894 left, <, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 895 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 896 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 897
73cda7bb8bfb1d Brendan Higgins 2019-09-23 898 #define KUNIT_BASE_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 899 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 900 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 901 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 902 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 903 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 904 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 905 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 906 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 907 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 908 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 909 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 910 left, <=, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 911 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 912 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 913
73cda7bb8bfb1d Brendan Higgins 2019-09-23 914 #define KUNIT_BASE_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 915 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 916 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 917 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 918 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 919 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 920 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 921 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 922 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 923 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 924 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 925 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 926 left, >, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 927 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 928 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 929
73cda7bb8bfb1d Brendan Higgins 2019-09-23 930 #define KUNIT_BASE_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 931 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 932 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 933 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 934 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 935 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 936 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 937 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 938 KUNIT_BASE_BINARY_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 939 assert_class, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 940 ASSERT_CLASS_INIT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 941 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 942 left, >=, right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 943 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 944 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 945
73cda7bb8bfb1d Brendan Higgins 2019-09-23 946 #define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 947 KUNIT_BASE_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 948 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 949 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 950 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 951 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 952 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 953 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 954 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 955
73cda7bb8bfb1d Brendan Higgins 2019-09-23 956 #define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 957 KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 958 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 959 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 960 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 961 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 962
73cda7bb8bfb1d Brendan Higgins 2019-09-23 963 #define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 964 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 965 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 966 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 967 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 968 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 969 KUNIT_BASE_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 970 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 971 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 972 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 973 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 974 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 975 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 976 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 977
73cda7bb8bfb1d Brendan Higgins 2019-09-23 978 #define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 979 KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 980 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 981 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 982 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 983 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 984
73cda7bb8bfb1d Brendan Higgins 2019-09-23 985 #define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 986 KUNIT_BASE_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 987 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 988 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 989 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 990 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 991 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 992 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 993 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 994
73cda7bb8bfb1d Brendan Higgins 2019-09-23 995 #define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 996 KUNIT_BINARY_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 997 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 998 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 999 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1000 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1001
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1002 #define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1003 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1004 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1005 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1006 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1007 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1008 KUNIT_BASE_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1009 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1010 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1011 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1012 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1013 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1014 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1015 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1016
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1017 #define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1018 KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1019 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1020 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1021 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1022 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1023
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1024 #define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1025 KUNIT_BASE_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1026 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1027 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1028 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1029 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1030 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1031 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1032 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1033
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1034 #define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1035 KUNIT_BINARY_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1036 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1037 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1038 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1039 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1040
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1041 #define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1042 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1043 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1044 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1045 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1046 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1047 KUNIT_BASE_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1048 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1049 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1050 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1051 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1052 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1053 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1054 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1055
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1056 #define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1057 KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1058 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1059 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1060 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1061 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1062
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1063 #define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1064 KUNIT_BASE_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1065 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1066 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1067 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1068 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1069 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1070 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1071 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1072
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1073 #define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1074 KUNIT_BINARY_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1075 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1076 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1077 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1078 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1079
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1080 #define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1081 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1082 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1083 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1084 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1085 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1086 KUNIT_BASE_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1087 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1088 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1089 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1090 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1091 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1092 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1093 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1094
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1095 #define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1096 KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1097 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1098 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1099 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1100 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1101
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1102 #define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1103 KUNIT_BASE_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1104 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1105 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1106 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1107 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1108 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1109 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1110 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1111
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1112 #define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1113 KUNIT_BINARY_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1114 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1115 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1116 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1117 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1118
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1119 #define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1120 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1121 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1122 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1123 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1124 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1125 KUNIT_BASE_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1126 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1127 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1128 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1129 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1130 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1131 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1132 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1133
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1134 #define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1135 KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1136 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1137 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1138 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1139 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1140
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1141 #define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1142 KUNIT_BASE_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1143 kunit_binary_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1144 KUNIT_INIT_BINARY_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1145 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1146 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1147 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1148 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1149 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1150
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1151 #define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1152 KUNIT_BINARY_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1153 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1154 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1155 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1156 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1157
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1158 #define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1159 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1160 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1161 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1162 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1163 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1164 KUNIT_BASE_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1165 kunit_binary_ptr_assert, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1166 KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1167 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1168 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1169 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1170 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1171 ##__VA_ARGS__)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1172
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1173 #define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1174 KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1175 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1176 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1177 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1178 NULL)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1179
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1180 #define KUNIT_BINARY_STR_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1181 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1182 left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1183 op, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1184 right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1185 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1186 ...) \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1187 do { \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 @1188 typeof(left) __left = (left); \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1189 typeof(right) __right = (right); \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1190 \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1191 KUNIT_ASSERTION(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1192 strcmp(__left, __right) op 0, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1193 kunit_binary_str_assert, \
3084db0e0d5076 Daniel Latypov 2020-11-02 1194 KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1195 assert_type, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1196 #op, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1197 #left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1198 __left, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1199 #right, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1200 __right), \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1201 fmt, \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1202 ##__VA_ARGS__); \
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1203 } while (0)
73cda7bb8bfb1d Brendan Higgins 2019-09-23 1204

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]


Attachments:
(No filename) (33.52 kB)
.config.gz (66.54 kB)
Download all attachments

2021-05-26 13:07:41

by kernel test robot

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

Hi David,

I love your patch! Yet something to improve:

[auto build test ERROR on linus/master]
[also build test ERROR on v5.13-rc3 next-20210526]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting a patch, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/David-Gow/kunit-Support-skipped-tests/20210526-161324
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git ad9f25d338605d26acedcaf3ba5fab5ca26f1c10
config: x86_64-randconfig-r025-20210526 (attached as .config)
compiler: clang version 13.0.0 (https://github.com/llvm/llvm-project 99155e913e9bad5f7f8a247f8bb3a3ff3da74af1)
reproduce (this is a W=1 build):
wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
chmod +x ~/bin/make.cross
# install x86_64 cross compiling tool for clang build
# apt-get install binutils-x86-64-linux-gnu
# https://github.com/0day-ci/linux/commit/83c919857a4ca319ed69d6feaf3d5b5325dbdc29
git remote add linux-review https://github.com/0day-ci/linux
git fetch --no-tags linux-review David-Gow/kunit-Support-skipped-tests/20210526-161324
git checkout 83c919857a4ca319ed69d6feaf3d5b5325dbdc29
# save the attached .config to linux build tree
COMPILER_INSTALL_PATH=$HOME/0day COMPILER=clang make.cross ARCH=x86_64

If you fix the issue, kindly add following tag as appropriate
Reported-by: kernel test robot <[email protected]>

All errors (new ones prefixed by >>):

lib/kunit/kunit-test.c:458:2: warning: comparison of distinct pointer types ('typeof (__left) *' (aka 'enum kunit_status *') and 'typeof (__right) *' (aka 'int *')) [-Wcompare-distinct-pointer-types]
KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:1320:2: note: expanded from macro 'KUNIT_EXPECT_EQ'
KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:957:2: note: expanded from macro 'KUNIT_BINARY_EQ_ASSERTION'
KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:947:2: note: expanded from macro 'KUNIT_BINARY_EQ_MSG_ASSERTION'
KUNIT_BASE_EQ_MSG_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:858:2: note: expanded from macro 'KUNIT_BASE_EQ_MSG_ASSERTION'
KUNIT_BASE_BINARY_ASSERTION(test, \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/kunit/test.h:834:9: note: expanded from macro 'KUNIT_BASE_BINARY_ASSERTION'
((void)__typecheck(__left, __right)); \
^~~~~~~~~~~~~~~~~~~~~~~~~~~~
include/linux/minmax.h:20:28: note: expanded from macro '__typecheck'
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
~~~~~~~~~~~~~~ ^ ~~~~~~~~~~~~~~
>> lib/kunit/kunit-test.c:459:2: error: array initializer must be an initializer list or string literal
KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
^
include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^
include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
^
include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
KUNIT_BINARY_STR_ASSERTION(test, \
^
include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
typeof(left) __left = (left); \
^
lib/kunit/kunit-test.c:466:2: error: array initializer must be an initializer list or string literal
KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
^
include/kunit/test.h:1502:2: note: expanded from macro 'KUNIT_EXPECT_STREQ'
KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
^
include/kunit/test.h:1218:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_ASSERTION'
KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
^
include/kunit/test.h:1211:2: note: expanded from macro 'KUNIT_BINARY_STR_EQ_MSG_ASSERTION'
KUNIT_BINARY_STR_ASSERTION(test, \
^
include/kunit/test.h:1188:15: note: expanded from macro 'KUNIT_BINARY_STR_ASSERTION'
typeof(left) __left = (left); \
^
1 warning and 2 errors generated.


vim +459 lib/kunit/kunit-test.c

450
451 static void kunit_status_mark_skipped_test(struct kunit *test)
452 {
453 struct kunit fake;
454
455 kunit_init_test(&fake, "fake test", NULL);
456
457 /* Before: Should be SUCCESS with no comment. */
458 KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
> 459 KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
460
461 /* Mark the test as skipped. */
462 kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
463
464 /* After: Should be SKIPPED with our comment. */
465 KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
466 KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
467 }
468

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/[email protected]


Attachments:
(No filename) (6.09 kB)
.config.gz (37.39 kB)
Download all attachments

2021-05-26 20:21:37

by Daniel Latypov

[permalink] [raw]
Subject: Re: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
<[email protected]> wrote:
>
> Add a new KUnit test suite which contains tests which are always
> skipped. This is used as an example for how to write tests which are
> skipped, and to demonstrate the difference between kunit_skip() and
> kunit_mark_skipped().
>
> Because these tests do not pass (they're skipped), they are not enabled
> by default, or by the KUNIT_ALL_TESTS config option: they must be
> enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
> a .config or .kunitconfig file.
>
> Signed-off-by: David Gow <[email protected]>

Reviewed-by: Daniel Latypov <[email protected]>

LGTM, two minor nits, and as noted elsewhere, I'd like it if we could
have this be in the normal example test.
But even if that doesn't happen, this seems fine to me.

> ---
> lib/kunit/Kconfig | 15 +++++++++
> lib/kunit/Makefile | 2 ++
> lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
> 3 files changed, 69 insertions(+)
> create mode 100644 lib/kunit/kunit-example-skip-test.c
>
> diff --git a/lib/kunit/Kconfig b/lib/kunit/Kconfig
> index 0b5dfb001bac..399fe5f789f7 100644
> --- a/lib/kunit/Kconfig
> +++ b/lib/kunit/Kconfig
> @@ -45,6 +45,21 @@ config KUNIT_EXAMPLE_TEST
> is intended for curious hackers who would like to understand how to
> use KUnit for kernel development.
>
> +config KUNIT_EXAMPLE_SKIP_TEST
> + tristate "Skipped test example for KUnit"
> + default n
> + help
> + Enables an example unit test that is always skipped.
> +
> + This test only exists to help new users understand what KUnit is and
> + how it is used. Please refer to the example test itself,
> + lib/kunit/example-test.c, for more information. This option is
> + intended for curious hackers who would like to understand how to use
> + KUnit for kernel development.
> +
> + Because this test does not pass, it is not enabled by
> + CONFIG_KUNIT_ALL_TESTS
> +
> config KUNIT_ALL_TESTS
> tristate "All KUnit tests with satisfied dependencies"
> help
> diff --git a/lib/kunit/Makefile b/lib/kunit/Makefile
> index c49f4ffb6273..8a99ff2f83bd 100644
> --- a/lib/kunit/Makefile
> +++ b/lib/kunit/Makefile
> @@ -18,3 +18,5 @@ obj-$(CONFIG_KUNIT_TEST) += string-stream-test.o
> endif
>
> obj-$(CONFIG_KUNIT_EXAMPLE_TEST) += kunit-example-test.o
> +
> +obj-$(CONFIG_KUNIT_EXAMPLE_SKIP_TEST) += kunit-example-skip-test.o
> diff --git a/lib/kunit/kunit-example-skip-test.c b/lib/kunit/kunit-example-skip-test.c
> new file mode 100644
> index 000000000000..5395ee0be485
> --- /dev/null
> +++ b/lib/kunit/kunit-example-skip-test.c
> @@ -0,0 +1,52 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Example KUnit test which is always skipped.
> + *
> + * Copyright (C) 2021, Google LLC.
> + * Author: David Gow <[email protected]>
> + */
> +
> +#include <kunit/test.h>
> +
> +/*
> + * This test should always be skipped.
> + */
> +
> +static void example_skip_test(struct kunit *test)
> +{
> + /* This line should run */
> + kunit_log(KERN_INFO, test, "You should not see a line below.");

Btw, why not "kunit_info(test, ...)" ?

> +
> + /* Skip (and abort) the test */
> + kunit_skip(test, "this test should be skipped");
> +
> + /* This line should not execute */
> + kunit_log(KERN_INFO, test, "You should not see this line.");

Would it be more or less confusing to have

KUNIT_FAIL(test, "We should not get to this line")

maybe in addition or instead of this log?

> +}
> +
> +static void example_mark_skipped_test(struct kunit *test)
> +{
> + /* This line should run */
> + kunit_log(KERN_INFO, test, "You should see a line below.");
> +
> + /* Skip (but do not abort) the test */
> + kunit_mark_skipped(test, "this test should be skipped");
> +
> + /* This line should run */
> + kunit_log(KERN_INFO, test, "You should see this line.");
> +}
> +
> +static struct kunit_case example_skip_test_cases[] = {
> + KUNIT_CASE(example_skip_test),
> + KUNIT_CASE(example_mark_skipped_test),
> + {}
> +};
> +
> +static struct kunit_suite example_skip_test_suite = {
> + .name = "example_skip",
> + .test_cases = example_skip_test_cases,
> +};
> +
> +kunit_test_suites(&example_skip_test_suite);
> +
> +MODULE_LICENSE("GPL v2");
> --
> 2.31.1.818.g46aad6cb9e-goog
>
> --
> You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-3-davidgow%40google.com.

2021-05-26 20:28:20

by Daniel Latypov

[permalink] [raw]
Subject: Re: [PATCH 2/3] kunit: tool: Support skipped tests in kunit_tool

On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
<[email protected]> wrote:
>
> Add support for the SKIP directive to kunit_tool's TAP parser.
>
> Skipped tests now show up as such in the printed summary. The number of
> skipped tests is counted, and if all tests in a suite are skipped, the
> suite is also marked as skipped. Otherwise, skipped tests do affect the
> suite result.
>
> Example output:
> [00:22:34] ======== [SKIPPED] example_skip ========
> [00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
> [00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
> [00:22:34] ============================================================
> [00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.
>
> Signed-off-by: David Gow <[email protected]>
> ---
> tools/testing/kunit/kunit_parser.py | 47 +++++++++++++++++++-------
> tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++

This seems to be missing the added test files.

> 2 files changed, 57 insertions(+), 12 deletions(-)
>
> diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
> index e8bcc139702e..6b5dd26b479d 100644
> --- a/tools/testing/kunit/kunit_parser.py
> +++ b/tools/testing/kunit/kunit_parser.py
> @@ -43,6 +43,7 @@ class TestCase(object):
> class TestStatus(Enum):
> SUCCESS = auto()
> FAILURE = auto()
> + SKIPPED = auto()
> TEST_CRASHED = auto()
> NO_TESTS = auto()
> FAILURE_TO_PARSE_TESTS = auto()
> @@ -108,6 +109,8 @@ def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
>
> OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
>
> +OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
> +
> OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
>
> OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
> @@ -125,6 +128,10 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
> if match:
> test_case.log.append(lines.pop(0))
> test_case.name = match.group(2)
> + skip_match = OK_NOT_OK_SKIP.match(line)
> + if skip_match:
> + test_case.status = TestStatus.SKIPPED
> + return True
> if test_case.status == TestStatus.TEST_CRASHED:
> return True
> if match.group(1) == 'ok':
> @@ -188,16 +195,16 @@ def parse_subtest_plan(lines: List[str]) -> Optional[int]:
> return None
>
> def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
> - if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> + if left == right:
> + return left
> + elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> return TestStatus.TEST_CRASHED
> elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
> return TestStatus.FAILURE
> - elif left != TestStatus.SUCCESS:
> - return left
> - elif right != TestStatus.SUCCESS:
> + elif left == TestStatus.SKIPPED:
> return right
> else:
> - return TestStatus.SUCCESS
> + return left
>
> def parse_ok_not_ok_test_suite(lines: List[str],
> test_suite: TestSuite,
> @@ -214,6 +221,9 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> test_suite.status = TestStatus.SUCCESS
> else:
> test_suite.status = TestStatus.FAILURE
> + skip_match = OK_NOT_OK_SKIP.match(line)
> + if skip_match:
> + test_suite.status = TestStatus.SKIPPED
> suite_index = int(match.group(2))
> if suite_index != expected_suite_index:
> print_with_timestamp(
> @@ -224,8 +234,8 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> else:
> return False
>
> -def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
> - return reduce(max_status, statuses, TestStatus.SUCCESS)
> +def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
> + return reduce(max_status, status_list, TestStatus.SKIPPED)
>
> def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
> max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
> @@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:

Btw, this type annotation is out of date.
But I think an ever-growing Tuple is too cumbersome, how about this?

diff --git a/tools/testing/kunit/kunit_parser.py
b/tools/testing/kunit/kunit_parser.py
index 6b5dd26b479d..055ee1e4d19d 100644
--- a/tools/testing/kunit/kunit_parser.py
+++ b/tools/testing/kunit/kunit_parser.py
@@ -6,6 +6,7 @@
# Author: Felix Guo <[email protected]>
# Author: Brendan Higgins <[email protected]>

+from dataclasses import dataclass
import re

from collections import namedtuple
@@ -321,11 +322,19 @@ def parse_test_result(lines: List[str]) -> TestResult:
else:
return TestResult(TestStatus.NO_TESTS, [], lines)

-def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
- skipped_tests = 0
+# Note: This would require Python 3.7. We currently only require
3.6 (enum.auto). We can do it by hand to avoid that, if we want.
+@dataclass
+class TestCounts:
+ passed: int = 0
+ failed: int = 0
+ skipped: int = 0
+ crashed: int = 0
+
+ def total(self) -> int:
+ return self.passed + self.failed + self.skipped + self.crashed
+
+def print_and_count_results(test_result: TestResult) -> TestCounts:
+ counts = TestCounts()
for test_suite in test_result.suites:
if test_suite.status == TestStatus.SUCCESS:
print_suite_divider(green('[PASSED] ') +
test_suite.name)
@@ -336,39 +345,33 @@ def print_and_count_results(test_result:
TestResult) -> Tuple[int, int, int]:
else:
print_suite_divider(red('[FAILED] ') + test_suite.name)
for test_case in test_suite.cases:
- total_tests += 1
if test_case.status == TestStatus.SUCCESS:
+ counts.passed += 1
print_with_timestamp(green('[PASSED]
') + test_case.name)
elif test_case.status == TestStatus.SKIPPED:
- skipped_tests += 1
+ counts.skipped += 1
print_with_timestamp(yellow('[SKIPPED]
') + test_case.name)
elif test_case.status == TestStatus.TEST_CRASHED:
- crashed_tests += 1
+ counts.crashed += 1
print_with_timestamp(red('[CRASHED] '
+ test_case.name))
print_log(map(yellow, test_case.log))
print_with_timestamp('')
else:
- failed_tests += 1
+ counts.failed += 1
print_with_timestamp(red('[FAILED] ')
+ test_case.name)
print_log(map(yellow, test_case.log))
print_with_timestamp('')
- return total_tests, failed_tests, crashed_tests, skipped_tests
+ return counts

def parse_run_tests(kernel_output) -> TestResult:
- total_tests = 0
- failed_tests = 0
- crashed_tests = 0
- skipped_tests = 0
+ counts = TestCounts()
test_result =
parse_test_result(list(isolate_kunit_output(kernel_output)))
if test_result.status == TestStatus.NO_TESTS:
print(red('[ERROR] ') + yellow('no tests run!'))
elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
print(red('[ERROR] ') + yellow('could not parse test results!'))
else:
- (total_tests,
- failed_tests,
- crashed_tests,
- skipped_tests) = print_and_count_results(test_result)
+ counts = print_and_count_results(test_result)
print_with_timestamp(DIVIDER)
if test_result.status == TestStatus.SUCCESS:
fmt = green
@@ -378,5 +381,5 @@ def parse_run_tests(kernel_output) -> TestResult:
fmt =red
print_with_timestamp(
fmt('Testing complete. %d tests run. %d failed. %d
crashed. %d skipped.' %
- (total_tests, failed_tests, crashed_tests, skipped_tests)))
+ (counts.total(), counts.failed, counts.crashed,
counts.skipped)))
return test_result

> total_tests = 0
> failed_tests = 0
> crashed_tests = 0
> + skipped_tests = 0
> for test_suite in test_result.suites:
> if test_suite.status == TestStatus.SUCCESS:
> print_suite_divider(green('[PASSED] ') + test_suite.name)
> + elif test_suite.status == TestStatus.SKIPPED:
> + print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
> elif test_suite.status == TestStatus.TEST_CRASHED:
> print_suite_divider(red('[CRASHED] ' + test_suite.name))
> else:
> @@ -326,6 +339,9 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> total_tests += 1
> if test_case.status == TestStatus.SUCCESS:
> print_with_timestamp(green('[PASSED] ') + test_case.name)
> + elif test_case.status == TestStatus.SKIPPED:
> + skipped_tests += 1
> + print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
> elif test_case.status == TestStatus.TEST_CRASHED:
> crashed_tests += 1
> print_with_timestamp(red('[CRASHED] ' + test_case.name))
> @@ -336,12 +352,13 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> print_with_timestamp(red('[FAILED] ') + test_case.name)
> print_log(map(yellow, test_case.log))
> print_with_timestamp('')
> - return total_tests, failed_tests, crashed_tests
> + return total_tests, failed_tests, crashed_tests, skipped_tests
>
> def parse_run_tests(kernel_output) -> TestResult:
> total_tests = 0
> failed_tests = 0
> crashed_tests = 0
> + skipped_tests = 0
> test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
> if test_result.status == TestStatus.NO_TESTS:
> print(red('[ERROR] ') + yellow('no tests run!'))
> @@ -350,10 +367,16 @@ def parse_run_tests(kernel_output) -> TestResult:
> else:
> (total_tests,
> failed_tests,
> - crashed_tests) = print_and_count_results(test_result)
> + crashed_tests,
> + skipped_tests) = print_and_count_results(test_result)
> print_with_timestamp(DIVIDER)
> - fmt = green if test_result.status == TestStatus.SUCCESS else red
> + if test_result.status == TestStatus.SUCCESS:
> + fmt = green
> + elif test_result.status == TestStatus.SKIPPED:
> + fmt = yellow
> + else:
> + fmt =red
> print_with_timestamp(
> - fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
> - (total_tests, failed_tests, crashed_tests)))
> + fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
> + (total_tests, failed_tests, crashed_tests, skipped_tests)))
> return test_result
> diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
> index 2e809dd956a7..a51e70cafcc1 100755
> --- a/tools/testing/kunit/kunit_tool_test.py
> +++ b/tools/testing/kunit/kunit_tool_test.py
> @@ -183,6 +183,28 @@ class KUnitParserTest(unittest.TestCase):
> kunit_parser.TestStatus.TEST_CRASHED,
> result.status)
>
> + def test_skipped_test(self):
> + skipped_log = test_data_path('test_skip_tests.log')
> + file = open(skipped_log)
> + result = kunit_parser.parse_run_tests(file.readlines())
> +
> + # A skipped test does not fail the whole suite.
> + self.assertEqual(
> + kunit_parser.TestStatus.SUCCESS,
> + result.status)
> + file.close()
> +
> + def test_skipped_all_tests(self):
> + skipped_log = test_data_path('test_skip_all_tests.log')
> + file = open(skipped_log)
> + result = kunit_parser.parse_run_tests(file.readlines())
> +
> + self.assertEqual(
> + kunit_parser.TestStatus.SKIPPED,
> + result.status)
> + file.close()
> +
> +
> def test_ignores_prefix_printk_time(self):
> prefix_log = test_data_path('test_config_printk_time.log')
> with open(prefix_log) as file:
> --
> 2.31.1.818.g46aad6cb9e-goog
>
> --
> You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-2-davidgow%40google.com.

2021-05-27 00:10:05

by Daniel Latypov

[permalink] [raw]
Subject: Re: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development
<[email protected]> wrote:
>
> On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> > Add a new KUnit test suite which contains tests which are always
> > skipped. This is used as an example for how to write tests which are
> > skipped, and to demonstrate the difference between kunit_skip() and
> > kunit_mark_skipped().
> >
> > Because these tests do not pass (they're skipped), they are not enabled
> > by default, or by the KUNIT_ALL_TESTS config option: they must be
> > enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
> > a .config or .kunitconfig file.
> >
> > Signed-off-by: David Gow <[email protected]>
> > ---
> > lib/kunit/Kconfig | 15 +++++++++
> > lib/kunit/Makefile | 2 ++
> > lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
> > 3 files changed, 69 insertions(+)
> > create mode 100644 lib/kunit/kunit-example-skip-test.c
>
> I don't know if this test is useful for a user of KUnit. Given it's not
> testing KUnit functionality (I see you added tests that the feature
> works in patch 1/3), but rather a demonstration and therefore dead code.
> I don't think the feature is difficult to understand from the API doc
> text.
>
> Instead, would it be more helpful to add something to
> Documentation/dev-tools/kunit? Or perhaps just add something to
> lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig

I'm in favor of putting it in kunit-example-test.c as well.

But I hear there was pushback to have a non-passing test in the example?
I guess the fear is that someone will see something that doesn't say
"passed" in the example output and think something has gone wrong?

Hence this more conservative change.
But I hope that in the absence of any replies in opposition, we can
just keep one example-test.c

> options at least.
>
> Thanks,
> -- Marco
>
> --
> You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/YK4NRlyrYJ8ktsWQ%40elver.google.com.

2021-05-27 00:10:13

by Marco Elver

[permalink] [raw]
Subject: Re: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

On Wed, May 26, 2021 at 11:29AM -0700, Daniel Latypov wrote:
> On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development
> <[email protected]> wrote:
> >
> > On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> > > Add a new KUnit test suite which contains tests which are always
> > > skipped. This is used as an example for how to write tests which are
> > > skipped, and to demonstrate the difference between kunit_skip() and
> > > kunit_mark_skipped().
> > >
> > > Because these tests do not pass (they're skipped), they are not enabled
> > > by default, or by the KUNIT_ALL_TESTS config option: they must be
> > > enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
> > > a .config or .kunitconfig file.
> > >
> > > Signed-off-by: David Gow <[email protected]>
> > > ---
> > > lib/kunit/Kconfig | 15 +++++++++
> > > lib/kunit/Makefile | 2 ++
> > > lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
> > > 3 files changed, 69 insertions(+)
> > > create mode 100644 lib/kunit/kunit-example-skip-test.c
> >
> > I don't know if this test is useful for a user of KUnit. Given it's not
> > testing KUnit functionality (I see you added tests that the feature
> > works in patch 1/3), but rather a demonstration and therefore dead code.
> > I don't think the feature is difficult to understand from the API doc
> > text.
> >
> > Instead, would it be more helpful to add something to
> > Documentation/dev-tools/kunit? Or perhaps just add something to
> > lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
>
> I'm in favor of putting it in kunit-example-test.c as well.
>
> But I hear there was pushback to have a non-passing test in the example?
> I guess the fear is that someone will see something that doesn't say
> "passed" in the example output and think something has gone wrong?
>
> Hence this more conservative change.
> But I hope that in the absence of any replies in opposition, we can
> just keep one example-test.c

Maybe I misunderstood, but kunit_skip*() isn't supposed to change the
test ok/fail state, right?

That's the behaviour I'd expect at least.

So if the test case deliberately doesn't change the state, but just
skips, it should be fine in example-test.c.

Thanks,
-- Marco

2021-05-27 00:14:28

by Daniel Latypov

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
<[email protected]> wrote:
>
> The kunit_mark_skipped() macro marks the current test as "skipped", with
> the provided reason. The kunit_skip() macro will mark the test as
> skipped, and abort the test.
>
> The TAP specification supports this "SKIP directive" as a comment after
> the "ok" / "not ok" for a test. See the "Directives" section of the TAP
> spec for details:
> https://testanything.org/tap-specification.html#directives
>
> The 'success' field for KUnit tests is replaced with a kunit_status
> enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a
> 'status_comment' containing information on why a test was skipped.
>
> A new 'kunit_status' test suite is added to test this.
>
> Signed-off-by: David Gow <[email protected]>

Reviewed-by: Daniel Latypov <[email protected]>

This is pretty exciting to see.
Some minor nits below.


> ---
> This change depends on the assertion typechecking fix here:
> https://lore.kernel.org/linux-kselftest/[email protected]/
> Only the first two patches in the series are required.
>
> This is the long-awaited follow-up to the skip tests RFC:
> https://lore.kernel.org/linux-kselftest/[email protected]/
>
> There are quite a few changes since that version, principally:
> - A kunit_status enum is now used, with SKIPPED a distinct state
> - The kunit_mark_skipped() and kunit_skip() macros now take printf-style
> format strings.
> - There is now a kunit_status test suite providing basic tests of this
> functionality.
> - The kunit_tool changes have been split into a separate commit.
> - The example skipped tests have been expanded and moved to their own
> suite, which is not enabled by KUNIT_ALL_TESTS.
> - A number of other fixes and changes here and there.
>
> Cheers,
> -- David
>
> include/kunit/test.h | 68 ++++++++++++++++++++++++++++++++++++++----
> lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
> lib/kunit/test.c | 51 ++++++++++++++++++-------------
> 3 files changed, 134 insertions(+), 27 deletions(-)
>
> diff --git a/include/kunit/test.h b/include/kunit/test.h
> index b68c61348121..40b536da027e 100644
> --- a/include/kunit/test.h
> +++ b/include/kunit/test.h
> @@ -105,6 +105,18 @@ struct kunit;
> #define KUNIT_SUBTEST_INDENT " "
> #define KUNIT_SUBSUBTEST_INDENT " "
>
> +/**
> + * enum kunit_status - Type of result for a test or test suite
> + * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
> + * @KUNIT_FAILURE: Denotes the test has failed.
> + * @KUNIT_SKIPPED: Denotes the test has been skipped.
> + */
> +enum kunit_status {
> + KUNIT_SUCCESS,
> + KUNIT_FAILURE,
> + KUNIT_SKIPPED,
> +};
> +
> /**
> * struct kunit_case - represents an individual test case.
> *
> @@ -148,13 +160,20 @@ struct kunit_case {
> const void* (*generate_params)(const void *prev, char *desc);
>
> /* private: internal use only. */
> - bool success;
> + enum kunit_status status;
> char *log;
> };
>
> -static inline char *kunit_status_to_string(bool status)
> +static inline char *kunit_status_to_string(enum kunit_status status)

nit: I personally would think this maps SKIPPED => "SKIPPED", etc.
(If I didn't know all that logic lived in kunit tool).

I don't have any replacement names to suggest that I'm fully happy
with, however.
kunit_status_to_tap_str(), kunit_status_to_ok_notok(), eh.

> {
> - return status ? "ok" : "not ok";
> + switch (status) {
> + case KUNIT_SKIPPED:
> + case KUNIT_SUCCESS:
> + return "ok";
> + case KUNIT_FAILURE:
> + return "not ok";
> + }
> + return "invalid";
> }
>
> /**
> @@ -212,6 +231,7 @@ struct kunit_suite {
> struct kunit_case *test_cases;
>
> /* private: internal use only */
> + char status_comment[256];
> struct dentry *debugfs;
> char *log;
> };
> @@ -245,19 +265,21 @@ struct kunit {
> * be read after the test case finishes once all threads associated
> * with the test case have terminated.
> */
> - bool success; /* Read only after test_case finishes! */
> spinlock_t lock; /* Guards all mutable test state. */
> + enum kunit_status status; /* Read only after test_case finishes! */
> /*
> * Because resources is a list that may be updated multiple times (with
> * new resources) from any thread associated with a test case, we must
> * protect it with some type of lock.
> */
> struct list_head resources; /* Protected by lock. */
> +
> + char status_comment[256];
> };
>
> static inline void kunit_set_failure(struct kunit *test)
> {
> - WRITE_ONCE(test->success, false);
> + WRITE_ONCE(test->status, KUNIT_FAILURE);
> }
>
> void kunit_init_test(struct kunit *test, const char *name, char *log);
> @@ -348,7 +370,7 @@ static inline int kunit_run_all_tests(void)
> #define kunit_suite_for_each_test_case(suite, test_case) \
> for (test_case = suite->test_cases; test_case->run_case; test_case++)
>
> -bool kunit_suite_has_succeeded(struct kunit_suite *suite);
> +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
>
> /*
> * Like kunit_alloc_resource() below, but returns the struct kunit_resource
> @@ -612,6 +634,40 @@ void kunit_cleanup(struct kunit *test);
>
> void kunit_log_append(char *log, const char *fmt, ...);
>
> +/**
> + * kunit_mark_skipped() - Marks @test_or_suite as skipped
> + *
> + * @test_or_suite: The test context object.
> + * @fmt: A printk() style format string.
> + *
> + * Marks the test as skipped. @fmt is given output as the test status
> + * comment, typically the reason the test was skipped.
> + *
> + * Test execution continues after kunit_mark_skipped() is called.
> + */
> +#define kunit_mark_skipped(test_or_suite, fmt, ...) \
> + do { \
> + WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
> + scnprintf((test_or_suite)->status_comment, 256, fmt, ##__VA_ARGS__); \
> + } while (0)
> +
> +/**
> + * kunit_skip() - Marks @test_or_suite as skipped
> + *
> + * @test_or_suite: The test context object.
> + * @fmt: A printk() style format string.
> + *
> + * Skips the test. @fmt is given output as the test status
> + * comment, typically the reason the test was skipped.
> + *
> + * Test execution is halted after kunit_skip() is called.
> + */
> +#define kunit_skip(test_or_suite, fmt, ...) \
> + do { \
> + kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
> + kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
> + } while (0)
> +
> /*
> * printk and log to per-test or per-suite log buffer. Logging only done
> * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
> diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
> index 69f902440a0e..d69efcbed624 100644
> --- a/lib/kunit/kunit-test.c
> +++ b/lib/kunit/kunit-test.c
> @@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
> #endif
> }
>
> +static void kunit_status_set_failure_test(struct kunit *test)
> +{
> + struct kunit fake;
> +
> + kunit_init_test(&fake, "fake test", NULL);
> +
> + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
> + kunit_set_failure(&fake);
> + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
> +}
> +
> +static void kunit_status_mark_skipped_test(struct kunit *test)
> +{
> + struct kunit fake;
> +
> + kunit_init_test(&fake, "fake test", NULL);
> +
> + /* Before: Should be SUCCESS with no comment. */
> + KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
> + KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
> +
> + /* Mark the test as skipped. */
> + kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
> +
> + /* After: Should be SKIPPED with our comment. */
> + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
> + KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
> +}
> +
> +static struct kunit_case kunit_status_test_cases[] = {
> + KUNIT_CASE(kunit_status_set_failure_test),
> + KUNIT_CASE(kunit_status_mark_skipped_test),
> + {}
> +};
> +
> +static struct kunit_suite kunit_status_test_suite = {
> + .name = "kunit_status",
> + .test_cases = kunit_status_test_cases,
> +};
> +
> kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
> - &kunit_log_test_suite);
> + &kunit_log_test_suite, &kunit_status_test_suite);
>
> MODULE_LICENSE("GPL v2");
> diff --git a/lib/kunit/test.c b/lib/kunit/test.c
> index 2f6cc0123232..0ee07705d2b0 100644
> --- a/lib/kunit/test.c
> +++ b/lib/kunit/test.c
> @@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)
>
> static void kunit_print_ok_not_ok(void *test_or_suite,
> bool is_test,
> - bool is_ok,
> + enum kunit_status status,
> size_t test_number,
> - const char *description)
> + const char *description,
> + const char *directive)
> {
> struct kunit_suite *suite = is_test ? NULL : test_or_suite;
> struct kunit *test = is_test ? test_or_suite : NULL;
> + const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
>
> /*
> * We do not log the test suite results as doing so would
> @@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
> * representation.
> */
> if (suite)
> - pr_info("%s %zd - %s\n",
> - kunit_status_to_string(is_ok),
> - test_number, description);
> + pr_info("%s %zd - %s%s%s\n",
> + kunit_status_to_string(status),
> + test_number, description,
> + directive_header, directive ? directive : "");
> else
> - kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",

Hmm, why not kunit_info(test, ...)?

> - kunit_status_to_string(is_ok),
> - test_number, description);
> + kunit_log(KERN_INFO, test,
> + KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
> + kunit_status_to_string(status),
> + test_number, description,
> + directive_header, directive ? directive : "");
> }
>
> -bool kunit_suite_has_succeeded(struct kunit_suite *suite)
> +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
> {
> const struct kunit_case *test_case;
> + enum kunit_status status = KUNIT_SKIPPED;
>
> kunit_suite_for_each_test_case(suite, test_case) {
> - if (!test_case->success)
> - return false;
> + if (test_case->status == KUNIT_FAILURE)
> + return KUNIT_FAILURE;
> + else if (test_case->status == KUNIT_SUCCESS)
> + status = KUNIT_SUCCESS;
> }
>
> - return true;
> + return status;
> }
> EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
>
> @@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
> kunit_print_ok_not_ok((void *)suite, false,
> kunit_suite_has_succeeded(suite),
> kunit_suite_counter++,
> - suite->name);
> + suite->name,
> + suite->status_comment);
> }
>
> unsigned int kunit_test_case_num(struct kunit_suite *suite,
> @@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
> test->log = log;
> if (test->log)
> test->log[0] = '\0';
> - test->success = true;
> + test->status = KUNIT_SUCCESS;
> + test->status_comment[0] = '\0';
> }
> EXPORT_SYMBOL_GPL(kunit_init_test);
>
> @@ -376,7 +386,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
> context.test_case = test_case;
> kunit_try_catch_run(try_catch, &context);
>
> - test_case->success = test->success;
> + test_case->status = test->status;
> +
> }
>
> int kunit_run_tests(struct kunit_suite *suite)
> @@ -388,7 +399,6 @@ int kunit_run_tests(struct kunit_suite *suite)
>
> kunit_suite_for_each_test_case(suite, test_case) {
> struct kunit test = { .param_value = NULL, .param_index = 0 };
> - bool test_success = true;
>
> if (test_case->generate_params) {
> /* Get initial param. */
> @@ -398,7 +408,6 @@ int kunit_run_tests(struct kunit_suite *suite)
>
> do {
> kunit_run_case_catch_errors(suite, test_case, &test);
> - test_success &= test_case->success;
>
> if (test_case->generate_params) {
> if (param_desc[0] == '\0') {
> @@ -410,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
> KUNIT_SUBTEST_INDENT
> "# %s: %s %d - %s",
> test_case->name,
> - kunit_status_to_string(test.success),
> + kunit_status_to_string(test.status),
> test.param_index + 1, param_desc);
>
> /* Get next param. */
> @@ -420,9 +429,10 @@ int kunit_run_tests(struct kunit_suite *suite)
> }
> } while (test.param_value);
>
> - kunit_print_ok_not_ok(&test, true, test_success,
> + kunit_print_ok_not_ok(&test, true, test_case->status,
> kunit_test_case_num(suite, test_case),
> - test_case->name);
> + test_case->name,
> + test.status_comment);
> }
>
> kunit_print_subtest_end(suite);
> @@ -434,6 +444,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
> static void kunit_init_suite(struct kunit_suite *suite)
> {
> kunit_debugfs_create_suite(suite);
> + suite->status_comment[0] = '\0';
> }
>
> int __kunit_test_suites_init(struct kunit_suite * const * const suites)
> --
> 2.31.1.818.g46aad6cb9e-goog
>
> --
> You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-1-davidgow%40google.com.

2021-05-27 08:22:53

by David Gow

[permalink] [raw]
Subject: Re: [PATCH 3/3] kunit: test: Add example_skip test suite which is always skipped

On Thu, May 27, 2021 at 2:29 AM Daniel Latypov <[email protected]> wrote:
>
> On Wed, May 26, 2021 at 1:56 AM 'Marco Elver' via KUnit Development
> <[email protected]> wrote:
> >
> > On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> > > Add a new KUnit test suite which contains tests which are always
> > > skipped. This is used as an example for how to write tests which are
> > > skipped, and to demonstrate the difference between kunit_skip() and
> > > kunit_mark_skipped().
> > >
> > > Because these tests do not pass (they're skipped), they are not enabled
> > > by default, or by the KUNIT_ALL_TESTS config option: they must be
> > > enabled explicitly by setting CONFIG_KUNIT_EXAMPLE_SKIP_TEST=y in either
> > > a .config or .kunitconfig file.
> > >
> > > Signed-off-by: David Gow <[email protected]>
> > > ---
> > > lib/kunit/Kconfig | 15 +++++++++
> > > lib/kunit/Makefile | 2 ++
> > > lib/kunit/kunit-example-skip-test.c | 52 +++++++++++++++++++++++++++++
> > > 3 files changed, 69 insertions(+)
> > > create mode 100644 lib/kunit/kunit-example-skip-test.c
> >
> > I don't know if this test is useful for a user of KUnit. Given it's not
> > testing KUnit functionality (I see you added tests that the feature
> > works in patch 1/3), but rather a demonstration and therefore dead code.
> > I don't think the feature is difficult to understand from the API doc
> > text.
> >
> > Instead, would it be more helpful to add something to
> > Documentation/dev-tools/kunit? Or perhaps just add something to
> > lib/kunit/kunit-example-test.c? It'd avoid introducing more Kconfig
>
> I'm in favor of putting it in kunit-example-test.c as well.
>
> But I hear there was pushback to have a non-passing test in the example?
> I guess the fear is that someone will see something that doesn't say
> "passed" in the example output and think something has gone wrong?

Yeah, (a simpler version of) this was in kunit-example-test.c before.
Brendan brought up the question of whether a test which skips all the time
is useful in [1], but that was more in the context of this being a
test of the functionality than an example. The other part of this did
grow out of the discussion for whether skipped tests should be treated
as 'ok' or 'not ok' in the KTAP spec (somewhere in [2], probably), and
whether or not a test being skipped was indicative of something going
wrong.

Ultimately, I think there's some value in having an example test
that's skipped, and if people are okay with it being in the example
suite (and therefore enabled by default), I'm okay with the default
KUnit run having a splotch of yellow amongst the green in the
kunit_tool output.

> Hence this more conservative change.
> But I hope that in the absence of any replies in opposition, we can
> just keep one example-test.c
>

Agreed, I'll put this back in the existing example suite for v2: if
there's any great opposition, I can always move it back.

> > options at least.
> >
> > Thanks,
> > -- Marco
> >

Cheers,
-- David

[1]: https://lore.kernel.org/linux-kselftest/CAFd5g47auKoQPhCeMHSTMtE_9+fZ6eOHZkojV5j0AX4N4hE_pw@mail.gmail.com/
[2]: https://lore.kernel.org/linux-kselftest/CY4PR13MB1175B804E31E502221BC8163FD830@CY4PR13MB1175.namprd13.prod.outlook.com/


Attachments:
smime.p7s (3.90 kB)
S/MIME Cryptographic Signature

2021-05-27 08:23:32

by David Gow

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

On Wed, May 26, 2021 at 5:03 PM Marco Elver <[email protected]> wrote:
>
> On Wed, May 26, 2021 at 01:11AM -0700, David Gow wrote:
> > The kunit_mark_skipped() macro marks the current test as "skipped", with
> > the provided reason. The kunit_skip() macro will mark the test as
> > skipped, and abort the test.
> >
> > The TAP specification supports this "SKIP directive" as a comment after
> > the "ok" / "not ok" for a test. See the "Directives" section of the TAP
> > spec for details:
> > https://testanything.org/tap-specification.html#directives
> >
> > The 'success' field for KUnit tests is replaced with a kunit_status
> > enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a
> > 'status_comment' containing information on why a test was skipped.
> >
> > A new 'kunit_status' test suite is added to test this.
> >
> > Signed-off-by: David Gow <[email protected]>
> [...]
> > include/kunit/test.h | 68 ++++++++++++++++++++++++++++++++++++++----
> > lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
> > lib/kunit/test.c | 51 ++++++++++++++++++-------------
> > 3 files changed, 134 insertions(+), 27 deletions(-)
>
> Very nice, thank you.
>
> Tested-by: Marco Elver <[email protected]>
>
> , with the below changes to test_kasan.c. If you would like an immediate
> user of kunit_skip(), please feel free to add the below patch to your
> series.
>
> Thanks,
> -- Marco
>

Thanks! I'll add this to the next version.

Cheers,
-- David

> ------ >8 ------
>
> From: Marco Elver <[email protected]>
> Date: Wed, 26 May 2021 10:43:12 +0200
> Subject: [PATCH] kasan: test: make use of kunit_skip()
>
> Make use of the recently added kunit_skip() to skip tests, as it permits
> TAP parsers to recognize if a test was deliberately skipped.
>
> Signed-off-by: Marco Elver <[email protected]>
> ---
> lib/test_kasan.c | 12 ++++--------
> 1 file changed, 4 insertions(+), 8 deletions(-)
>
> diff --git a/lib/test_kasan.c b/lib/test_kasan.c
> index cacbbbdef768..0a2029d14c91 100644
> --- a/lib/test_kasan.c
> +++ b/lib/test_kasan.c
> @@ -111,17 +111,13 @@ static void kasan_test_exit(struct kunit *test)
> } while (0)
>
> #define KASAN_TEST_NEEDS_CONFIG_ON(test, config) do { \
> - if (!IS_ENABLED(config)) { \
> - kunit_info((test), "skipping, " #config " required"); \
> - return; \
> - } \
> + if (!IS_ENABLED(config)) \
> + kunit_skip((test), "Test requires " #config "=y"); \
> } while (0)
>
> #define KASAN_TEST_NEEDS_CONFIG_OFF(test, config) do { \
> - if (IS_ENABLED(config)) { \
> - kunit_info((test), "skipping, " #config " enabled"); \
> - return; \
> - } \
> + if (IS_ENABLED(config)) \
> + kunit_skip((test), "Test requires " #config "=n"); \
> } while (0)
>
> static void kmalloc_oob_right(struct kunit *test)
> --
> 2.31.1.818.g46aad6cb9e-goog
>

2021-05-27 08:24:48

by David Gow

[permalink] [raw]
Subject: Re: [PATCH 2/3] kunit: tool: Support skipped tests in kunit_tool

On Thu, May 27, 2021 at 3:10 AM Daniel Latypov <[email protected]> wrote:
>
> On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
> <[email protected]> wrote:
> >
> > Add support for the SKIP directive to kunit_tool's TAP parser.
> >
> > Skipped tests now show up as such in the printed summary. The number of
> > skipped tests is counted, and if all tests in a suite are skipped, the
> > suite is also marked as skipped. Otherwise, skipped tests do affect the
> > suite result.
> >
> > Example output:
> > [00:22:34] ======== [SKIPPED] example_skip ========
> > [00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
> > [00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
> > [00:22:34] ============================================================
> > [00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.
> >
> > Signed-off-by: David Gow <[email protected]>
> > ---
> > tools/testing/kunit/kunit_parser.py | 47 +++++++++++++++++++-------
> > tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++
>
> This seems to be missing the added test files.
>

Whoops, yes: I'll add these back in v2.

> > 2 files changed, 57 insertions(+), 12 deletions(-)
> >
> > diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
> > index e8bcc139702e..6b5dd26b479d 100644
> > --- a/tools/testing/kunit/kunit_parser.py
> > +++ b/tools/testing/kunit/kunit_parser.py
> > @@ -43,6 +43,7 @@ class TestCase(object):
> > class TestStatus(Enum):
> > SUCCESS = auto()
> > FAILURE = auto()
> > + SKIPPED = auto()
> > TEST_CRASHED = auto()
> > NO_TESTS = auto()
> > FAILURE_TO_PARSE_TESTS = auto()
> > @@ -108,6 +109,8 @@ def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
> >
> > OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
> >
> > +OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
> > +
> > OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
> >
> > OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
> > @@ -125,6 +128,10 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
> > if match:
> > test_case.log.append(lines.pop(0))
> > test_case.name = match.group(2)
> > + skip_match = OK_NOT_OK_SKIP.match(line)
> > + if skip_match:
> > + test_case.status = TestStatus.SKIPPED
> > + return True
> > if test_case.status == TestStatus.TEST_CRASHED:
> > return True
> > if match.group(1) == 'ok':
> > @@ -188,16 +195,16 @@ def parse_subtest_plan(lines: List[str]) -> Optional[int]:
> > return None
> >
> > def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
> > - if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> > + if left == right:
> > + return left
> > + elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> > return TestStatus.TEST_CRASHED
> > elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
> > return TestStatus.FAILURE
> > - elif left != TestStatus.SUCCESS:
> > - return left
> > - elif right != TestStatus.SUCCESS:
> > + elif left == TestStatus.SKIPPED:
> > return right
> > else:
> > - return TestStatus.SUCCESS
> > + return left
> >
> > def parse_ok_not_ok_test_suite(lines: List[str],
> > test_suite: TestSuite,
> > @@ -214,6 +221,9 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> > test_suite.status = TestStatus.SUCCESS
> > else:
> > test_suite.status = TestStatus.FAILURE
> > + skip_match = OK_NOT_OK_SKIP.match(line)
> > + if skip_match:
> > + test_suite.status = TestStatus.SKIPPED
> > suite_index = int(match.group(2))
> > if suite_index != expected_suite_index:
> > print_with_timestamp(
> > @@ -224,8 +234,8 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> > else:
> > return False
> >
> > -def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
> > - return reduce(max_status, statuses, TestStatus.SUCCESS)
> > +def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
> > + return reduce(max_status, status_list, TestStatus.SKIPPED)
> >
> > def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
> > max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
> > @@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
>
> Btw, this type annotation is out of date.

Oops: will fix and/or replace with the below.

> But I think an ever growing Tuple is too cumbersome, how about this?
>

Yeah, this does seem cleaner: I'll put this or something like it in v2.

> diff --git a/tools/testing/kunit/kunit_parser.py
> b/tools/testing/kunit/kunit_parser.py
> index 6b5dd26b479d..055ee1e4d19d 100644
> --- a/tools/testing/kunit/kunit_parser.py
> +++ b/tools/testing/kunit/kunit_parser.py
> @@ -6,6 +6,7 @@
> # Author: Felix Guo <[email protected]>
> # Author: Brendan Higgins <[email protected]>
>
> +from dataclasses import dataclass
> import re
>
> from collections import namedtuple
> @@ -321,11 +322,19 @@ def parse_test_result(lines: List[str]) -> TestResult:
> else:
> return TestResult(TestStatus.NO_TESTS, [], lines)
>
> -def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> - total_tests = 0
> - failed_tests = 0
> - crashed_tests = 0
> - skipped_tests = 0
> +# Note: This would require Python 3.7. We currently only required
> 3.6 (enum.auto). We can do it by hand to avoid that, if we want.

Hmm... I'm generally loath to increase the version requirement for
something this simple, so might look into doing a version of this
without the dataclass.


> +@dataclass
> +class TestCounts:
> + passed: int = 0
> + failed: int = 0
> + skipped: int = 0
> + crashed: int = 0
> +
> + def total(self) -> int:
> + return self.passed + self.failed + self.skipped + self.crashed
> +
> +def print_and_count_results(test_result: TestResult) -> TestCounts:
> + counts = TestCounts()
> for test_suite in test_result.suites:
> if test_suite.status == TestStatus.SUCCESS:
> print_suite_divider(green('[PASSED] ') +
> test_suite.name)
> @@ -336,39 +345,33 @@ def print_and_count_results(test_result:
> TestResult) -> Tuple[int, int, int]:
> else:
> print_suite_divider(red('[FAILED] ') + test_suite.name)
> for test_case in test_suite.cases:
> - total_tests += 1
> if test_case.status == TestStatus.SUCCESS:
> + counts.passed += 1
> print_with_timestamp(green('[PASSED]
> ') + test_case.name)
> elif test_case.status == TestStatus.SKIPPED:
> - skipped_tests += 1
> + counts.skipped += 1
> print_with_timestamp(yellow('[SKIPPED]
> ') + test_case.name)
> elif test_case.status == TestStatus.TEST_CRASHED:
> - crashed_tests += 1
> + counts.crashed += 1
> print_with_timestamp(red('[CRASHED] '
> + test_case.name))
> print_log(map(yellow, test_case.log))
> print_with_timestamp('')
> else:
> - failed_tests += 1
> + counts.failed += 1
> print_with_timestamp(red('[FAILED] ')
> + test_case.name)
> print_log(map(yellow, test_case.log))
> print_with_timestamp('')
> - return total_tests, failed_tests, crashed_tests, skipped_tests
> + return counts
>
> def parse_run_tests(kernel_output) -> TestResult:
> - total_tests = 0
> - failed_tests = 0
> - crashed_tests = 0
> - skipped_tests = 0
> + counts = TestCounts()
> test_result =
> parse_test_result(list(isolate_kunit_output(kernel_output)))
> if test_result.status == TestStatus.NO_TESTS:
> print(red('[ERROR] ') + yellow('no tests run!'))
> elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
> print(red('[ERROR] ') + yellow('could not parse test results!'))
> else:
> - (total_tests,
> - failed_tests,
> - crashed_tests,
> - skipped_tests) = print_and_count_results(test_result)
> + counts = print_and_count_results(test_result)
> print_with_timestamp(DIVIDER)
> if test_result.status == TestStatus.SUCCESS:
> fmt = green
> @@ -378,5 +381,5 @@ def parse_run_tests(kernel_output) -> TestResult:
> fmt =red
> print_with_timestamp(
> fmt('Testing complete. %d tests run. %d failed. %d
> crashed. %d skipped.' %
> - (total_tests, failed_tests, crashed_tests, skipped_tests)))
> + (counts.total(), counts.failed, counts.crashed,
> counts.skipped)))
> return test_result
>
> > total_tests = 0
> > failed_tests = 0
> > crashed_tests = 0
> > + skipped_tests = 0
> > for test_suite in test_result.suites:
> > if test_suite.status == TestStatus.SUCCESS:
> > print_suite_divider(green('[PASSED] ') + test_suite.name)
> > + elif test_suite.status == TestStatus.SKIPPED:
> > + print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
> > elif test_suite.status == TestStatus.TEST_CRASHED:
> > print_suite_divider(red('[CRASHED] ' + test_suite.name))
> > else:
> > @@ -326,6 +339,9 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> > total_tests += 1
> > if test_case.status == TestStatus.SUCCESS:
> > print_with_timestamp(green('[PASSED] ') + test_case.name)
> > + elif test_case.status == TestStatus.SKIPPED:
> > + skipped_tests += 1
> > + print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
> > elif test_case.status == TestStatus.TEST_CRASHED:
> > crashed_tests += 1
> > print_with_timestamp(red('[CRASHED] ' + test_case.name))
> > @@ -336,12 +352,13 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> > print_with_timestamp(red('[FAILED] ') + test_case.name)
> > print_log(map(yellow, test_case.log))
> > print_with_timestamp('')
> > - return total_tests, failed_tests, crashed_tests
> > + return total_tests, failed_tests, crashed_tests, skipped_tests
> >
> > def parse_run_tests(kernel_output) -> TestResult:
> > total_tests = 0
> > failed_tests = 0
> > crashed_tests = 0
> > + skipped_tests = 0
> > test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
> > if test_result.status == TestStatus.NO_TESTS:
> > print(red('[ERROR] ') + yellow('no tests run!'))
> > @@ -350,10 +367,16 @@ def parse_run_tests(kernel_output) -> TestResult:
> > else:
> > (total_tests,
> > failed_tests,
> > - crashed_tests) = print_and_count_results(test_result)
> > + crashed_tests,
> > + skipped_tests) = print_and_count_results(test_result)
> > print_with_timestamp(DIVIDER)
> > - fmt = green if test_result.status == TestStatus.SUCCESS else red
> > + if test_result.status == TestStatus.SUCCESS:
> > + fmt = green
> > + elif test_result.status == TestStatus.SKIPPED:
> > + fmt = yellow
> > + else:
> > + fmt =red
> > print_with_timestamp(
> > - fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
> > - (total_tests, failed_tests, crashed_tests)))
> > + fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
> > + (total_tests, failed_tests, crashed_tests, skipped_tests)))
> > return test_result
> > diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
> > index 2e809dd956a7..a51e70cafcc1 100755
> > --- a/tools/testing/kunit/kunit_tool_test.py
> > +++ b/tools/testing/kunit/kunit_tool_test.py
> > @@ -183,6 +183,28 @@ class KUnitParserTest(unittest.TestCase):
> > kunit_parser.TestStatus.TEST_CRASHED,
> > result.status)
> >
> > + def test_skipped_test(self):
> > + skipped_log = test_data_path('test_skip_tests.log')
> > + file = open(skipped_log)
> > + result = kunit_parser.parse_run_tests(file.readlines())
> > +
> > + # A skipped test does not fail the whole suite.
> > + self.assertEqual(
> > + kunit_parser.TestStatus.SUCCESS,
> > + result.status)
> > + file.close()
> > +
> > + def test_skipped_all_tests(self):
> > + skipped_log = test_data_path('test_skip_all_tests.log')
> > + file = open(skipped_log)
> > + result = kunit_parser.parse_run_tests(file.readlines())
> > +
> > + self.assertEqual(
> > + kunit_parser.TestStatus.SKIPPED,
> > + result.status)
> > + file.close()
> > +
> > +
> > def test_ignores_prefix_printk_time(self):
> > prefix_log = test_data_path('test_config_printk_time.log')
> > with open(prefix_log) as file:
> > --
> > 2.31.1.818.g46aad6cb9e-goog
> >
> > --
> > You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> > To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> > To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-2-davidgow%40google.com.


Attachments:
smime.p7s (3.91 kB)
S/MIME Cryptographic Signature

2021-05-27 15:14:12

by David Gow

[permalink] [raw]
Subject: Re: [PATCH 1/3] kunit: Support skipped tests

On Thu, May 27, 2021 at 4:49 AM Daniel Latypov <[email protected]> wrote:
>
> On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
> <[email protected]> wrote:
> >
> > The kunit_mark_skipped() macro marks the current test as "skipped", with
> > the provided reason. The kunit_skip() macro will mark the test as
> > skipped, and abort the test.
> >
> > The TAP specification supports this "SKIP directive" as a comment after
> > the "ok" / "not ok" for a test. See the "Directives" section of the TAP
> > spec for details:
> > https://testanything.org/tap-specification.html#directives
> >
> > The 'success' field for KUnit tests is replaced with a kunit_status
> > enum, which can be SUCCESS, FAILURE, or SKIPPED, combined with a
> > 'status_comment' containing information on why a test was skipped.
> >
> > A new 'kunit_status' test suite is added to test this.
> >
> > Signed-off-by: David Gow <[email protected]>
>
> Reviewed-by: Daniel Latypov <[email protected]>
>
> This is pretty exciting to see.
> Some minor nits below.
>
>

Thanks: I'll take these suggestions on board for v2.

> > ---
> > This change depends on the assertion typechecking fix here:
> > https://lore.kernel.org/linux-kselftest/[email protected]/
> > Only the first two patches in the series are required.
> >
> > This is the long-awaited follow-up to the skip tests RFC:
> > https://lore.kernel.org/linux-kselftest/[email protected]/
> >
> > There are quite a few changes since that version, principally:
> > - A kunit_status enum is now used, with SKIPPED a distinct state
> > - The kunit_mark_skipped() and kunit_skip() macros now take printf-style
> > format strings.
> > - There is now a kunit_status test suite providing basic tests of this
> > functionality.
> > - The kunit_tool changes have been split into a separate commit.
> > - The example skipped tests have been expanded and moved to their own
> > suite, which is not enabled by KUNIT_ALL_TESTS.
> > - A number of other fixes and changes here and there.
> >
> > Cheers,
> > -- David
> >
> > include/kunit/test.h | 68 ++++++++++++++++++++++++++++++++++++++----
> > lib/kunit/kunit-test.c | 42 +++++++++++++++++++++++++-
> > lib/kunit/test.c | 51 ++++++++++++++++++-------------
> > 3 files changed, 134 insertions(+), 27 deletions(-)
> >
> > diff --git a/include/kunit/test.h b/include/kunit/test.h
> > index b68c61348121..40b536da027e 100644
> > --- a/include/kunit/test.h
> > +++ b/include/kunit/test.h
> > @@ -105,6 +105,18 @@ struct kunit;
> > #define KUNIT_SUBTEST_INDENT " "
> > #define KUNIT_SUBSUBTEST_INDENT " "
> >
> > +/**
> > + * enum kunit_status - Type of result for a test or test suite
> > + * @KUNIT_SUCCESS: Denotes the test suite has not failed nor been skipped
> > + * @KUNIT_FAILURE: Denotes the test has failed.
> > + * @KUNIT_SKIPPED: Denotes the test has been skipped.
> > + */
> > +enum kunit_status {
> > + KUNIT_SUCCESS,
> > + KUNIT_FAILURE,
> > + KUNIT_SKIPPED,
> > +};
> > +
> > /**
> > * struct kunit_case - represents an individual test case.
> > *
> > @@ -148,13 +160,20 @@ struct kunit_case {
> > const void* (*generate_params)(const void *prev, char *desc);
> >
> > /* private: internal use only. */
> > - bool success;
> > + enum kunit_status status;
> > char *log;
> > };
> >
> > -static inline char *kunit_status_to_string(bool status)
> > +static inline char *kunit_status_to_string(enum kunit_status status)
>
> nit: I personally would think this maps SKIPPED => "SKIPPED", etc.
> (If I didn't know all that logic lived in kunit tool).
>
> I don't have any replacement names to suggest that I'm fully happy
> with, however.
> kunit_status_to_tap_str(), kunit_status_to_ok_notok(), eh.
>

Yeah: I kept the existing names for these functions, which
worked well when it was just a bool.

The TAP spec seems to just call this "ok/not ok", and given we already
have kunit_print_okay_not_ok(), kunit_status_to_ok_not_ok() seems the
best of those options.

> > {
> > - return status ? "ok" : "not ok";
> > + switch (status) {
> > + case KUNIT_SKIPPED:
> > + case KUNIT_SUCCESS:
> > + return "ok";
> > + case KUNIT_FAILURE:
> > + return "not ok";
> > + }
> > + return "invalid";
> > }
> >
> > /**
> > @@ -212,6 +231,7 @@ struct kunit_suite {
> > struct kunit_case *test_cases;
> >
> > /* private: internal use only */
> > + char status_comment[256];
> > struct dentry *debugfs;
> > char *log;
> > };
> > @@ -245,19 +265,21 @@ struct kunit {
> > * be read after the test case finishes once all threads associated
> > * with the test case have terminated.
> > */
> > - bool success; /* Read only after test_case finishes! */
> > spinlock_t lock; /* Guards all mutable test state. */
> > + enum kunit_status status; /* Read only after test_case finishes! */
> > /*
> > * Because resources is a list that may be updated multiple times (with
> > * new resources) from any thread associated with a test case, we must
> > * protect it with some type of lock.
> > */
> > struct list_head resources; /* Protected by lock. */
> > +
> > + char status_comment[256];
> > };
> >
> > static inline void kunit_set_failure(struct kunit *test)
> > {
> > - WRITE_ONCE(test->success, false);
> > + WRITE_ONCE(test->status, KUNIT_FAILURE);
> > }
> >
> > void kunit_init_test(struct kunit *test, const char *name, char *log);
> > @@ -348,7 +370,7 @@ static inline int kunit_run_all_tests(void)
> > #define kunit_suite_for_each_test_case(suite, test_case) \
> > for (test_case = suite->test_cases; test_case->run_case; test_case++)
> >
> > -bool kunit_suite_has_succeeded(struct kunit_suite *suite);
> > +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite);
> >
> > /*
> > * Like kunit_alloc_resource() below, but returns the struct kunit_resource
> > @@ -612,6 +634,40 @@ void kunit_cleanup(struct kunit *test);
> >
> > void kunit_log_append(char *log, const char *fmt, ...);
> >
> > +/**
> > + * kunit_mark_skipped() - Marks @test_or_suite as skipped
> > + *
> > + * @test_or_suite: The test context object.
> > + * @fmt: A printk() style format string.
> > + *
> > + * Marks the test as skipped. @fmt is given output as the test status
> > + * comment, typically the reason the test was skipped.
> > + *
> > + * Test execution continues after kunit_mark_skipped() is called.
> > + */
> > +#define kunit_mark_skipped(test_or_suite, fmt, ...) \
> > + do { \
> > + WRITE_ONCE((test_or_suite)->status, KUNIT_SKIPPED); \
> > + scnprintf((test_or_suite)->status_comment, 256, fmt, ##__VA_ARGS__); \
> > + } while (0)
> > +
> > +/**
> > + * kunit_skip() - Marks @test_or_suite as skipped
> > + *
> > + * @test_or_suite: The test context object.
> > + * @fmt: A printk() style format string.
> > + *
> > + * Skips the test. @fmt is given output as the test status
> > + * comment, typically the reason the test was skipped.
> > + *
> > + * Test execution is halted after kunit_skip() is called.
> > + */
> > +#define kunit_skip(test_or_suite, fmt, ...) \
> > + do { \
> > + kunit_mark_skipped((test_or_suite), fmt, ##__VA_ARGS__);\
> > + kunit_try_catch_throw(&((test_or_suite)->try_catch)); \
> > + } while (0)
> > +
> > /*
> > * printk and log to per-test or per-suite log buffer. Logging only done
> > * if CONFIG_KUNIT_DEBUGFS is 'y'; if it is 'n', no log is allocated/used.
> > diff --git a/lib/kunit/kunit-test.c b/lib/kunit/kunit-test.c
> > index 69f902440a0e..d69efcbed624 100644
> > --- a/lib/kunit/kunit-test.c
> > +++ b/lib/kunit/kunit-test.c
> > @@ -437,7 +437,47 @@ static void kunit_log_test(struct kunit *test)
> > #endif
> > }
> >
> > +static void kunit_status_set_failure_test(struct kunit *test)
> > +{
> > + struct kunit fake;
> > +
> > + kunit_init_test(&fake, "fake test", NULL);
> > +
> > + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SUCCESS);
> > + kunit_set_failure(&fake);
> > + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_FAILURE);
> > +}
> > +
> > +static void kunit_status_mark_skipped_test(struct kunit *test)
> > +{
> > + struct kunit fake;
> > +
> > + kunit_init_test(&fake, "fake test", NULL);
> > +
> > + /* Before: Should be SUCCESS with no comment. */
> > + KUNIT_EXPECT_EQ(test, fake.status, KUNIT_SUCCESS);
> > + KUNIT_EXPECT_STREQ(test, fake.status_comment, "");
> > +
> > + /* Mark the test as skipped. */
> > + kunit_mark_skipped(&fake, "Accepts format string: %s", "YES");
> > +
> > + /* After: Should be SKIPPED with our comment. */
> > + KUNIT_EXPECT_EQ(test, fake.status, (enum kunit_status)KUNIT_SKIPPED);
> > + KUNIT_EXPECT_STREQ(test, fake.status_comment, "Accepts format string: YES");
> > +}
> > +
> > +static struct kunit_case kunit_status_test_cases[] = {
> > + KUNIT_CASE(kunit_status_set_failure_test),
> > + KUNIT_CASE(kunit_status_mark_skipped_test),
> > + {}
> > +};
> > +
> > +static struct kunit_suite kunit_status_test_suite = {
> > + .name = "kunit_status",
> > + .test_cases = kunit_status_test_cases,
> > +};
> > +
> > kunit_test_suites(&kunit_try_catch_test_suite, &kunit_resource_test_suite,
> > - &kunit_log_test_suite);
> > + &kunit_log_test_suite, &kunit_status_test_suite);
> >
> > MODULE_LICENSE("GPL v2");
> > diff --git a/lib/kunit/test.c b/lib/kunit/test.c
> > index 2f6cc0123232..0ee07705d2b0 100644
> > --- a/lib/kunit/test.c
> > +++ b/lib/kunit/test.c
> > @@ -98,12 +98,14 @@ static void kunit_print_subtest_start(struct kunit_suite *suite)
> >
> > static void kunit_print_ok_not_ok(void *test_or_suite,
> > bool is_test,
> > - bool is_ok,
> > + enum kunit_status status,
> > size_t test_number,
> > - const char *description)
> > + const char *description,
> > + const char *directive)
> > {
> > struct kunit_suite *suite = is_test ? NULL : test_or_suite;
> > struct kunit *test = is_test ? test_or_suite : NULL;
> > + const char *directive_header = (status == KUNIT_SKIPPED) ? " # SKIP " : "";
> >
> > /*
> > * We do not log the test suite results as doing so would
> > @@ -114,25 +116,31 @@ static void kunit_print_ok_not_ok(void *test_or_suite,
> > * representation.
> > */
> > if (suite)
> > - pr_info("%s %zd - %s\n",
> > - kunit_status_to_string(is_ok),
> > - test_number, description);
> > + pr_info("%s %zd - %s%s%s\n",
> > + kunit_status_to_string(status),
> > + test_number, description,
> > + directive_header, directive ? directive : "");
> > else
> > - kunit_log(KERN_INFO, test, KUNIT_SUBTEST_INDENT "%s %zd - %s",
>
> Hmm, why not kunit_info(test, ...)?
>

No reason: it was kunit_log() originally, and I didn't change it. I
can replace this for v2 if you prefer.


> > - kunit_status_to_string(is_ok),
> > - test_number, description);
> > + kunit_log(KERN_INFO, test,
> > + KUNIT_SUBTEST_INDENT "%s %zd - %s%s%s",
> > + kunit_status_to_string(status),
> > + test_number, description,
> > + directive_header, directive ? directive : "");
> > }
> >
> > -bool kunit_suite_has_succeeded(struct kunit_suite *suite)
> > +enum kunit_status kunit_suite_has_succeeded(struct kunit_suite *suite)
> > {
> > const struct kunit_case *test_case;
> > + enum kunit_status status = KUNIT_SKIPPED;
> >
> > kunit_suite_for_each_test_case(suite, test_case) {
> > - if (!test_case->success)
> > - return false;
> > + if (test_case->status == KUNIT_FAILURE)
> > + return KUNIT_FAILURE;
> > + else if (test_case->status == KUNIT_SUCCESS)
> > + status = KUNIT_SUCCESS;
> > }
> >
> > - return true;
> > + return status;
> > }
> > EXPORT_SYMBOL_GPL(kunit_suite_has_succeeded);
> >
> > @@ -143,7 +151,8 @@ static void kunit_print_subtest_end(struct kunit_suite *suite)
> > kunit_print_ok_not_ok((void *)suite, false,
> > kunit_suite_has_succeeded(suite),
> > kunit_suite_counter++,
> > - suite->name);
> > + suite->name,
> > + suite->status_comment);
> > }
> >
> > unsigned int kunit_test_case_num(struct kunit_suite *suite,
> > @@ -252,7 +261,8 @@ void kunit_init_test(struct kunit *test, const char *name, char *log)
> > test->log = log;
> > if (test->log)
> > test->log[0] = '\0';
> > - test->success = true;
> > + test->status = KUNIT_SUCCESS;
> > + test->status_comment[0] = '\0';
> > }
> > EXPORT_SYMBOL_GPL(kunit_init_test);
> >
> > @@ -376,7 +386,8 @@ static void kunit_run_case_catch_errors(struct kunit_suite *suite,
> > context.test_case = test_case;
> > kunit_try_catch_run(try_catch, &context);
> >
> > - test_case->success = test->success;
> > + test_case->status = test->status;
> > +
> > }
> >
> > int kunit_run_tests(struct kunit_suite *suite)
> > @@ -388,7 +399,6 @@ int kunit_run_tests(struct kunit_suite *suite)
> >
> > kunit_suite_for_each_test_case(suite, test_case) {
> > struct kunit test = { .param_value = NULL, .param_index = 0 };
> > - bool test_success = true;
> >
> > if (test_case->generate_params) {
> > /* Get initial param. */
> > @@ -398,7 +408,6 @@ int kunit_run_tests(struct kunit_suite *suite)
> >
> > do {
> > kunit_run_case_catch_errors(suite, test_case, &test);
> > - test_success &= test_case->success;
> >
> > if (test_case->generate_params) {
> > if (param_desc[0] == '\0') {
> > @@ -410,7 +419,7 @@ int kunit_run_tests(struct kunit_suite *suite)
> > KUNIT_SUBTEST_INDENT
> > "# %s: %s %d - %s",
> > test_case->name,
> > - kunit_status_to_string(test.success),
> > + kunit_status_to_string(test.status),
> > test.param_index + 1, param_desc);
> >
> > /* Get next param. */
> > @@ -420,9 +429,10 @@ int kunit_run_tests(struct kunit_suite *suite)
> > }
> > } while (test.param_value);
> >
> > - kunit_print_ok_not_ok(&test, true, test_success,
> > + kunit_print_ok_not_ok(&test, true, test_case->status,
> > kunit_test_case_num(suite, test_case),
> > - test_case->name);
> > + test_case->name,
> > + test.status_comment);
> > }
> >
> > kunit_print_subtest_end(suite);
> > @@ -434,6 +444,7 @@ EXPORT_SYMBOL_GPL(kunit_run_tests);
> > static void kunit_init_suite(struct kunit_suite *suite)
> > {
> > kunit_debugfs_create_suite(suite);
> > + suite->status_comment[0] = '\0';
> > }
> >
> > int __kunit_test_suites_init(struct kunit_suite * const * const suites)
> > --
> > 2.31.1.818.g46aad6cb9e-goog
> >
> > --
> > You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> > To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> > To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-1-davidgow%40google.com.


Attachments:
smime.p7s (3.91 kB)
S/MIME Cryptographic Signature

2021-05-27 23:57:25

by Daniel Latypov

[permalink] [raw]
Subject: Re: [PATCH 2/3] kunit: tool: Support skipped tests in kunit_tool

On Thu, May 27, 2021 at 1:22 AM David Gow <[email protected]> wrote:
>
> On Thu, May 27, 2021 at 3:10 AM Daniel Latypov <[email protected]> wrote:
> >
> > On Wed, May 26, 2021 at 1:11 AM 'David Gow' via KUnit Development
> > <[email protected]> wrote:
> > >
> > > Add support for the SKIP directive to kunit_tool's TAP parser.
> > >
> > > Skipped tests now show up as such in the printed summary. The number of
> > > skipped tests is counted, and if all tests in a suite are skipped, the
> > > suite is also marked as skipped. Otherwise, skipped tests do affect the
> > > suite result.
> > >
> > > Example output:
> > > [00:22:34] ======== [SKIPPED] example_skip ========
> > > [00:22:34] [SKIPPED] example_skip_test # SKIP this test should be skipped
> > > [00:22:34] [SKIPPED] example_mark_skipped_test # SKIP this test should be skipped
> > > [00:22:34] ============================================================
> > > [00:22:34] Testing complete. 2 tests run. 0 failed. 0 crashed. 2 skipped.
> > >
> > > Signed-off-by: David Gow <[email protected]>
> > > ---
> > > tools/testing/kunit/kunit_parser.py | 47 +++++++++++++++++++-------
> > > tools/testing/kunit/kunit_tool_test.py | 22 ++++++++++++
> >
> > This seems to be missing the added test files.
> >
>
> Whoops, yes: I'll add these back in v2.
>
> > > 2 files changed, 57 insertions(+), 12 deletions(-)
> > >
> > > diff --git a/tools/testing/kunit/kunit_parser.py b/tools/testing/kunit/kunit_parser.py
> > > index e8bcc139702e..6b5dd26b479d 100644
> > > --- a/tools/testing/kunit/kunit_parser.py
> > > +++ b/tools/testing/kunit/kunit_parser.py
> > > @@ -43,6 +43,7 @@ class TestCase(object):
> > > class TestStatus(Enum):
> > > SUCCESS = auto()
> > > FAILURE = auto()
> > > + SKIPPED = auto()
> > > TEST_CRASHED = auto()
> > > NO_TESTS = auto()
> > > FAILURE_TO_PARSE_TESTS = auto()
> > > @@ -108,6 +109,8 @@ def save_non_diagnostic(lines: List[str], test_case: TestCase) -> None:
> > >
> > > OkNotOkResult = namedtuple('OkNotOkResult', ['is_ok','description', 'text'])
> > >
> > > +OK_NOT_OK_SKIP = re.compile(r'^[\s]*(ok|not ok) [0-9]+ - (.*) # SKIP(.*)$')
> > > +
> > > OK_NOT_OK_SUBTEST = re.compile(r'^[\s]+(ok|not ok) [0-9]+ - (.*)$')
> > >
> > > OK_NOT_OK_MODULE = re.compile(r'^(ok|not ok) ([0-9]+) - (.*)$')
> > > @@ -125,6 +128,10 @@ def parse_ok_not_ok_test_case(lines: List[str], test_case: TestCase) -> bool:
> > > if match:
> > > test_case.log.append(lines.pop(0))
> > > test_case.name = match.group(2)
> > > + skip_match = OK_NOT_OK_SKIP.match(line)
> > > + if skip_match:
> > > + test_case.status = TestStatus.SKIPPED
> > > + return True
> > > if test_case.status == TestStatus.TEST_CRASHED:
> > > return True
> > > if match.group(1) == 'ok':
> > > @@ -188,16 +195,16 @@ def parse_subtest_plan(lines: List[str]) -> Optional[int]:
> > > return None
> > >
> > > def max_status(left: TestStatus, right: TestStatus) -> TestStatus:
> > > - if left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> > > + if left == right:
> > > + return left
> > > + elif left == TestStatus.TEST_CRASHED or right == TestStatus.TEST_CRASHED:
> > > return TestStatus.TEST_CRASHED
> > > elif left == TestStatus.FAILURE or right == TestStatus.FAILURE:
> > > return TestStatus.FAILURE
> > > - elif left != TestStatus.SUCCESS:
> > > - return left
> > > - elif right != TestStatus.SUCCESS:
> > > + elif left == TestStatus.SKIPPED:
> > > return right
> > > else:
> > > - return TestStatus.SUCCESS
> > > + return left
> > >
> > > def parse_ok_not_ok_test_suite(lines: List[str],
> > > test_suite: TestSuite,
> > > @@ -214,6 +221,9 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> > > test_suite.status = TestStatus.SUCCESS
> > > else:
> > > test_suite.status = TestStatus.FAILURE
> > > + skip_match = OK_NOT_OK_SKIP.match(line)
> > > + if skip_match:
> > > + test_suite.status = TestStatus.SKIPPED
> > > suite_index = int(match.group(2))
> > > if suite_index != expected_suite_index:
> > > print_with_timestamp(
> > > @@ -224,8 +234,8 @@ def parse_ok_not_ok_test_suite(lines: List[str],
> > > else:
> > > return False
> > >
> > > -def bubble_up_errors(statuses: Iterable[TestStatus]) -> TestStatus:
> > > - return reduce(max_status, statuses, TestStatus.SUCCESS)
> > > +def bubble_up_errors(status_list: Iterable[TestStatus]) -> TestStatus:
> > > + return reduce(max_status, status_list, TestStatus.SKIPPED)
> > >
> > > def bubble_up_test_case_errors(test_suite: TestSuite) -> TestStatus:
> > > max_test_case_status = bubble_up_errors(x.status for x in test_suite.cases)
> > > @@ -315,9 +325,12 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> >
> > Btw, this type annotation is out of date.
>
> Oops: will fix and/or replace with the below.
>
> > But I think an ever growing Tuple is too cumbersome, how about this?
> >
>
> Yeah, this does seem cleaner: I'll put this or something like it in v2.
>
> > diff --git a/tools/testing/kunit/kunit_parser.py
> > b/tools/testing/kunit/kunit_parser.py
> > index 6b5dd26b479d..055ee1e4d19d 100644
> > --- a/tools/testing/kunit/kunit_parser.py
> > +++ b/tools/testing/kunit/kunit_parser.py
> > @@ -6,6 +6,7 @@
> > # Author: Felix Guo <[email protected]>
> > # Author: Brendan Higgins <[email protected]>
> >
> > +from dataclasses import dataclass
> > import re
> >
> > from collections import namedtuple
> > @@ -321,11 +322,19 @@ def parse_test_result(lines: List[str]) -> TestResult:
> > else:
> > return TestResult(TestStatus.NO_TESTS, [], lines)
> >
> > -def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> > - total_tests = 0
> > - failed_tests = 0
> > - crashed_tests = 0
> > - skipped_tests = 0
> > +# Note: This would require Python 3.7. We currently only require
> > 3.6 (enum.auto). We can do it by hand to avoid that, if we want.
>
> Hmm... I'm generally loath to increase the version requirement for
> something this simple, so might look into doing a version of this
> without the dataclass.

I think the same argument applies to enum.auto when we can just
manually assign values :P

But yes, I'd suggest not using it.
You'd just need to manually write the __init__() in that case (you
can't use namedtuple since we need to modify the fields, but also I
prefer having type annotations on my fields).

I only used @dataclass to make my example easier to write since I'm lazy.

>
>
> > +@dataclass
> > +class TestCounts:
> > + passed: int = 0
> > + failed: int = 0
> > + skipped: int = 0
> > + crashed: int = 0
> > +
> > + def total(self) -> int:
> > + return self.passed + self.failed + self.skipped + self.crashed
> > +
> > +def print_and_count_results(test_result: TestResult) -> TestCounts:
> > + counts = TestCounts()
> > for test_suite in test_result.suites:
> > if test_suite.status == TestStatus.SUCCESS:
> > print_suite_divider(green('[PASSED] ') +
> > test_suite.name)
> > @@ -336,39 +345,33 @@ def print_and_count_results(test_result:
> > TestResult) -> Tuple[int, int, int]:
> > else:
> > print_suite_divider(red('[FAILED] ') + test_suite.name)
> > for test_case in test_suite.cases:
> > - total_tests += 1
> > if test_case.status == TestStatus.SUCCESS:
> > + counts.passed += 1
> > print_with_timestamp(green('[PASSED]
> > ') + test_case.name)
> > elif test_case.status == TestStatus.SKIPPED:
> > - skipped_tests += 1
> > + counts.skipped += 1
> > print_with_timestamp(yellow('[SKIPPED]
> > ') + test_case.name)
> > elif test_case.status == TestStatus.TEST_CRASHED:
> > - crashed_tests += 1
> > + counts.crashed += 1
> > print_with_timestamp(red('[CRASHED] '
> > + test_case.name))
> > print_log(map(yellow, test_case.log))
> > print_with_timestamp('')
> > else:
> > - failed_tests += 1
> > + counts.failed += 1
> > print_with_timestamp(red('[FAILED] ')
> > + test_case.name)
> > print_log(map(yellow, test_case.log))
> > print_with_timestamp('')
> > - return total_tests, failed_tests, crashed_tests, skipped_tests
> > + return counts
> >
> > def parse_run_tests(kernel_output) -> TestResult:
> > - total_tests = 0
> > - failed_tests = 0
> > - crashed_tests = 0
> > - skipped_tests = 0
> > + counts = TestCounts()
> > test_result =
> > parse_test_result(list(isolate_kunit_output(kernel_output)))
> > if test_result.status == TestStatus.NO_TESTS:
> > print(red('[ERROR] ') + yellow('no tests run!'))
> > elif test_result.status == TestStatus.FAILURE_TO_PARSE_TESTS:
> > print(red('[ERROR] ') + yellow('could not parse test results!'))
> > else:
> > - (total_tests,
> > - failed_tests,
> > - crashed_tests,
> > - skipped_tests) = print_and_count_results(test_result)
> > + counts = print_and_count_results(test_result)
> > print_with_timestamp(DIVIDER)
> > if test_result.status == TestStatus.SUCCESS:
> > fmt = green
> > @@ -378,5 +381,5 @@ def parse_run_tests(kernel_output) -> TestResult:
> > fmt =red
> > print_with_timestamp(
> > fmt('Testing complete. %d tests run. %d failed. %d
> > crashed. %d skipped.' %
> > - (total_tests, failed_tests, crashed_tests, skipped_tests)))
> > + (counts.total(), counts.failed, counts.crashed,
> > counts.skipped)))
> > return test_result
> >
> > > total_tests = 0
> > > failed_tests = 0
> > > crashed_tests = 0
> > > + skipped_tests = 0
> > > for test_suite in test_result.suites:
> > > if test_suite.status == TestStatus.SUCCESS:
> > > print_suite_divider(green('[PASSED] ') + test_suite.name)
> > > + elif test_suite.status == TestStatus.SKIPPED:
> > > + print_suite_divider(yellow('[SKIPPED] ') + test_suite.name)
> > > elif test_suite.status == TestStatus.TEST_CRASHED:
> > > print_suite_divider(red('[CRASHED] ' + test_suite.name))
> > > else:
> > > @@ -326,6 +339,9 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> > > total_tests += 1
> > > if test_case.status == TestStatus.SUCCESS:
> > > print_with_timestamp(green('[PASSED] ') + test_case.name)
> > > + elif test_case.status == TestStatus.SKIPPED:
> > > + skipped_tests += 1
> > > + print_with_timestamp(yellow('[SKIPPED] ') + test_case.name)
> > > elif test_case.status == TestStatus.TEST_CRASHED:
> > > crashed_tests += 1
> > > print_with_timestamp(red('[CRASHED] ' + test_case.name))
> > > @@ -336,12 +352,13 @@ def print_and_count_results(test_result: TestResult) -> Tuple[int, int, int]:
> > > print_with_timestamp(red('[FAILED] ') + test_case.name)
> > > print_log(map(yellow, test_case.log))
> > > print_with_timestamp('')
> > > - return total_tests, failed_tests, crashed_tests
> > > + return total_tests, failed_tests, crashed_tests, skipped_tests
> > >
> > > def parse_run_tests(kernel_output) -> TestResult:
> > > total_tests = 0
> > > failed_tests = 0
> > > crashed_tests = 0
> > > + skipped_tests = 0
> > > test_result = parse_test_result(list(isolate_kunit_output(kernel_output)))
> > > if test_result.status == TestStatus.NO_TESTS:
> > > print(red('[ERROR] ') + yellow('no tests run!'))
> > > @@ -350,10 +367,16 @@ def parse_run_tests(kernel_output) -> TestResult:
> > > else:
> > > (total_tests,
> > > failed_tests,
> > > - crashed_tests) = print_and_count_results(test_result)
> > > + crashed_tests,
> > > + skipped_tests) = print_and_count_results(test_result)
> > > print_with_timestamp(DIVIDER)
> > > - fmt = green if test_result.status == TestStatus.SUCCESS else red
> > > + if test_result.status == TestStatus.SUCCESS:
> > > + fmt = green
> > > + elif test_result.status == TestStatus.SKIPPED:
> > > + fmt = yellow
> > > + else:
> > > + fmt =red
> > > print_with_timestamp(
> > > - fmt('Testing complete. %d tests run. %d failed. %d crashed.' %
> > > - (total_tests, failed_tests, crashed_tests)))
> > > + fmt('Testing complete. %d tests run. %d failed. %d crashed. %d skipped.' %
> > > + (total_tests, failed_tests, crashed_tests, skipped_tests)))
> > > return test_result
> > > diff --git a/tools/testing/kunit/kunit_tool_test.py b/tools/testing/kunit/kunit_tool_test.py
> > > index 2e809dd956a7..a51e70cafcc1 100755
> > > --- a/tools/testing/kunit/kunit_tool_test.py
> > > +++ b/tools/testing/kunit/kunit_tool_test.py
> > > @@ -183,6 +183,28 @@ class KUnitParserTest(unittest.TestCase):
> > > kunit_parser.TestStatus.TEST_CRASHED,
> > > result.status)
> > >
> > > + def test_skipped_test(self):
> > > + skipped_log = test_data_path('test_skip_tests.log')
> > > + file = open(skipped_log)
> > > + result = kunit_parser.parse_run_tests(file.readlines())
> > > +
> > > + # A skipped test does not fail the whole suite.
> > > + self.assertEqual(
> > > + kunit_parser.TestStatus.SUCCESS,
> > > + result.status)
> > > + file.close()
> > > +
> > > + def test_skipped_all_tests(self):
> > > + skipped_log = test_data_path('test_skip_all_tests.log')
> > > + file = open(skipped_log)
> > > + result = kunit_parser.parse_run_tests(file.readlines())
> > > +
> > > + self.assertEqual(
> > > + kunit_parser.TestStatus.SKIPPED,
> > > + result.status)
> > > + file.close()
> > > +
> > > +
> > > def test_ignores_prefix_printk_time(self):
> > > prefix_log = test_data_path('test_config_printk_time.log')
> > > with open(prefix_log) as file:
> > > --
> > > 2.31.1.818.g46aad6cb9e-goog
> > >
> > > --
> > > You received this message because you are subscribed to the Google Groups "KUnit Development" group.
> > > To unsubscribe from this group and stop receiving emails from it, send an email to [email protected].
> > > To view this discussion on the web visit https://groups.google.com/d/msgid/kunit-dev/20210526081112.3652290-2-davidgow%40google.com.