2020-02-05 20:46:23

by Marco Elver

Subject: [PATCH 1/3] kcsan: Introduce KCSAN_ACCESS_ASSERT access type

The KCSAN_ACCESS_ASSERT access type may be used to introduce dummy reads
and writes to assert certain properties of synchronization logic, where
bugs would otherwise not be detectable as normal data races.

For example, a variable that is only meant to be written by a single
CPU, but may be read (without locking) by other CPUs, must still be
marked properly to avoid data races. However, concurrent writes,
whether marked with WRITE_ONCE() or not, would be a bug. Using
kcsan_check_access(&x, sizeof(x), KCSAN_ACCESS_ASSERT) allows catching
such bugs.
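
As an illustration (the variable and its update path below are
hypothetical, not part of this patch), the assertion could be placed
next to the sole writer:

    /* Hypothetical: only the owning CPU ever writes cpu_stats. */
    static long cpu_stats;

    void update_cpu_stats(long val)
    {
            /*
             * Dummy read with KCSAN_ACCESS_ASSERT: KCSAN may set up a
             * watchpoint for it, and any concurrent write to
             * cpu_stats, whether plain or marked, is then reported as
             * a race.
             */
            kcsan_check_access(&cpu_stats, sizeof(cpu_stats),
                               KCSAN_ACCESS_ASSERT);
            cpu_stats = val; /* plain write; this CPU is the only writer */
    }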

Notably, the KCSAN_ACCESS_ASSERT type disables various filters, so that
all races that KCSAN observes are reported.

Signed-off-by: Marco Elver <[email protected]>
---
include/linux/kcsan-checks.h | 8 +++++++-
kernel/kcsan/core.c | 24 +++++++++++++++++++++---
kernel/kcsan/report.c | 11 +++++++++++
3 files changed, 39 insertions(+), 4 deletions(-)

diff --git a/include/linux/kcsan-checks.h b/include/linux/kcsan-checks.h
index ef3ee233a3fa9..21b1d1f214ad5 100644
--- a/include/linux/kcsan-checks.h
+++ b/include/linux/kcsan-checks.h
@@ -6,10 +6,16 @@
#include <linux/types.h>

/*
- * Access type modifiers.
+ * ACCESS TYPE MODIFIERS
+ *
+ * <none>: normal read access;
+ * WRITE : write access;
+ * ATOMIC: access is atomic;
+ * ASSERT: access is not a regular access, but an assertion;
*/
#define KCSAN_ACCESS_WRITE 0x1
#define KCSAN_ACCESS_ATOMIC 0x2
+#define KCSAN_ACCESS_ASSERT 0x4

/*
* __kcsan_*: Always calls into the runtime when KCSAN is enabled. This may be used
diff --git a/kernel/kcsan/core.c b/kernel/kcsan/core.c
index 82c2bef827d42..190fb5c958fe3 100644
--- a/kernel/kcsan/core.c
+++ b/kernel/kcsan/core.c
@@ -178,6 +178,14 @@ is_atomic(const volatile void *ptr, size_t size, int type)
if ((type & KCSAN_ACCESS_ATOMIC) != 0)
return true;

+ /*
+ * Unless explicitly declared atomic, never consider an assertion access
+ * as atomic. This allows using them also in atomic regions, such as
+ * seqlocks, without implicitly changing their semantics.
+ */
+ if ((type & KCSAN_ACCESS_ASSERT) != 0)
+ return false;
+
if (IS_ENABLED(CONFIG_KCSAN_ASSUME_PLAIN_WRITES_ATOMIC) &&
(type & KCSAN_ACCESS_WRITE) != 0 && size <= sizeof(long) &&
IS_ALIGNED((unsigned long)ptr, size))
@@ -307,6 +315,7 @@ static noinline void
kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
{
const bool is_write = (type & KCSAN_ACCESS_WRITE) != 0;
+ const bool is_assertion = (type & KCSAN_ACCESS_ASSERT) != 0;
atomic_long_t *watchpoint;
union {
u8 _1;
@@ -430,12 +439,21 @@ kcsan_setup_watchpoint(const volatile void *ptr, size_t size, int type)
* No need to increment 'data_races' counter, as the racing
* thread already did.
*/
- kcsan_report(ptr, size, type, size > 8 || value_change,
- smp_processor_id(), KCSAN_REPORT_RACE_SIGNAL);
+
+ /*
+ * - If we were not able to observe a value change due to size
+ * constraints, always assume a value change.
+ * - If the access type is an assertion, always assume a value
+ * change, so that the race is always reported.
+ */
+ value_change = value_change || size > 8 || is_assertion;
+
+ kcsan_report(ptr, size, type, value_change, smp_processor_id(),
+ KCSAN_REPORT_RACE_SIGNAL);
} else if (value_change) {
/* Inferring a race, since the value should not have changed. */
kcsan_counter_inc(KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN);
- if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN))
+ if (IS_ENABLED(CONFIG_KCSAN_REPORT_RACE_UNKNOWN_ORIGIN) || is_assertion)
kcsan_report(ptr, size, type, true,
smp_processor_id(),
KCSAN_REPORT_RACE_UNKNOWN_ORIGIN);
diff --git a/kernel/kcsan/report.c b/kernel/kcsan/report.c
index 7cd34285df740..938146104e046 100644
--- a/kernel/kcsan/report.c
+++ b/kernel/kcsan/report.c
@@ -178,6 +178,17 @@ static const char *get_access_type(int type)
return "write";
case KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
return "write (marked)";
+
+ /*
+ * ASSERT variants:
+ */
+ case KCSAN_ACCESS_ASSERT:
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_ATOMIC:
+ return "assert no writes";
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE:
+ case KCSAN_ACCESS_ASSERT | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ATOMIC:
+ return "assert no accesses";
+
default:
BUG();
}
--
2.25.0.341.g760bfbb309-goog


2020-02-05 20:47:20

by Marco Elver

Subject: [PATCH 3/3] kcsan: Add test to generate conflicts via debugfs

Add 'test=<iters>' option to KCSAN's debugfs interface to invoke KCSAN
checks on a dummy variable. By writing 'test=<iters>' to the debugfs
file from multiple tasks, we can generate real conflicts and trigger
data race reports.
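
For example, assuming debugfs is mounted at /sys/kernel/debug, running
'echo test=100000 > /sys/kernel/debug/kcsan' from two or more shells at
the same time should produce data race and assertion reports (the
iteration count here is arbitrary).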

Signed-off-by: Marco Elver <[email protected]>
---
kernel/kcsan/debugfs.c | 51 +++++++++++++++++++++++++++++++++++++-----
1 file changed, 46 insertions(+), 5 deletions(-)

diff --git a/kernel/kcsan/debugfs.c b/kernel/kcsan/debugfs.c
index bec42dab32ee8..5733f51a6e2c7 100644
--- a/kernel/kcsan/debugfs.c
+++ b/kernel/kcsan/debugfs.c
@@ -6,6 +6,7 @@
#include <linux/debugfs.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/sort.h>
@@ -68,9 +69,9 @@ void kcsan_counter_dec(enum kcsan_counter_id id)
/*
* The microbenchmark allows benchmarking KCSAN core runtime only. To run
* multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
- * debugfs file.
+ * debugfs file. This will not generate any conflicts, and tests the fast-path only.
*/
-static void microbenchmark(unsigned long iters)
+static noinline void microbenchmark(unsigned long iters)
{
cycles_t cycles;

@@ -80,18 +81,52 @@ static void microbenchmark(unsigned long iters)
while (iters--) {
/*
* We can run this benchmark from multiple tasks; this address
- * calculation increases likelyhood of some accesses overlapping
- * (they still won't conflict because all are reads).
+ * calculation increases likelihood of some accesses
+ * overlapping. Make the access type an atomic read, to never
+ * set up watchpoints and test the fast-path only.
*/
unsigned long addr =
iters % (CONFIG_KCSAN_NUM_WATCHPOINTS * PAGE_SIZE);
- __kcsan_check_read((void *)addr, sizeof(long));
+ __kcsan_check_access((void *)addr, sizeof(long), KCSAN_ACCESS_ATOMIC);
}
cycles = get_cycles() - cycles;

pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
}

+/*
+ * Simple test to create conflicting accesses. Write 'test=<iters>' to KCSAN's
+ * debugfs file from multiple tasks to generate real conflicts and show reports.
+ */
+static long test_dummy;
+static noinline void test_thread(unsigned long iters)
+{
+ const struct kcsan_ctx ctx_save = current->kcsan_ctx;
+ cycles_t cycles;
+
+ /* We may have been called from an atomic region; reset context. */
+ memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
+
+ pr_info("KCSAN: %s begin | iters: %lu\n", __func__, iters);
+
+ cycles = get_cycles();
+ while (iters--) {
+ __kcsan_check_read(&test_dummy, sizeof(test_dummy));
+ __kcsan_check_write(&test_dummy, sizeof(test_dummy));
+ ASSERT_EXCLUSIVE_WRITER(test_dummy);
+ ASSERT_EXCLUSIVE_ACCESS(test_dummy);
+
+ /* not actually instrumented */
+ WRITE_ONCE(test_dummy, iters); /* to observe value-change */
+ }
+ cycles = get_cycles() - cycles;
+
+ pr_info("KCSAN: %s end | cycles: %llu\n", __func__, cycles);
+
+ /* restore context */
+ current->kcsan_ctx = ctx_save;
+}
+
static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
{
const unsigned long a = *(const unsigned long *)rhs;
@@ -241,6 +276,12 @@ debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *o
if (kstrtoul(&arg[sizeof("microbench=") - 1], 0, &iters))
return -EINVAL;
microbenchmark(iters);
+ } else if (!strncmp(arg, "test=", sizeof("test=") - 1)) {
+ unsigned long iters;
+
+ if (kstrtoul(&arg[sizeof("test=") - 1], 0, &iters))
+ return -EINVAL;
+ test_thread(iters);
} else if (!strcmp(arg, "whitelist")) {
set_report_filterlist_whitelist(true);
} else if (!strcmp(arg, "blacklist")) {
--
2.25.0.341.g760bfbb309-goog