2021-01-11 09:18:31

by Marco Elver

Subject: [PATCH mm 1/2] kfence: add option to use KFENCE without static keys

For certain use cases, specifically where the sample interval is always
set to a very low value such as 1ms, it can make sense to use a dynamic
branch instead of static branches, due to the overhead of toggling a
static branch.

Therefore, add a new Kconfig option to remove the static branches and
instead check kfence_allocation_gate to determine whether a KFENCE
allocation should be set up.
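
With CONFIG_KFENCE_STATIC_KEYS=n, the fast path in kfence_alloc() reduces
to a plain conditional on the allocation gate (a sketch of the hunk below;
the static-key variant remains the default):

	if (unlikely(!atomic_read(&kfence_allocation_gate)))
		return __kfence_alloc(s, size, flags);
	return NULL;

Such configurations would typically also use a very low sample interval,
e.g. by booting with the existing kfence.sample_interval=1 parameter.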

Suggested-by: Jörn Engel <[email protected]>
Signed-off-by: Marco Elver <[email protected]>
---
include/linux/kfence.h | 11 ++++++++++-
lib/Kconfig.kfence | 12 +++++++++++-
mm/kfence/core.c | 32 ++++++++++++++++++--------------
3 files changed, 39 insertions(+), 16 deletions(-)

diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index 76246889ecdb..dc86b69d3903 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -4,7 +4,6 @@
#define _LINUX_KFENCE_H

#include <linux/mm.h>
-#include <linux/static_key.h>
#include <linux/types.h>

#ifdef CONFIG_KFENCE
@@ -17,7 +16,13 @@
#define KFENCE_POOL_SIZE ((CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE)
extern char *__kfence_pool;

+#ifdef CONFIG_KFENCE_STATIC_KEYS
+#include <linux/static_key.h>
DECLARE_STATIC_KEY_FALSE(kfence_allocation_key);
+#else
+#include <linux/atomic.h>
+extern atomic_t kfence_allocation_gate;
+#endif

/**
* is_kfence_address() - check if an address belongs to KFENCE pool
@@ -104,7 +109,11 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags);
*/
static __always_inline void *kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
+#ifdef CONFIG_KFENCE_STATIC_KEYS
if (static_branch_unlikely(&kfence_allocation_key))
+#else
+ if (unlikely(!atomic_read(&kfence_allocation_gate)))
+#endif
return __kfence_alloc(s, size, flags);
return NULL;
}
diff --git a/lib/Kconfig.kfence b/lib/Kconfig.kfence
index d3ea24fa30fc..78f50ccb3b45 100644
--- a/lib/Kconfig.kfence
+++ b/lib/Kconfig.kfence
@@ -6,7 +6,6 @@ config HAVE_ARCH_KFENCE
menuconfig KFENCE
bool "KFENCE: low-overhead sampling-based memory safety error detector"
depends on HAVE_ARCH_KFENCE && (SLAB || SLUB)
- depends on JUMP_LABEL # To ensure performance, require jump labels
select STACKTRACE
help
KFENCE is a low-overhead sampling-based detector of heap out-of-bounds
@@ -25,6 +24,17 @@ menuconfig KFENCE

if KFENCE

+config KFENCE_STATIC_KEYS
+ bool "Use static keys to set up allocations"
+ default y
+ depends on JUMP_LABEL # To ensure performance, require jump labels
+ help
+ Use static keys (static branches) to set up KFENCE allocations. Using
+ static keys is normally recommended, because it avoids a dynamic
+ branch in the allocator's fast path. However, with very low sample
+ intervals, or on systems that do not support jump labels, a dynamic
+ branch may still be an acceptable performance trade-off.
+
config KFENCE_SAMPLE_INTERVAL
int "Default sample interval in milliseconds"
default 100
diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index f0816d5f5913..96a9a98e7453 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -88,11 +88,13 @@ struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

+#ifdef CONFIG_KFENCE_STATIC_KEYS
/* The static key to set up a KFENCE allocation. */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);
+#endif

/* Gates the allocation, ensuring only one succeeds in a given period. */
-static atomic_t allocation_gate = ATOMIC_INIT(1);
+atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/* Statistics counters for debugfs. */
enum kfence_counter_id {
@@ -583,29 +585,31 @@ late_initcall(kfence_debugfs_init);
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
- unsigned long end_wait;
-
if (!READ_ONCE(kfence_enabled))
return;

/* Enable static key, and await allocation to happen. */
- atomic_set(&allocation_gate, 0);
+ atomic_set(&kfence_allocation_gate, 0);
+#ifdef CONFIG_KFENCE_STATIC_KEYS
static_branch_enable(&kfence_allocation_key);
/*
* Await an allocation. Timeout after 1 second, in case the kernel stops
* doing allocations, to avoid stalling this worker task for too long.
*/
- end_wait = jiffies + HZ;
- do {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&allocation_gate) != 0)
- break;
- schedule_timeout(1);
- } while (time_before(jiffies, end_wait));
- __set_current_state(TASK_RUNNING);
-
+ {
+ unsigned long end_wait = jiffies + HZ;
+
+ do {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&kfence_allocation_gate) != 0)
+ break;
+ schedule_timeout(1);
+ } while (time_before(jiffies, end_wait));
+ __set_current_state(TASK_RUNNING);
+ }
/* Disable static key and reset timer. */
static_branch_disable(&kfence_allocation_key);
+#endif
schedule_delayed_work(&kfence_timer, msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
@@ -711,7 +715,7 @@ void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
* sense to continue writing to it and pay the associated contention
* cost, in case we have a large number of concurrent allocations.
*/
- if (atomic_read(&allocation_gate) || atomic_inc_return(&allocation_gate) > 1)
+ if (atomic_read(&kfence_allocation_gate) || atomic_inc_return(&kfence_allocation_gate) > 1)
return NULL;

if (!READ_ONCE(kfence_enabled))
--
2.30.0.284.gd98b1dd5eaa7-goog


2021-01-11 09:19:40

by Marco Elver

Subject: [PATCH mm 2/2] kfence: show access type in report

Show the access type in KFENCE reports by plumbing through read/write
information from the page fault handler. Update the documentation and
test accordingly.
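
The read/write distinction is taken directly from the architecture fault
information (see the arch hunks below); roughly:

	/* arm64: write-not-read (WnR) bit of the ESR. */
	kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs);

	/* x86: write bit of the page fault error code. */
	kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs);

kfence_report_error() then prints "read" or "write" for the access in the
report headers.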

Suggested-by: Jörn Engel <[email protected]>
Signed-off-by: Marco Elver <[email protected]>
---
Documentation/dev-tools/kfence.rst | 12 ++++----
arch/arm64/mm/fault.c | 2 +-
arch/x86/mm/fault.c | 3 +-
include/linux/kfence.h | 9 ++++--
mm/kfence/core.c | 11 +++----
mm/kfence/kfence.h | 2 +-
mm/kfence/kfence_test.c | 47 ++++++++++++++++++++++++++----
mm/kfence/report.c | 27 +++++++++++------
8 files changed, 82 insertions(+), 31 deletions(-)

diff --git a/Documentation/dev-tools/kfence.rst b/Documentation/dev-tools/kfence.rst
index d7329f2caa5a..06a454ec7712 100644
--- a/Documentation/dev-tools/kfence.rst
+++ b/Documentation/dev-tools/kfence.rst
@@ -64,9 +64,9 @@ Error reports
A typical out-of-bounds access looks like this::

==================================================================
- BUG: KFENCE: out-of-bounds in test_out_of_bounds_read+0xa3/0x22b
+ BUG: KFENCE: out-of-bounds read in test_out_of_bounds_read+0xa3/0x22b

- Out-of-bounds access at 0xffffffffb672efff (1B left of kfence-#17):
+ Out-of-bounds read at 0xffffffffb672efff (1B left of kfence-#17):
test_out_of_bounds_read+0xa3/0x22b
kunit_try_run_case+0x51/0x85
kunit_generic_run_threadfn_adapter+0x16/0x30
@@ -93,9 +93,9 @@ its origin. Note that, real kernel addresses are only shown for
Use-after-free accesses are reported as::

==================================================================
- BUG: KFENCE: use-after-free in test_use_after_free_read+0xb3/0x143
+ BUG: KFENCE: use-after-free read in test_use_after_free_read+0xb3/0x143

- Use-after-free access at 0xffffffffb673dfe0 (in kfence-#24):
+ Use-after-free read at 0xffffffffb673dfe0 (in kfence-#24):
test_use_after_free_read+0xb3/0x143
kunit_try_run_case+0x51/0x85
kunit_generic_run_threadfn_adapter+0x16/0x30
@@ -192,9 +192,9 @@ where it was not possible to determine an associated object, e.g. if adjacent
object pages had not yet been allocated::

==================================================================
- BUG: KFENCE: invalid access in test_invalid_access+0x26/0xe0
+ BUG: KFENCE: invalid read in test_invalid_access+0x26/0xe0

- Invalid access at 0xffffffffb670b00a:
+ Invalid read at 0xffffffffb670b00a:
test_invalid_access+0x26/0xe0
kunit_try_run_case+0x51/0x85
kunit_generic_run_threadfn_adapter+0x16/0x30
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 7ab718603f4b..10bd03ee3586 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -382,7 +382,7 @@ static void __do_kernel_fault(unsigned long addr, unsigned int esr,
} else if (addr < PAGE_SIZE) {
msg = "NULL pointer dereference";
} else {
- if (kfence_handle_page_fault(addr, regs))
+ if (kfence_handle_page_fault(addr, esr & ESR_ELx_WNR, regs))
return;

msg = "paging request";
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index 907b54d3b990..66d2db2f2211 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -734,7 +734,8 @@ no_context(struct pt_regs *regs, unsigned long error_code,
efi_recover_from_page_fault(address);

/* Only not-present faults should be handled by KFENCE. */
- if (!(error_code & X86_PF_PROT) && kfence_handle_page_fault(address, regs))
+ if (!(error_code & X86_PF_PROT) &&
+ kfence_handle_page_fault(address, error_code & X86_PF_WRITE, regs))
return;

oops:
diff --git a/include/linux/kfence.h b/include/linux/kfence.h
index dc86b69d3903..c2c1dd100cba 100644
--- a/include/linux/kfence.h
+++ b/include/linux/kfence.h
@@ -180,6 +180,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
/**
* kfence_handle_page_fault() - perform page fault handling for KFENCE pages
* @addr: faulting address
+ * @is_write: is access a write
* @regs: current struct pt_regs (can be NULL, but shows full stack trace)
*
* Return:
@@ -191,7 +192,7 @@ static __always_inline __must_check bool kfence_free(void *addr)
* cases KFENCE prints an error message and marks the offending page as
* present, so that the kernel can proceed.
*/
-bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs);
+bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs);

#else /* CONFIG_KFENCE */

@@ -204,7 +205,11 @@ static inline size_t kfence_ksize(const void *addr) { return 0; }
static inline void *kfence_object_start(const void *addr) { return NULL; }
static inline void __kfence_free(void *addr) { }
static inline bool __must_check kfence_free(void *addr) { return false; }
-static inline bool __must_check kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs) { return false; }
+static inline bool __must_check kfence_handle_page_fault(unsigned long addr, bool is_write,
+ struct pt_regs *regs)
+{
+ return false;
+}

#endif

diff --git a/mm/kfence/core.c b/mm/kfence/core.c
index 96a9a98e7453..a5f8aa410a30 100644
--- a/mm/kfence/core.c
+++ b/mm/kfence/core.c
@@ -211,7 +211,7 @@ static inline bool check_canary_byte(u8 *addr)
return true;

atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
- kfence_report_error((unsigned long)addr, NULL, addr_to_metadata((unsigned long)addr),
+ kfence_report_error((unsigned long)addr, false, NULL, addr_to_metadata((unsigned long)addr),
KFENCE_ERROR_CORRUPTION);
return false;
}
@@ -350,7 +350,8 @@ static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool z
if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
/* Invalid or double-free, bail out. */
atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
- kfence_report_error((unsigned long)addr, NULL, meta, KFENCE_ERROR_INVALID_FREE);
+ kfence_report_error((unsigned long)addr, false, NULL, meta,
+ KFENCE_ERROR_INVALID_FREE);
raw_spin_unlock_irqrestore(&meta->lock, flags);
return;
}
@@ -765,7 +766,7 @@ void __kfence_free(void *addr)
kfence_guarded_free(addr, meta, false);
}

-bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs)
+bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
struct kfence_metadata *to_report = NULL;
@@ -828,11 +829,11 @@ bool kfence_handle_page_fault(unsigned long addr, struct pt_regs *regs)

out:
if (to_report) {
- kfence_report_error(addr, regs, to_report, error_type);
+ kfence_report_error(addr, is_write, regs, to_report, error_type);
raw_spin_unlock_irqrestore(&to_report->lock, flags);
} else {
/* This may be a UAF or OOB access, but we can't be sure. */
- kfence_report_error(addr, regs, NULL, KFENCE_ERROR_INVALID);
+ kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
}

return kfence_unprotect(addr); /* Unprotect and let access proceed. */
diff --git a/mm/kfence/kfence.h b/mm/kfence/kfence.h
index fa3579d03089..97282fa77840 100644
--- a/mm/kfence/kfence.h
+++ b/mm/kfence/kfence.h
@@ -99,7 +99,7 @@ enum kfence_error_type {
KFENCE_ERROR_INVALID_FREE, /* Invalid free. */
};

-void kfence_report_error(unsigned long address, struct pt_regs *regs,
+void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
const struct kfence_metadata *meta, enum kfence_error_type type);

void kfence_print_object(struct seq_file *seq, const struct kfence_metadata *meta);
diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index f57c61c833e6..db1bb596acaf 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -71,8 +71,14 @@ struct expect_report {
enum kfence_error_type type; /* The type or error. */
void *fn; /* Function pointer to expected function where access occurred. */
char *addr; /* Address at which the bad access occurred. */
+ bool is_write; /* Is access a write. */
};

+static const char *get_access_type(const struct expect_report *r)
+{
+ return r->is_write ? "write" : "read";
+}
+
/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
@@ -93,16 +99,19 @@ static bool report_matches(const struct expect_report *r)
end = &expect[0][sizeof(expect[0]) - 1];
switch (r->type) {
case KFENCE_ERROR_OOB:
- cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds");
+ cur += scnprintf(cur, end - cur, "BUG: KFENCE: out-of-bounds %s",
+ get_access_type(r));
break;
case KFENCE_ERROR_UAF:
- cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free");
+ cur += scnprintf(cur, end - cur, "BUG: KFENCE: use-after-free %s",
+ get_access_type(r));
break;
case KFENCE_ERROR_CORRUPTION:
cur += scnprintf(cur, end - cur, "BUG: KFENCE: memory corruption");
break;
case KFENCE_ERROR_INVALID:
- cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid access");
+ cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid %s",
+ get_access_type(r));
break;
case KFENCE_ERROR_INVALID_FREE:
cur += scnprintf(cur, end - cur, "BUG: KFENCE: invalid free");
@@ -121,16 +130,16 @@ static bool report_matches(const struct expect_report *r)

switch (r->type) {
case KFENCE_ERROR_OOB:
- cur += scnprintf(cur, end - cur, "Out-of-bounds access at");
+ cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
break;
case KFENCE_ERROR_UAF:
- cur += scnprintf(cur, end - cur, "Use-after-free access at");
+ cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
break;
case KFENCE_ERROR_CORRUPTION:
cur += scnprintf(cur, end - cur, "Corrupted memory at");
break;
case KFENCE_ERROR_INVALID:
- cur += scnprintf(cur, end - cur, "Invalid access at");
+ cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
break;
case KFENCE_ERROR_INVALID_FREE:
cur += scnprintf(cur, end - cur, "Invalid free of");
@@ -294,6 +303,7 @@ static void test_out_of_bounds_read(struct kunit *test)
struct expect_report expect = {
.type = KFENCE_ERROR_OOB,
.fn = test_out_of_bounds_read,
+ .is_write = false,
};
char *buf;

@@ -321,12 +331,31 @@ static void test_out_of_bounds_read(struct kunit *test)
test_free(buf);
}

+static void test_out_of_bounds_write(struct kunit *test)
+{
+ size_t size = 32;
+ struct expect_report expect = {
+ .type = KFENCE_ERROR_OOB,
+ .fn = test_out_of_bounds_write,
+ .is_write = true,
+ };
+ char *buf;
+
+ setup_test_cache(test, size, 0, NULL);
+ buf = test_alloc(test, size, GFP_KERNEL, ALLOCATE_LEFT);
+ expect.addr = buf - 1;
+ WRITE_ONCE(*expect.addr, 42);
+ KUNIT_EXPECT_TRUE(test, report_matches(&expect));
+ test_free(buf);
+}
+
static void test_use_after_free_read(struct kunit *test)
{
const size_t size = 32;
struct expect_report expect = {
.type = KFENCE_ERROR_UAF,
.fn = test_use_after_free_read,
+ .is_write = false,
};

setup_test_cache(test, size, 0, NULL);
@@ -411,6 +440,7 @@ static void test_kmalloc_aligned_oob_read(struct kunit *test)
struct expect_report expect = {
.type = KFENCE_ERROR_OOB,
.fn = test_kmalloc_aligned_oob_read,
+ .is_write = false,
};
char *buf;

@@ -509,6 +539,7 @@ static void test_init_on_free(struct kunit *test)
struct expect_report expect = {
.type = KFENCE_ERROR_UAF,
.fn = test_init_on_free,
+ .is_write = false,
};
int i;

@@ -598,6 +629,7 @@ static void test_invalid_access(struct kunit *test)
.type = KFENCE_ERROR_INVALID,
.fn = test_invalid_access,
.addr = &__kfence_pool[10],
+ .is_write = false,
};

READ_ONCE(__kfence_pool[10]);
@@ -611,6 +643,7 @@ static void test_memcache_typesafe_by_rcu(struct kunit *test)
struct expect_report expect = {
.type = KFENCE_ERROR_UAF,
.fn = test_memcache_typesafe_by_rcu,
+ .is_write = false,
};

setup_test_cache(test, size, SLAB_TYPESAFE_BY_RCU, NULL);
@@ -647,6 +680,7 @@ static void test_krealloc(struct kunit *test)
.type = KFENCE_ERROR_UAF,
.fn = test_krealloc,
.addr = test_alloc(test, size, GFP_KERNEL, ALLOCATE_ANY),
+ .is_write = false,
};
char *buf = expect.addr;
int i;
@@ -728,6 +762,7 @@ static void test_memcache_alloc_bulk(struct kunit *test)

static struct kunit_case kfence_test_cases[] = {
KFENCE_KUNIT_CASE(test_out_of_bounds_read),
+ KFENCE_KUNIT_CASE(test_out_of_bounds_write),
KFENCE_KUNIT_CASE(test_use_after_free_read),
KFENCE_KUNIT_CASE(test_double_free),
KFENCE_KUNIT_CASE(test_invalid_addr_free),
diff --git a/mm/kfence/report.c b/mm/kfence/report.c
index 4dedc2ff8f28..1996295ae71d 100644
--- a/mm/kfence/report.c
+++ b/mm/kfence/report.c
@@ -151,7 +151,12 @@ static void print_diff_canary(unsigned long address, size_t bytes_to_show,
pr_cont(" ]");
}

-void kfence_report_error(unsigned long address, struct pt_regs *regs,
+static const char *get_access_type(bool is_write)
+{
+ return is_write ? "write" : "read";
+}
+
+void kfence_report_error(unsigned long address, bool is_write, struct pt_regs *regs,
const struct kfence_metadata *meta, enum kfence_error_type type)
{
unsigned long stack_entries[KFENCE_STACK_DEPTH] = { 0 };
@@ -189,17 +194,19 @@ void kfence_report_error(unsigned long address, struct pt_regs *regs,
case KFENCE_ERROR_OOB: {
const bool left_of_object = address < meta->addr;

- pr_err("BUG: KFENCE: out-of-bounds in %pS\n\n", (void *)stack_entries[skipnr]);
- pr_err("Out-of-bounds access at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n",
- (void *)address,
+ pr_err("BUG: KFENCE: out-of-bounds %s in %pS\n\n", get_access_type(is_write),
+ (void *)stack_entries[skipnr]);
+ pr_err("Out-of-bounds %s at 0x" PTR_FMT " (%luB %s of kfence-#%zd):\n",
+ get_access_type(is_write), (void *)address,
left_of_object ? meta->addr - address : address - meta->addr,
left_of_object ? "left" : "right", object_index);
break;
}
case KFENCE_ERROR_UAF:
- pr_err("BUG: KFENCE: use-after-free in %pS\n\n", (void *)stack_entries[skipnr]);
- pr_err("Use-after-free access at 0x" PTR_FMT " (in kfence-#%zd):\n",
- (void *)address, object_index);
+ pr_err("BUG: KFENCE: use-after-free %s in %pS\n\n", get_access_type(is_write),
+ (void *)stack_entries[skipnr]);
+ pr_err("Use-after-free %s at 0x" PTR_FMT " (in kfence-#%zd):\n",
+ get_access_type(is_write), (void *)address, object_index);
break;
case KFENCE_ERROR_CORRUPTION:
pr_err("BUG: KFENCE: memory corruption in %pS\n\n", (void *)stack_entries[skipnr]);
@@ -208,8 +215,10 @@ void kfence_report_error(unsigned long address, struct pt_regs *regs,
pr_cont(" (in kfence-#%zd):\n", object_index);
break;
case KFENCE_ERROR_INVALID:
- pr_err("BUG: KFENCE: invalid access in %pS\n\n", (void *)stack_entries[skipnr]);
- pr_err("Invalid access at 0x" PTR_FMT ":\n", (void *)address);
+ pr_err("BUG: KFENCE: invalid %s in %pS\n\n", get_access_type(is_write),
+ (void *)stack_entries[skipnr]);
+ pr_err("Invalid %s at 0x" PTR_FMT ":\n", get_access_type(is_write),
+ (void *)address);
break;
case KFENCE_ERROR_INVALID_FREE:
pr_err("BUG: KFENCE: invalid free in %pS\n\n", (void *)stack_entries[skipnr]);
--
2.30.0.284.gd98b1dd5eaa7-goog

2021-01-11 19:56:40

by Jörn Engel

Subject: Re: [PATCH mm 1/2] kfence: add option to use KFENCE without static keys

On Mon, Jan 11, 2021 at 10:15:43AM +0100, Marco Elver wrote:
> For certain use cases, specifically where the sample interval is always
> set to a very low value such as 1ms, it can make sense to use a dynamic
> branch instead of static branches, due to the overhead of toggling a
> static branch.

I ended up with 100µs and couldn't measure a performance problem in our
benchmarks. My results don't have predictive value for anyone else, of
course.

> Therefore, add a new Kconfig option to remove the static branches and
> instead check kfence_allocation_gate to determine whether a KFENCE
> allocation should be set up.
>
> Suggested-by: Jörn Engel <[email protected]>
> Signed-off-by: Marco Elver <[email protected]>

Reviewed-by: Jörn Engel <[email protected]>

Jörn

--
One of the things I’ve discovered over the years, is that you can
create change or you can receive credit – not both.
-- Stephen Downes

2021-01-11 21:19:20

by Jörn Engel

Subject: Re: [PATCH mm 2/2] kfence: show access type in report

On Mon, Jan 11, 2021 at 10:15:44AM +0100, Marco Elver wrote:
> Show the access type in KFENCE reports by plumbing through read/write
> information from the page fault handler. Update the documentation and
> test accordingly.
>
> Suggested-by: Jörn Engel <[email protected]>
> Signed-off-by: Marco Elver <[email protected]>

Reviewed-by: Jörn Engel <[email protected]>

Jörn

--
This above all: to thine own self be true.
-- Shakespeare