2021-07-28 19:04:19

by Heiko Carstens

Subject: [PATCH 0/4] s390: add kfence support

Hello,

this patch series adds kfence support for s390 and was mainly
developed by Sven Schnelle. Since he is currently busy, I'm sending
it out for him; I'd like to get an ACK for the second patch, which
touches kfence common code.

This was already discussed here:
https://lore.kernel.org/lkml/CANpmjNPAS5kDsADb-DwvdFR9nRnX47-mFuEG2vmMPn5U3i3sGQ@mail.gmail.com/

With that ACK I'd like to carry the series via the s390 tree, so it
can go upstream during the next merge window. Hopefully that's ok.

Thanks,
Heiko

Heiko Carstens (1):
s390/mm: implement set_memory_4k()

Sven Schnelle (3):
kfence: add function to mask address bits
s390: add support for KFENCE
s390: add kfence region to pagetable dumper

arch/s390/Kconfig | 1 +
arch/s390/include/asm/kfence.h | 42 ++++++++++++++++++++++++++++++
arch/s390/include/asm/set_memory.h | 6 +++++
arch/s390/mm/dump_pagetables.c | 14 ++++++++++
arch/s390/mm/fault.c | 9 +++++--
arch/s390/mm/init.c | 3 ++-
arch/s390/mm/pageattr.c | 15 ++++++++---
mm/kfence/kfence_test.c | 13 ++++++++-
8 files changed, 96 insertions(+), 7 deletions(-)
create mode 100644 arch/s390/include/asm/kfence.h

--
2.25.1



2021-07-28 19:04:40

by Heiko Carstens

Subject: [PATCH 1/4] s390/mm: implement set_memory_4k()

Implement set_memory_4k(), which will split any present large or huge
mapping in the given range into 4k mappings.
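
For illustration, the first user of this is the kfence pool split added
later in this series; a caller passes a kernel virtual address and a page
count. Minimal sketch, taken from the kfence_split_mapping() helper that
patch 3 introduces:

	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, pool_pages);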

Signed-off-by: Heiko Carstens <[email protected]>
---
arch/s390/include/asm/set_memory.h | 6 ++++++
arch/s390/mm/pageattr.c | 12 ++++++++++--
2 files changed, 16 insertions(+), 2 deletions(-)

diff --git a/arch/s390/include/asm/set_memory.h b/arch/s390/include/asm/set_memory.h
index a22a5a81811c..950d87bd997a 100644
--- a/arch/s390/include/asm/set_memory.h
+++ b/arch/s390/include/asm/set_memory.h
@@ -10,6 +10,7 @@ extern struct mutex cpa_mutex;
#define SET_MEMORY_RW 2UL
#define SET_MEMORY_NX 4UL
#define SET_MEMORY_X 8UL
+#define SET_MEMORY_4K 16UL

int __set_memory(unsigned long addr, int numpages, unsigned long flags);

@@ -33,4 +34,9 @@ static inline int set_memory_x(unsigned long addr, int numpages)
return __set_memory(addr, numpages, SET_MEMORY_X);
}

+static inline int set_memory_4k(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, SET_MEMORY_4K);
+}
+
#endif
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index ed8e5b3575d5..b09fd5c7f85f 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -155,6 +155,7 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
+ int need_split;
pmd_t *pmdp;
int rc = 0;

@@ -164,7 +165,10 @@ static int walk_pmd_level(pud_t *pudp, unsigned long addr, unsigned long end,
return -EINVAL;
next = pmd_addr_end(addr, end);
if (pmd_large(*pmdp)) {
- if (addr & ~PMD_MASK || addr + PMD_SIZE > next) {
+ need_split = (flags & SET_MEMORY_4K) != 0;
+ need_split |= (addr & ~PMD_MASK) != 0;
+ need_split |= addr + PMD_SIZE > next;
+ if (need_split) {
rc = split_pmd_page(pmdp, addr);
if (rc)
return rc;
@@ -232,6 +236,7 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
unsigned long flags)
{
unsigned long next;
+ int need_split;
pud_t *pudp;
int rc = 0;

@@ -241,7 +246,10 @@ static int walk_pud_level(p4d_t *p4d, unsigned long addr, unsigned long end,
return -EINVAL;
next = pud_addr_end(addr, end);
if (pud_large(*pudp)) {
- if (addr & ~PUD_MASK || addr + PUD_SIZE > next) {
+ need_split = (flags & SET_MEMORY_4K) != 0;
+ need_split |= (addr & ~PUD_MASK) != 0;
+ need_split |= addr + PUD_SIZE > next;
+ if (need_split) {
rc = split_pud_page(pudp, addr);
if (rc)
break;
--
2.25.1


2021-07-28 19:05:35

by Heiko Carstens

Subject: [PATCH 2/4] kfence: add function to mask address bits

From: Sven Schnelle <[email protected]>

s390 only reports the page address during a translation fault.
To make the kfence unit tests pass, add a function that might
be implemented by architectures to mask out address bits.
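
As a concrete example, an architecture that only knows the faulting page
address can mask off the in-page offset in its <asm/kfence.h>; this is
what the s390 patch later in this series does:

	/* Only the page address is reported, so compare page-aligned. */
	#define arch_kfence_test_address(addr)	((addr) & PAGE_MASK)

Architectures that report the exact fault address simply do not define the
macro and keep the identity default added below.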

Signed-off-by: Sven Schnelle <[email protected]>
Signed-off-by: Heiko Carstens <[email protected]>
---
mm/kfence/kfence_test.c | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)

diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
index 942cbc16ad26..eb6307c199ea 100644
--- a/mm/kfence/kfence_test.c
+++ b/mm/kfence/kfence_test.c
@@ -23,8 +23,15 @@
#include <linux/tracepoint.h>
#include <trace/events/printk.h>

+#include <asm/kfence.h>
+
#include "kfence.h"

+/* May be overridden by <asm/kfence.h>. */
+#ifndef arch_kfence_test_address
+#define arch_kfence_test_address(addr) (addr)
+#endif
+
/* Report as observed from console. */
static struct {
spinlock_t lock;
@@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
/* Check observed report matches information in @r. */
static bool report_matches(const struct expect_report *r)
{
+ unsigned long addr = (unsigned long)r->addr;
bool ret = false;
unsigned long flags;
typeof(observed.lines) expect;
@@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
switch (r->type) {
case KFENCE_ERROR_OOB:
cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
+ addr = arch_kfence_test_address(addr);
break;
case KFENCE_ERROR_UAF:
cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
+ addr = arch_kfence_test_address(addr);
break;
case KFENCE_ERROR_CORRUPTION:
cur += scnprintf(cur, end - cur, "Corrupted memory at");
break;
case KFENCE_ERROR_INVALID:
cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
+ addr = arch_kfence_test_address(addr);
break;
case KFENCE_ERROR_INVALID_FREE:
cur += scnprintf(cur, end - cur, "Invalid free of");
break;
}

- cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
+ cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);

spin_lock_irqsave(&observed.lock, flags);
if (!report_available())
--
2.25.1


2021-07-28 19:05:55

by Heiko Carstens

Subject: [PATCH 4/4] s390: add kfence region to pagetable dumper

From: Sven Schnelle <[email protected]>

Signed-off-by: Sven Schnelle <[email protected]>
Signed-off-by: Heiko Carstens <[email protected]>
---
arch/s390/mm/dump_pagetables.c | 14 ++++++++++++++
1 file changed, 14 insertions(+)

diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c
index e40a30647d99..07dcec925bf4 100644
--- a/arch/s390/mm/dump_pagetables.c
+++ b/arch/s390/mm/dump_pagetables.c
@@ -4,6 +4,7 @@
#include <linux/seq_file.h>
#include <linux/debugfs.h>
#include <linux/mm.h>
+#include <linux/kfence.h>
#include <linux/kasan.h>
#include <asm/ptdump.h>
#include <asm/kasan.h>
@@ -21,6 +22,8 @@ enum address_markers_idx {
IDENTITY_BEFORE_END_NR,
KERNEL_START_NR,
KERNEL_END_NR,
+ KFENCE_START_NR,
+ KFENCE_END_NR,
IDENTITY_AFTER_NR,
IDENTITY_AFTER_END_NR,
#ifdef CONFIG_KASAN
@@ -40,6 +43,10 @@ static struct addr_marker address_markers[] = {
[IDENTITY_BEFORE_END_NR] = {(unsigned long)_stext, "Identity Mapping End"},
[KERNEL_START_NR] = {(unsigned long)_stext, "Kernel Image Start"},
[KERNEL_END_NR] = {(unsigned long)_end, "Kernel Image End"},
+#ifdef CONFIG_KFENCE
+ [KFENCE_START_NR] = {0, "KFence Pool Start"},
+ [KFENCE_END_NR] = {0, "KFence Pool End"},
+#endif
[IDENTITY_AFTER_NR] = {(unsigned long)_end, "Identity Mapping Start"},
[IDENTITY_AFTER_END_NR] = {0, "Identity Mapping End"},
#ifdef CONFIG_KASAN
@@ -248,6 +255,9 @@ static void sort_address_markers(void)

static int pt_dump_init(void)
{
+#ifdef CONFIG_KFENCE
+ unsigned long kfence_start = (unsigned long)__kfence_pool;
+#endif
/*
* Figure out the maximum virtual address being accessible with the
* kernel ASCE. We need this to keep the page table walker functions
@@ -262,6 +272,10 @@ static int pt_dump_init(void)
address_markers[VMEMMAP_END_NR].start_address = (unsigned long)vmemmap + vmemmap_size;
address_markers[VMALLOC_NR].start_address = VMALLOC_START;
address_markers[VMALLOC_END_NR].start_address = VMALLOC_END;
+#ifdef CONFIG_KFENCE
+ address_markers[KFENCE_START_NR].start_address = kfence_start;
+ address_markers[KFENCE_END_NR].start_address = kfence_start + KFENCE_POOL_SIZE;
+#endif
sort_address_markers();
#ifdef CONFIG_PTDUMP_DEBUGFS
debugfs_create_file("kernel_page_tables", 0400, NULL, NULL, &ptdump_fops);
--
2.25.1


2021-07-28 19:07:29

by Heiko Carstens

Subject: [PATCH 3/4] s390: add support for KFENCE

From: Sven Schnelle <[email protected]>

Signed-off-by: Sven Schnelle <[email protected]>
[[email protected]: simplify/rework code]
Signed-off-by: Heiko Carstens <[email protected]>
---
arch/s390/Kconfig | 1 +
arch/s390/include/asm/kfence.h | 42 ++++++++++++++++++++++++++++++++++
arch/s390/mm/fault.c | 9 ++++++--
arch/s390/mm/init.c | 3 ++-
arch/s390/mm/pageattr.c | 3 ++-
5 files changed, 54 insertions(+), 4 deletions(-)
create mode 100644 arch/s390/include/asm/kfence.h

diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index a0e2130f0100..f20467af2ab2 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -138,6 +138,7 @@ config S390
select HAVE_ARCH_JUMP_LABEL_RELATIVE
select HAVE_ARCH_KASAN
select HAVE_ARCH_KASAN_VMALLOC
+ select HAVE_ARCH_KFENCE
select HAVE_ARCH_RANDOMIZE_KSTACK_OFFSET
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_SOFT_DIRTY
diff --git a/arch/s390/include/asm/kfence.h b/arch/s390/include/asm/kfence.h
new file mode 100644
index 000000000000..d55ba878378b
--- /dev/null
+++ b/arch/s390/include/asm/kfence.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_S390_KFENCE_H
+#define _ASM_S390_KFENCE_H
+
+#include <linux/mm.h>
+#include <linux/kfence.h>
+#include <asm/set_memory.h>
+#include <asm/page.h>
+
+void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static __always_inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)
+
+/*
+ * Do not split kfence pool to 4k mapping with arch_kfence_init_pool(),
+ * but earlier where page table allocations still happen with memblock.
+ * Reason is that arch_kfence_init_pool() gets called when the system
+ * is still in a limbo state - disabling and enabling bottom halves is
+ * not yet allowed, but that is what our page_table_alloc() would do.
+ */
+static __always_inline void kfence_split_mapping(void)
+{
+#ifdef CONFIG_KFENCE
+ unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;
+
+ set_memory_4k((unsigned long)__kfence_pool, pool_pages);
+#endif
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ __kernel_map_pages(virt_to_page(addr), 1, !protect);
+ return true;
+}
+
+#endif /* _ASM_S390_KFENCE_H */
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index e33c43b38afe..52d82410486e 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -31,6 +31,7 @@
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/hugetlb.h>
+#include <linux/kfence.h>
#include <asm/asm-offsets.h>
#include <asm/diag.h>
#include <asm/gmap.h>
@@ -356,6 +357,7 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
unsigned long address;
unsigned int flags;
vm_fault_t fault;
+ bool is_write;

tsk = current;
/*
@@ -369,6 +371,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)

mm = tsk->mm;
trans_exc_code = regs->int_parm_long;
+ address = trans_exc_code & __FAIL_ADDR_MASK;
+ is_write = (trans_exc_code & store_indication) == 0x400;

/*
* Verify that the fault happened in user space, that
@@ -379,6 +383,8 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
type = get_fault_type(regs);
switch (type) {
case KERNEL_FAULT:
+ if (kfence_handle_page_fault(address, is_write, regs))
+ return 0;
goto out;
case USER_FAULT:
case GMAP_FAULT:
@@ -387,12 +393,11 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access)
break;
}

- address = trans_exc_code & __FAIL_ADDR_MASK;
perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
flags = FAULT_FLAG_DEFAULT;
if (user_mode(regs))
flags |= FAULT_FLAG_USER;
- if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400)
+ if (access == VM_WRITE || is_write)
flags |= FAULT_FLAG_WRITE;
mmap_read_lock(mm);

diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index 8ac710de1ab1..f3db3caa8447 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -34,6 +34,7 @@
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
+#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
@@ -200,7 +201,7 @@ void __init mem_init(void)
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

pv_init();
-
+ kfence_split_mapping();
/* Setup guest page hinting */
cmma_init();

diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index b09fd5c7f85f..550048843fd6 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -4,6 +4,7 @@
* Author(s): Jan Glauber <[email protected]>
*/
#include <linux/hugetlb.h>
+#include <linux/kfence.h>
#include <linux/mm.h>
#include <asm/cacheflush.h>
#include <asm/facility.h>
@@ -324,7 +325,7 @@ int __set_memory(unsigned long addr, int numpages, unsigned long flags)
return change_page_attr(addr, addr + numpages * PAGE_SIZE, flags);
}

-#ifdef CONFIG_DEBUG_PAGEALLOC
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)

static void ipte_range(pte_t *pte, unsigned long address, int nr)
{
--
2.25.1


2021-07-28 19:30:21

by Christian Borntraeger

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits



On 28.07.21 21:02, Heiko Carstens wrote:
> From: Sven Schnelle <[email protected]>
>
> s390 only reports the page address during a translation fault.
> To make the kfence unit tests pass, add a function that might
> be implemented by architectures to mask out address bits.

FWIW, the s390 hardware does indeed only provide the page address
for page faults. We had to do the same trick for other software,
e.g. see valgrind
https://sourceware.org/git/?p=valgrind.git;a=blob;f=coregrind/m_signals.c;h=b45afe59923245352ac17fdd1eeeb5e220f912be;hb=HEAD#l2702


>
> Signed-off-by: Sven Schnelle <[email protected]>
> Signed-off-by: Heiko Carstens <[email protected]>
> ---
> mm/kfence/kfence_test.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
> index 942cbc16ad26..eb6307c199ea 100644
> --- a/mm/kfence/kfence_test.c
> +++ b/mm/kfence/kfence_test.c
> @@ -23,8 +23,15 @@
> #include <linux/tracepoint.h>
> #include <trace/events/printk.h>
>
> +#include <asm/kfence.h>
> +
> #include "kfence.h"
>
> +/* May be overridden by <asm/kfence.h>. */
> +#ifndef arch_kfence_test_address
> +#define arch_kfence_test_address(addr) (addr)
> +#endif
> +
> /* Report as observed from console. */
> static struct {
> spinlock_t lock;
> @@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
> /* Check observed report matches information in @r. */
> static bool report_matches(const struct expect_report *r)
> {
> + unsigned long addr = (unsigned long)r->addr;
> bool ret = false;
> unsigned long flags;
> typeof(observed.lines) expect;
> @@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
> switch (r->type) {
> case KFENCE_ERROR_OOB:
> cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);
> break;
> case KFENCE_ERROR_UAF:
> cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);
> break;
> case KFENCE_ERROR_CORRUPTION:
> cur += scnprintf(cur, end - cur, "Corrupted memory at");
> break;
> case KFENCE_ERROR_INVALID:
> cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);
> break;
> case KFENCE_ERROR_INVALID_FREE:
> cur += scnprintf(cur, end - cur, "Invalid free of");
> break;
> }
>
> - cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
> + cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
>
> spin_lock_irqsave(&observed.lock, flags);
> if (!report_available())
>

2021-07-29 07:50:47

by Marco Elver

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

On Wed, Jul 28, 2021 at 09:02PM +0200, Heiko Carstens wrote:
> From: Sven Schnelle <[email protected]>
>
> s390 only reports the page address during a translation fault.
> To make the kfence unit tests pass, add a function that might
> be implemented by architectures to mask out address bits.
>
> Signed-off-by: Sven Schnelle <[email protected]>
> Signed-off-by: Heiko Carstens <[email protected]>

I noticed this breaks on x86 if CONFIG_KFENCE_KUNIT_TEST=m, because x86
conditionally declares some asm functions if !MODULE.

I think the below is the simplest to fix, and if you agree, please carry
it as a patch in this series before this patch.

With the below, you can add to this patch:

Reviewed-by: Marco Elver <[email protected]>

Thanks,
-- Marco

------ >8 ------

From: Marco Elver <[email protected]>
Date: Wed, 28 Jul 2021 21:57:41 +0200
Subject: [PATCH] kfence, x86: only define helpers if !MODULE

x86's <asm/tlbflush.h> only declares non-module accessible functions
(such as flush_tlb_one_kernel) if !MODULE.

In preparation of including <asm/kfence.h> from the KFENCE test module,
only define the helpers if !MODULE to avoid breaking the build with
CONFIG_KFENCE_KUNIT_TEST=m.

Signed-off-by: Marco Elver <[email protected]>
---
arch/x86/include/asm/kfence.h | 4 ++++
1 file changed, 4 insertions(+)

diff --git a/arch/x86/include/asm/kfence.h b/arch/x86/include/asm/kfence.h
index 05b48b33baf0..ff5c7134a37a 100644
--- a/arch/x86/include/asm/kfence.h
+++ b/arch/x86/include/asm/kfence.h
@@ -8,6 +8,8 @@
#ifndef _ASM_X86_KFENCE_H
#define _ASM_X86_KFENCE_H

+#ifndef MODULE
+
#include <linux/bug.h>
#include <linux/kfence.h>

@@ -66,4 +68,6 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect)
return true;
}

+#endif /* !MODULE */
+
#endif /* _ASM_X86_KFENCE_H */
--
2.32.0.554.ge1b32706d8-goog


2021-07-29 12:28:57

by Heiko Carstens

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

On Thu, Jul 29, 2021 at 09:48:58AM +0200, Marco Elver wrote:
> On Wed, Jul 28, 2021 at 09:02PM +0200, Heiko Carstens wrote:
> > From: Sven Schnelle <[email protected]>
> >
> > s390 only reports the page address during a translation fault.
> > To make the kfence unit tests pass, add a function that might
> > be implemented by architectures to mask out address bits.
> >
> > Signed-off-by: Sven Schnelle <[email protected]>
> > Signed-off-by: Heiko Carstens <[email protected]>
>
> I noticed this breaks on x86 if CONFIG_KFENCE_KUNIT_TEST=m, because x86
> conditionally declares some asm functions if !MODULE.
>
> I think the below is the simplest to fix, and if you agree, please carry
> it as a patch in this series before this patch.

Will do.

> With the below, you can add to this patch:
>
> Reviewed-by: Marco Elver <[email protected]>

Done - Thank you! I silently assume this means also you have no
objections if we carry this via the s390 tree for upstreaming.

2021-07-29 12:29:24

by Marco Elver

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

On Thu, 29 Jul 2021 at 14:25, Heiko Carstens <[email protected]> wrote:
> On Thu, Jul 29, 2021 at 09:48:58AM +0200, Marco Elver wrote:
> > On Wed, Jul 28, 2021 at 09:02PM +0200, Heiko Carstens wrote:
> > > From: Sven Schnelle <[email protected]>
> > >
> > > s390 only reports the page address during a translation fault.
> > > To make the kfence unit tests pass, add a function that might
> > > be implemented by architectures to mask out address bits.
> > >
> > > Signed-off-by: Sven Schnelle <[email protected]>
> > > Signed-off-by: Heiko Carstens <[email protected]>
> >
> > I noticed this breaks on x86 if CONFIG_KFENCE_KUNIT_TEST=m, because x86
> > conditionally declares some asm functions if !MODULE.
> >
> > I think the below is the simplest to fix, and if you agree, please carry
> > it as a patch in this series before this patch.
>
> Will do.
>
> > With the below, you can add to this patch:
> >
> > Reviewed-by: Marco Elver <[email protected]>
>
> Done - Thank you! I silently assume this means also you have no
> objections if we carry this via the s390 tree for upstreaming.

I think that's reasonable. I'm not aware of any conflicts, nor am I
expecting any for the upcoming cycle.

Thanks,
-- Marco

2021-07-29 12:45:24

by Alexander Potapenko

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

On Wed, Jul 28, 2021 at 9:03 PM Heiko Carstens <[email protected]> wrote:
>
> From: Sven Schnelle <[email protected]>
>
> s390 only reports the page address during a translation fault.
> To make the kfence unit tests pass, add a function that might
> be implemented by architectures to mask out address bits.
>
> Signed-off-by: Sven Schnelle <[email protected]>
> Signed-off-by: Heiko Carstens <[email protected]>
> ---
> mm/kfence/kfence_test.c | 13 ++++++++++++-
> 1 file changed, 12 insertions(+), 1 deletion(-)
>
> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
> index 942cbc16ad26..eb6307c199ea 100644
> --- a/mm/kfence/kfence_test.c
> +++ b/mm/kfence/kfence_test.c
> @@ -23,8 +23,15 @@
> #include <linux/tracepoint.h>
> #include <trace/events/printk.h>
>
> +#include <asm/kfence.h>
> +
> #include "kfence.h"
>
> +/* May be overridden by <asm/kfence.h>. */
> +#ifndef arch_kfence_test_address
> +#define arch_kfence_test_address(addr) (addr)
> +#endif
> +
> /* Report as observed from console. */
> static struct {
> spinlock_t lock;
> @@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
> /* Check observed report matches information in @r. */
> static bool report_matches(const struct expect_report *r)
> {
> + unsigned long addr = (unsigned long)r->addr;
> bool ret = false;
> unsigned long flags;
> typeof(observed.lines) expect;
> @@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
> switch (r->type) {
> case KFENCE_ERROR_OOB:
> cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);

Can we normalize addr once before (or after) this switch?

> break;
> case KFENCE_ERROR_UAF:
> cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);
> break;
> case KFENCE_ERROR_CORRUPTION:
> cur += scnprintf(cur, end - cur, "Corrupted memory at");
> break;
> case KFENCE_ERROR_INVALID:
> cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
> + addr = arch_kfence_test_address(addr);
> break;
> case KFENCE_ERROR_INVALID_FREE:
> cur += scnprintf(cur, end - cur, "Invalid free of");
> break;
> }
>
> - cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
> + cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
>
> spin_lock_irqsave(&observed.lock, flags);
> if (!report_available())
> --
> 2.25.1
>


--
Alexander Potapenko
Software Engineer


2021-07-29 13:51:23

by Sven Schnelle

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

Alexander Potapenko <[email protected]> writes:

> On Wed, Jul 28, 2021 at 9:03 PM Heiko Carstens <[email protected]> wrote:
>>
>> From: Sven Schnelle <[email protected]>
>>
>> s390 only reports the page address during a translation fault.
>> To make the kfence unit tests pass, add a function that might
>> be implemented by architectures to mask out address bits.
>>
>> Signed-off-by: Sven Schnelle <[email protected]>
>> Signed-off-by: Heiko Carstens <[email protected]>
>> ---
>> mm/kfence/kfence_test.c | 13 ++++++++++++-
>> 1 file changed, 12 insertions(+), 1 deletion(-)
>>
>> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
>> index 942cbc16ad26..eb6307c199ea 100644
>> --- a/mm/kfence/kfence_test.c
>> +++ b/mm/kfence/kfence_test.c
>> @@ -23,8 +23,15 @@
>> #include <linux/tracepoint.h>
>> #include <trace/events/printk.h>
>>
>> +#include <asm/kfence.h>
>> +
>> #include "kfence.h"
>>
>> +/* May be overridden by <asm/kfence.h>. */
>> +#ifndef arch_kfence_test_address
>> +#define arch_kfence_test_address(addr) (addr)
>> +#endif
>> +
>> /* Report as observed from console. */
>> static struct {
>> spinlock_t lock;
>> @@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
>> /* Check observed report matches information in @r. */
>> static bool report_matches(const struct expect_report *r)
>> {
>> + unsigned long addr = (unsigned long)r->addr;
>> bool ret = false;
>> unsigned long flags;
>> typeof(observed.lines) expect;
>> @@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
>> switch (r->type) {
>> case KFENCE_ERROR_OOB:
>> cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
>> + addr = arch_kfence_test_address(addr);
>
> Can we normalize addr once before (or after) this switch?
>

I don't think so. When reporting corrupted memory or an invalid free, the
address is not generated by the hardware but by kfence itself, so we
would strip valid bits.

>> break;
>> case KFENCE_ERROR_UAF:
>> cur += scnprintf(cur, end - cur, "Use-after-free %s at", get_access_type(r));
>> + addr = arch_kfence_test_address(addr);
>> break;
>> case KFENCE_ERROR_CORRUPTION:
>> cur += scnprintf(cur, end - cur, "Corrupted memory at");
>> break;
>> case KFENCE_ERROR_INVALID:
>> cur += scnprintf(cur, end - cur, "Invalid %s at", get_access_type(r));
>> + addr = arch_kfence_test_address(addr);
>> break;
>> case KFENCE_ERROR_INVALID_FREE:
>> cur += scnprintf(cur, end - cur, "Invalid free of");
>> break;
>> }
>>
>> - cur += scnprintf(cur, end - cur, " 0x%p", (void *)r->addr);
>> + cur += scnprintf(cur, end - cur, " 0x%p", (void *)addr);
>>
>> spin_lock_irqsave(&observed.lock, flags);
>> if (!report_available())
>> --
>> 2.25.1
>>

2021-07-29 14:12:16

by Alexander Potapenko

Subject: Re: [PATCH 2/4] kfence: add function to mask address bits

On Thu, Jul 29, 2021 at 3:47 PM Sven Schnelle <[email protected]> wrote:
>
> Alexander Potapenko <[email protected]> writes:
>
> > On Wed, Jul 28, 2021 at 9:03 PM Heiko Carstens <[email protected]> wrote:
> >>
> >> From: Sven Schnelle <[email protected]>
> >>
> >> s390 only reports the page address during a translation fault.
> >> To make the kfence unit tests pass, add a function that might
> >> be implemented by architectures to mask out address bits.
> >>
> >> Signed-off-by: Sven Schnelle <[email protected]>
> >> Signed-off-by: Heiko Carstens <[email protected]>
> >> ---
> >> mm/kfence/kfence_test.c | 13 ++++++++++++-
> >> 1 file changed, 12 insertions(+), 1 deletion(-)
> >>
> >> diff --git a/mm/kfence/kfence_test.c b/mm/kfence/kfence_test.c
> >> index 942cbc16ad26..eb6307c199ea 100644
> >> --- a/mm/kfence/kfence_test.c
> >> +++ b/mm/kfence/kfence_test.c
> >> @@ -23,8 +23,15 @@
> >> #include <linux/tracepoint.h>
> >> #include <trace/events/printk.h>
> >>
> >> +#include <asm/kfence.h>
> >> +
> >> #include "kfence.h"
> >>
> >> +/* May be overridden by <asm/kfence.h>. */
> >> +#ifndef arch_kfence_test_address
> >> +#define arch_kfence_test_address(addr) (addr)
> >> +#endif
> >> +
> >> /* Report as observed from console. */
> >> static struct {
> >> spinlock_t lock;
> >> @@ -82,6 +89,7 @@ static const char *get_access_type(const struct expect_report *r)
> >> /* Check observed report matches information in @r. */
> >> static bool report_matches(const struct expect_report *r)
> >> {
> >> + unsigned long addr = (unsigned long)r->addr;
> >> bool ret = false;
> >> unsigned long flags;
> >> typeof(observed.lines) expect;
> >> @@ -131,22 +139,25 @@ static bool report_matches(const struct expect_report *r)
> >> switch (r->type) {
> >> case KFENCE_ERROR_OOB:
> >> cur += scnprintf(cur, end - cur, "Out-of-bounds %s at", get_access_type(r));
> >> + addr = arch_kfence_test_address(addr);
> >
> > Can we normalize addr once before (or after) this switch?
> >
>

> I don't think so. When reporting corrupted memory or an invalid free, the
> address is not generated by the hardware but by kfence itself, so we
> would strip valid bits.

Ah, sorry, I missed that.