2015-08-13 05:37:43

by Andrey Ryabinin

Subject: [PATCH 0/2] x86/KASAN updates for 4.3

These 2 patches are taken from the v5 'KASAN for arm64' series.
The only change is an updated changelog in the second patch.

I hope this is not too late to queue these for 4.3,
as this allows us to merge the arm64/KASAN patches in v4.4
through the arm64 tree.



Andrey Ryabinin (2):
x86/kasan: define KASAN_SHADOW_OFFSET per architecture
x86/kasan, mm: introduce generic kasan_populate_zero_shadow()

arch/x86/include/asm/kasan.h | 3 +
arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------
include/linux/kasan.h | 10 ++-
mm/kasan/Makefile | 2 +-
mm/kasan/kasan_init.c | 152 +++++++++++++++++++++++++++++++++++++++++++
5 files changed, 170 insertions(+), 120 deletions(-)
create mode 100644 mm/kasan/kasan_init.c

--
2.4.6


2015-08-13 05:37:54

by Andrey Ryabinin

Subject: [PATCH v6 1/2] x86/kasan: define KASAN_SHADOW_OFFSET per architecture

The current definition of KASAN_SHADOW_OFFSET in include/linux/kasan.h
will not work for the upcoming arm64 port, so move it to the arch header.

Signed-off-by: Andrey Ryabinin <[email protected]>
---
arch/x86/include/asm/kasan.h | 3 +++
include/linux/kasan.h | 1 -
2 files changed, 3 insertions(+), 1 deletion(-)
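
A note for context: the offset feeds the generic shadow translation in
include/linux/kasan.h. A minimal sketch of that mapping, mirroring
kasan_mem_to_shadow() (the function name here is only illustrative):

static inline void *sketch_mem_to_shadow(const void *addr)
{
	/* One shadow byte covers 8 bytes of memory (scale shift of 3),
	 * biased by the per-arch KASAN_SHADOW_OFFSET that this patch
	 * moves into asm/kasan.h. */
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}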

diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 74a2a8d..1410b56 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -1,6 +1,9 @@
#ifndef _ASM_X86_KASAN_H
#define _ASM_X86_KASAN_H

+#include <linux/const.h>
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
/*
* Compiler uses shadow offset assuming that addresses start
* from 0. Kernel addresses don't start from 0, so shadow
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d77..6fb1c7d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,7 +10,6 @@ struct vm_struct;
#ifdef CONFIG_KASAN

#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

#include <asm/kasan.h>
#include <linux/sched.h>
--
2.4.6

2015-08-13 05:38:19

by Andrey Ryabinin

Subject: [PATCH v6 2/2] x86/kasan, mm: introduce generic kasan_populate_zero_shadow()

Introduce a generic kasan_populate_zero_shadow(shadow_start, shadow_end).
This function maps kasan_zero_page to the [shadow_start, shadow_end]
address range.

This replaces the x86_64-specific populate_zero_shadow() and will
be used for arm64 in follow-on patches.

The main changes from the original version are:
* Use p?d_populate*() instead of set_p?d()
* Use the memblock allocator directly instead of vmemmap_alloc_block()
* Use __pa() instead of __pa_nodebug(). __pa() causes trouble only
  if used before kasan_early_init(); kasan_populate_zero_shadow() is
  called later, so we are OK with __pa() here.

Signed-off-by: Andrey Ryabinin <[email protected]>
---
arch/x86/mm/kasan_init_64.c | 123 ++---------------------------------
include/linux/kasan.h | 9 +++
mm/kasan/Makefile | 2 +-
mm/kasan/kasan_init.c | 152 ++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 167 insertions(+), 119 deletions(-)
create mode 100644 mm/kasan/kasan_init.c
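
A note for context: the intended call pattern is unchanged from the x86
conversion below. A minimal usage sketch (the range boundaries here are
illustrative; any shadow span an architecture wants backed by the shared
read-only zero page works):

	/* Map every page of this shadow range to kasan_zero_page. */
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));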

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e1840f3..9ce5da2 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,20 +12,6 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
static int __init map_range(struct range *range)
{
unsigned long start;
@@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
}
}

-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
- unsigned long end)
-{
- pte_t *pte = pte_offset_kernel(pmd, addr);
-
- while (addr + PAGE_SIZE <= end) {
- WARN_ON(!pte_none(*pte));
- set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
- | __PAGE_KERNEL_RO));
- addr += PAGE_SIZE;
- pte = pte_offset_kernel(pmd, addr);
- }
- return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pmd_t *pmd = pmd_offset(pud, addr);
-
- while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
- WARN_ON(!pmd_none(*pmd));
- set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | _KERNPG_TABLE));
- addr += PMD_SIZE;
- pmd = pmd_offset(pud, addr);
- }
- if (addr < end) {
- if (pmd_none(*pmd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pte_populate(pmd, addr, end);
- }
- return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pud_t *pud = pud_offset(pgd, addr);
-
- while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
- WARN_ON(!pud_none(*pud));
- set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | _KERNPG_TABLE));
- addr += PUD_SIZE;
- pud = pud_offset(pgd, addr);
- }
-
- if (addr < end) {
- if (pud_none(*pud)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pmd_populate(pud, addr, end);
- }
- return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
- int ret = 0;
- pgd_t *pgd = pgd_offset_k(addr);
-
- while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
- WARN_ON(!pgd_none(*pgd));
- set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | _KERNPG_TABLE));
- addr += PGDIR_SIZE;
- pgd = pgd_offset_k(addr);
- }
-
- if (addr < end) {
- if (pgd_none(*pgd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pud_populate(pgd, addr, end);
- }
- return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
- if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
- panic("kasan: unable to map zero shadow!");
-}
-
-
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
unsigned long val,
@@ -213,7 +99,7 @@ void __init kasan_init(void)

clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

- populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
kasan_mem_to_shadow((void *)PAGE_OFFSET));

for (i = 0; i < E820_X_MAX; i++) {
@@ -223,14 +109,15 @@ void __init kasan_init(void)
if (map_range(&pfn_mapped[i]))
panic("kasan: unable to allocate shadow!");
}
- populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
- kasan_mem_to_shadow((void *)__START_KERNEL_map));
+ kasan_populate_zero_shadow(
+ kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+ kasan_mem_to_shadow((void *)__START_KERNEL_map));

vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
(unsigned long)kasan_mem_to_shadow(_end),
NUMA_NO_NODE);

- populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+ kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);

memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6fb1c7d..4b9f85c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -12,8 +12,17 @@ struct vm_struct;
#define KASAN_SHADOW_SCALE_SHIFT 3

#include <asm/kasan.h>
+#include <asm/pgtable.h>
#include <linux/sched.h>

+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index bd837b8..6471014 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,4 +5,4 @@ CFLAGS_REMOVE_kasan.o = -pg
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)

-obj-y := kasan.o report.o
+obj-y := kasan.o report.o kasan_init.o
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
new file mode 100644
index 0000000..3f9a41c
--- /dev/null
+++ b/mm/kasan/kasan_init.c
@@ -0,0 +1,152 @@
+/*
+ * This file contains some kasan initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * This page serves two purposes:
+ * - It is used as early shadow memory. The entire shadow region is
+ * populated with this page before we can set up the normal shadow memory.
+ * - Later it is reused as the zero shadow for large ranges of memory that
+ * may be accessed but are not handled by kasan (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static __init void *early_alloc(size_t size, int node)
+{
+ return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+ pte_t zero_pte;
+
+ zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+ zero_pte = pte_wrprotect(zero_pte);
+
+ while (addr + PAGE_SIZE <= end) {
+ set_pte_at(&init_mm, addr, pte, zero_pte);
+ addr += PAGE_SIZE;
+ pte = pte_offset_kernel(pmd, addr);
+ }
+}
+
+static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pmd_none(*pmd)) {
+ pmd_populate_kernel(&init_mm, pmd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pte_populate(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pmd_t *pmd;
+
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pud_none(*pud)) {
+ pud_populate(&init_mm, pud,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pmd_populate(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ * kasan_zero_page
+ * @shadow_start - start of the memory range to populate
+ * @shadow_end - end of the memory range to populate
+ */
+void __init kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end)
+{
+ unsigned long addr = (unsigned long)shadow_start;
+ unsigned long end = (unsigned long)shadow_end;
+ pgd_t *pgd = pgd_offset_k(addr);
+ unsigned long next;
+
+ do {
+ next = pgd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ /*
+ * kasan_zero_pud should already be populated
+ * with pmds at this point.
+ * The [pud,pmd]_populate*() calls below are
+ * needed only for 3- and 2-level page tables,
+ * where there are no puds/pmds, so
+ * pgd_populate() and pud_populate() are no-ops.
+ */
+ pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pgd_none(*pgd)) {
+ pgd_populate(&init_mm, pgd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pud_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}
--
2.4.6

2015-08-13 06:50:47

by Ingo Molnar

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3


* Andrey Ryabinin <[email protected]> wrote:

> These 2 patches are taken from the v5 'KASAN for arm64' series.
> The only change is an updated changelog in the second patch.
>
> I hope this is not too late to queue these for 4.3,
> as this allows us to merge the arm64/KASAN patches in v4.4
> through the arm64 tree.
>
> Andrey Ryabinin (2):
> x86/kasan: define KASAN_SHADOW_OFFSET per architecture
> x86/kasan, mm: introduce generic kasan_populate_zero_shadow()
>
> arch/x86/include/asm/kasan.h | 3 +
> arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------
> include/linux/kasan.h | 10 ++-
> mm/kasan/Makefile | 2 +-
> mm/kasan/kasan_init.c | 152 +++++++++++++++++++++++++++++++++++++++++++
> 5 files changed, 170 insertions(+), 120 deletions(-)
> create mode 100644 mm/kasan/kasan_init.c

It's absolutely too late in the -rc cycle for v4.3!

I can create a stable topic tree for it, tip:mm/kasan or so, which arm64 could
pull and base its own ARM-specific work on, if that's OK with everyone.

Thanks,

Ingo

2015-08-13 08:16:51

by Ingo Molnar

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3


* Ingo Molnar <[email protected]> wrote:

>
> * Andrey Ryabinin <[email protected]> wrote:
>
> > These 2 patches are taken from the v5 'KASAN for arm64' series.
> > The only change is an updated changelog in the second patch.
> >
> > I hope this is not too late to queue these for 4.3,
> > as this allows us to merge the arm64/KASAN patches in v4.4
> > through the arm64 tree.
> >
> > Andrey Ryabinin (2):
> > x86/kasan: define KASAN_SHADOW_OFFSET per architecture
> > x86/kasan, mm: introduce generic kasan_populate_zero_shadow()
> >
> > arch/x86/include/asm/kasan.h | 3 +
> > arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------
> > include/linux/kasan.h | 10 ++-
> > mm/kasan/Makefile | 2 +-
> > mm/kasan/kasan_init.c | 152 +++++++++++++++++++++++++++++++++++++++++++
> > 5 files changed, 170 insertions(+), 120 deletions(-)
> > create mode 100644 mm/kasan/kasan_init.c
>
> It's absolutely too late in the -rc cycle for v4.3!

Stupid me, I read 'v4.2' ...

So yes, it's still good for v4.3, the development window is still open.

The rest still stands:

> I can create a stable topic tree for it, tip:mm/kasan or so, which arm64 could
> pull and base its own ARM-specific work on, if that's OK with everyone.

Thanks,

Ingo

2015-08-13 09:01:32

by Will Deacon

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3

Hi Ingo,

On Thu, Aug 13, 2015 at 09:16:41AM +0100, Ingo Molnar wrote:
> * Ingo Molnar <[email protected]> wrote:
> > * Andrey Ryabinin <[email protected]> wrote:
> >
> > > These 2 patches are taken from the v5 'KASAN for arm64' series.
> > > The only change is an updated changelog in the second patch.
> > >
> > > I hope this is not too late to queue these for 4.3,
> > > as this allows us to merge the arm64/KASAN patches in v4.4
> > > through the arm64 tree.
> > >
> > > Andrey Ryabinin (2):
> > > x86/kasan: define KASAN_SHADOW_OFFSET per architecture
> > > x86/kasan, mm: introduce generic kasan_populate_zero_shadow()
> > >
> > > arch/x86/include/asm/kasan.h | 3 +
> > > arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------
> > > include/linux/kasan.h | 10 ++-
> > > mm/kasan/Makefile | 2 +-
> > > mm/kasan/kasan_init.c | 152 +++++++++++++++++++++++++++++++++++++++++++
> > > 5 files changed, 170 insertions(+), 120 deletions(-)
> > > create mode 100644 mm/kasan/kasan_init.c
> >
> > It's absolutely too late in the -rc cycle for v4.3!
>
> Stupid me, I read 'v4.2' ...
>
> So yes, it's still good for v4.3, the development window is still open.
>
> The rest still stands:
>
> > I can create a stable topic tree for it, tip:mm/kasan or so, which arm64 could
> > pull and base its own ARM-specific work on, if that's OK with everyone.

Yes please, works for me! If we're targeting 4.3, then can you please base it
on 4.2-rc4, as that's what our current arm64 queue is using?

Cheers,

Will

2015-08-13 11:02:29

by Andrey Ryabinin

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3

2015-08-13 12:01 GMT+03:00 Will Deacon <[email protected]>:
> Hi Ingo,
>
> On Thu, Aug 13, 2015 at 09:16:41AM +0100, Ingo Molnar wrote:
>> * Ingo Molnar <[email protected]> wrote:
>> > * Andrey Ryabinin <[email protected]> wrote:
>> >
>> > > These 2 patches are taken from the v5 'KASAN for arm64' series.
>> > > The only change is an updated changelog in the second patch.
>> > >
>> > > I hope this is not too late to queue these for 4.3,
>> > > as this allows us to merge the arm64/KASAN patches in v4.4
>> > > through the arm64 tree.
>> > >
>> > > Andrey Ryabinin (2):
>> > > x86/kasan: define KASAN_SHADOW_OFFSET per architecture
>> > > x86/kasan, mm: introduce generic kasan_populate_zero_shadow()
>> > >
>> > > arch/x86/include/asm/kasan.h | 3 +
>> > > arch/x86/mm/kasan_init_64.c | 123 ++--------------------------------
>> > > include/linux/kasan.h | 10 ++-
>> > > mm/kasan/Makefile | 2 +-
>> > > mm/kasan/kasan_init.c | 152 +++++++++++++++++++++++++++++++++++++++++++
>> > > 5 files changed, 170 insertions(+), 120 deletions(-)
>> > > create mode 100644 mm/kasan/kasan_init.c
>> >
>> > It's absolutely too late in the -rc cycle for v4.3!
>>
>> Stupid me, I read 'v4.2' ...
>>
>> So yes, it's still good for v4.3, the development window is still open.
>>
>> The rest still stands:
>>
>> > I can create a stable topic tree for it, tip:mm/kasan or so, which arm64 could
>> > pull and base its own ARM-specific work on, if that's OK with everyone.

OK with me.


> Yes please, works for me! If we're targeting 4.3, then can you please base it
> on 4.2-rc4, as that's what our current arm64 queue is using?
>

Does this mean that we are targeting the arm64 part for 4.3 too?


> Cheers,
>
> Will

2015-08-13 11:24:34

by Will Deacon

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3

On Thu, Aug 13, 2015 at 12:02:26PM +0100, Andrey Ryabinin wrote:
> 2015-08-13 12:01 GMT+03:00 Will Deacon <[email protected]>:
> > Yes please, works for me! If we're targeting 4.3, then can you please base it
> > on 4.2-rc4, as that's what our current arm64 queue is using?
> >
>
> Does this mean that we are targeting the arm64 part for 4.3 too?

It depends on how well it merges with our current queue and whether it
holds up to regression testing. The patches have been reviewed, so I'm
comfortable with the content, but we're not at a stage where we can debug
and fix any failures that might crop up from the merge.

Will

2015-08-13 17:23:16

by Will Deacon

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3

On Thu, Aug 13, 2015 at 12:24:28PM +0100, Will Deacon wrote:
> On Thu, Aug 13, 2015 at 12:02:26PM +0100, Andrey Ryabinin wrote:
> > 2015-08-13 12:01 GMT+03:00 Will Deacon <[email protected]>:
> > > Yes please, works for me! If we're targeting 4.3, then can you please base it
> > > on 4.2-rc4, as that's what our current arm64 queue is using?
> > >
> >
> > Does this mean that we are targeting the arm64 part for 4.3 too?
>
> It depends on how well it merges with our current queue and whether it
> holds up to regression testing. The patches have been reviewed, so I'm
> comfortable with the content, but we're not at a stage where we can debug
> and fix any failures that might crop up from the merge.

Scratch that :(

I tried this out under EFI and it dies horribly in the stub code because
we're missing at least one KASAN_SANITIZE_ Makefile entry.

So I think this needs longer to stew before hitting mainline. By all means
get the x86 dependencies in for 4.3, but the arm64 port can probably use
another cycle to iron out the bugs.

Cheers,

Will

2015-08-22 10:09:28

by Ingo Molnar

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3


* Will Deacon <[email protected]> wrote:

> On Thu, Aug 13, 2015 at 12:24:28PM +0100, Will Deacon wrote:
> > On Thu, Aug 13, 2015 at 12:02:26PM +0100, Andrey Ryabinin wrote:
> > > 2015-08-13 12:01 GMT+03:00 Will Deacon <[email protected]>:
> > > > Yes please, works for me! If we're targeting 4.3, then can you please base it
> > > > on 4.2-rc4, as that's what our current arm64 queue is using?
> > > >
> > >
> > > Does this mean that we are targeting the arm64 part for 4.3 too?
> >
> > It depends on how well it merges with our current queue and whether it
> > holds up to regression testing. The patches have been reviewed, so I'm
> > comfortable with the content, but we're not at a stage where we can debug
> > and fix any failures that might crop up from the merge.
>
> Scratch that :(
>
> I tried this out under EFI and it dies horribly in the stub code because
> we're missing at least one KASAN_SANITIZE_ Makefile entry.
>
> So I think this needs longer to stew before hitting mainline. By all means
> get the x86 dependencies in for 4.3, but the arm64 port can probably use
> another cycle to iron out the bugs.

Is there any known problem with the two patches in this series, or can I apply
them?

Thanks,

Ingo

2015-08-22 12:51:06

by Andrey Ryabinin

Subject: Re: [PATCH 0/2] x86/KASAN updates for 4.3

2015-08-22 13:09 GMT+03:00 Ingo Molnar <[email protected]>:
>
> * Will Deacon <[email protected]> wrote:
>
>> On Thu, Aug 13, 2015 at 12:24:28PM +0100, Will Deacon wrote:
>> > On Thu, Aug 13, 2015 at 12:02:26PM +0100, Andrey Ryabinin wrote:
>> > > 2015-08-13 12:01 GMT+03:00 Will Deacon <[email protected]>:
>> > > > Yes please, works for me! If we're targeting 4.3, then can you please base it
>> > > > on 4.2-rc4, as that's what our current arm64 queue is using?
>> > > >
>> > >
>> > > Does this mean that we are targeting the arm64 part for 4.3 too?
>> >
>> > It depends on how well it merges with our current queue and whether it
>> > holds up to regression testing. The patches have been reviewed, so I'm
>> > comfortable with the content, but we're not at a stage where we can debug
>> > and fix any failures that might crop up from the merge.
>>
>> Scratch that :(
>>
>> I tried this out under EFI and it dies horribly in the stub code because
>> we're missing at least one KASAN_SANITIZE_ Makefile entry.
>>
>> So I think this needs longer to stew before hitting mainline. By all means
>> get the x86 dependencies in for 4.3, but the arm64 port can probably use
>> another cycle to iron out the bugs.
>
> Is there any known problem with the two patches in this series, or can I apply
> them?
>

None, as far as I know.

BTW, the second patch was Acked-by: Catalin Marinas <[email protected]>
(https://lkml.org/lkml/2015/8/11/546). I just forgot to add this to the changelog.



> Thanks,
>
> Ingo

Subject: [tip:mm/kasan] x86/kasan: Define KASAN_SHADOW_OFFSET per architecture

Commit-ID: 920e277e17f12870188f4564887a95ae9ac03e31
Gitweb: http://git.kernel.org/tip/920e277e17f12870188f4564887a95ae9ac03e31
Author: Andrey Ryabinin <[email protected]>
AuthorDate: Thu, 13 Aug 2015 08:37:23 +0300
Committer: Ingo Molnar <[email protected]>
CommitDate: Sat, 22 Aug 2015 14:54:55 +0200

x86/kasan: Define KASAN_SHADOW_OFFSET per architecture

The current definition of KASAN_SHADOW_OFFSET in
include/linux/kasan.h will not work for the upcoming arm64 port,
so move it to the arch header.

Signed-off-by: Andrey Ryabinin <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Alexey Klimov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: David Keitel <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Linus Walleij <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yury <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/include/asm/kasan.h | 3 +++
include/linux/kasan.h | 1 -
2 files changed, 3 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kasan.h b/arch/x86/include/asm/kasan.h
index 74a2a8d..1410b56 100644
--- a/arch/x86/include/asm/kasan.h
+++ b/arch/x86/include/asm/kasan.h
@@ -1,6 +1,9 @@
#ifndef _ASM_X86_KASAN_H
#define _ASM_X86_KASAN_H

+#include <linux/const.h>
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
/*
* Compiler uses shadow offset assuming that addresses start
* from 0. Kernel addresses don't start from 0, so shadow
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 5486d77..6fb1c7d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -10,7 +10,6 @@ struct vm_struct;
#ifdef CONFIG_KASAN

#define KASAN_SHADOW_SCALE_SHIFT 3
-#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)

#include <asm/kasan.h>
#include <linux/sched.h>

Subject: [tip:mm/kasan] x86/kasan, mm: Introduce generic kasan_populate_zero_shadow()

Commit-ID: 69786cdb379bbc6eab14cf2393c1abd879316e85
Gitweb: http://git.kernel.org/tip/69786cdb379bbc6eab14cf2393c1abd879316e85
Author: Andrey Ryabinin <[email protected]>
AuthorDate: Thu, 13 Aug 2015 08:37:24 +0300
Committer: Ingo Molnar <[email protected]>
CommitDate: Sat, 22 Aug 2015 14:54:55 +0200

x86/kasan, mm: Introduce generic kasan_populate_zero_shadow()

Introduce a generic kasan_populate_zero_shadow(shadow_start,
shadow_end). This function maps kasan_zero_page to the
[shadow_start, shadow_end] address range.

This replaces the x86_64-specific populate_zero_shadow() and will
be used for arm64 in follow-on patches.

The main changes from the original version are:

* Use p?d_populate*() instead of set_p?d()
* Use the memblock allocator directly instead of vmemmap_alloc_block()
* Use __pa() instead of __pa_nodebug(). __pa() causes trouble only
  if used before kasan_early_init(); kasan_populate_zero_shadow() is
  called later, so we are OK with __pa() here.

Signed-off-by: Andrey Ryabinin <[email protected]>
Acked-by: Catalin Marinas <[email protected]>
Cc: Alexander Potapenko <[email protected]>
Cc: Alexey Klimov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Arnd Bergmann <[email protected]>
Cc: David Keitel <[email protected]>
Cc: Dmitry Vyukov <[email protected]>
Cc: Linus Torvalds <[email protected]>
Cc: Linus Walleij <[email protected]>
Cc: Peter Zijlstra <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Thomas Gleixner <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Yury <[email protected]>
Cc: [email protected]
Cc: [email protected]
Link: http://lkml.kernel.org/r/[email protected]
Signed-off-by: Ingo Molnar <[email protected]>
---
arch/x86/mm/kasan_init_64.c | 123 ++---------------------------------
include/linux/kasan.h | 9 +++
mm/kasan/Makefile | 2 +-
mm/kasan/kasan_init.c | 152 ++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 167 insertions(+), 119 deletions(-)

diff --git a/arch/x86/mm/kasan_init_64.c b/arch/x86/mm/kasan_init_64.c
index e1840f3..9ce5da2 100644
--- a/arch/x86/mm/kasan_init_64.c
+++ b/arch/x86/mm/kasan_init_64.c
@@ -12,20 +12,6 @@
extern pgd_t early_level4_pgt[PTRS_PER_PGD];
extern struct range pfn_mapped[E820_X_MAX];

-static pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
-static pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
-static pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
-
-/*
- * This page used as early shadow. We don't use empty_zero_page
- * at early stages, stack instrumentation could write some garbage
- * to this page.
- * Latter we reuse it as zero shadow for large ranges of memory
- * that allowed to access, but not instrumented by kasan
- * (vmalloc/vmemmap ...).
- */
-static unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
-
static int __init map_range(struct range *range)
{
unsigned long start;
@@ -62,106 +48,6 @@ static void __init kasan_map_early_shadow(pgd_t *pgd)
}
}

-static int __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
- unsigned long end)
-{
- pte_t *pte = pte_offset_kernel(pmd, addr);
-
- while (addr + PAGE_SIZE <= end) {
- WARN_ON(!pte_none(*pte));
- set_pte(pte, __pte(__pa_nodebug(kasan_zero_page)
- | __PAGE_KERNEL_RO));
- addr += PAGE_SIZE;
- pte = pte_offset_kernel(pmd, addr);
- }
- return 0;
-}
-
-static int __init zero_pmd_populate(pud_t *pud, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pmd_t *pmd = pmd_offset(pud, addr);
-
- while (IS_ALIGNED(addr, PMD_SIZE) && addr + PMD_SIZE <= end) {
- WARN_ON(!pmd_none(*pmd));
- set_pmd(pmd, __pmd(__pa_nodebug(kasan_zero_pte)
- | _KERNPG_TABLE));
- addr += PMD_SIZE;
- pmd = pmd_offset(pud, addr);
- }
- if (addr < end) {
- if (pmd_none(*pmd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pmd(pmd, __pmd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pte_populate(pmd, addr, end);
- }
- return ret;
-}
-
-
-static int __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
- unsigned long end)
-{
- int ret = 0;
- pud_t *pud = pud_offset(pgd, addr);
-
- while (IS_ALIGNED(addr, PUD_SIZE) && addr + PUD_SIZE <= end) {
- WARN_ON(!pud_none(*pud));
- set_pud(pud, __pud(__pa_nodebug(kasan_zero_pmd)
- | _KERNPG_TABLE));
- addr += PUD_SIZE;
- pud = pud_offset(pgd, addr);
- }
-
- if (addr < end) {
- if (pud_none(*pud)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pud(pud, __pud(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pmd_populate(pud, addr, end);
- }
- return ret;
-}
-
-static int __init zero_pgd_populate(unsigned long addr, unsigned long end)
-{
- int ret = 0;
- pgd_t *pgd = pgd_offset_k(addr);
-
- while (IS_ALIGNED(addr, PGDIR_SIZE) && addr + PGDIR_SIZE <= end) {
- WARN_ON(!pgd_none(*pgd));
- set_pgd(pgd, __pgd(__pa_nodebug(kasan_zero_pud)
- | _KERNPG_TABLE));
- addr += PGDIR_SIZE;
- pgd = pgd_offset_k(addr);
- }
-
- if (addr < end) {
- if (pgd_none(*pgd)) {
- void *p = vmemmap_alloc_block(PAGE_SIZE, NUMA_NO_NODE);
- if (!p)
- return -ENOMEM;
- set_pgd(pgd, __pgd(__pa_nodebug(p) | _KERNPG_TABLE));
- }
- ret = zero_pud_populate(pgd, addr, end);
- }
- return ret;
-}
-
-
-static void __init populate_zero_shadow(const void *start, const void *end)
-{
- if (zero_pgd_populate((unsigned long)start, (unsigned long)end))
- panic("kasan: unable to map zero shadow!");
-}
-
-
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
unsigned long val,
@@ -213,7 +99,7 @@ void __init kasan_init(void)

clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

- populate_zero_shadow((void *)KASAN_SHADOW_START,
+ kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
kasan_mem_to_shadow((void *)PAGE_OFFSET));

for (i = 0; i < E820_X_MAX; i++) {
@@ -223,14 +109,15 @@ void __init kasan_init(void)
if (map_range(&pfn_mapped[i]))
panic("kasan: unable to allocate shadow!");
}
- populate_zero_shadow(kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
- kasan_mem_to_shadow((void *)__START_KERNEL_map));
+ kasan_populate_zero_shadow(
+ kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
+ kasan_mem_to_shadow((void *)__START_KERNEL_map));

vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
(unsigned long)kasan_mem_to_shadow(_end),
NUMA_NO_NODE);

- populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
+ kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
(void *)KASAN_SHADOW_END);

memset(kasan_zero_page, 0, PAGE_SIZE);
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 6fb1c7d..4b9f85c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -12,8 +12,17 @@ struct vm_struct;
#define KASAN_SHADOW_SCALE_SHIFT 3

#include <asm/kasan.h>
+#include <asm/pgtable.h>
#include <linux/sched.h>

+extern unsigned char kasan_zero_page[PAGE_SIZE];
+extern pte_t kasan_zero_pte[PTRS_PER_PTE];
+extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
+extern pud_t kasan_zero_pud[PTRS_PER_PUD];
+
+void kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end);
+
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
diff --git a/mm/kasan/Makefile b/mm/kasan/Makefile
index bd837b8..6471014 100644
--- a/mm/kasan/Makefile
+++ b/mm/kasan/Makefile
@@ -5,4 +5,4 @@ CFLAGS_REMOVE_kasan.o = -pg
# see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
CFLAGS_kasan.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)

-obj-y := kasan.o report.o
+obj-y := kasan.o report.o kasan_init.o
diff --git a/mm/kasan/kasan_init.c b/mm/kasan/kasan_init.c
new file mode 100644
index 0000000..3f9a41c
--- /dev/null
+++ b/mm/kasan/kasan_init.c
@@ -0,0 +1,152 @@
+/*
+ * This file contains some kasan initialization code.
+ *
+ * Copyright (c) 2015 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/bootmem.h>
+#include <linux/init.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+
+/*
+ * This page serves two purposes:
+ * - It is used as early shadow memory. The entire shadow region is
+ * populated with this page before we can set up the normal shadow memory.
+ * - Later it is reused as the zero shadow for large ranges of memory that
+ * may be accessed but are not handled by kasan (vmalloc/vmemmap ...).
+ */
+unsigned char kasan_zero_page[PAGE_SIZE] __page_aligned_bss;
+
+#if CONFIG_PGTABLE_LEVELS > 3
+pud_t kasan_zero_pud[PTRS_PER_PUD] __page_aligned_bss;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+pmd_t kasan_zero_pmd[PTRS_PER_PMD] __page_aligned_bss;
+#endif
+pte_t kasan_zero_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+static __init void *early_alloc(size_t size, int node)
+{
+ return memblock_virt_alloc_try_nid(size, size, __pa(MAX_DMA_ADDRESS),
+ BOOTMEM_ALLOC_ACCESSIBLE, node);
+}
+
+static void __init zero_pte_populate(pmd_t *pmd, unsigned long addr,
+ unsigned long end)
+{
+ pte_t *pte = pte_offset_kernel(pmd, addr);
+ pte_t zero_pte;
+
+ zero_pte = pfn_pte(PFN_DOWN(__pa(kasan_zero_page)), PAGE_KERNEL);
+ zero_pte = pte_wrprotect(zero_pte);
+
+ while (addr + PAGE_SIZE <= end) {
+ set_pte_at(&init_mm, addr, pte, zero_pte);
+ addr += PAGE_SIZE;
+ pte = pte_offset_kernel(pmd, addr);
+ }
+}
+
+static void __init zero_pmd_populate(pud_t *pud, unsigned long addr,
+ unsigned long end)
+{
+ pmd_t *pmd = pmd_offset(pud, addr);
+ unsigned long next;
+
+ do {
+ next = pmd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PMD_SIZE) && end - addr >= PMD_SIZE) {
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pmd_none(*pmd)) {
+ pmd_populate_kernel(&init_mm, pmd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pte_populate(pmd, addr, next);
+ } while (pmd++, addr = next, addr != end);
+}
+
+static void __init zero_pud_populate(pgd_t *pgd, unsigned long addr,
+ unsigned long end)
+{
+ pud_t *pud = pud_offset(pgd, addr);
+ unsigned long next;
+
+ do {
+ next = pud_addr_end(addr, end);
+ if (IS_ALIGNED(addr, PUD_SIZE) && end - addr >= PUD_SIZE) {
+ pmd_t *pmd;
+
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pud_none(*pud)) {
+ pud_populate(&init_mm, pud,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pmd_populate(pud, addr, next);
+ } while (pud++, addr = next, addr != end);
+}
+
+/**
+ * kasan_populate_zero_shadow - populate shadow memory region with
+ * kasan_zero_page
+ * @shadow_start - start of the memory range to populate
+ * @shadow_end - end of the memory range to populate
+ */
+void __init kasan_populate_zero_shadow(const void *shadow_start,
+ const void *shadow_end)
+{
+ unsigned long addr = (unsigned long)shadow_start;
+ unsigned long end = (unsigned long)shadow_end;
+ pgd_t *pgd = pgd_offset_k(addr);
+ unsigned long next;
+
+ do {
+ next = pgd_addr_end(addr, end);
+
+ if (IS_ALIGNED(addr, PGDIR_SIZE) && end - addr >= PGDIR_SIZE) {
+ pud_t *pud;
+ pmd_t *pmd;
+
+ /*
+ * kasan_zero_pud should already be populated
+ * with pmds at this point.
+ * The [pud,pmd]_populate*() calls below are
+ * needed only for 3- and 2-level page tables,
+ * where there are no puds/pmds, so
+ * pgd_populate() and pud_populate() are no-ops.
+ */
+ pgd_populate(&init_mm, pgd, kasan_zero_pud);
+ pud = pud_offset(pgd, addr);
+ pud_populate(&init_mm, pud, kasan_zero_pmd);
+ pmd = pmd_offset(pud, addr);
+ pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);
+ continue;
+ }
+
+ if (pgd_none(*pgd)) {
+ pgd_populate(&init_mm, pgd,
+ early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ }
+ zero_pud_populate(pgd, addr, next);
+ } while (pgd++, addr = next, addr != end);
+}