2023-04-04 08:44:52

by Qing Zhang

Subject: [PATCH v2 4/6] kasan: Add __HAVE_ARCH_SHADOW_MAP to support arch specific mapping

On some architectures, such as LoongArch, there are many holes between
different segments, and the valid address space (256TB available) is
insufficient to map all of these segments to the KASAN shadow memory
with the common formula provided by the KASAN core. We need an
architecture-specific mapping formula: different segments are mapped
individually, and only a limited length of each segment is mapped to
shadow memory.

Therefore, when an address is converted to its shadow address, we need
to add a check to determine whether the result is valid.
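
For illustration, an arch-specific kasan_mem_to_shadow() maps each
segment separately and returns NULL outside of them; a minimal sketch,
assuming hypothetical SEG*_START/SEG*_END/SEG*_SHADOW_OFFSET macros
(not the actual LoongArch definitions):

static inline void *kasan_mem_to_shadow(const void *addr)
{
	unsigned long maddr = (unsigned long)addr;

	/* Each valid segment gets its own linear shadow mapping. */
	if (maddr >= SEG0_START && maddr < SEG0_END)
		return (void *)((maddr - SEG0_START) >> KASAN_SHADOW_SCALE_SHIFT)
		       + SEG0_SHADOW_OFFSET;
	if (maddr >= SEG1_START && maddr < SEG1_END)
		return (void *)((maddr - SEG1_START) >> KASAN_SHADOW_SCALE_SHIFT)
		       + SEG1_SHADOW_OFFSET;

	/* Holes between segments have no shadow. */
	return NULL;
}

With such a mapping, addr_has_metadata() reduces to a NULL check on the
translated address, which is exactly what the kasan.h hunk below does.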

Signed-off-by: Qing Zhang <[email protected]>
---
include/linux/kasan.h | 2 ++
mm/kasan/kasan.h | 6 ++++++
2 files changed, 8 insertions(+)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index f7ef70661ce2..3b91b941873d 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);

+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
+ KASAN_SHADOW_OFFSET;
}
+#endif

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index a61eeee3095a..033335c13b25 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -291,16 +291,22 @@ struct kasan_stack_ring {

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

+#ifndef __HAVE_ARCH_SHADOW_MAP
static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
<< KASAN_SHADOW_SCALE_SHIFT);
}
+#endif

static __always_inline bool addr_has_metadata(const void *addr)
{
+#ifdef __HAVE_ARCH_SHADOW_MAP
+ return (kasan_mem_to_shadow((void *)addr) != NULL);
+#else
return (kasan_reset_tag(addr) >=
kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
+#endif
}

/**
--
2.20.1


2023-04-04 08:45:09

by Qing Zhang

Subject: [PATCH v2 5/6] kasan: Add (pmd|pud)_init for LoongArch zero_(pud|p4d)_populate process

LoongArch populates the pmd/pud with invalid_pmd_table/invalid_pud_table
in pagetable_init, so pmd_init()/pud_init() are required. Define them as
__weak in mm/kasan/init.c, like in mm/sparse-vmemmap.c.
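
An architecture that needs the hook simply provides a strong definition
that fills the freshly allocated table with its invalid-entry pattern;
roughly like the sketch below (invalid_pte_table stands in for whatever
invalid lower-level table the architecture uses):

void __meminit pmd_init(void *addr)
{
	unsigned long *entry = (unsigned long *)addr;
	int i;

	/* Point every slot of the new PMD page at the invalid PTE table. */
	for (i = 0; i < PTRS_PER_PMD; i++)
		entry[i] = (unsigned long)invalid_pte_table;
}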

Signed-off-by: Qing Zhang <[email protected]>
---
mm/kasan/init.c | 18 ++++++++++++++----
1 file changed, 14 insertions(+), 4 deletions(-)

diff --git a/mm/kasan/init.c b/mm/kasan/init.c
index cc64ed6858c6..a7fa223b96e4 100644
--- a/mm/kasan/init.c
+++ b/mm/kasan/init.c
@@ -139,6 +139,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
return 0;
}

+void __weak __meminit pmd_init(void *addr)
+{
+}
+
static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
unsigned long end)
{
@@ -166,8 +170,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- pud_populate(&init_mm, pud,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pmd_init(p);
+ pud_populate(&init_mm, pud, p);
}
}
zero_pmd_populate(pud, addr, next);
@@ -176,6 +181,10 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
return 0;
}

+void __weak __meminit pud_init(void *addr)
+{
+}
+
static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
unsigned long end)
{
@@ -207,8 +216,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
if (!p)
return -ENOMEM;
} else {
- p4d_populate(&init_mm, p4d,
- early_alloc(PAGE_SIZE, NUMA_NO_NODE));
+ p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+ pud_init(p);
+ p4d_populate(&init_mm, p4d, p);
}
}
zero_pud_populate(p4d, addr, next);
--
2.20.1

2023-04-04 08:45:14

by Qing Zhang

Subject: [PATCH v2 6/6] LoongArch: Add ARCH_HAS_FORTIFY_SOURCE

FORTIFY_SOURCE can detect various overflows at compile time and run
time. ARCH_HAS_FORTIFY_SOURCE means that the architecture can be built
and run with CONFIG_FORTIFY_SOURCE. Select it for LoongArch.

For more about this feature, see commit 6974f0c4555e
("include/linux/string.h: add the option of fortified string.h functions").

Signed-off-by: Qing Zhang <[email protected]>
---
arch/loongarch/Kconfig | 1 +
arch/loongarch/include/asm/string.h | 4 ++++
2 files changed, 5 insertions(+)

diff --git a/arch/loongarch/Kconfig b/arch/loongarch/Kconfig
index 61f883c51045..6c525e50bb7c 100644
--- a/arch/loongarch/Kconfig
+++ b/arch/loongarch/Kconfig
@@ -11,6 +11,7 @@ config LOONGARCH
select ARCH_ENABLE_MEMORY_HOTPLUG
select ARCH_ENABLE_MEMORY_HOTREMOVE
select ARCH_HAS_ACPI_TABLE_UPGRADE if ACPI
+ select ARCH_HAS_FORTIFY_SOURCE
select ARCH_HAS_NMI_SAFE_THIS_CPU_OPS
select ARCH_HAS_PTE_SPECIAL
select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
diff --git a/arch/loongarch/include/asm/string.h b/arch/loongarch/include/asm/string.h
index a6482abdc8b3..5bb5a90d2681 100644
--- a/arch/loongarch/include/asm/string.h
+++ b/arch/loongarch/include/asm/string.h
@@ -28,6 +28,10 @@ extern void *__memmove(void *__dest, __const__ void *__src, size_t __n);
#define memcpy(dst, src, len) __memcpy(dst, src, len)
#define memmove(dst, src, len) __memmove(dst, src, len)

+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
#endif

#endif /* _ASM_STRING_H */
--
2.20.1

2023-04-04 22:18:00

by Andrey Konovalov

Subject: Re: [PATCH v2 5/6] kasan: Add (pmd|pud)_init for LoongArch zero_(pud|p4d)_populate process

On Tue, Apr 4, 2023 at 10:43 AM Qing Zhang <[email protected]> wrote:
>
> LoongArch populates the pmd/pud with invalid_pmd_table/invalid_pud_table
> in pagetable_init, so pmd_init()/pud_init() are required. Define them as
> __weak in mm/kasan/init.c, like in mm/sparse-vmemmap.c.
>
> Signed-off-by: Qing Zhang <[email protected]>
> ---
> mm/kasan/init.c | 18 ++++++++++++++----
> 1 file changed, 14 insertions(+), 4 deletions(-)
>
> diff --git a/mm/kasan/init.c b/mm/kasan/init.c
> index cc64ed6858c6..a7fa223b96e4 100644
> --- a/mm/kasan/init.c
> +++ b/mm/kasan/init.c
> @@ -139,6 +139,10 @@ static int __ref zero_pmd_populate(pud_t *pud, unsigned long addr,
> return 0;
> }
>
> +void __weak __meminit pmd_init(void *addr)
> +{
> +}
> +
> static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
> unsigned long end)
> {
> @@ -166,8 +170,9 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
> if (!p)
> return -ENOMEM;
> } else {
> - pud_populate(&init_mm, pud,
> - early_alloc(PAGE_SIZE, NUMA_NO_NODE));
> + p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
> + pmd_init(p);
> + pud_populate(&init_mm, pud, p);
> }
> }
> zero_pmd_populate(pud, addr, next);
> @@ -176,6 +181,10 @@ static int __ref zero_pud_populate(p4d_t *p4d, unsigned long addr,
> return 0;
> }
>
> +void __weak __meminit pud_init(void *addr)
> +{
> +}
> +
> static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
> unsigned long end)
> {
> @@ -207,8 +216,9 @@ static int __ref zero_p4d_populate(pgd_t *pgd, unsigned long addr,
> if (!p)
> return -ENOMEM;
> } else {
> - p4d_populate(&init_mm, p4d,
> - early_alloc(PAGE_SIZE, NUMA_NO_NODE));
> + p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
> + pud_init(p);
> + p4d_populate(&init_mm, p4d, p);
> }
> }
> zero_pud_populate(p4d, addr, next);
> --
> 2.20.1
>

Reviewed-by: Andrey Konovalov <[email protected]>

Thanks!

2023-04-04 22:18:34

by Andrey Konovalov

Subject: Re: [PATCH v2 4/6] kasan: Add __HAVE_ARCH_SHADOW_MAP to support arch specific mapping

On Tue, Apr 4, 2023 at 10:43 AM Qing Zhang <[email protected]> wrote:
>
> On some architectures, such as LoongArch, there are many holes between
> different segments, and the valid address space (256TB available) is
> insufficient to map all of these segments to the KASAN shadow memory
> with the common formula provided by the KASAN core. We need an
> architecture-specific mapping formula: different segments are mapped
> individually, and only a limited length of each segment is mapped to
> shadow memory.
>
> Therefore, when an address is converted to its shadow address, we need
> to add a check to determine whether the result is valid.
>
> Signed-off-by: Qing Zhang <[email protected]>
> ---
> include/linux/kasan.h | 2 ++
> mm/kasan/kasan.h | 6 ++++++
> 2 files changed, 8 insertions(+)
>
> diff --git a/include/linux/kasan.h b/include/linux/kasan.h
> index f7ef70661ce2..3b91b941873d 100644
> --- a/include/linux/kasan.h
> +++ b/include/linux/kasan.h
> @@ -54,11 +54,13 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
> int kasan_populate_early_shadow(const void *shadow_start,
> const void *shadow_end);
>
> +#ifndef __HAVE_ARCH_SHADOW_MAP
> static inline void *kasan_mem_to_shadow(const void *addr)
> {
> return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
> + KASAN_SHADOW_OFFSET;
> }
> +#endif
>
> int kasan_add_zero_shadow(void *start, unsigned long size);
> void kasan_remove_zero_shadow(void *start, unsigned long size);
> diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
> index a61eeee3095a..033335c13b25 100644
> --- a/mm/kasan/kasan.h
> +++ b/mm/kasan/kasan.h
> @@ -291,16 +291,22 @@ struct kasan_stack_ring {
>
> #if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
>
> +#ifndef __HAVE_ARCH_SHADOW_MAP
> static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
> {
> return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
> << KASAN_SHADOW_SCALE_SHIFT);
> }
> +#endif
>
> static __always_inline bool addr_has_metadata(const void *addr)
> {
> +#ifdef __HAVE_ARCH_SHADOW_MAP
> + return (kasan_mem_to_shadow((void *)addr) != NULL);
> +#else
> return (kasan_reset_tag(addr) >=
> kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
> +#endif
> }
>
> /**
> --
> 2.20.1
>

Reviewed-by: Andrey Konovalov <[email protected]>

Thanks!